1 | /* |
2 | * offload engine driver for the Marvell XOR engine | |
3 | * Copyright (C) 2007, 2008, Marvell International Ltd. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms and conditions of the GNU General Public License, | |
7 | * version 2, as published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope it will be useful, but WITHOUT | |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
12 | * more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License along with | |
15 | * this program; if not, write to the Free Software Foundation, Inc., | |
16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | |
17 | */ | |
18 | ||
19 | #include <linux/init.h> | |
20 | #include <linux/module.h> | |
21 | #include <linux/delay.h> |
22 | #include <linux/dma-mapping.h> | |
23 | #include <linux/spinlock.h> | |
24 | #include <linux/interrupt.h> | |
25 | #include <linux/platform_device.h> | |
26 | #include <linux/memory.h> | |
27 | #include <plat/mv_xor.h> |
28 | #include "mv_xor.h" |
29 | ||
30 | static void mv_xor_issue_pending(struct dma_chan *chan); | |
31 | ||
32 | #define to_mv_xor_chan(chan) \ | |
33 | container_of(chan, struct mv_xor_chan, common) | |
34 | ||
35 | #define to_mv_xor_device(dev) \ | |
36 | container_of(dev, struct mv_xor_device, common) | |
37 | ||
38 | #define to_mv_xor_slot(tx) \ | |
39 | container_of(tx, struct mv_xor_desc_slot, async_tx) | |
40 | ||
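/*
 * A note on mv_desc_init() below: bit 31 of the hardware status word
 * appears to mark the descriptor as owned by the XOR engine, and bit 31
 * of the command word requests an end-of-descriptor interrupt. This is an
 * interpretation of how the driver uses these bits, not a statement taken
 * from the hardware documentation.
 */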
41 | static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags) | |
42 | { | |
43 | struct mv_xor_desc *hw_desc = desc->hw_desc; | |
44 | ||
45 | hw_desc->status = (1 << 31); | |
46 | hw_desc->phy_next_desc = 0; | |
47 | hw_desc->desc_command = (1 << 31); | |
48 | } | |
49 | ||
50 | static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc) | |
51 | { | |
52 | struct mv_xor_desc *hw_desc = desc->hw_desc; | |
53 | return hw_desc->phy_dest_addr; | |
54 | } | |
55 | ||
56 | static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc, | |
57 | int src_idx) | |
58 | { | |
59 | struct mv_xor_desc *hw_desc = desc->hw_desc; | |
60 | return hw_desc->phy_src_addr[src_idx]; | |
61 | } | |
62 | ||
63 | ||
64 | static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, | |
65 | u32 byte_count) | |
66 | { | |
67 | struct mv_xor_desc *hw_desc = desc->hw_desc; | |
68 | hw_desc->byte_count = byte_count; | |
69 | } | |
70 | ||
71 | static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc, | |
72 | u32 next_desc_addr) | |
73 | { | |
74 | struct mv_xor_desc *hw_desc = desc->hw_desc; | |
75 | BUG_ON(hw_desc->phy_next_desc); | |
76 | hw_desc->phy_next_desc = next_desc_addr; | |
77 | } | |
78 | ||
79 | static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc) | |
80 | { | |
81 | struct mv_xor_desc *hw_desc = desc->hw_desc; | |
82 | hw_desc->phy_next_desc = 0; | |
83 | } | |
84 | ||
85 | static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val) | |
86 | { | |
87 | desc->value = val; | |
88 | } | |
89 | ||
90 | static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc, | |
91 | dma_addr_t addr) | |
92 | { | |
93 | struct mv_xor_desc *hw_desc = desc->hw_desc; | |
94 | hw_desc->phy_dest_addr = addr; | |
95 | } | |
96 | ||
97 | static int mv_chan_memset_slot_count(size_t len) | |
98 | { | |
99 | return 1; | |
100 | } | |
101 | ||
102 | #define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c) | |
103 | ||
104 | static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc, | |
105 | int index, dma_addr_t addr) | |
106 | { | |
107 | struct mv_xor_desc *hw_desc = desc->hw_desc; | |
108 | hw_desc->phy_src_addr[index] = addr; | |
109 | if (desc->type == DMA_XOR) | |
110 | hw_desc->desc_command |= (1 << index); | |
111 | } | |
112 | ||
113 | static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan) | |
114 | { | |
115 | return __raw_readl(XOR_CURR_DESC(chan)); | |
116 | } | |
117 | ||
118 | static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan, | |
119 | u32 next_desc_addr) | |
120 | { | |
121 | __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan)); | |
122 | } | |
123 | ||
124 | static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr) | |
125 | { | |
126 | __raw_writel(desc_addr, XOR_DEST_POINTER(chan)); | |
127 | } | |
128 | ||
129 | static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size) | |
130 | { | |
131 | __raw_writel(block_size, XOR_BLOCK_SIZE(chan)); | |
132 | } | |
133 | ||
134 | static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value) | |
135 | { | |
136 | __raw_writel(value, XOR_INIT_VALUE_LOW(chan)); | |
137 | __raw_writel(value, XOR_INIT_VALUE_HIGH(chan)); | |
138 | } | |
139 | ||
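/*
 * The interrupt cause/mask registers are shared by the channels of one
 * XOR engine; each channel owns a 16-bit field at bit offset
 * (chan->idx * 16), which is why the helpers below shift and mask by
 * that amount.
 */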
140 | static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan) | |
141 | { | |
142 | u32 val = __raw_readl(XOR_INTR_MASK(chan)); | |
143 | val |= XOR_INTR_MASK_VALUE << (chan->idx * 16); | |
144 | __raw_writel(val, XOR_INTR_MASK(chan)); | |
145 | } | |
146 | ||
147 | static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan) | |
148 | { | |
149 | u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan)); | |
150 | intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF; | |
151 | return intr_cause; | |
152 | } | |
153 | ||
154 | static int mv_is_err_intr(u32 intr_cause) | |
155 | { | |
156 | if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9))) | |
157 | return 1; | |
158 | ||
159 | return 0; | |
160 | } | |
161 | ||
162 | static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) | |
163 | { | |
164 | u32 val = (1 << (1 + (chan->idx * 16))); | |
165 | dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); | |
166 | __raw_writel(val, XOR_INTR_CAUSE(chan)); | |
167 | } | |
168 | ||
169 | static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan) | |
170 | { | |
171 | u32 val = 0xFFFF0000 >> (chan->idx * 16); | |
172 | __raw_writel(val, XOR_INTR_CAUSE(chan)); | |
173 | } | |
174 | ||
175 | static int mv_can_chain(struct mv_xor_desc_slot *desc) | |
176 | { | |
177 | struct mv_xor_desc_slot *chain_old_tail = list_entry( | |
178 | desc->chain_node.prev, struct mv_xor_desc_slot, chain_node); | |
179 | ||
180 | if (chain_old_tail->type != desc->type) | |
181 | return 0; | |
182 | if (desc->type == DMA_MEMSET) | |
183 | return 0; | |
184 | ||
185 | return 1; | |
186 | } | |
187 | ||
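/*
 * mv_set_mode() below selects the operation (XOR, memcpy or memset) by
 * rewriting the low three bits of the channel CONFIG register; the rest
 * of the register contents is preserved.
 */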
188 | static void mv_set_mode(struct mv_xor_chan *chan, | |
189 | enum dma_transaction_type type) | |
190 | { | |
191 | u32 op_mode; | |
192 | u32 config = __raw_readl(XOR_CONFIG(chan)); | |
193 | ||
194 | switch (type) { | |
195 | case DMA_XOR: | |
196 | op_mode = XOR_OPERATION_MODE_XOR; | |
197 | break; | |
198 | case DMA_MEMCPY: | |
199 | op_mode = XOR_OPERATION_MODE_MEMCPY; | |
200 | break; | |
201 | case DMA_MEMSET: | |
202 | op_mode = XOR_OPERATION_MODE_MEMSET; | |
203 | break; | |
204 | default: | |
205 | dev_printk(KERN_ERR, chan->device->common.dev, | |
206 | "error: unsupported operation %d.\n", | |
207 | type); | |
208 | BUG(); | |
209 | return; | |
210 | } | |
211 | ||
212 | config &= ~0x7; | |
213 | config |= op_mode; | |
214 | __raw_writel(config, XOR_CONFIG(chan)); | |
215 | chan->current_type = type; | |
216 | } | |
217 | ||
218 | static void mv_chan_activate(struct mv_xor_chan *chan) | |
219 | { | |
220 | u32 activation; | |
221 | ||
222 | dev_dbg(chan->device->common.dev, " activate chan.\n"); | |
223 | activation = __raw_readl(XOR_ACTIVATION(chan)); | |
224 | activation |= 0x1; | |
225 | __raw_writel(activation, XOR_ACTIVATION(chan)); | |
226 | } | |
227 | ||
228 | static char mv_chan_is_busy(struct mv_xor_chan *chan) | |
229 | { | |
230 | u32 state = __raw_readl(XOR_ACTIVATION(chan)); | |
231 | ||
232 | state = (state >> 4) & 0x3; | |
233 | ||
234 | return (state == 1) ? 1 : 0; | |
235 | } | |
236 | ||
237 | static int mv_chan_xor_slot_count(size_t len, int src_cnt) | |
238 | { | |
239 | return 1; | |
240 | } | |
241 | ||
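/*
 * Both slot-count helpers above return 1: a single Marvell XOR hardware
 * descriptor covers an entire memcpy/memset/XOR operation (bounded by
 * MV_XOR_MAX_BYTE_COUNT), so software slots map 1:1 onto operations here.
 */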
242 | /** | |
243 | * mv_xor_free_slots - flags descriptor slots for reuse | |
244 | * @slot: Slot to free | |
245 | * Caller must hold &mv_chan->lock while calling this function | |
246 | */ | |
247 | static void mv_xor_free_slots(struct mv_xor_chan *mv_chan, | |
248 | struct mv_xor_desc_slot *slot) | |
249 | { | |
250 | dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n", | |
251 | __func__, __LINE__, slot); | |
252 | ||
253 | slot->slots_per_op = 0; | |
254 | ||
255 | } | |
256 | ||
257 | /* | |
258 | * mv_xor_start_new_chain - program the engine to operate on new chain headed by | |
259 | * sw_desc | |
260 | * Caller must hold &mv_chan->lock while calling this function | |
261 | */ | |
262 | static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan, | |
263 | struct mv_xor_desc_slot *sw_desc) | |
264 | { | |
265 | dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n", | |
266 | __func__, __LINE__, sw_desc); | |
267 | if (sw_desc->type != mv_chan->current_type) | |
268 | mv_set_mode(mv_chan, sw_desc->type); | |
269 | ||
270 | if (sw_desc->type == DMA_MEMSET) { | |
271 | /* for memset requests we need to program the engine, no | |
272 | * descriptors used. | |
273 | */ | |
274 | struct mv_xor_desc *hw_desc = sw_desc->hw_desc; | |
275 | mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr); | |
276 | mv_chan_set_block_size(mv_chan, sw_desc->unmap_len); | |
277 | mv_chan_set_value(mv_chan, sw_desc->value); | |
278 | } else { | |
279 | /* set the hardware chain */ | |
280 | mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); | |
281 | } | |
282 | mv_chan->pending += sw_desc->slot_cnt; | |
283 | mv_xor_issue_pending(&mv_chan->common); | |
284 | } | |
285 | ||
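/*
 * mv_xor_run_tx_complete_actions() below finishes a descriptor: it calls
 * the client callback, unmaps the source/destination pages unless the
 * submitter set the DMA_COMPL_SKIP_*_UNMAP flags, and finally kicks any
 * dependent transactions via dma_run_dependencies().
 */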
286 | static dma_cookie_t | |
287 | mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, | |
288 | struct mv_xor_chan *mv_chan, dma_cookie_t cookie) | |
289 | { | |
290 | BUG_ON(desc->async_tx.cookie < 0); | |
291 | ||
292 | if (desc->async_tx.cookie > 0) { | |
293 | cookie = desc->async_tx.cookie; | |
294 | ||
295 | /* call the callback (must not sleep or submit new | |
296 | * operations to this channel) | |
297 | */ | |
298 | if (desc->async_tx.callback) | |
299 | desc->async_tx.callback( | |
300 | desc->async_tx.callback_param); | |
301 | ||
302 | /* unmap dma addresses | |
303 | * (unmap_single vs unmap_page?) | |
304 | */ | |
305 | if (desc->group_head && desc->unmap_len) { | |
306 | struct mv_xor_desc_slot *unmap = desc->group_head; | |
307 | struct device *dev = | |
308 | &mv_chan->device->pdev->dev; | |
309 | u32 len = unmap->unmap_len; | |
310 | enum dma_ctrl_flags flags = desc->async_tx.flags; |
311 | u32 src_cnt; | |
312 | dma_addr_t addr; | |
313 | dma_addr_t dest; |
314 | |
315 | src_cnt = unmap->unmap_src_cnt; |
316 | dest = mv_desc_get_dest_addr(unmap); | |
317 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { |
318 | enum dma_data_direction dir; |
319 | ||
320 | if (src_cnt > 1) /* is xor ? */ | |
321 | dir = DMA_BIDIRECTIONAL; | |
322 | else | |
323 | dir = DMA_FROM_DEVICE; | |
324 | dma_unmap_page(dev, dest, len, dir); | |
325 | } |
326 | ||
327 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | |
328 | while (src_cnt--) { |
329 | addr = mv_desc_get_src_addr(unmap, | |
330 | src_cnt); | |
331 | if (addr == dest) |
332 | continue; | |
333 | dma_unmap_page(dev, addr, len, |
334 | DMA_TO_DEVICE); | |
335 | } | |
336 | } |
337 | desc->group_head = NULL; | |
338 | } | |
339 | } | |
340 | ||
341 | /* run dependent operations */ | |
342 | dma_run_dependencies(&desc->async_tx); |
343 | |
344 | return cookie; | |
345 | } | |
346 | ||
347 | static int | |
348 | mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan) | |
349 | { | |
350 | struct mv_xor_desc_slot *iter, *_iter; | |
351 | ||
352 | dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__); | |
353 | list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, | |
354 | completed_node) { | |
355 | ||
356 | if (async_tx_test_ack(&iter->async_tx)) { | |
357 | list_del(&iter->completed_node); | |
358 | mv_xor_free_slots(mv_chan, iter); | |
359 | } | |
360 | } | |
361 | return 0; | |
362 | } | |
363 | ||
364 | static int | |
365 | mv_xor_clean_slot(struct mv_xor_desc_slot *desc, | |
366 | struct mv_xor_chan *mv_chan) | |
367 | { | |
368 | dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n", | |
369 | __func__, __LINE__, desc, desc->async_tx.flags); | |
370 | list_del(&desc->chain_node); | |
371 | /* the client is allowed to attach dependent operations | |
372 | * until 'ack' is set | |
373 | */ | |
374 | if (!async_tx_test_ack(&desc->async_tx)) { | |
375 | /* move this slot to the completed_slots */ | |
376 | list_add_tail(&desc->completed_node, &mv_chan->completed_slots); | |
377 | return 0; | |
378 | } | |
379 | ||
380 | mv_xor_free_slots(mv_chan, desc); | |
381 | return 0; | |
382 | } | |
383 | ||
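/*
 * __mv_xor_slot_cleanup() below walks the channel chain from the oldest
 * descriptor, completes everything up to (but not past) the descriptor
 * currently loaded in hardware, restarts the engine on the remaining
 * chain if it has gone idle, and records the newest completed cookie.
 */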
384 | static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) | |
385 | { | |
386 | struct mv_xor_desc_slot *iter, *_iter; | |
387 | dma_cookie_t cookie = 0; | |
388 | int busy = mv_chan_is_busy(mv_chan); | |
389 | u32 current_desc = mv_chan_get_current_desc(mv_chan); | |
390 | int seen_current = 0; | |
391 | ||
392 | dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__); | |
393 | dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc); | |
394 | mv_xor_clean_completed_slots(mv_chan); | |
395 | ||
396 | /* free completed slots from the chain starting with | |
397 | * the oldest descriptor | |
398 | */ | |
399 | ||
400 | list_for_each_entry_safe(iter, _iter, &mv_chan->chain, | |
401 | chain_node) { | |
402 | prefetch(_iter); | |
403 | prefetch(&_iter->async_tx); | |
404 | ||
405 | /* do not advance past the current descriptor loaded into the | |
406 | * hardware channel, subsequent descriptors are either in | |
407 | * process or have not been submitted | |
408 | */ | |
409 | if (seen_current) | |
410 | break; | |
411 | ||
412 | /* stop the search if we reach the current descriptor and the | |
413 | * channel is busy | |
414 | */ | |
415 | if (iter->async_tx.phys == current_desc) { | |
416 | seen_current = 1; | |
417 | if (busy) | |
418 | break; | |
419 | } | |
420 | ||
421 | cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie); | |
422 | ||
423 | if (mv_xor_clean_slot(iter, mv_chan)) | |
424 | break; | |
425 | } | |
426 | ||
427 | if ((busy == 0) && !list_empty(&mv_chan->chain)) { | |
428 | struct mv_xor_desc_slot *chain_head; | |
429 | chain_head = list_entry(mv_chan->chain.next, | |
430 | struct mv_xor_desc_slot, | |
431 | chain_node); | |
432 | ||
433 | mv_xor_start_new_chain(mv_chan, chain_head); | |
434 | } | |
435 | ||
436 | if (cookie > 0) | |
437 | mv_chan->completed_cookie = cookie; | |
438 | } | |
439 | ||
440 | static void | |
441 | mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) | |
442 | { | |
443 | spin_lock_bh(&mv_chan->lock); | |
444 | __mv_xor_slot_cleanup(mv_chan); | |
445 | spin_unlock_bh(&mv_chan->lock); | |
446 | } | |
447 | ||
448 | static void mv_xor_tasklet(unsigned long data) | |
449 | { | |
450 | struct mv_xor_chan *chan = (struct mv_xor_chan *) data; | |
451 | __mv_xor_slot_cleanup(chan); | |
452 | } | |
453 | ||
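/*
 * mv_xor_alloc_slots() below searches all_slots circularly, starting at
 * the slot after last_used; if no free run is found it retries once from
 * the head of the list, and as a last resort schedules the cleanup
 * tasklet so completed slots can be reclaimed.
 */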
454 | static struct mv_xor_desc_slot * | |
455 | mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots, | |
456 | int slots_per_op) | |
457 | { | |
458 | struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL; | |
459 | LIST_HEAD(chain); | |
460 | int slots_found, retry = 0; | |
461 | ||
462 | /* start search from the last allocated descriptor; | |
463 | * if a contiguous allocation cannot be found, start searching | |
464 | * from the beginning of the list | |
465 | */ | |
466 | retry: | |
467 | slots_found = 0; | |
468 | if (retry == 0) | |
469 | iter = mv_chan->last_used; | |
470 | else | |
471 | iter = list_entry(&mv_chan->all_slots, | |
472 | struct mv_xor_desc_slot, | |
473 | slot_node); | |
474 | ||
475 | list_for_each_entry_safe_continue( | |
476 | iter, _iter, &mv_chan->all_slots, slot_node) { | |
477 | prefetch(_iter); | |
478 | prefetch(&_iter->async_tx); | |
479 | if (iter->slots_per_op) { | |
480 | /* give up after finding the first busy slot | |
481 | * on the second pass through the list | |
482 | */ | |
483 | if (retry) | |
484 | break; | |
485 | ||
486 | slots_found = 0; | |
487 | continue; | |
488 | } | |
489 | ||
490 | /* start the allocation if the slot is correctly aligned */ | |
491 | if (!slots_found++) | |
492 | alloc_start = iter; | |
493 | ||
494 | if (slots_found == num_slots) { | |
495 | struct mv_xor_desc_slot *alloc_tail = NULL; | |
496 | struct mv_xor_desc_slot *last_used = NULL; | |
497 | iter = alloc_start; | |
498 | while (num_slots) { | |
499 | int i; | |
500 | ||
501 | /* pre-ack all but the last descriptor */ | |
502 | async_tx_ack(&iter->async_tx); | |
503 | ||
504 | list_add_tail(&iter->chain_node, &chain); | |
505 | alloc_tail = iter; | |
506 | iter->async_tx.cookie = 0; | |
507 | iter->slot_cnt = num_slots; | |
508 | iter->xor_check_result = NULL; | |
509 | for (i = 0; i < slots_per_op; i++) { | |
510 | iter->slots_per_op = slots_per_op - i; | |
511 | last_used = iter; | |
512 | iter = list_entry(iter->slot_node.next, | |
513 | struct mv_xor_desc_slot, | |
514 | slot_node); | |
515 | } | |
516 | num_slots -= slots_per_op; | |
517 | } | |
518 | alloc_tail->group_head = alloc_start; | |
519 | alloc_tail->async_tx.cookie = -EBUSY; | |
520 | list_splice(&chain, &alloc_tail->async_tx.tx_list); | |
521 | mv_chan->last_used = last_used; | |
522 | mv_desc_clear_next_desc(alloc_start); | |
523 | mv_desc_clear_next_desc(alloc_tail); | |
524 | return alloc_tail; | |
525 | } | |
526 | } | |
527 | if (!retry++) | |
528 | goto retry; | |
529 | ||
530 | /* try to free some slots if the allocation fails */ | |
531 | tasklet_schedule(&mv_chan->irq_tasklet); | |
532 | ||
533 | return NULL; | |
534 | } | |
535 | ||
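/*
 * Cookies handed back to dmaengine clients are strictly positive and
 * monotonically increasing; mv_desc_assign_cookie() below wraps back to 1
 * on overflow, since zero and negative values are reserved for
 * "not submitted" / error states.
 */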
536 | static dma_cookie_t | |
537 | mv_desc_assign_cookie(struct mv_xor_chan *mv_chan, | |
538 | struct mv_xor_desc_slot *desc) | |
539 | { | |
540 | dma_cookie_t cookie = mv_chan->common.cookie; | |
541 | ||
542 | if (++cookie < 0) | |
543 | cookie = 1; | |
544 | mv_chan->common.cookie = desc->async_tx.cookie = cookie; | |
545 | return cookie; | |
546 | } | |
547 | ||
548 | /************************ DMA engine API functions ****************************/ | |
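/*
 * mv_xor_tx_submit() below appends the new descriptor chain to the
 * channel's software chain; the hardware is (re)started only when the
 * chain was previously empty, or when the engine is idle and stopped
 * exactly on the old chain tail (otherwise the appended descriptors
 * would never be fetched).
 */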
549 | static dma_cookie_t | |
550 | mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) | |
551 | { | |
552 | struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx); | |
553 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan); | |
554 | struct mv_xor_desc_slot *grp_start, *old_chain_tail; | |
555 | dma_cookie_t cookie; | |
556 | int new_hw_chain = 1; | |
557 | ||
558 | dev_dbg(mv_chan->device->common.dev, | |
559 | "%s sw_desc %p: async_tx %p\n", | |
560 | __func__, sw_desc, &sw_desc->async_tx); | |
561 | ||
562 | grp_start = sw_desc->group_head; | |
563 | ||
564 | spin_lock_bh(&mv_chan->lock); | |
565 | cookie = mv_desc_assign_cookie(mv_chan, sw_desc); | |
566 | ||
567 | if (list_empty(&mv_chan->chain)) | |
568 | list_splice_init(&sw_desc->async_tx.tx_list, &mv_chan->chain); | |
569 | else { | |
570 | new_hw_chain = 0; | |
571 | ||
572 | old_chain_tail = list_entry(mv_chan->chain.prev, | |
573 | struct mv_xor_desc_slot, | |
574 | chain_node); | |
575 | list_splice_init(&grp_start->async_tx.tx_list, | |
576 | &old_chain_tail->chain_node); | |
577 | ||
578 | if (!mv_can_chain(grp_start)) | |
579 | goto submit_done; | |
580 | ||
581 | dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n", | |
582 | old_chain_tail->async_tx.phys); | |
583 | ||
584 | /* fix up the hardware chain */ | |
585 | mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys); | |
586 | ||
587 | /* if the channel is not busy */ | |
588 | if (!mv_chan_is_busy(mv_chan)) { | |
589 | u32 current_desc = mv_chan_get_current_desc(mv_chan); | |
590 | /* | |
591 | * and the current desc is the end of the chain before | |
592 | * the append, then we need to start the channel | |
593 | */ | |
594 | if (current_desc == old_chain_tail->async_tx.phys) | |
595 | new_hw_chain = 1; | |
596 | } | |
597 | } | |
598 | ||
599 | if (new_hw_chain) | |
600 | mv_xor_start_new_chain(mv_chan, grp_start); | |
601 | ||
602 | submit_done: | |
603 | spin_unlock_bh(&mv_chan->lock); | |
604 | ||
605 | return cookie; | |
606 | } | |
607 | ||
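/*
 * The coherent descriptor pool allocated at probe time is carved into
 * MV_XOR_SLOT_SIZE chunks here; each software slot records both the
 * virtual address (hw_desc) and the DMA address (async_tx.phys) of its
 * chunk so the engine can be pointed at it directly.
 */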
608 | /* returns the number of allocated descriptors */ | |
609 | static int mv_xor_alloc_chan_resources(struct dma_chan *chan) |
610 | { |
611 | char *hw_desc; | |
612 | int idx; | |
613 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | |
614 | struct mv_xor_desc_slot *slot = NULL; | |
615 | struct mv_xor_platform_data *plat_data = | |
616 | mv_chan->device->pdev->dev.platform_data; | |
617 | int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE; | |
618 | ||
619 | /* Allocate descriptor slots */ | |
620 | idx = mv_chan->slots_allocated; | |
621 | while (idx < num_descs_in_pool) { | |
622 | slot = kzalloc(sizeof(*slot), GFP_KERNEL); | |
623 | if (!slot) { | |
624 | printk(KERN_INFO "MV XOR Channel only initialized" | |
625 | " %d descriptor slots", idx); | |
626 | break; | |
627 | } | |
628 | hw_desc = (char *) mv_chan->device->dma_desc_pool_virt; | |
629 | slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE]; | |
630 | ||
631 | dma_async_tx_descriptor_init(&slot->async_tx, chan); | |
632 | slot->async_tx.tx_submit = mv_xor_tx_submit; | |
633 | INIT_LIST_HEAD(&slot->chain_node); | |
634 | INIT_LIST_HEAD(&slot->slot_node); | |
635 | hw_desc = (char *) mv_chan->device->dma_desc_pool; |
636 | slot->async_tx.phys = | |
637 | (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; | |
638 | slot->idx = idx++; | |
639 | ||
640 | spin_lock_bh(&mv_chan->lock); | |
641 | mv_chan->slots_allocated = idx; | |
642 | list_add_tail(&slot->slot_node, &mv_chan->all_slots); | |
643 | spin_unlock_bh(&mv_chan->lock); | |
644 | } | |
645 | ||
646 | if (mv_chan->slots_allocated && !mv_chan->last_used) | |
647 | mv_chan->last_used = list_entry(mv_chan->all_slots.next, | |
648 | struct mv_xor_desc_slot, | |
649 | slot_node); | |
650 | ||
651 | dev_dbg(mv_chan->device->common.dev, | |
652 | "allocated %d descriptor slots last_used: %p\n", | |
653 | mv_chan->slots_allocated, mv_chan->last_used); | |
654 | ||
655 | return mv_chan->slots_allocated ? : -ENOMEM; | |
656 | } | |
657 | ||
658 | static struct dma_async_tx_descriptor * | |
659 | mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |
660 | size_t len, unsigned long flags) | |
661 | { | |
662 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | |
663 | struct mv_xor_desc_slot *sw_desc, *grp_start; | |
664 | int slot_cnt; | |
665 | ||
666 | dev_dbg(mv_chan->device->common.dev, | |
667 | "%s dest: %x src %x len: %u flags: %ld\n", | |
668 | __func__, dest, src, len, flags); | |
669 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) | |
670 | return NULL; | |
671 | ||
672 | BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT)); | |
673 | ||
674 | spin_lock_bh(&mv_chan->lock); | |
675 | slot_cnt = mv_chan_memcpy_slot_count(len); | |
676 | sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); | |
677 | if (sw_desc) { | |
678 | sw_desc->type = DMA_MEMCPY; | |
679 | sw_desc->async_tx.flags = flags; | |
680 | grp_start = sw_desc->group_head; | |
681 | mv_desc_init(grp_start, flags); | |
682 | mv_desc_set_byte_count(grp_start, len); | |
683 | mv_desc_set_dest_addr(sw_desc->group_head, dest); | |
684 | mv_desc_set_src_addr(grp_start, 0, src); | |
685 | sw_desc->unmap_src_cnt = 1; | |
686 | sw_desc->unmap_len = len; | |
687 | } | |
688 | spin_unlock_bh(&mv_chan->lock); | |
689 | ||
690 | dev_dbg(mv_chan->device->common.dev, | |
691 | "%s sw_desc %p async_tx %p\n", | |
692 | __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0); | |
693 | ||
694 | return sw_desc ? &sw_desc->async_tx : NULL; | |
695 | } | |
696 | ||
697 | static struct dma_async_tx_descriptor * | |
698 | mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, | |
699 | size_t len, unsigned long flags) | |
700 | { | |
701 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | |
702 | struct mv_xor_desc_slot *sw_desc, *grp_start; | |
703 | int slot_cnt; | |
704 | ||
705 | dev_dbg(mv_chan->device->common.dev, | |
706 | "%s dest: %x len: %u flags: %ld\n", | |
707 | __func__, dest, len, flags); | |
708 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) | |
709 | return NULL; | |
710 | ||
711 | BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT)); | |
712 | ||
713 | spin_lock_bh(&mv_chan->lock); | |
714 | slot_cnt = mv_chan_memset_slot_count(len); | |
715 | sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); | |
716 | if (sw_desc) { | |
717 | sw_desc->type = DMA_MEMSET; | |
718 | sw_desc->async_tx.flags = flags; | |
719 | grp_start = sw_desc->group_head; | |
720 | mv_desc_init(grp_start, flags); | |
721 | mv_desc_set_byte_count(grp_start, len); | |
722 | mv_desc_set_dest_addr(sw_desc->group_head, dest); | |
723 | mv_desc_set_block_fill_val(grp_start, value); | |
724 | sw_desc->unmap_src_cnt = 1; | |
725 | sw_desc->unmap_len = len; | |
726 | } | |
727 | spin_unlock_bh(&mv_chan->lock); | |
728 | dev_dbg(mv_chan->device->common.dev, | |
729 | "%s sw_desc %p async_tx %p\n", | |
730 | __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL); | |
731 | return sw_desc ? &sw_desc->async_tx : NULL; | |
732 | } | |
733 | ||
734 | static struct dma_async_tx_descriptor * | |
735 | mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | |
736 | unsigned int src_cnt, size_t len, unsigned long flags) | |
737 | { | |
738 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | |
739 | struct mv_xor_desc_slot *sw_desc, *grp_start; | |
740 | int slot_cnt; | |
741 | ||
742 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) | |
743 | return NULL; | |
744 | ||
745 | BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT)); | |
746 | ||
747 | dev_dbg(mv_chan->device->common.dev, | |
748 | "%s src_cnt: %d len: %u dest %x flags: %ld\n", | |
749 | __func__, src_cnt, len, dest, flags); | |
750 | ||
751 | spin_lock_bh(&mv_chan->lock); | |
752 | slot_cnt = mv_chan_xor_slot_count(len, src_cnt); | |
753 | sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); | |
754 | if (sw_desc) { | |
755 | sw_desc->type = DMA_XOR; | |
756 | sw_desc->async_tx.flags = flags; | |
757 | grp_start = sw_desc->group_head; | |
758 | mv_desc_init(grp_start, flags); | |
759 | /* the byte count field is the same as in memcpy desc*/ | |
760 | mv_desc_set_byte_count(grp_start, len); | |
761 | mv_desc_set_dest_addr(sw_desc->group_head, dest); | |
762 | sw_desc->unmap_src_cnt = src_cnt; | |
763 | sw_desc->unmap_len = len; | |
764 | while (src_cnt--) | |
765 | mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]); | |
766 | } | |
767 | spin_unlock_bh(&mv_chan->lock); | |
768 | dev_dbg(mv_chan->device->common.dev, | |
769 | "%s sw_desc %p async_tx %p\n", | |
770 | __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL); | |
771 | return sw_desc ? &sw_desc->async_tx : NULL; | |
772 | } | |
773 | ||
774 | static void mv_xor_free_chan_resources(struct dma_chan *chan) | |
775 | { | |
776 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | |
777 | struct mv_xor_desc_slot *iter, *_iter; | |
778 | int in_use_descs = 0; | |
779 | ||
780 | mv_xor_slot_cleanup(mv_chan); | |
781 | ||
782 | spin_lock_bh(&mv_chan->lock); | |
783 | list_for_each_entry_safe(iter, _iter, &mv_chan->chain, | |
784 | chain_node) { | |
785 | in_use_descs++; | |
786 | list_del(&iter->chain_node); | |
787 | } | |
788 | list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, | |
789 | completed_node) { | |
790 | in_use_descs++; | |
791 | list_del(&iter->completed_node); | |
792 | } | |
793 | list_for_each_entry_safe_reverse( | |
794 | iter, _iter, &mv_chan->all_slots, slot_node) { | |
795 | list_del(&iter->slot_node); | |
796 | kfree(iter); | |
797 | mv_chan->slots_allocated--; | |
798 | } | |
799 | mv_chan->last_used = NULL; | |
800 | ||
801 | dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n", | |
802 | __func__, mv_chan->slots_allocated); | |
803 | spin_unlock_bh(&mv_chan->lock); | |
804 | ||
805 | if (in_use_descs) | |
806 | dev_err(mv_chan->device->common.dev, | |
807 | "freeing %d in use descriptors!\n", in_use_descs); | |
808 | } | |
809 | ||
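/*
 * mv_xor_is_complete() below compares the requested cookie against the
 * channel's completed_cookie; if the transaction is not yet complete it
 * runs the slot cleanup and then re-reads the cookies before answering.
 */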
810 | /** | |
811 | * mv_xor_is_complete - poll the status of an XOR transaction | |
812 | * @chan: XOR channel handle | |
813 | * @cookie: XOR transaction identifier | |
814 | */ | |
815 | static enum dma_status mv_xor_is_complete(struct dma_chan *chan, | |
816 | dma_cookie_t cookie, | |
817 | dma_cookie_t *done, | |
818 | dma_cookie_t *used) | |
819 | { | |
820 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | |
821 | dma_cookie_t last_used; | |
822 | dma_cookie_t last_complete; | |
823 | enum dma_status ret; | |
824 | ||
825 | last_used = chan->cookie; | |
826 | last_complete = mv_chan->completed_cookie; | |
827 | mv_chan->is_complete_cookie = cookie; | |
828 | if (done) | |
829 | *done = last_complete; | |
830 | if (used) | |
831 | *used = last_used; | |
832 | ||
833 | ret = dma_async_is_complete(cookie, last_complete, last_used); | |
834 | if (ret == DMA_SUCCESS) { | |
835 | mv_xor_clean_completed_slots(mv_chan); | |
836 | return ret; | |
837 | } | |
838 | mv_xor_slot_cleanup(mv_chan); | |
839 | ||
840 | last_used = chan->cookie; | |
841 | last_complete = mv_chan->completed_cookie; | |
842 | ||
843 | if (done) | |
844 | *done = last_complete; | |
845 | if (used) | |
846 | *used = last_used; | |
847 | ||
848 | return dma_async_is_complete(cookie, last_complete, last_used); | |
849 | } | |
850 | ||
851 | static void mv_dump_xor_regs(struct mv_xor_chan *chan) | |
852 | { | |
853 | u32 val; | |
854 | ||
855 | val = __raw_readl(XOR_CONFIG(chan)); | |
856 | dev_printk(KERN_ERR, chan->device->common.dev, | |
857 | "config 0x%08x.\n", val); | |
858 | ||
859 | val = __raw_readl(XOR_ACTIVATION(chan)); | |
860 | dev_printk(KERN_ERR, chan->device->common.dev, | |
861 | "activation 0x%08x.\n", val); | |
862 | ||
863 | val = __raw_readl(XOR_INTR_CAUSE(chan)); | |
864 | dev_printk(KERN_ERR, chan->device->common.dev, | |
865 | "intr cause 0x%08x.\n", val); | |
866 | ||
867 | val = __raw_readl(XOR_INTR_MASK(chan)); | |
868 | dev_printk(KERN_ERR, chan->device->common.dev, | |
869 | "intr mask 0x%08x.\n", val); | |
870 | ||
871 | val = __raw_readl(XOR_ERROR_CAUSE(chan)); | |
872 | dev_printk(KERN_ERR, chan->device->common.dev, | |
873 | "error cause 0x%08x.\n", val); | |
874 | ||
875 | val = __raw_readl(XOR_ERROR_ADDR(chan)); | |
876 | dev_printk(KERN_ERR, chan->device->common.dev, | |
877 | "error addr 0x%08x.\n", val); | |
878 | } | |
879 | ||
880 | static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, | |
881 | u32 intr_cause) | |
882 | { | |
883 | if (intr_cause & (1 << 4)) { | |
884 | dev_dbg(chan->device->common.dev, | |
885 | "ignore this error\n"); | |
886 | return; | |
887 | } | |
888 | ||
889 | dev_printk(KERN_ERR, chan->device->common.dev, | |
890 | "error on chan %d. intr cause 0x%08x.\n", | |
891 | chan->idx, intr_cause); | |
892 | ||
893 | mv_dump_xor_regs(chan); | |
894 | BUG(); | |
895 | } | |
896 | ||
897 | static irqreturn_t mv_xor_interrupt_handler(int irq, void *data) | |
898 | { | |
899 | struct mv_xor_chan *chan = data; | |
900 | u32 intr_cause = mv_chan_get_intr_cause(chan); | |
901 | ||
902 | dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause); | |
903 | ||
904 | if (mv_is_err_intr(intr_cause)) | |
905 | mv_xor_err_interrupt_handler(chan, intr_cause); | |
906 | ||
907 | tasklet_schedule(&chan->irq_tasklet); | |
908 | ||
909 | mv_xor_device_clear_eoc_cause(chan); | |
910 | ||
911 | return IRQ_HANDLED; | |
912 | } | |
913 | ||
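/*
 * mv_xor_issue_pending() below only activates the engine once at least
 * MV_XOR_THRESHOLD descriptors are pending; mv_xor_start_new_chain()
 * bumps ->pending and calls it for every new chain.
 */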
914 | static void mv_xor_issue_pending(struct dma_chan *chan) | |
915 | { | |
916 | struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); | |
917 | ||
918 | if (mv_chan->pending >= MV_XOR_THRESHOLD) { | |
919 | mv_chan->pending = 0; | |
920 | mv_chan_activate(mv_chan); | |
921 | } | |
922 | } | |
923 | ||
924 | /* | |
925 | * Perform a transaction to verify the HW works. | |
926 | */ | |
927 | #define MV_XOR_TEST_SIZE 2000 | |
928 | ||
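/*
 * The self-test below exercises the normal dmaengine path end to end: it
 * maps a source and a destination buffer, prepares and submits a memcpy
 * descriptor, waits briefly, polls completion with mv_xor_is_complete()
 * and finally compares the two buffers.
 */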
929 | static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device) | |
930 | { | |
931 | int i; | |
932 | void *src, *dest; | |
933 | dma_addr_t src_dma, dest_dma; | |
934 | struct dma_chan *dma_chan; | |
935 | dma_cookie_t cookie; | |
936 | struct dma_async_tx_descriptor *tx; | |
937 | int err = 0; | |
938 | struct mv_xor_chan *mv_chan; | |
939 | ||
940 | src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); | |
941 | if (!src) | |
942 | return -ENOMEM; | |
943 | ||
944 | dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); | |
945 | if (!dest) { | |
946 | kfree(src); | |
947 | return -ENOMEM; | |
948 | } | |
949 | ||
950 | /* Fill in src buffer */ | |
951 | for (i = 0; i < MV_XOR_TEST_SIZE; i++) | |
952 | ((u8 *) src)[i] = (u8)i; | |
953 | ||
954 | /* Start copy, using first DMA channel */ | |
955 | dma_chan = container_of(device->common.channels.next, | |
956 | struct dma_chan, | |
957 | device_node); | |
958 | if (mv_xor_alloc_chan_resources(dma_chan) < 1) { |
959 | err = -ENODEV; |
960 | goto out; | |
961 | } | |
962 | ||
963 | dest_dma = dma_map_single(dma_chan->device->dev, dest, | |
964 | MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); | |
965 | ||
966 | src_dma = dma_map_single(dma_chan->device->dev, src, | |
967 | MV_XOR_TEST_SIZE, DMA_TO_DEVICE); | |
968 | ||
969 | tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, | |
970 | MV_XOR_TEST_SIZE, 0); | |
971 | cookie = mv_xor_tx_submit(tx); | |
972 | mv_xor_issue_pending(dma_chan); | |
973 | async_tx_ack(tx); | |
974 | msleep(1); | |
975 | ||
976 | if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) != | |
977 | DMA_SUCCESS) { | |
978 | dev_printk(KERN_ERR, dma_chan->device->dev, | |
979 | "Self-test copy timed out, disabling\n"); | |
980 | err = -ENODEV; | |
981 | goto free_resources; | |
982 | } | |
983 | ||
984 | mv_chan = to_mv_xor_chan(dma_chan); | |
985 | dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma, | |
986 | MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); | |
987 | if (memcmp(src, dest, MV_XOR_TEST_SIZE)) { | |
988 | dev_printk(KERN_ERR, dma_chan->device->dev, | |
989 | "Self-test copy failed compare, disabling\n"); | |
990 | err = -ENODEV; | |
991 | goto free_resources; | |
992 | } | |
993 | ||
994 | free_resources: | |
995 | mv_xor_free_chan_resources(dma_chan); | |
996 | out: | |
997 | kfree(src); | |
998 | kfree(dest); | |
999 | return err; | |
1000 | } | |
1001 | ||
1002 | #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */ | |
1003 | static int __devinit | |
1004 | mv_xor_xor_self_test(struct mv_xor_device *device) | |
1005 | { | |
1006 | int i, src_idx; | |
1007 | struct page *dest; | |
1008 | struct page *xor_srcs[MV_XOR_NUM_SRC_TEST]; | |
1009 | dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; | |
1010 | dma_addr_t dest_dma; | |
1011 | struct dma_async_tx_descriptor *tx; | |
1012 | struct dma_chan *dma_chan; | |
1013 | dma_cookie_t cookie; | |
1014 | u8 cmp_byte = 0; | |
1015 | u32 cmp_word; | |
1016 | int err = 0; | |
1017 | struct mv_xor_chan *mv_chan; | |
1018 | ||
1019 | for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { | |
1020 | xor_srcs[src_idx] = alloc_page(GFP_KERNEL); | |
1021 | if (!xor_srcs[src_idx]) { |
1022 | while (src_idx--) | |
1023 | __free_page(xor_srcs[src_idx]); |
1024 | return -ENOMEM; |
1025 | } | |
1026 | } |
1027 | ||
1028 | dest = alloc_page(GFP_KERNEL); | |
1029 | if (!dest) { |
1030 | while (src_idx--) | |
1031 | __free_page(xor_srcs[src_idx]); |
1032 | return -ENOMEM; |
1033 | } | |
1034 | |
1035 | /* Fill in src buffers */ | |
1036 | for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { | |
1037 | u8 *ptr = page_address(xor_srcs[src_idx]); | |
1038 | for (i = 0; i < PAGE_SIZE; i++) | |
1039 | ptr[i] = (1 << src_idx); | |
1040 | } | |
1041 | ||
1042 | for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) | |
1043 | cmp_byte ^= (u8) (1 << src_idx); | |
1044 | ||
1045 | cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | | |
1046 | (cmp_byte << 8) | cmp_byte; | |
1047 | ||
1048 | memset(page_address(dest), 0, PAGE_SIZE); | |
1049 | ||
1050 | dma_chan = container_of(device->common.channels.next, | |
1051 | struct dma_chan, | |
1052 | device_node); | |
1053 | if (mv_xor_alloc_chan_resources(dma_chan) < 1) { |
1054 | err = -ENODEV; |
1055 | goto out; | |
1056 | } | |
1057 | ||
1058 | /* test xor */ | |
1059 | dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, | |
1060 | DMA_FROM_DEVICE); | |
1061 | ||
1062 | for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++) | |
1063 | dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], | |
1064 | 0, PAGE_SIZE, DMA_TO_DEVICE); | |
1065 | ||
1066 | tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, | |
1067 | MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0); | |
1068 | ||
1069 | cookie = mv_xor_tx_submit(tx); | |
1070 | mv_xor_issue_pending(dma_chan); | |
1071 | async_tx_ack(tx); | |
1072 | msleep(8); | |
1073 | ||
1074 | if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) != | |
1075 | DMA_SUCCESS) { | |
1076 | dev_printk(KERN_ERR, dma_chan->device->dev, | |
1077 | "Self-test xor timed out, disabling\n"); | |
1078 | err = -ENODEV; | |
1079 | goto free_resources; | |
1080 | } | |
1081 | ||
1082 | mv_chan = to_mv_xor_chan(dma_chan); | |
1083 | dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma, | |
1084 | PAGE_SIZE, DMA_FROM_DEVICE); | |
1085 | for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { | |
1086 | u32 *ptr = page_address(dest); | |
1087 | if (ptr[i] != cmp_word) { | |
1088 | dev_printk(KERN_ERR, dma_chan->device->dev, | |
1089 | "Self-test xor failed compare, disabling." | |
1090 | " index %d, data %x, expected %x\n", i, | |
1091 | ptr[i], cmp_word); | |
1092 | err = -ENODEV; | |
1093 | goto free_resources; | |
1094 | } | |
1095 | } | |
1096 | ||
1097 | free_resources: | |
1098 | mv_xor_free_chan_resources(dma_chan); | |
1099 | out: | |
1100 | src_idx = MV_XOR_NUM_SRC_TEST; | |
1101 | while (src_idx--) | |
1102 | __free_page(xor_srcs[src_idx]); | |
1103 | __free_page(dest); | |
1104 | return err; | |
1105 | } | |
1106 | ||
1107 | static int __devexit mv_xor_remove(struct platform_device *dev) | |
1108 | { | |
1109 | struct mv_xor_device *device = platform_get_drvdata(dev); | |
1110 | struct dma_chan *chan, *_chan; | |
1111 | struct mv_xor_chan *mv_chan; | |
1112 | struct mv_xor_platform_data *plat_data = dev->dev.platform_data; | |
1113 | ||
1114 | dma_async_device_unregister(&device->common); | |
1115 | ||
1116 | dma_free_coherent(&dev->dev, plat_data->pool_size, | |
1117 | device->dma_desc_pool_virt, device->dma_desc_pool); | |
1118 | ||
1119 | list_for_each_entry_safe(chan, _chan, &device->common.channels, | |
1120 | device_node) { | |
1121 | mv_chan = to_mv_xor_chan(chan); | |
1122 | list_del(&chan->device_node); | |
1123 | } | |
1124 | ||
1125 | return 0; | |
1126 | } | |
1127 | ||
1128 | static int __devinit mv_xor_probe(struct platform_device *pdev) | |
1129 | { | |
1130 | int ret = 0; | |
1131 | int irq; | |
1132 | struct mv_xor_device *adev; | |
1133 | struct mv_xor_chan *mv_chan; | |
1134 | struct dma_device *dma_dev; | |
1135 | struct mv_xor_platform_data *plat_data = pdev->dev.platform_data; | |
1136 | ||
1137 | ||
1138 | adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL); | |
1139 | if (!adev) | |
1140 | return -ENOMEM; | |
1141 | ||
1142 | dma_dev = &adev->common; | |
1143 | ||
1144 | /* allocate coherent memory for hardware descriptors | |
1145 | * note: writecombine gives slightly better performance, but | |
1146 | * requires that we explicitly flush the writes | |
1147 | */ | |
1148 | adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev, | |
1149 | plat_data->pool_size, | |
1150 | &adev->dma_desc_pool, | |
1151 | GFP_KERNEL); | |
1152 | if (!adev->dma_desc_pool_virt) | |
1153 | return -ENOMEM; | |
1154 | ||
1155 | adev->id = plat_data->hw_id; | |
1156 | ||
1157 | /* discover transaction capabilities from the platform data */ | |
1158 | dma_dev->cap_mask = plat_data->cap_mask; | |
1159 | adev->pdev = pdev; | |
1160 | platform_set_drvdata(pdev, adev); | |
1161 | ||
1162 | adev->shared = platform_get_drvdata(plat_data->shared); | |
1163 | ||
1164 | INIT_LIST_HEAD(&dma_dev->channels); | |
1165 | ||
1166 | /* set base routines */ | |
1167 | dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources; | |
1168 | dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; | |
1169 | dma_dev->device_is_tx_complete = mv_xor_is_complete; | |
1170 | dma_dev->device_issue_pending = mv_xor_issue_pending; | |
1171 | dma_dev->dev = &pdev->dev; | |
1172 | ||
1173 | /* set prep routines based on capability */ | |
1174 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) | |
1175 | dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; | |
1176 | if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) | |
1177 | dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset; | |
1178 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { | |
1179 | dma_dev->max_xor = 8; |
1180 | dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; |
1181 | } | |
1182 | ||
1183 | mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL); | |
1184 | if (!mv_chan) { | |
1185 | ret = -ENOMEM; | |
1186 | goto err_free_dma; | |
1187 | } | |
1188 | mv_chan->device = adev; | |
1189 | mv_chan->idx = plat_data->hw_id; | |
1190 | mv_chan->mmr_base = adev->shared->xor_base; | |
1191 | ||
1192 | if (!mv_chan->mmr_base) { | |
1193 | ret = -ENOMEM; | |
1194 | goto err_free_dma; | |
1195 | } | |
1196 | tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long) | |
1197 | mv_chan); | |
1198 | ||
1199 | /* clear errors before enabling interrupts */ | |
1200 | mv_xor_device_clear_err_status(mv_chan); | |
1201 | ||
1202 | irq = platform_get_irq(pdev, 0); | |
1203 | if (irq < 0) { | |
1204 | ret = irq; | |
1205 | goto err_free_dma; | |
1206 | } | |
1207 | ret = devm_request_irq(&pdev->dev, irq, | |
1208 | mv_xor_interrupt_handler, | |
1209 | 0, dev_name(&pdev->dev), mv_chan); | |
1210 | if (ret) | |
1211 | goto err_free_dma; | |
1212 | ||
1213 | mv_chan_unmask_interrupts(mv_chan); | |
1214 | ||
1215 | mv_set_mode(mv_chan, DMA_MEMCPY); | |
1216 | ||
1217 | spin_lock_init(&mv_chan->lock); | |
1218 | INIT_LIST_HEAD(&mv_chan->chain); | |
1219 | INIT_LIST_HEAD(&mv_chan->completed_slots); | |
1220 | INIT_LIST_HEAD(&mv_chan->all_slots); | |
1221 | mv_chan->common.device = dma_dev; |
1222 | ||
1223 | list_add_tail(&mv_chan->common.device_node, &dma_dev->channels); | |
1224 | ||
1225 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { | |
1226 | ret = mv_xor_memcpy_self_test(adev); | |
1227 | dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret); | |
1228 | if (ret) | |
1229 | goto err_free_dma; | |
1230 | } | |
1231 | ||
1232 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { | |
1233 | ret = mv_xor_xor_self_test(adev); | |
1234 | dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); | |
1235 | if (ret) | |
1236 | goto err_free_dma; | |
1237 | } | |
1238 | ||
1239 | dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: " | |
1240 | "( %s%s%s%s)\n", | |
1241 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", | |
1242 | dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", | |
1243 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", | |
1244 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); | |
1245 | ||
1246 | dma_async_device_register(dma_dev); | |
1247 | goto out; | |
1248 | ||
1249 | err_free_dma: | |
1250 | dma_free_coherent(&adev->pdev->dev, plat_data->pool_size, | |
1251 | adev->dma_desc_pool_virt, adev->dma_desc_pool); | |
1252 | out: | |
1253 | return ret; | |
1254 | } | |
1255 | ||
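/*
 * mv_xor_conf_mbus_windows() below first disables all eight address
 * decoding windows, then programs one window per DRAM chip-select and
 * sets the corresponding enable bits; the "3 << (16 + 2 * i)" term
 * presumably grants full read/write access for that window (an
 * interpretation of the register layout, not taken from the datasheet).
 */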
1256 | static void | |
1257 | mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp, | |
1258 | struct mbus_dram_target_info *dram) | |
1259 | { | |
1260 | void __iomem *base = msp->xor_base; | |
1261 | u32 win_enable = 0; | |
1262 | int i; | |
1263 | ||
1264 | for (i = 0; i < 8; i++) { | |
1265 | writel(0, base + WINDOW_BASE(i)); | |
1266 | writel(0, base + WINDOW_SIZE(i)); | |
1267 | if (i < 4) | |
1268 | writel(0, base + WINDOW_REMAP_HIGH(i)); | |
1269 | } | |
1270 | ||
1271 | for (i = 0; i < dram->num_cs; i++) { | |
1272 | struct mbus_dram_window *cs = dram->cs + i; | |
1273 | ||
1274 | writel((cs->base & 0xffff0000) | | |
1275 | (cs->mbus_attr << 8) | | |
1276 | dram->mbus_dram_target_id, base + WINDOW_BASE(i)); | |
1277 | writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); | |
1278 | ||
1279 | win_enable |= (1 << i); | |
1280 | win_enable |= 3 << (16 + (2 * i)); | |
1281 | } | |
1282 | ||
1283 | writel(win_enable, base + WINDOW_BAR_ENABLE(0)); | |
1284 | writel(win_enable, base + WINDOW_BAR_ENABLE(1)); | |
1285 | } | |
1286 | ||
1287 | static struct platform_driver mv_xor_driver = { | |
1288 | .probe = mv_xor_probe, | |
1289 | .remove = __devexit_p(mv_xor_remove), |
1290 | .driver = { |
1291 | .owner = THIS_MODULE, | |
1292 | .name = MV_XOR_NAME, | |
1293 | }, | |
1294 | }; | |
1295 | ||
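/*
 * The driver is split in two: mv_xor_shared (below) binds to the XOR
 * engine's register resources and, if asked, programs the MBUS windows,
 * while the per-channel mv_xor driver above attaches individual channels
 * to that shared state through plat_data->shared.
 */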
1296 | static int mv_xor_shared_probe(struct platform_device *pdev) | |
1297 | { | |
1298 | struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data; | |
1299 | struct mv_xor_shared_private *msp; | |
1300 | struct resource *res; | |
1301 | ||
1302 | dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n"); | |
1303 | ||
1304 | msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL); | |
1305 | if (!msp) | |
1306 | return -ENOMEM; | |
1307 | ||
1308 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1309 | if (!res) | |
1310 | return -ENODEV; | |
1311 | ||
1312 | msp->xor_base = devm_ioremap(&pdev->dev, res->start, | |
1313 | res->end - res->start + 1); | |
1314 | if (!msp->xor_base) | |
1315 | return -EBUSY; | |
1316 | ||
1317 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | |
1318 | if (!res) | |
1319 | return -ENODEV; | |
1320 | ||
1321 | msp->xor_high_base = devm_ioremap(&pdev->dev, res->start, | |
1322 | res->end - res->start + 1); | |
1323 | if (!msp->xor_high_base) | |
1324 | return -EBUSY; | |
1325 | ||
1326 | platform_set_drvdata(pdev, msp); | |
1327 | ||
1328 | /* | |
1329 | * (Re-)program MBUS remapping windows if we are asked to. | |
1330 | */ | |
1331 | if (msd != NULL && msd->dram != NULL) | |
1332 | mv_xor_conf_mbus_windows(msp, msd->dram); | |
1333 | ||
1334 | return 0; | |
1335 | } | |
1336 | ||
1337 | static int mv_xor_shared_remove(struct platform_device *pdev) | |
1338 | { | |
1339 | return 0; | |
1340 | } | |
1341 | ||
1342 | static struct platform_driver mv_xor_shared_driver = { | |
1343 | .probe = mv_xor_shared_probe, | |
1344 | .remove = mv_xor_shared_remove, | |
1345 | .driver = { | |
1346 | .owner = THIS_MODULE, | |
1347 | .name = MV_XOR_SHARED_NAME, | |
1348 | }, | |
1349 | }; | |
1350 | ||
1351 | ||
1352 | static int __init mv_xor_init(void) | |
1353 | { | |
1354 | int rc; | |
1355 | ||
1356 | rc = platform_driver_register(&mv_xor_shared_driver); | |
1357 | if (!rc) { | |
1358 | rc = platform_driver_register(&mv_xor_driver); | |
1359 | if (rc) | |
1360 | platform_driver_unregister(&mv_xor_shared_driver); | |
1361 | } | |
1362 | return rc; | |
1363 | } | |
1364 | module_init(mv_xor_init); | |
1365 | ||
1366 | /* it's currently unsafe to unload this module */ | |
1367 | #if 0 | |
1368 | static void __exit mv_xor_exit(void) | |
1369 | { | |
1370 | platform_driver_unregister(&mv_xor_driver); | |
1371 | platform_driver_unregister(&mv_xor_shared_driver); | |
1372 | return; | |
1373 | } | |
1374 | ||
1375 | module_exit(mv_xor_exit); | |
1376 | #endif | |
1377 | ||
1378 | MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>"); | |
1379 | MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine"); | |
1380 | MODULE_LICENSE("GPL"); |