/*
 * offload engine driver for the Intel Xscale series of I/O processors
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

/*
 * This driver supports the asynchronous DMA copy and RAID engines available
 * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/async_tx.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/ioport.h>

#include <mach/adma.h>

#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
#define to_iop_adma_device(dev) \
	container_of(dev, struct iop_adma_device, common)
#define tx_to_iop_adma_slot(tx) \
	container_of(tx, struct iop_adma_desc_slot, async_tx)

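/* These helpers rely on structure embedding: struct iop_adma_chan, for
 * example, embeds its generic struct dma_chan in a member named 'common',
 * so container_of() can recover the driver-private wrapper from the
 * generic pointer handed in by the dmaengine core, e.g.:
 *
 *	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 */
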
/**
 * iop_adma_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &iop_chan->lock while calling this function
 */
static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
{
	int stride = slot->slots_per_op;

	while (stride--) {
		slot->slots_per_op = 0;
		slot = list_entry(slot->slot_node.next,
				struct iop_adma_desc_slot,
				slot_node);
	}
}

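/* iop_adma_run_tx_complete_actions - finish one descriptor on behalf of
 * the client: record its cookie, invoke the completion callback (which
 * must not sleep or submit new operations to this channel), unmap the
 * buffers the group described, then kick any dependent operations.
 * Returns the most recent cookie completed so far.
 */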
static dma_cookie_t
iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
	struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);
	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;
		desc->async_tx.cookie = 0;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct iop_adma_desc_slot *unmap = desc->group_head;
			struct device *dev =
				&iop_chan->device->pdev->dev;
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;

			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				addr = iop_desc_get_dest_addr(unmap, iop_chan);
				dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				src_cnt = unmap->unmap_src_cnt;
				while (src_cnt--) {
					addr = iop_desc_get_src_addr(unmap,
								     iop_chan,
								     src_cnt);
					dma_unmap_page(dev, addr, len,
						       DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	async_tx_run_dependencies(&desc->async_tx);

	return cookie;
}

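/* iop_adma_clean_slot - recycle a completed descriptor if the client has
 * acked it; returns 1 to stop the cleanup walk when the descriptor is
 * the chain tail (it is kept so new operations can be appended).
 */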
static int
iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
	struct iop_adma_chan *iop_chan)
{
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx))
		return 0;

	/* leave the last descriptor in the chain
	 * so we can append to it
	 */
	if (desc->chain_node.next == &iop_chan->chain)
		return 1;

	dev_dbg(iop_chan->device->common.dev,
		"\tfree slot: %d slots_per_op: %d\n",
		desc->idx, desc->slots_per_op);

	list_del(&desc->chain_node);
	iop_adma_free_slots(desc);

	return 0;
}

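/* __iop_adma_slot_cleanup - walk the chain from the oldest descriptor up
 * to the one currently loaded in hardware, completing finished single
 * descriptors and whole multi-slot groups and recycling their slots.
 * Caller must hold iop_chan->lock.
 */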
static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
	dma_cookie_t cookie = 0;
	u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
	int busy = iop_chan_is_busy(iop_chan);
	int seen_current = 0, slot_cnt = 0, slots_per_op = 0;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */
	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
					chain_node) {
		pr_debug("\tcookie: %d slot: %d busy: %d "
			"this_desc: %#x next_desc: %#x ack: %d\n",
			iter->async_tx.cookie, iter->idx, busy,
			iter->async_tx.phys, iop_desc_get_next_desc(iter),
			async_tx_test_ack(&iter->async_tx));
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy, or if it appears that the current descriptor
		 * needs to be re-read (i.e. has been appended to)
		 */
		if (iter->async_tx.phys == current_desc) {
			BUG_ON(seen_current++);
			if (busy || iop_desc_get_next_desc(iter))
				break;
		}

		/* detect the start of a group transaction */
		if (!slot_cnt && !slots_per_op) {
			slot_cnt = iter->slot_cnt;
			slots_per_op = iter->slots_per_op;
			if (slot_cnt <= slots_per_op) {
				slot_cnt = 0;
				slots_per_op = 0;
			}
		}

		if (slot_cnt) {
			pr_debug("\tgroup++\n");
			if (!grp_start)
				grp_start = iter;
			slot_cnt -= slots_per_op;
		}

		/* all the members of a group are complete */
		if (slots_per_op != 0 && slot_cnt == 0) {
			struct iop_adma_desc_slot *grp_iter, *_grp_iter;
			int end_of_chain = 0;
			pr_debug("\tgroup end\n");

			/* collect the total results */
			if (grp_start->xor_check_result) {
				u32 zero_sum_result = 0;
				slot_cnt = grp_start->slot_cnt;
				grp_iter = grp_start;

				list_for_each_entry_from(grp_iter,
					&iop_chan->chain, chain_node) {
					zero_sum_result |=
					    iop_desc_get_zero_result(grp_iter);
					pr_debug("\titer%d result: %d\n",
						grp_iter->idx, zero_sum_result);
					slot_cnt -= slots_per_op;
					if (slot_cnt == 0)
						break;
				}
				pr_debug("\tgrp_start->xor_check_result: %p\n",
					grp_start->xor_check_result);
				*grp_start->xor_check_result = zero_sum_result;
			}

			/* clean up the group */
			slot_cnt = grp_start->slot_cnt;
			grp_iter = grp_start;
			list_for_each_entry_safe_from(grp_iter, _grp_iter,
				&iop_chan->chain, chain_node) {
				cookie = iop_adma_run_tx_complete_actions(
					grp_iter, iop_chan, cookie);

				slot_cnt -= slots_per_op;
				end_of_chain = iop_adma_clean_slot(grp_iter,
					iop_chan);

				if (slot_cnt == 0 || end_of_chain)
					break;
			}

			/* the group should be complete at this point */
			BUG_ON(slot_cnt);

			slots_per_op = 0;
			grp_start = NULL;
			if (end_of_chain)
				break;
			else
				continue;
		} else if (slots_per_op) /* wait for group completion */
			continue;

		/* write back zero sum results (single descriptor case) */
		if (iter->xor_check_result && iter->async_tx.cookie)
			*iter->xor_check_result =
				iop_desc_get_zero_result(iter);

		cookie = iop_adma_run_tx_complete_actions(
				iter, iop_chan, cookie);

		if (iop_adma_clean_slot(iter, iop_chan))
			break;
	}

	BUG_ON(!seen_current);

	if (cookie > 0) {
		iop_chan->completed_cookie = cookie;
		pr_debug("\tcompleted cookie %d\n", cookie);
	}
}

static void
iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
	spin_lock_bh(&iop_chan->lock);
	__iop_adma_slot_cleanup(iop_chan);
	spin_unlock_bh(&iop_chan->lock);
}

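/* The tasklet already runs in bottom-half context, so a plain
 * spin_lock() is sufficient below; process-context callers go through
 * iop_adma_slot_cleanup() above, which uses the _bh variant.
 */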
static void iop_adma_tasklet(unsigned long data)
{
	struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;

	spin_lock(&iop_chan->lock);
	__iop_adma_slot_cleanup(iop_chan);
	spin_unlock(&iop_chan->lock);
}

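/* Slot allocation scans the pre-carved descriptor pool for 'num_slots'
 * contiguous, correctly aligned free slots, starting at the last
 * allocation point; it retries once from the head of the list and, if
 * that also fails, falls back to direct reclaim of completed slots.
 */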
static struct iop_adma_desc_slot *
iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
			int slots_per_op)
{
	struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = iop_chan->last_used;
	else
		iter = list_entry(&iop_chan->all_slots,
			struct iop_adma_desc_slot,
			slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &iop_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++) {
			if (iop_desc_is_aligned(iter, slots_per_op))
				alloc_start = iter;
			else {
				slots_found = 0;
				continue;
			}
		}

		if (slots_found == num_slots) {
			struct iop_adma_desc_slot *alloc_tail = NULL;
			struct iop_adma_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;
				dev_dbg(iop_chan->device->common.dev,
					"allocated slot: %d "
					"(desc %p phys: %#x) slots_per_op %d\n",
					iter->idx, iter->hw_desc,
					iter->async_tx.phys, slots_per_op);

				/* pre-ack all but the last descriptor */
				if (num_slots != slots_per_op)
					async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct iop_adma_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->async_tx.tx_list);
			iop_chan->last_used = last_used;
			iop_desc_clear_next_desc(alloc_start);
			iop_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* perform direct reclaim if the allocation fails */
	__iop_adma_slot_cleanup(iop_chan);

	return NULL;
}

static dma_cookie_t
iop_desc_assign_cookie(struct iop_adma_chan *iop_chan,
	struct iop_adma_desc_slot *desc)
{
	dma_cookie_t cookie = iop_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	iop_chan->common.cookie = desc->async_tx.cookie = cookie;
	return cookie;
}

static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
{
	dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
		iop_chan->pending);

	if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
		iop_chan->pending = 0;
		iop_chan_append(iop_chan);
	}
}

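/* iop_adma_tx_submit - publish a prepared descriptor group: assign a
 * cookie, splice the group onto the software chain, link the old
 * hardware chain tail to the group head, and append to the engine once
 * enough work is pending.
 */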
static dma_cookie_t
iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
	struct iop_adma_desc_slot *grp_start, *old_chain_tail;
	int slot_cnt;
	int slots_per_op;
	dma_cookie_t cookie;
	dma_addr_t next_dma;

	grp_start = sw_desc->group_head;
	slot_cnt = grp_start->slot_cnt;
	slots_per_op = grp_start->slots_per_op;

	spin_lock_bh(&iop_chan->lock);
	cookie = iop_desc_assign_cookie(iop_chan, sw_desc);

	old_chain_tail = list_entry(iop_chan->chain.prev,
		struct iop_adma_desc_slot, chain_node);
	list_splice_init(&sw_desc->async_tx.tx_list,
			 &old_chain_tail->chain_node);

	/* fix up the hardware chain */
	next_dma = grp_start->async_tx.phys;
	iop_desc_set_next_desc(old_chain_tail, next_dma);
	BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */

	/* check for pre-chained descriptors */
	iop_paranoia(iop_desc_get_next_desc(sw_desc));

	/* increment the pending count by the number of slots
	 * memcpy operations have a 1:1 (slot:operation) relation
	 * other operations are heavier and will pop the threshold
	 * more often.
	 */
	iop_chan->pending += slot_cnt;
	iop_adma_check_threshold(iop_chan);
	spin_unlock_bh(&iop_chan->lock);

	dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
		__func__, sw_desc->async_tx.cookie, sw_desc->idx);

	return cookie;
}

static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);

/**
 * iop_adma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: allocate descriptor resources for this channel
 * @client: current client requesting the channel be ready for requests
 *
 * Note: We keep the slots for 1 operation on iop_chan->chain at all times.  To
 * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be
 * greater than 2x the number of slots needed to satisfy a device->max_xor
 * request.
 */
static int iop_adma_alloc_chan_resources(struct dma_chan *chan,
					 struct dma_client *client)
{
	char *hw_desc;
	int idx;
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *slot = NULL;
	int init = iop_chan->slots_allocated ? 0 : 1;
	struct iop_adma_platform_data *plat_data =
		iop_chan->device->pdev->dev.platform_data;
	int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;

	/* Allocate descriptor slots */
	do {
		idx = iop_chan->slots_allocated;
		if (idx == num_descs_in_pool)
			break;

		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "IOP ADMA Channel only initialized"
				" %d descriptor slots", idx);
			break;
		}
		hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = iop_adma_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->async_tx.tx_list);
		hw_desc = (char *) iop_chan->device->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
		slot->idx = idx;

		spin_lock_bh(&iop_chan->lock);
		iop_chan->slots_allocated++;
		list_add_tail(&slot->slot_node, &iop_chan->all_slots);
		spin_unlock_bh(&iop_chan->lock);
	} while (iop_chan->slots_allocated < num_descs_in_pool);

	if (idx && !iop_chan->last_used)
		iop_chan->last_used = list_entry(iop_chan->all_slots.next,
					struct iop_adma_desc_slot,
					slot_node);

	dev_dbg(iop_chan->device->common.dev,
		"allocated %d descriptor slots last_used: %p\n",
		iop_chan->slots_allocated, iop_chan->last_used);

	/* initialize the channel and the chain with a null operation */
	if (init) {
		if (dma_has_cap(DMA_MEMCPY,
			iop_chan->device->common.cap_mask))
			iop_chan_start_null_memcpy(iop_chan);
		else if (dma_has_cap(DMA_XOR,
			iop_chan->device->common.cap_mask))
			iop_chan_start_null_xor(iop_chan);
		else
			BUG();
	}

	return (idx > 0) ? idx : -ENOMEM;
}

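/* Typical client flow through this driver, as exercised by the
 * self-tests further below (a minimal sketch, not a complete kernel
 * client; dest_dma/src_dma are assumed to be already-mapped DMA
 * addresses):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = iop_adma_prep_dma_memcpy(chan, dest_dma, src_dma, len,
 *				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	cookie = iop_adma_tx_submit(tx);
 *	iop_adma_issue_pending(chan);
 *	... then poll iop_adma_is_complete(chan, cookie, NULL, NULL)
 */
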
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_interrupt(grp_start, iop_chan);
		grp_start->unmap_len = 0;
		sw_desc->async_tx.flags = flags;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
			 dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));

	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
		__func__, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_memcpy(grp_start, flags);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
		sw_desc->async_tx.flags = flags;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
			 int value, size_t len, unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));

	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
		__func__, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_memset(grp_start, flags);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		iop_desc_set_block_fill_val(grp_start, value);
		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
		sw_desc->async_tx.flags = flags;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
		      dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
		      unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));

	dev_dbg(iop_chan->device->common.dev,
		"%s src_cnt: %d len: %u flags: %lx\n",
		__func__, src_cnt, len, flags);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_xor(grp_start, src_cnt, flags);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		sw_desc->async_tx.flags = flags;
		while (src_cnt--)
			iop_desc_set_xor_src_addr(grp_start, src_cnt,
						  dma_src[src_cnt]);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
			   unsigned int src_cnt, size_t len, u32 *result,
			   unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!src_cnt))
		return NULL;

	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
		__func__, src_cnt, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_zero_sum(grp_start, src_cnt, flags);
		iop_desc_set_zero_sum_byte_count(grp_start, len);
		grp_start->xor_check_result = result;
		pr_debug("\t%s: grp_start->xor_check_result: %p\n",
			__func__, grp_start->xor_check_result);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		sw_desc->async_tx.flags = flags;
		while (src_cnt--)
			iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
						       dma_src[src_cnt]);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

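/* Note: prep_dma_zero_sum does not write the result itself; the
 * xor_check_result pointer stashed in the group head is consumed by
 * __iop_adma_slot_cleanup(), which ORs the per-descriptor results
 * together once the whole group completes.
 */
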
static void iop_adma_free_chan_resources(struct dma_chan *chan)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	iop_adma_slot_cleanup(iop_chan);

	spin_lock_bh(&iop_chan->lock);
	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
					chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &iop_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		iop_chan->slots_allocated--;
	}
	iop_chan->last_used = NULL;

	dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
		__func__, iop_chan->slots_allocated);
	spin_unlock_bh(&iop_chan->lock);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
			in_use_descs - 1);
}

/**
 * iop_adma_is_complete - poll the status of an ADMA transaction
 * @chan: ADMA channel handle
 * @cookie: ADMA transaction identifier
 */
static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = iop_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	iop_adma_slot_cleanup(iop_chan);

	last_used = chan->cookie;
	last_complete = iop_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

static irqreturn_t iop_adma_eot_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;

	dev_dbg(chan->device->common.dev, "%s\n", __func__);

	tasklet_schedule(&chan->irq_tasklet);

	iop_adma_device_clear_eot_status(chan);

	return IRQ_HANDLED;
}

static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;

	dev_dbg(chan->device->common.dev, "%s\n", __func__);

	tasklet_schedule(&chan->irq_tasklet);

	iop_adma_device_clear_eoc_status(chan);

	return IRQ_HANDLED;
}

static irqreturn_t iop_adma_err_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;
	unsigned long status = iop_chan_get_status(chan);

	dev_printk(KERN_ERR, chan->device->common.dev,
		"error ( %s%s%s%s%s%s%s)\n",
		iop_is_err_int_parity(status, chan) ? "int_parity " : "",
		iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
		iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
		iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
		iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
		iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
		iop_is_err_split_tx(status, chan) ? "split_tx " : "");

	iop_adma_device_clear_err_status(chan);

	BUG();

	return IRQ_HANDLED;
}

static void iop_adma_issue_pending(struct dma_chan *chan)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);

	if (iop_chan->pending) {
		iop_chan->pending = 0;
		iop_chan_append(iop_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define IOP_ADMA_TEST_SIZE 2000

static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;
	struct iop_adma_chan *iop_chan;

	dev_dbg(device->common.dev, "%s\n", __func__);

	src = kmalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
	src_dma = dma_map_single(dma_chan->device->dev, src,
				IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				      IOP_ADMA_TEST_SIZE,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(1);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
			DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	iop_chan = to_iop_adma_chan(dma_chan);
	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
		IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	iop_adma_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
static int __devinit
iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
	struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
	dma_addr_t dma_addr, dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 zero_sum_result;
	int err = 0;
	struct iop_adma_chan *iop_chan;

	dev_dbg(device->common.dev, "%s\n", __func__);

	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE,
				   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
		DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	iop_chan = to_iop_adma_chan(dma_chan);
	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
		PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				"Self-test xor failed compare, disabling\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
		PAGE_SIZE, DMA_TO_DEVICE);

	/* skip zero sum if the capability is not present */
	if (!dma_has_cap(DMA_ZERO_SUM, dma_chan->device->cap_mask))
		goto free_resources;

	/* zero sum the sources with the destination page */
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
		zero_sum_srcs[i] = xor_srcs[i];
	zero_sum_srcs[i] = dest;

	zero_sum_result = 1;

	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
					   zero_sum_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
					&zero_sum_result,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test zero sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 0) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test zero sum failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* test memset */
	dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
			PAGE_SIZE, DMA_FROM_DEVICE);
	tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test memset timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i]) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				"Self-test memset failed compare, disabling\n");
			err = -ENODEV;
			goto free_resources;
		}
	}

	/* test for non-zero parity sum */
	zero_sum_result = 0;
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
					   zero_sum_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
					&zero_sum_result,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test non-zero sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 1) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test non-zero sum failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	iop_adma_free_chan_resources(dma_chan);
out:
	src_idx = IOP_ADMA_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int __devexit iop_adma_remove(struct platform_device *dev)
{
	struct iop_adma_device *device = platform_get_drvdata(dev);
	struct dma_chan *chan, *_chan;
	struct iop_adma_chan *iop_chan;
	int i;
	struct iop_adma_platform_data *plat_data = dev->dev.platform_data;

	dma_async_device_unregister(&device->common);

	for (i = 0; i < 3; i++) {
		unsigned int irq;
		irq = platform_get_irq(dev, i);
		free_irq(irq, device);
	}

	dma_free_coherent(&dev->dev, plat_data->pool_size,
			device->dma_desc_pool_virt, device->dma_desc_pool);

	do {
		struct resource *res;
		res = platform_get_resource(dev, IORESOURCE_MEM, 0);
		release_mem_region(res->start, res->end - res->start);
	} while (0);

	list_for_each_entry_safe(chan, _chan, &device->common.channels,
				device_node) {
		iop_chan = to_iop_adma_chan(chan);
		list_del(&chan->device_node);
		kfree(iop_chan);
	}
	kfree(device);

	return 0;
}

static int __devinit iop_adma_probe(struct platform_device *pdev)
{
	struct resource *res;
	int ret = 0, i;
	struct iop_adma_device *adev;
	struct iop_adma_chan *iop_chan;
	struct dma_device *dma_dev;
	struct iop_adma_platform_data *plat_data = pdev->dev.platform_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (!devm_request_mem_region(&pdev->dev, res->start,
				res->end - res->start, pdev->name))
		return -EBUSY;

	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	dma_dev = &adev->common;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	if ((adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
					plat_data->pool_size,
					&adev->dma_desc_pool,
					GFP_KERNEL)) == NULL) {
		ret = -ENOMEM;
		goto err_free_adev;
	}

	dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
		__func__, adev->dma_desc_pool_virt,
		(void *) adev->dma_desc_pool);

	adev->id = plat_data->hw_id;

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = plat_data->cap_mask;

	adev->pdev = pdev;
	platform_set_drvdata(pdev, adev);

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
	dma_dev->device_is_tx_complete = iop_adma_is_complete;
	dma_dev->device_issue_pending = iop_adma_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = iop_adma_get_max_xor();
		dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
	}
	if (dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask))
		dma_dev->device_prep_dma_zero_sum =
			iop_adma_prep_dma_zero_sum;
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt =
			iop_adma_prep_dma_interrupt;

	iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
	if (!iop_chan) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	iop_chan->device = adev;

	iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
					res->end - res->start);
	if (!iop_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_iop_chan;
	}
	tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
		iop_chan);

	/* clear errors before enabling interrupts */
	iop_adma_device_clear_err_status(iop_chan);

	for (i = 0; i < 3; i++) {
		irq_handler_t handler[] = { iop_adma_eot_handler,
					iop_adma_eoc_handler,
					iop_adma_err_handler };
		int irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = -ENXIO;
			goto err_free_iop_chan;
		} else {
			ret = devm_request_irq(&pdev->dev, irq,
					handler[i], 0, pdev->name, iop_chan);
			if (ret)
				goto err_free_iop_chan;
		}
	}

	spin_lock_init(&iop_chan->lock);
	INIT_LIST_HEAD(&iop_chan->chain);
	INIT_LIST_HEAD(&iop_chan->all_slots);
	INIT_RCU_HEAD(&iop_chan->common.rcu);
	iop_chan->common.device = dma_dev;
	list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = iop_adma_memcpy_self_test(adev);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_iop_chan;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||
	    dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
		ret = iop_adma_xor_zero_sum_self_test(adev);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_iop_chan;
	}

	dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: "
	  "( %s%s%s%s%s%s%s%s%s%s)\n",
	  dma_has_cap(DMA_PQ_XOR, dma_dev->cap_mask) ? "pq_xor " : "",
	  dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "",
	  dma_has_cap(DMA_PQ_ZERO_SUM, dma_dev->cap_mask) ? "pq_zero_sum " : "",
	  dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
	  dma_has_cap(DMA_DUAL_XOR, dma_dev->cap_mask) ? "dual_xor " : "",
	  dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask) ? "xor_zero_sum " : "",
	  dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
	  dma_has_cap(DMA_MEMCPY_CRC32C, dma_dev->cap_mask) ? "cpy+crc " : "",
	  dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	goto out;

 err_free_iop_chan:
	kfree(iop_chan);
 err_free_dma:
	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
			adev->dma_desc_pool_virt, adev->dma_desc_pool);
 err_free_adev:
	kfree(adev);
 out:
	return ret;
}

static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	dma_cookie_t cookie;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;

		list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
		async_tx_ack(&sw_desc->async_tx);
		iop_desc_init_memcpy(grp_start, 0);
		iop_desc_set_byte_count(grp_start, iop_chan, 0);
		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
		iop_desc_set_memcpy_src_addr(grp_start, 0);

		cookie = iop_chan->common.cookie;
		cookie++;
		if (cookie <= 1)
			cookie = 2;

		/* initialize the completed cookie to be less than
		 * the most recently used cookie
		 */
		iop_chan->completed_cookie = cookie - 1;
		iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;

		/* channel should not be busy */
		BUG_ON(iop_chan_is_busy(iop_chan));

		/* clear any prior error-status bits */
		iop_adma_device_clear_err_status(iop_chan);

		/* disable operation */
		iop_chan_disable(iop_chan);

		/* set the descriptor address */
		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);

		/* 1/ don't add pre-chained descriptors
		 * 2/ dummy read to flush next_desc write
		 */
		BUG_ON(iop_desc_get_next_desc(sw_desc));

		/* run the descriptor */
		iop_chan_enable(iop_chan);
	} else
		dev_printk(KERN_ERR, iop_chan->device->common.dev,
			 "failed to allocate null descriptor\n");
	spin_unlock_bh(&iop_chan->lock);
}

static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	dma_cookie_t cookie;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
		async_tx_ack(&sw_desc->async_tx);
		iop_desc_init_null_xor(grp_start, 2, 0);
		iop_desc_set_byte_count(grp_start, iop_chan, 0);
		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
		iop_desc_set_xor_src_addr(grp_start, 0, 0);
		iop_desc_set_xor_src_addr(grp_start, 1, 0);

		cookie = iop_chan->common.cookie;
		cookie++;
		if (cookie <= 1)
			cookie = 2;

		/* initialize the completed cookie to be less than
		 * the most recently used cookie
		 */
		iop_chan->completed_cookie = cookie - 1;
		iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;

		/* channel should not be busy */
		BUG_ON(iop_chan_is_busy(iop_chan));

		/* clear any prior error-status bits */
		iop_adma_device_clear_err_status(iop_chan);

		/* disable operation */
		iop_chan_disable(iop_chan);

		/* set the descriptor address */
		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);

		/* 1/ don't add pre-chained descriptors
		 * 2/ dummy read to flush next_desc write
		 */
		BUG_ON(iop_desc_get_next_desc(sw_desc));

		/* run the descriptor */
		iop_chan_enable(iop_chan);
	} else
		dev_printk(KERN_ERR, iop_chan->device->common.dev,
			"failed to allocate null descriptor\n");
	spin_unlock_bh(&iop_chan->lock);
}

MODULE_ALIAS("platform:iop-adma");

static struct platform_driver iop_adma_driver = {
	.probe		= iop_adma_probe,
	.remove		= iop_adma_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "iop-adma",
	},
};

static int __init iop_adma_init(void)
{
	return platform_driver_register(&iop_adma_driver);
}

/* it's currently unsafe to unload this module */
static void __exit iop_adma_exit(void)
{
	platform_driver_unregister(&iop_adma_driver);
	return;
}
module_exit(iop_adma_exit);

module_init(iop_adma_init);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("IOP ADMA Engine Driver");
MODULE_LICENSE("GPL");