/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * BSD LICENSE
 *
 * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support routines for v3+ hardware
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include "../dmaengine.h"
#include "registers.h"
#include "hw.h"
#include "dma.h"
#include "dma_v2.h"

/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)

/* provide a lookup table for setting the source address in the base or
 * extended descriptor of an xor or pq descriptor
 */
static const u8 xor_idx_to_desc = 0xe0;
static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc = 0xf8;
static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
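
/*
 * Worked example of the lookup tables above: xor_idx_to_desc is a bitmap,
 * so for source index 6 the test (0xe0 >> 6) & 1 = 1 selects the extended
 * descriptor, while index 2 gives (0xe0 >> 2) & 1 = 0 and selects the base
 * descriptor; xor_idx_to_field[6] = 1 then names which u64 slot inside the
 * chosen descriptor holds the address.  In other words, sources 0-4 of an
 * xor live in the base descriptor and sources 5-7 spill into the extension;
 * for pq the split is sources 0-2 base, 3-7 extension (mask 0xf8).
 */
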
static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	return raw->field[xor_idx_to_field[idx]];
}

static void xor_set_src(struct ioat_raw_descriptor *descs[2],
			dma_addr_t addr, u32 offset, int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	raw->field[xor_idx_to_field[idx]] = addr + offset;
}

static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	return raw->field[pq_idx_to_field[idx]];
}

static void pq_set_src(struct ioat_raw_descriptor *descs[2],
		       dma_addr_t addr, u32 offset, u8 coef, int idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	raw->field[pq_idx_to_field[idx]] = addr + offset;
	pq->coef[idx] = coef;
}
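
/*
 * Note on pq_set_src(): the source address may land in either the base or
 * the extended descriptor, but the GF(256) coefficient is always stored in
 * the base pq descriptor (descs[0]), whose coef[] array covers all eight
 * possible sources.
 */
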
static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
			    struct ioat_ring_ent *desc, int idx)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct pci_dev *pdev = chan->device->pdev;
	size_t len = desc->len;
	size_t offset = len - desc->hw->size;
	struct dma_async_tx_descriptor *tx = &desc->txd;
	enum dma_ctrl_flags flags = tx->flags;

	switch (desc->hw->ctl_f.op) {
	case IOAT_OP_COPY:
		if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
			ioat_dma_unmap(chan, flags, len, desc->hw);
		break;
	case IOAT_OP_FILL: {
		struct ioat_fill_descriptor *hw = desc->fill;

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
			ioat_unmap(pdev, hw->dst_addr - offset, len,
				   PCI_DMA_FROMDEVICE, flags, 1);
		break;
	}
	case IOAT_OP_XOR_VAL:
	case IOAT_OP_XOR: {
		struct ioat_xor_descriptor *xor = desc->xor;
		struct ioat_ring_ent *ext;
		struct ioat_xor_ext_descriptor *xor_ex = NULL;
		int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[2];
		int i;

		if (src_cnt > 5) {
			ext = ioat2_get_ring_ent(ioat, idx + 1);
			xor_ex = ext->xor_ex;
		}

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *) xor;
			descs[1] = (struct ioat_raw_descriptor *) xor_ex;
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = xor_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* dest is a source in xor validate operations */
			if (xor->ctl_f.op == IOAT_OP_XOR_VAL) {
				ioat_unmap(pdev, xor->dst_addr - offset, len,
					   PCI_DMA_TODEVICE, flags, 1);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
			ioat_unmap(pdev, xor->dst_addr - offset, len,
				   PCI_DMA_FROMDEVICE, flags, 1);
		break;
	}
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ: {
		struct ioat_pq_descriptor *pq = desc->pq;
		struct ioat_ring_ent *ext;
		struct ioat_pq_ext_descriptor *pq_ex = NULL;
		int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[2];
		int i;

		if (src_cnt > 3) {
			ext = ioat2_get_ring_ent(ioat, idx + 1);
			pq_ex = ext->pq_ex;
		}

		/* in the 'continue' case don't unmap the dests as sources */
		if (dmaf_p_disabled_continue(flags))
			src_cnt--;
		else if (dmaf_continue(flags))
			src_cnt -= 3;

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *) pq;
			descs[1] = (struct ioat_raw_descriptor *) pq_ex;
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = pq_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* the dests are sources in pq validate operations
			 * (comparing against IOAT_OP_PQ_VAL; a compare with
			 * the xor op code here could never match)
			 */
			if (pq->ctl_f.op == IOAT_OP_PQ_VAL) {
				if (!(flags & DMA_PREP_PQ_DISABLE_P))
					ioat_unmap(pdev, pq->p_addr - offset,
						   len, PCI_DMA_TODEVICE,
						   flags, 0);
				if (!(flags & DMA_PREP_PQ_DISABLE_Q))
					ioat_unmap(pdev, pq->q_addr - offset,
						   len, PCI_DMA_TODEVICE,
						   flags, 0);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (!(flags & DMA_PREP_PQ_DISABLE_P))
				ioat_unmap(pdev, pq->p_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
			if (!(flags & DMA_PREP_PQ_DISABLE_Q))
				ioat_unmap(pdev, pq->q_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
		}
		break;
	}
	default:
		dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
			__func__, desc->hw->ctl_f.op);
	}
}
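
/*
 * A note on the 'offset' arithmetic used throughout ioat3_dma_unmap(): only
 * the last descriptor of a multi-descriptor operation carries the unmap
 * parameters, so desc->len is the total transfer length while desc->hw->size
 * is just the final chunk.  For example (illustrative numbers), a 12KB xor
 * split into three 4KB descriptors ends with len = 12KB and hw->size = 4KB,
 * giving offset = 8KB; subtracting that offset from the addresses programmed
 * into the last descriptor recovers the base addresses that were originally
 * dma-mapped.
 */
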
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}
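
/*
 * The thresholds above follow from the lookup tables at the top of the
 * file: a base xor descriptor has room for five source addresses (indices
 * 0-4) and a base pq descriptor for three (indices 0-2, the remaining
 * slots being used for control, chaining, coefficients, and the P/Q
 * addresses), so any operation with more sources occupies the adjacent
 * extended descriptor as well, mirroring the with_ext logic in the prep
 * routines.
 */
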
/**
 * __cleanup - reclaim used descriptors
 * @ioat: channel (ring) to clean
 *
 * The difference from the dma_v2.c __cleanup() is that this routine
 * handles extended descriptors and dma-unmapping raid operations.
 */
static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	int idx = ioat->tail, i;
	u16 active;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		smp_read_barrier_depends();
		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
		desc = ioat2_get_ring_ent(ioat, idx + i);
		dump_desc_dbg(ioat, desc);
		tx = &desc->txd;
		if (tx->cookie) {
			dma_cookie_complete(tx);
			ioat3_dma_unmap(ioat, desc, idx + i);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}
	}
	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat->tail = idx + i;
	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
	chan->last_completion = phys_complete;

	if (active - i == 0) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
	/* 5 microsecond delay per pending descriptor */
	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
	       chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
}
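
/*
 * The writew() above programs the channel's interrupt coalescing delay in
 * proportion to the remaining backlog: with 10 descriptors still pending
 * the delay is 50 microseconds, capped at IOAT_INTRDELAY_MASK, while an
 * idle channel gets a zero delay so the next completion interrupt fires
 * immediately.
 */
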
static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	spin_lock_bh(&chan->cleanup_lock);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);
	spin_unlock_bh(&chan->cleanup_lock);
}

static void ioat3_cleanup_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);

	ioat3_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	ioat2_quiesce(chan, 0);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__ioat2_restart_chan(ioat);
}

static void check_active(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	if (ioat2_ring_active(ioat)) {
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	else if (ioat->alloc_order > ioat_get_alloc_order()) {
		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		reshape_ring(ioat, ioat->alloc_order - 1);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

static void ioat3_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(chan);

	/* when halted due to errors check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		if (test_bit(IOAT_RUN, &chan->state))
			BUG_ON(is_ioat_bug(chanerr));
		else /* we never got off the ground */
			return;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	spin_lock_bh(&chan->cleanup_lock);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);
	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
		spin_lock_bh(&ioat->prep_lock);
		ioat3_restart_channel(ioat);
		spin_unlock_bh(&ioat->prep_lock);
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	} else {
		set_bit(IOAT_COMPLETION_ACK, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	}

	if (ioat2_ring_active(ioat))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	else {
		spin_lock_bh(&ioat->prep_lock);
		check_active(ioat);
		spin_unlock_bh(&ioat->prep_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

static enum dma_status
ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat3_cleanup(ioat);

	return dma_cookie_status(c, cookie, txstate);
}

static struct dma_async_tx_descriptor *
ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *desc;
	size_t total_len = len;
	struct ioat_fill_descriptor *fill;
	u64 src_data = (0x0101010101010101ULL) * (value & 0xff);
	int num_descs, idx, i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		fill = desc->fill;

		fill->size = xfer_size;
		fill->src_data = src_data;
		fill->dst_addr = dest;
		fill->ctl = 0;
		fill->ctl_f.op = IOAT_OP_FILL;

		len -= xfer_size;
		dest += xfer_size;
		dump_desc_dbg(ioat, desc);
	} while (++i < num_descs);

	desc->txd.flags = flags;
	desc->len = total_len;
	fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	fill->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}
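
/*
 * The src_data computation in ioat3_prep_memset_lock() replicates the fill
 * byte across all eight lanes of a u64: multiplying 0x0101010101010101ULL
 * by (value & 0xff) copies the byte into each position, e.g. value = 0xab
 * yields 0xabababababababab, the pattern the fill engine stamps over the
 * destination.
 */
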
static struct dma_async_tx_descriptor *
__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
		      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
		      size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_xor_descriptor *xor;
	struct ioat_xor_ext_descriptor *xor_ex = NULL;
	struct ioat_dma_descriptor *hw;
	int num_descs, with_ext, idx, i;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;

	BUG_ON(src_cnt < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 5
	 * sources
	 */
	if (src_cnt > 5) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
		int s;

		desc = ioat2_get_ring_ent(ioat, idx + i);
		xor = desc->xor;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor xor_set_src() knows to not write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + 1);
		xor_ex = ext->xor_ex;

		descs[0] = (struct ioat_raw_descriptor *) xor;
		descs[1] = (struct ioat_raw_descriptor *) xor_ex;
		for (s = 0; s < src_cnt; s++)
			xor_set_src(descs, src[s], offset, s);
		xor->size = xfer_size;
		xor->dst_addr = dest + offset;
		xor->ctl = 0;
		xor->ctl_f.op = op;
		xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);

		len -= xfer_size;
		offset += xfer_size;
		dump_desc_dbg(ioat, desc);
	} while ((i += 1 + with_ext) < num_descs);

	/* last xor descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* completion descriptor carries interrupt bit */
	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
	hw = compl_desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	dump_desc_dbg(ioat, compl_desc);

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}
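
/*
 * Descriptor budgeting example for __ioat3_prep_xor_lock(): an 8-source,
 * 8KB xor on a channel with a 4KB transfer cap (illustrative numbers)
 * needs two hardware operations; since 8 sources exceed the five slots of
 * a base descriptor, with_ext doubles the count to four ring entries
 * (base + extension per operation), and one more null descriptor is
 * reserved so the legacy completion write lands after the raid engine's
 * writes, for five entries total.
 */
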
static struct dma_async_tx_descriptor *
ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	       unsigned int src_cnt, size_t len, unsigned long flags)
{
	return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}

static struct dma_async_tx_descriptor *
ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		   unsigned int src_cnt, size_t len,
		   enum sum_check_flags *result, unsigned long flags)
{
	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
				     src_cnt - 1, len, flags);
}

static void
dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext)
{
	struct device *dev = to_dev(&ioat->base);
	struct ioat_pq_descriptor *pq = desc->pq;
	struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
	struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
	int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
	int i;

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
		" sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s' src_cnt: %d)\n",
		desc_id(desc), (unsigned long long) desc->txd.phys,
		(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
		desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
		pq->ctl_f.compl_write,
		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
		pq->ctl_f.src_cnt);
	for (i = 0; i < src_cnt; i++)
		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
			(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
	dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
}

static struct dma_async_tx_descriptor *
__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
		     const dma_addr_t *dst, const dma_addr_t *src,
		     unsigned int src_cnt, const unsigned char *scf,
		     size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_pq_descriptor *pq;
	struct ioat_pq_ext_descriptor *pq_ex = NULL;
	struct ioat_dma_descriptor *hw;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
	int i, s, idx, with_ext, num_descs;

	dev_dbg(to_dev(chan), "%s\n", __func__);
	/* the engine requires at least two sources (we provide
	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
	 */
	BUG_ON(src_cnt + dmaf_continue(flags) < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 3
	 * sources (we need 1 extra source in the q-only continuation
	 * case and 3 extra sources in the p+q continuation case.
	 */
	if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
	    (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) &&
	    ioat2_check_space_lock(ioat, num_descs+1) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		pq = desc->pq;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor pq_set_src() knows to not write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
		pq_ex = ext->pq_ex;

		descs[0] = (struct ioat_raw_descriptor *) pq;
		descs[1] = (struct ioat_raw_descriptor *) pq_ex;

		for (s = 0; s < src_cnt; s++)
			pq_set_src(descs, src[s], offset, scf[s], s);

		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
		if (dmaf_p_disabled_continue(flags))
			pq_set_src(descs, dst[1], offset, 1, s++);
		else if (dmaf_continue(flags)) {
			pq_set_src(descs, dst[0], offset, 0, s++);
			pq_set_src(descs, dst[1], offset, 1, s++);
			pq_set_src(descs, dst[1], offset, 0, s++);
		}
		pq->size = xfer_size;
		pq->p_addr = dst[0] + offset;
		pq->q_addr = dst[1] + offset;
		pq->ctl = 0;
		pq->ctl_f.op = op;
		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

		len -= xfer_size;
		offset += xfer_size;
	} while ((i += 1 + with_ext) < num_descs);

	/* last pq descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	dump_pq_desc_dbg(ioat, desc, ext);

	/* completion descriptor carries interrupt bit */
	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
	hw = compl_desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	dump_desc_dbg(ioat, compl_desc);

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}
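
/*
 * The implied-source rules in __ioat3_prep_pq_lock() mirror the dma_maxpq()
 * comment it references: for a q-only continuation (DMA_PREP_CONTINUE with
 * P disabled) the prior Q is fed back as one extra source with coefficient
 * 1, and for a full p+q continuation the prior P and Q are fed back as
 * three extra sources (P with coefficient 0, Q with coefficients 1 and 0)
 * so the engine folds the old results into the new sums.
 */
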
static struct dma_async_tx_descriptor *
ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	      unsigned int src_cnt, const unsigned char *scf, size_t len,
	      unsigned long flags)
{
	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		dst[0] = dst[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		dst[1] = dst[0];

	/* handle the single source multiply case from the raid6
	 * recovery path
	 */
	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
		dma_addr_t single_source[2];
		unsigned char single_source_coef[2];

		BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
		single_source[0] = src[0];
		single_source[1] = src[0];
		single_source_coef[0] = scf[0];
		single_source_coef[1] = 0;

		return __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
					    single_source_coef, len, flags);
	} else
		return __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, scf,
					    len, flags);
}
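
/*
 * Worked example of the single-source multiply above: raid6 recovery wants
 * Q = scf[0]*D for one data block D, but the engine insists on two sources,
 * so D is passed twice with coefficients {scf[0], 0}.  In GF(256) the
 * result is scf[0]*D xor 0*D = scf[0]*D, exactly the requested product.
 */
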
static struct dma_async_tx_descriptor *
ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		  unsigned int src_cnt, const unsigned char *scf, size_t len,
		  enum sum_check_flags *pqres, unsigned long flags)
{
	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		pq[0] = pq[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		pq[1] = pq[0];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*pqres = 0;

	return __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
				    flags);
}

static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		 unsigned int src_cnt, size_t len, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	memset(scf, 0, src_cnt);
	pq[0] = dst;
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = dst; /* specify valid address for disabled result */

	return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
				    flags);
}

static struct dma_async_tx_descriptor *
ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		     unsigned int src_cnt, size_t len,
		     enum sum_check_flags *result, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	memset(scf, 0, src_cnt);
	pq[0] = src[0];
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = pq[0]; /* specify valid address for disabled result */

	return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
				    len, flags);
}
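
/*
 * Why xor can ride on the pq engine (used when a device advertises
 * IOAT_CAP_PQ but not IOAT_CAP_XOR, see ioat3_dma_probe() below): the P
 * result of a pq operation is the plain xor of the sources independent of
 * the coefficients, so ioat3_prep_pqxor() and ioat3_prep_pqxor_val() simply
 * disable Q, point both result addresses at the xor destination, and hand
 * the engine an all-zero (don't-care) coefficient array.
 */
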
static struct dma_async_tx_descriptor *
ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat2_check_space_lock(ioat, 1) == 0)
		desc = ioat2_get_ring_ent(ioat, ioat->head);
	else
		return NULL;

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;

	desc->txd.flags = flags;
	desc->len = 1;

	dump_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}

static void ioat3_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int ioat_xor_val_self_test(struct ioatdma_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_addr, dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 xor_val_result;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 op = 0;

	dev_dbg(dev, "%s\n", __func__);

	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
		return 0;

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	op = IOAT_OP_XOR;

	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
				      DMA_PREP_INTERRUPT |
				      DMA_COMPL_SKIP_SRC_UNMAP |
				      DMA_COMPL_SKIP_DEST_UNMAP);

	if (!tx) {
		dev_err(dev, "Self-test xor prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test xor setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test xor timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dev, "Self-test xor failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* skip validate if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;

	op = IOAT_OP_XOR_VAL;

	/* validate the sources with the destination page */
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		xor_val_srcs[i] = xor_srcs[i];
	xor_val_srcs[i] = dest;

	xor_val_result = 1;

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT |
					  DMA_COMPL_SKIP_SRC_UNMAP |
					  DMA_COMPL_SKIP_DEST_UNMAP);
	if (!tx) {
		dev_err(dev, "Self-test zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	if (xor_val_result != 0) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* skip memset if the capability is not present */
	if (!dma_has_cap(DMA_MEMSET, dma_chan->device->cap_mask))
		goto free_resources;

	/* test memset */
	op = IOAT_OP_FILL;

	dma_addr = dma_map_page(dev, dest, 0,
			PAGE_SIZE, DMA_FROM_DEVICE);
	tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
					 DMA_PREP_INTERRUPT |
					 DMA_COMPL_SKIP_SRC_UNMAP |
					 DMA_COMPL_SKIP_DEST_UNMAP);
	if (!tx) {
		dev_err(dev, "Self-test memset prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test memset setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test memset timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i]) {
			dev_err(dev, "Self-test memset failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}

	/* test for non-zero parity sum */
	op = IOAT_OP_XOR_VAL;

	xor_val_result = 0;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT |
					  DMA_COMPL_SKIP_SRC_UNMAP |
					  DMA_COMPL_SKIP_DEST_UNMAP);
	if (!tx) {
		dev_err(dev, "Self-test 2nd zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test 2nd zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test 2nd validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	if (xor_val_result != SUM_CHECK_P_RESULT) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	goto free_resources;
dma_unmap:
	if (op == IOAT_OP_XOR) {
		dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
				       DMA_TO_DEVICE);
	} else if (op == IOAT_OP_XOR_VAL) {
		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
				       DMA_TO_DEVICE);
	} else if (op == IOAT_OP_FILL)
		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	src_idx = IOAT_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}
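
/*
 * The expected patterns in the self test are pure arithmetic: source page
 * src_idx is filled with the byte (1 << src_idx), so xoring the six test
 * pages gives cmp_byte = 0x01^0x02^0x04^0x08^0x10^0x20 = 0x3f, and cmp_word
 * replicates that byte into all four positions of a u32 (0x3f3f3f3f).  The
 * final validate pass reuses the same source set after the memset test has
 * zeroed the destination page, so the seven pages no longer xor to zero and
 * the operation must report SUM_CHECK_P_RESULT (a parity mismatch) to pass.
 */
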
static int ioat3_dma_self_test(struct ioatdma_device *device)
{
	int rc = ioat_dma_self_test(device);

	if (rc)
		return rc;

	rc = ioat_xor_val_self_test(device);
	if (rc)
		return rc;

	return 0;
}

static int ioat3_reset_hw(struct ioat_chan_common *chan)
{
	/* throw away whatever the channel was doing and get it
	 * initialized, with ioat3 specific workarounds
	 */
	struct ioatdma_device *device = chan->device;
	struct pci_dev *pdev = device->pdev;
	u32 chanerr;
	u16 dev_id;
	int err;

	ioat2_quiesce(chan, msecs_to_jiffies(100));

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

	/* clear any pending errors */
	err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
	if (err) {
		dev_err(&pdev->dev, "channel error register unreachable\n");
		return err;
	}
	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
	 * (workaround for spurious config parity error after restart)
	 */
	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);

	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
}

static bool is_jf_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
		return true;
	default:
		return false;
	}
}

static bool is_snb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
		return true;
	default:
		return false;
	}
}

static bool is_ivb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
		return true;
	default:
		return false;
	}
}

int ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	int dca_en = system_has_dca_enabled(pdev);
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	bool is_raid_device = false;
	int err;
	u32 cap;

	device->enumerate_channels = ioat2_enumerate_channels;
	device->reset_hw = ioat3_reset_hw;
	device->self_test = ioat3_dma_self_test;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;

	/* 64-byte (1 << 6) copy alignment workaround for these platforms */
	if (is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev))
		dma->copy_align = 6;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;

	cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);

	/* dca is incompatible with raid operations */
	if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
		cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);

	if (cap & IOAT_CAP_XOR) {
		is_raid_device = true;
		dma->max_xor = 8;

		dma_cap_set(DMA_XOR, dma->cap_mask);
		dma->device_prep_dma_xor = ioat3_prep_xor;

		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
		dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
	}
	if (cap & IOAT_CAP_PQ) {
		is_raid_device = true;
		dma_set_maxpq(dma, 8, 0);

		dma_cap_set(DMA_PQ, dma->cap_mask);
		dma->device_prep_dma_pq = ioat3_prep_pq;

		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
		dma->device_prep_dma_pq_val = ioat3_prep_pq_val;

		if (!(cap & IOAT_CAP_XOR)) {
			dma->max_xor = 8;

			dma_cap_set(DMA_XOR, dma->cap_mask);
			dma->device_prep_dma_xor = ioat3_prep_pqxor;

			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
			dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
		}
	}
	if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) {
		dma_cap_set(DMA_MEMSET, dma->cap_mask);
		dma->device_prep_dma_memset = ioat3_prep_memset_lock;
	}

	dma->device_tx_status = ioat3_tx_status;
	device->cleanup_fn = ioat3_cleanup_event;
	device->timer_fn = ioat3_timer_event;

#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
	dma->device_prep_dma_pq_val = NULL;
#endif

#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
	dma->device_prep_dma_xor_val = NULL;
#endif

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(262144);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	ioat_kobject_add(device, &ioat2_ktype);

	if (dca)
		device->dca = ioat3_dca_init(pdev, device->reg_base);

	return 0;
}