/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * BSD LICENSE
 *
 * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Support routines for v3+ hardware
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include "../dmaengine.h"
#include "registers.h"
#include "hw.h"
#include "dma.h"
#include "dma_v2.h"
/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
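/*
 * Worked example (note added for clarity): the hardware src_cnt field
 * stores two less than the software source count, so a 5-source xor is
 * programmed as src_cnt_to_hw(5) == 3 and read back as
 * src_cnt_to_sw(3) == 5.
 */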
/* provide a lookup table for setting the source address in the base or
 * extended descriptor of an xor or pq descriptor
 */
static const u8 xor_idx_to_desc = 0xe0;
static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc = 0xf8;
static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
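/*
 * Note on the encoding (added for clarity): *_idx_to_desc is a bitmask
 * indexed by software source number.  If bit 'idx' is set the source lives
 * in the extended descriptor (descs[1]), otherwise in the base descriptor
 * (descs[0]); *_idx_to_field[idx] then selects the u64 slot within that
 * descriptor.  For xor, 0xe0 places sources 5-7 in the extended
 * descriptor; for pq, 0xf8 places sources 3-7 there.
 */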
static void ioat3_eh(struct ioat2_dma_chan *ioat);
static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	return raw->field[xor_idx_to_field[idx]];
}
static void xor_set_src(struct ioat_raw_descriptor *descs[2],
			dma_addr_t addr, u32 offset, int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	raw->field[xor_idx_to_field[idx]] = addr + offset;
}
static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	return raw->field[pq_idx_to_field[idx]];
}
static void pq_set_src(struct ioat_raw_descriptor *descs[2],
		       dma_addr_t addr, u32 offset, u8 coef, int idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	raw->field[pq_idx_to_field[idx]] = addr + offset;
	pq->coef[idx] = coef;
}
static bool is_jf_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
		return true;
	default:
		return false;
	}
}
static bool is_snb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
		return true;
	default:
		return false;
	}
}
static bool is_ivb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
		return true;
	default:
		return false;
	}
}
static bool is_hsw_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
		return true;
	default:
		return false;
	}
}
static bool is_xeon_cb32(struct pci_dev *pdev)
{
	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
		is_hsw_ioat(pdev);
}
static bool is_bwd_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
		return true;
	default:
		return false;
	}
}
static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
			    struct ioat_ring_ent *desc, int idx)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct pci_dev *pdev = chan->device->pdev;
	size_t len = desc->len;
	size_t offset = len - desc->hw->size;
	struct dma_async_tx_descriptor *tx = &desc->txd;
	enum dma_ctrl_flags flags = tx->flags;

	switch (desc->hw->ctl_f.op) {
	case IOAT_OP_COPY:
		if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
			ioat_dma_unmap(chan, flags, len, desc->hw);
		break;
	case IOAT_OP_FILL: {
		struct ioat_fill_descriptor *hw = desc->fill;

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
			ioat_unmap(pdev, hw->dst_addr - offset, len,
				   PCI_DMA_FROMDEVICE, flags, 1);
		break;
	}
	case IOAT_OP_XOR_VAL:
	case IOAT_OP_XOR: {
		struct ioat_xor_descriptor *xor = desc->xor;
		struct ioat_ring_ent *ext;
		struct ioat_xor_ext_descriptor *xor_ex = NULL;
		int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[2];
		int i;

		if (src_cnt > 5) {
			ext = ioat2_get_ring_ent(ioat, idx + 1);
			xor_ex = ext->xor_ex;
		}

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *) xor;
			descs[1] = (struct ioat_raw_descriptor *) xor_ex;
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = xor_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* dest is a source in xor validate operations */
			if (xor->ctl_f.op == IOAT_OP_XOR_VAL) {
				ioat_unmap(pdev, xor->dst_addr - offset, len,
					   PCI_DMA_TODEVICE, flags, 1);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
			ioat_unmap(pdev, xor->dst_addr - offset, len,
				   PCI_DMA_FROMDEVICE, flags, 1);
		break;
	}
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ: {
		struct ioat_pq_descriptor *pq = desc->pq;
		struct ioat_ring_ent *ext;
		struct ioat_pq_ext_descriptor *pq_ex = NULL;
		int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[2];
		int i;

		if (src_cnt > 3) {
			ext = ioat2_get_ring_ent(ioat, idx + 1);
			pq_ex = ext->pq_ex;
		}

		/* in the 'continue' case don't unmap the dests as sources */
		if (dmaf_p_disabled_continue(flags))
			src_cnt--;
		else if (dmaf_continue(flags))
			src_cnt -= 3;

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *) pq;
			descs[1] = (struct ioat_raw_descriptor *) pq_ex;
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = pq_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* the dests are sources in pq validate operations */
			if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
				if (!(flags & DMA_PREP_PQ_DISABLE_P))
					ioat_unmap(pdev, pq->p_addr - offset,
						   len, PCI_DMA_TODEVICE, flags, 0);
				if (!(flags & DMA_PREP_PQ_DISABLE_Q))
					ioat_unmap(pdev, pq->q_addr - offset,
						   len, PCI_DMA_TODEVICE, flags, 0);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (!(flags & DMA_PREP_PQ_DISABLE_P))
				ioat_unmap(pdev, pq->p_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
			if (!(flags & DMA_PREP_PQ_DISABLE_Q))
				ioat_unmap(pdev, pq->q_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
		}
		break;
	}
	default:
		dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
			__func__, desc->hw->ctl_f.op);
	}
}
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}
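/*
 * Note (added for clarity): with the lookup tables above, a base xor
 * descriptor carries at most five source addresses and a base pq
 * descriptor at most three; larger source counts spill into the
 * following extended descriptor, which is exactly what the > 5 and > 3
 * checks in desc_has_ext() detect.
 */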
static u64 ioat3_get_current_completion(struct ioat_chan_common *chan)
{
	u64 phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	return phys_complete;
}
static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
				   dma_addr_t *phys_complete)
{
	*phys_complete = ioat3_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}
/**
 * __cleanup - reclaim used descriptors
 * @ioat: channel (ring) to clean
 *
 * The difference from the dma_v2.c __cleanup() is that this routine
 * handles extended descriptors and dma-unmapping raid operations.
 */
static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	int idx = ioat->tail, i;
	u16 active;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	/*
	 * At restart of the channel, the completion address and the
	 * channel status will be 0 due to starting a new chain. Since
	 * it's new chain and the first descriptor "fails", there is
	 * nothing to clean up. We do not want to reap the entire submitted
	 * chain due to this 0 address value and then BUG.
	 */
	if (!phys_complete)
		return;

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		smp_read_barrier_depends();
		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
		desc = ioat2_get_ring_ent(ioat, idx + i);
		dump_desc_dbg(ioat, desc);
		tx = &desc->txd;
		if (tx->cookie) {
			dma_cookie_complete(tx);
			ioat3_dma_unmap(ioat, desc, idx + i);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}
	}
	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat->tail = idx + i;
	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
	chan->last_completion = phys_complete;

	if (active - i == 0) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
	/* 5 microsecond delay per pending descriptor */
	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
	       chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
}
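/*
 * Worked example (note added for clarity): the INTRDELAY write at the end
 * of __cleanup() coalesces completion interrupts in proportion to the
 * remaining work; with 16 descriptors still pending the channel is asked
 * to wait min(5 * 16, IOAT_INTRDELAY_MASK) microseconds, i.e. 80 us
 * unless capped by the mask, before raising the next interrupt.
 */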
static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	spin_lock_bh(&chan->cleanup_lock);

	if (ioat3_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	if (is_ioat_halted(*chan->completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

		if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
			ioat3_eh(ioat);
		}
	}

	spin_unlock_bh(&chan->cleanup_lock);
}
static void ioat3_cleanup_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);

	ioat3_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	ioat2_quiesce(chan, 0);
	if (ioat3_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__ioat2_restart_chan(ioat);
}
static void ioat3_eh(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct pci_dev *pdev = to_pdev(chan);
	struct ioat_dma_descriptor *hw;
	dma_addr_t phys_complete;
	struct ioat_ring_ent *desc;
	u32 err_handled = 0;
	u32 chanerr_int;
	u32 chanerr;

	/* cleanup so tail points to descriptor that caused the error */
	if (ioat3_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

	dev_dbg(to_dev(chan), "%s: error = %x:%x\n",
		__func__, chanerr, chanerr_int);

	desc = ioat2_get_ring_ent(ioat, ioat->tail);
	hw = desc->hw;
	dump_desc_dbg(ioat, desc);

	switch (hw->ctl_f.op) {
	case IOAT_OP_XOR_VAL:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		break;
	case IOAT_OP_PQ_VAL:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
			*desc->result |= SUM_CHECK_Q_RESULT;
			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
		}
		break;
	}

	/* fault on unhandled error or spurious halt */
	if (chanerr ^ err_handled || chanerr == 0) {
		dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
			__func__, chanerr, err_handled);
		BUG();
	}

	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

	/* mark faulting descriptor as complete */
	*chan->completion = desc->txd.phys;

	spin_lock_bh(&ioat->prep_lock);
	ioat3_restart_channel(ioat);
	spin_unlock_bh(&ioat->prep_lock);
}
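/*
 * Note (added for clarity): error recovery in ioat3_eh() proceeds by
 * cleaning up to the faulting descriptor, folding recognised validate
 * errors into the operation's result bitmap, acking the error bits, and
 * then writing the faulting descriptor's address into the completion
 * area so the restarted channel treats it as already complete.
 */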
static void check_active(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	if (ioat2_ring_active(ioat)) {
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	else if (ioat->alloc_order > ioat_get_alloc_order()) {
		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		reshape_ring(ioat, ioat->alloc_order - 1);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}
static void ioat3_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(chan);

	/* when halted due to errors check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		if (test_bit(IOAT_RUN, &chan->state))
			BUG_ON(is_ioat_bug(chanerr));
		else /* we never got off the ground */
			return;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	spin_lock_bh(&chan->cleanup_lock);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);
	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
		spin_lock_bh(&ioat->prep_lock);
		ioat3_restart_channel(ioat);
		spin_unlock_bh(&ioat->prep_lock);
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	} else {
		set_bit(IOAT_COMPLETION_ACK, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	}

	if (ioat2_ring_active(ioat))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	else {
		spin_lock_bh(&ioat->prep_lock);
		check_active(ioat);
		spin_unlock_bh(&ioat->prep_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}
static enum dma_status
ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat3_cleanup(ioat);

	return dma_cookie_status(c, cookie, txstate);
}
static struct dma_async_tx_descriptor *
ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *desc;
	size_t total_len = len;
	struct ioat_fill_descriptor *fill;
	u64 src_data = (0x0101010101010101ULL) * (value & 0xff);
	int num_descs, idx, i;
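	/*
	 * Worked example (note added for clarity): the multiply above
	 * replicates the fill byte across all eight bytes of the
	 * descriptor's source-data field, e.g. value 0xab yields
	 * src_data 0xabababababababab.
	 */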
	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		fill = desc->fill;

		fill->size = xfer_size;
		fill->src_data = src_data;
		fill->dst_addr = dest;
		fill->ctl = 0;
		fill->ctl_f.op = IOAT_OP_FILL;

		len -= xfer_size;
		dest += xfer_size;
		dump_desc_dbg(ioat, desc);
	} while (++i < num_descs);

	desc->txd.flags = flags;
	desc->len = total_len;
	fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	fill->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}
static struct dma_async_tx_descriptor *
__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
		      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
		      size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_xor_descriptor *xor;
	struct ioat_xor_ext_descriptor *xor_ex = NULL;
	struct ioat_dma_descriptor *hw;
	int num_descs, with_ext, idx, i;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;

	BUG_ON(src_cnt < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 5
	 * sources
	 */
	if (src_cnt > 5) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
		int s;

		desc = ioat2_get_ring_ent(ioat, idx + i);
		xor = desc->xor;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor xor_set_src() knows to not write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + 1);
		xor_ex = ext->xor_ex;

		descs[0] = (struct ioat_raw_descriptor *) xor;
		descs[1] = (struct ioat_raw_descriptor *) xor_ex;
		for (s = 0; s < src_cnt; s++)
			xor_set_src(descs, src[s], offset, s);
		xor->size = xfer_size;
		xor->dst_addr = dest + offset;
		xor->ctl = 0;
		xor->ctl_f.op = op;
		xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);

		len -= xfer_size;
		offset += xfer_size;
		dump_desc_dbg(ioat, desc);
	} while ((i += 1 + with_ext) < num_descs);

	/* last xor descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* completion descriptor carries interrupt bit */
	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
	hw = compl_desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	dump_desc_dbg(ioat, compl_desc);

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}
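/*
 * Descriptor accounting example (note added for clarity): in
 * __ioat3_prep_xor_lock() above, more than five sources means each
 * transfer slice occupies a base plus an extended descriptor, so
 * num_descs is doubled and the loop advances by 1 + with_ext; one extra
 * null descriptor is always reserved so the legacy-engine completion
 * write lands after the raid engine's writes.  A single-slice, 7-source
 * xor therefore consumes three ring slots.
 */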
static struct dma_async_tx_descriptor *
ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	       unsigned int src_cnt, size_t len, unsigned long flags)
{
	return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}
struct dma_async_tx_descriptor *
ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		   unsigned int src_cnt, size_t len,
		   enum sum_check_flags *result, unsigned long flags)
{
	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
				     src_cnt - 1, len, flags);
}
static void
dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext)
{
	struct device *dev = to_dev(&ioat->base);
	struct ioat_pq_descriptor *pq = desc->pq;
	struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
	struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
	int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
	int i;

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
		" sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s' src_cnt: %d)\n",
		desc_id(desc), (unsigned long long) desc->txd.phys,
		(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
		desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
		pq->ctl_f.compl_write,
		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
		pq->ctl_f.src_cnt);
	for (i = 0; i < src_cnt; i++)
		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
			(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
	dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
}
static struct dma_async_tx_descriptor *
__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
		     const dma_addr_t *dst, const dma_addr_t *src,
		     unsigned int src_cnt, const unsigned char *scf,
		     size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_pq_descriptor *pq;
	struct ioat_pq_ext_descriptor *pq_ex = NULL;
	struct ioat_dma_descriptor *hw;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
	int i, s, idx, with_ext, num_descs;
	int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;

	dev_dbg(to_dev(chan), "%s\n", __func__);
	/* the engine requires at least two sources (we provide
	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
	 */
	BUG_ON(src_cnt + dmaf_continue(flags) < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 3
	 * sources (we need 1 extra source in the q-only continuation
	 * case and 3 extra sources in the p+q continuation case.
	 */
	if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
	    (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) &&
	    ioat2_check_space_lock(ioat, num_descs + cb32) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		pq = desc->pq;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor pq_set_src() knows to not write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
		pq_ex = ext->pq_ex;

		descs[0] = (struct ioat_raw_descriptor *) pq;
		descs[1] = (struct ioat_raw_descriptor *) pq_ex;

		for (s = 0; s < src_cnt; s++)
			pq_set_src(descs, src[s], offset, scf[s], s);

		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
		if (dmaf_p_disabled_continue(flags))
			pq_set_src(descs, dst[1], offset, 1, s++);
		else if (dmaf_continue(flags)) {
			pq_set_src(descs, dst[0], offset, 0, s++);
			pq_set_src(descs, dst[1], offset, 1, s++);
			pq_set_src(descs, dst[1], offset, 0, s++);
		}
		pq->size = xfer_size;
		pq->p_addr = dst[0] + offset;
		pq->q_addr = dst[1] + offset;
		pq->ctl = 0;
		pq->ctl_f.op = op;
		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

		len -= xfer_size;
		offset += xfer_size;
	} while ((i += 1 + with_ext) < num_descs);

	/* last pq descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	dump_pq_desc_dbg(ioat, desc, ext);

	if (!cb32) {
		pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
		pq->ctl_f.compl_write = 1;
		compl_desc = desc;
	} else {
		/* completion descriptor carries interrupt bit */
		compl_desc = ioat2_get_ring_ent(ioat, idx + i);
		compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
		hw = compl_desc->hw;
		hw->ctl = 0;
		hw->ctl_f.null = 1;
		hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
		hw->ctl_f.compl_write = 1;
		hw->size = NULL_DESC_BUFFER_SIZE;
		dump_desc_dbg(ioat, compl_desc);
	}

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}
static struct dma_async_tx_descriptor *
ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	      unsigned int src_cnt, const unsigned char *scf, size_t len,
	      unsigned long flags)
{
	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		dst[0] = dst[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		dst[1] = dst[0];

	/* handle the single source multiply case from the raid6
	 * recovery path
	 */
	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
		dma_addr_t single_source[2];
		unsigned char single_source_coef[2];

		BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
		single_source[0] = src[0];
		single_source[1] = src[0];
		single_source_coef[0] = scf[0];
		single_source_coef[1] = 0;

		return __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
					    single_source_coef, len, flags);
	} else
		return __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, scf,
					    len, flags);
}
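/*
 * Note (added for clarity): the single-source multiply used by raid6
 * recovery is expressed above as a two-source pq operation because the
 * hardware needs at least two sources; the same buffer is passed twice
 * and the second copy gets coefficient 0, so it contributes nothing to
 * the Q result.
 */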
struct dma_async_tx_descriptor *
ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		  unsigned int src_cnt, const unsigned char *scf, size_t len,
		  enum sum_check_flags *pqres, unsigned long flags)
{
	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		pq[0] = pq[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		pq[1] = pq[0];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*pqres = 0;

	return __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
				    flags);
}
static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		 unsigned int src_cnt, size_t len, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	memset(scf, 0, src_cnt);
	pq[0] = dst;
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = dst; /* specify valid address for disabled result */

	return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
				    flags);
}
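/*
 * Note (added for clarity): when the hardware lacks a dedicated xor
 * engine this helper emulates DMA_XOR with the pq engine by disabling
 * the Q result and zeroing every coefficient, so only the plain parity
 * (xor) of the sources is written to dst via the P address.
 */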
struct dma_async_tx_descriptor *
ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		     unsigned int src_cnt, size_t len,
		     enum sum_check_flags *result, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	memset(scf, 0, src_cnt);
	pq[0] = src[0];
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = pq[0]; /* specify valid address for disabled result */

	return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
				    len, flags);
}
static struct dma_async_tx_descriptor *
ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat2_check_space_lock(ioat, 1) == 0)
		desc = ioat2_get_ring_ent(ioat, ioat->head);
	else
		return NULL;

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;

	desc->txd.flags = flags;
	desc->len = 1;

	dump_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}
static void ioat3_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}
#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int ioat_xor_val_self_test(struct ioatdma_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_addr, dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 xor_val_result;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 op = 0;

	dev_dbg(dev, "%s\n", __func__);

	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
		return 0;

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}
	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);

		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;
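	/*
	 * Worked example (note added for clarity): with IOAT_NUM_SRC_TEST
	 * == 6, source page n is filled with the byte (1 << n), so the
	 * xor of all sources is cmp_byte = 0x3f and every 32-bit word of
	 * the destination should read back as cmp_word = 0x3f3f3f3f.
	 */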
	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	op = IOAT_OP_XOR;

	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
				      DMA_PREP_INTERRUPT |
				      DMA_COMPL_SKIP_SRC_UNMAP |
				      DMA_COMPL_SKIP_DEST_UNMAP);
	if (!tx) {
		dev_err(dev, "Self-test xor prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test xor setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test xor timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);

		if (ptr[i] != cmp_word) {
			dev_err(dev, "Self-test xor failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	/* skip validate if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;

	op = IOAT_OP_XOR_VAL;

	/* validate the sources with the destination page */
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		xor_val_srcs[i] = xor_srcs[i];
	xor_val_srcs[i] = dest;

	xor_val_result = 1;

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT |
					  DMA_COMPL_SKIP_SRC_UNMAP |
					  DMA_COMPL_SKIP_DEST_UNMAP);
	if (!tx) {
		dev_err(dev, "Self-test zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	if (xor_val_result != 0) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}
	/* skip memset if the capability is not present */
	if (!dma_has_cap(DMA_MEMSET, dma_chan->device->cap_mask))
		goto free_resources;

	/* test memset */
	op = IOAT_OP_FILL;

	dma_addr = dma_map_page(dev, dest, 0,
			PAGE_SIZE, DMA_FROM_DEVICE);
	tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
					 DMA_PREP_INTERRUPT |
					 DMA_COMPL_SKIP_SRC_UNMAP |
					 DMA_COMPL_SKIP_DEST_UNMAP);
	if (!tx) {
		dev_err(dev, "Self-test memset prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test memset setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test memset timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
		u32 *ptr = page_address(dest);

		if (ptr[i]) {
			dev_err(dev, "Self-test memset failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	/* test for non-zero parity sum */
	op = IOAT_OP_XOR_VAL;

	xor_val_result = 0;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT |
					  DMA_COMPL_SKIP_SRC_UNMAP |
					  DMA_COMPL_SKIP_DEST_UNMAP);
	if (!tx) {
		dev_err(dev, "Self-test 2nd zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test 2nd zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test 2nd validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	if (xor_val_result != SUM_CHECK_P_RESULT) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	goto free_resources;
dma_unmap:
	if (op == IOAT_OP_XOR) {
		dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
				       DMA_TO_DEVICE);
	} else if (op == IOAT_OP_XOR_VAL) {
		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
				       DMA_TO_DEVICE);
	} else if (op == IOAT_OP_FILL)
		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	src_idx = IOAT_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}
static int ioat3_dma_self_test(struct ioatdma_device *device)
{
	int rc = ioat_dma_self_test(device);

	if (rc)
		return rc;

	rc = ioat_xor_val_self_test(device);
	if (rc)
		return rc;

	return 0;
}
static int ioat3_irq_reinit(struct ioatdma_device *device)
{
	int msixcnt = device->common.chancnt;
	struct pci_dev *pdev = device->pdev;
	int i;
	struct msix_entry *msix;
	struct ioat_chan_common *chan;
	int err = 0;

	switch (device->irq_mode) {
	case IOAT_MSIX:
		for (i = 0; i < msixcnt; i++) {
			msix = &device->msix_entries[i];
			chan = ioat_chan_by_index(device, i);
			devm_free_irq(&pdev->dev, msix->vector, chan);
		}

		pci_disable_msix(pdev);
		break;

	case IOAT_MSIX_SINGLE:
		msix = &device->msix_entries[0];
		chan = ioat_chan_by_index(device, 0);
		devm_free_irq(&pdev->dev, msix->vector, chan);
		pci_disable_msix(pdev);
		break;

	case IOAT_MSI:
		chan = ioat_chan_by_index(device, 0);
		devm_free_irq(&pdev->dev, pdev->irq, chan);
		pci_disable_msi(pdev);
		break;

	case IOAT_INTX:
		chan = ioat_chan_by_index(device, 0);
		devm_free_irq(&pdev->dev, pdev->irq, chan);
		break;

	default:
		return 0;
	}

	device->irq_mode = IOAT_NOIRQ;

	err = ioat_dma_setup_interrupts(device);

	return err;
}
static int ioat3_reset_hw(struct ioat_chan_common *chan)
{
	/* throw away whatever the channel was doing and get it
	 * initialized, with ioat3 specific workarounds
	 */
	struct ioatdma_device *device = chan->device;
	struct pci_dev *pdev = device->pdev;
	u32 chanerr;
	u16 dev_id;
	int err;

	ioat2_quiesce(chan, msecs_to_jiffies(100));

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

	if (device->version < IOAT_VER_3_3) {
		/* clear any pending errors */
		err = pci_read_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
		if (err) {
			dev_err(&pdev->dev,
				"channel error register unreachable\n");
			return err;
		}
		pci_write_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
		 * (workaround for spurious config parity error after restart)
		 */
		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
			pci_write_config_dword(pdev,
					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
					       0x10);
		}
	}

	err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
	if (err) {
		dev_err(&pdev->dev, "Failed to reset!\n");
		return err;
	}

	if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev))
		err = ioat3_irq_reinit(device);

	return err;
}
int ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	int dca_en = system_has_dca_enabled(pdev);
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	bool is_raid_device = false;
	int err;
	u32 cap;

	device->enumerate_channels = ioat2_enumerate_channels;
	device->reset_hw = ioat3_reset_hw;
	device->self_test = ioat3_dma_self_test;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;

	if (is_xeon_cb32(pdev))
		dma->copy_align = 6;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;

	cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);

	/* dca is incompatible with raid operations */
	if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
		cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);

	if (cap & IOAT_CAP_XOR) {
		is_raid_device = true;
		dma->max_xor = 8;
		dma->xor_align = 6;

		dma_cap_set(DMA_XOR, dma->cap_mask);
		dma->device_prep_dma_xor = ioat3_prep_xor;

		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
		dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
	}

	if (cap & IOAT_CAP_PQ) {
		is_raid_device = true;
		dma_set_maxpq(dma, 8, 0);
		if (is_xeon_cb32(pdev))
			dma->pq_align = 6;
		else
			dma->pq_align = 0;

		dma_cap_set(DMA_PQ, dma->cap_mask);
		dma->device_prep_dma_pq = ioat3_prep_pq;

		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
		dma->device_prep_dma_pq_val = ioat3_prep_pq_val;

		if (!(cap & IOAT_CAP_XOR)) {
			dma->max_xor = 8;
			if (is_xeon_cb32(pdev))
				dma->xor_align = 6;
			else
				dma->xor_align = 0;

			dma_cap_set(DMA_XOR, dma->cap_mask);
			dma->device_prep_dma_xor = ioat3_prep_pqxor;

			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
			dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
		}
	}

	if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) {
		dma_cap_set(DMA_MEMSET, dma->cap_mask);
		dma->device_prep_dma_memset = ioat3_prep_memset_lock;
	}

	dma->device_tx_status = ioat3_tx_status;
	device->cleanup_fn = ioat3_cleanup_event;
	device->timer_fn = ioat3_timer_event;

	if (is_xeon_cb32(pdev)) {
		dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
		dma->device_prep_dma_xor_val = NULL;

		dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
		dma->device_prep_dma_pq_val = NULL;
	}

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(262144);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	ioat_kobject_add(device, &ioat2_ktype);

	if (dca)
		device->dca = ioat3_dca_init(pdev, device->reg_base);

	return 0;
}