/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"
static void ioat_eh(struct ioatdma_chan *ioat_chan);
/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioatdma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &ioat_chan->state))
			tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}
/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioatdma_chan *ioat_chan = data;

	if (test_bit(IOAT_RUN, &ioat_chan->state))
		tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}
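
/*
 * ioat_stop - quiesce all channel activity (irq, timer, tasklet) so the
 * channel can be torn down without a cleanup run re-arming itself.
 */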
void ioat_stop(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	int chan_id = chan_num(ioat_chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &ioat_chan->state);

	/* flush inflight interrupts */
	switch (ioat_dma->irq_mode) {
	case IOAT_MSIX:
		msix = &ioat_dma->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&ioat_chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&ioat_chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
}
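
/*
 * __ioat_issue_pending - notify hardware of newly appended descriptors by
 * writing the updated count to the DMACOUNT register; caller holds prep_lock.
 */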
static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
{
	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
	ioat_chan->issued = ioat_chan->head;
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);
}
void ioat_issue_pending(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	if (ioat_ring_pending(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		__ioat_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
	}
}
/**
 * ioat_update_pending - log pending descriptors
 * @ioat_chan: ioat channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark.  Called with prep_lock held
 */
static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
		__ioat_issue_pending(ioat_chan);
}
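
/*
 * Program a NULL descriptor to (re)start the channel: point CHAINADDR at a
 * harmless no-op descriptor and bump the ring head so the engine has
 * something valid to fetch.
 */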
static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat_ring_space(ioat_chan) < 1) {
		dev_err(to_dev(ioat_chan),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
	dump_desc_dbg(ioat_chan, desc);
	/* make sure descriptors are written before we submit */
	wmb();
	ioat_chan->head += 1;
	__ioat_issue_pending(ioat_chan);
}
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		__ioat_start_null_desc(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}
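
/*
 * __ioat_restart_chan - resume after a reset/halt: re-issue everything from
 * the current tail, or kick a NULL descriptor if nothing is pending.
 */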
static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
{
	/* set the tail to be re-issued */
	ioat_chan->issued = ioat_chan->tail;
	ioat_chan->dmacount = 0;
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);

	if (ioat_ring_pending(ioat_chan)) {
		struct ioat_ring_ent *desc;

		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
		__ioat_issue_pending(ioat_chan);
	} else
		__ioat_start_null_desc(ioat_chan);
}
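
/*
 * ioat_quiesce - suspend the channel and poll until it leaves the
 * active/idle states, or until the (optional) timeout expires.
 */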
static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u32 status;

	status = ioat_chansts(ioat_chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat_chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(ioat_chan);
		cpu_relax();
	}

	return err;
}
static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(ioat_chan);
	while (ioat_reset_pending(ioat_chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		cpu_relax();
	}

	return err;
}
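
/*
 * Called via tx->tx_submit with prep_lock held (taken in
 * ioat_check_space_lock); assigns the cookie, publishes the descriptors to
 * the ring, and drops prep_lock.
 */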
static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
	__releases(&ioat_chan->prep_lock)
{
	struct dma_chan *c = tx->chan;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor updates visible before advancing ioat->head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();

	ioat_chan->head += ioat_chan->produce;

	ioat_update_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);

	return cookie;
}
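
/*
 * Descriptor ring entries live in pre-allocated 2MB coherent chunks; this
 * carves out the hardware descriptor for @idx and pairs it with a software
 * ring entry from ioat_cache.
 */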
static struct ioat_ring_ent *
ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *ioat_dma;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
	int chunk;
	dma_addr_t phys;
	u8 *pos;
	off_t offs;

	ioat_dma = to_ioatdma_device(chan->device);

	chunk = idx / IOAT_DESCS_PER_2M;
	idx &= (IOAT_DESCS_PER_2M - 1);
	offs = idx * IOAT_DESC_SZ;
	pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
	phys = ioat_chan->descs[chunk].hw + offs;
	hw = (struct ioat_dma_descriptor *)pos;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_zalloc(ioat_cache, flags);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;

	return desc;
}
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	kmem_cache_free(ioat_cache, desc);
}
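
/*
 * ioat_alloc_ring - allocate the software ring array plus the 2MB coherent
 * chunks that back the hardware descriptors, then link every descriptor's
 * "next" pointer into a circular chain.
 */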
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent **ring;
	int total_descs = 1 << order;
	int i, chunks;

	/* allocate the array to hold the software ring */
	ring = kcalloc(total_descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;

	for (i = 0; i < chunks; i++) {
		struct ioat_descs *descs = &ioat_chan->descs[i];

		descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
						 SZ_2M, &descs->hw, flags);
		if (!descs->virt && (i > 0)) {
			int idx;

			/* unwind the chunks allocated so far */
			for (idx = 0; idx < i; idx++) {
				dma_free_coherent(to_dev(ioat_chan), SZ_2M,
						  ioat_chan->descs[idx].virt,
						  ioat_chan->descs[idx].hw);
				ioat_chan->descs[idx].virt = NULL;
				ioat_chan->descs[idx].hw = 0;
			}

			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
	}

	for (i = 0; i < total_descs; i++) {
		ring[i] = ioat_alloc_ring_ent(c, i, flags);
		if (!ring[i]) {
			int idx;

			while (i--)
				ioat_free_ring_ent(ring[i], c);

			for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
				dma_free_coherent(to_dev(ioat_chan),
						  SZ_2M,
						  ioat_chan->descs[idx].virt,
						  ioat_chan->descs[idx].hw);
				ioat_chan->descs[idx].virt = NULL;
				ioat_chan->descs[idx].hw = 0;
			}

			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < total_descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	return ring;
}
/**
 * ioat_check_space_lock - verify space and grab ring producer lock
 * @ioat_chan: ioat channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
	__acquires(&ioat_chan->prep_lock)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat_chan->head,
			ioat_chan->tail, ioat_chan->issued);
		ioat_chan->produce = num_descs;
		return 0;  /* with ioat->prep_lock held */
	}
	spin_unlock_bh(&ioat_chan->prep_lock);

	dev_dbg_ratelimited(to_dev(ioat_chan),
			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			    __func__, num_descs, ioat_chan->head,
			    ioat_chan->tail, ioat_chan->issued);

	/* progress reclaim in the allocation failure case we may be
	 * called under bh_disabled so we need to trigger the timer
	 * event directly
	 */
	if (time_is_before_jiffies(ioat_chan->timer.expires)
	    && timer_pending(&ioat_chan->timer)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		ioat_timer_event((unsigned long)ioat_chan);
	}

	return -ENOMEM;
}
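
/*
 * XOR and PQ operations with many sources spill into a second (extended)
 * descriptor slot; cleanup must skip that slot rather than treat it as an
 * independent transaction.
 */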
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}
static void
ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
{
	if (!sed)
		return;

	dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
	kmem_cache_free(ioat_sed_cache, sed);
}
static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;
	u64 completion;

	completion = *ioat_chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	return phys_complete;
}
static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
				  u64 *phys_complete)
{
	*phys_complete = ioat_get_current_completion(ioat_chan);
	if (*phys_complete == ioat_chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}
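
/*
 * Harvest the descriptor write-back error status (DWBES) for P/Q validate
 * operations and fold it into the caller-visible result.
 */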
static void
desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	switch (hw->ctl_f.op) {
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
	{
		struct ioat_pq_descriptor *pq = desc->pq;

		/* check if there's error written */
		if (!pq->dwbes_f.wbes)
			return;

		/* need to set a chanerr var for checking to clear later */

		if (pq->dwbes_f.p_val_err)
			*desc->result |= SUM_CHECK_P_RESULT;

		if (pq->dwbes_f.q_val_err)
			*desc->result |= SUM_CHECK_Q_RESULT;

		return;
	}
	default:
		return;
	}
}
/**
 * __cleanup - reclaim used descriptors
 * @ioat_chan: channel (ring) to clean
 * @phys_complete: hardware-reported completion address
 */
static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	int idx = ioat_chan->tail, i;
	u16 active;

	dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);

	/*
	 * At restart of the channel, the completion address and the
	 * channel status will be 0 due to starting a new chain. Since
	 * it's new chain and the first descriptor "fails", there is
	 * nothing to clean up. We do not want to reap the entire submitted
	 * chain due to this 0 address value and then BUG.
	 */
	if (!phys_complete)
		return;

	active = ioat_ring_active(ioat_chan);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		smp_read_barrier_depends();
		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		dump_desc_dbg(ioat_chan, desc);

		/* set err stat if we are using dwbes */
		if (ioat_dma->cap & IOAT_CAP_DWBES)
			desc_get_errstat(ioat_chan, desc);

		tx = &desc->txd;
		if (tx->cookie) {
			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	/* finish all descriptor reads before incrementing tail */
	smp_mb();
	ioat_chan->tail = idx + i;
	/* no active descs have written a completion? */
	BUG_ON(active && !seen_current);
	ioat_chan->last_completion = phys_complete;

	if (active - i == 0) {
		dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
			__func__);
		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
	}

	/* 5 microsecond delay per pending descriptor */
	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
	       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
}
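
/*
 * ioat_cleanup - reap completed descriptors and, if the channel has halted
 * on a handled error class, hand off to the error handler.
 */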
static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	spin_lock_bh(&ioat_chan->cleanup_lock);

	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	if (is_ioat_halted(*ioat_chan->completion)) {
		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
			ioat_eh(ioat_chan);
		}
	}

	spin_unlock_bh(&ioat_chan->cleanup_lock);
}
void ioat_cleanup_event(unsigned long data)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);

	ioat_cleanup(ioat_chan);
	if (!test_bit(IOAT_RUN, &ioat_chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
}
static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	ioat_quiesce(ioat_chan, 0);
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	__ioat_restart_chan(ioat_chan);
}
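
/*
 * ioat_eh - handle a halted channel: decode CHANERR, complete validate
 * operations whose "errors" are really check results, and restart the ring
 * at the descriptor after the fault. Unhandled errors are fatal.
 */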
static void ioat_eh(struct ioatdma_chan *ioat_chan)
{
	struct pci_dev *pdev = to_pdev(ioat_chan);
	struct ioat_dma_descriptor *hw;
	struct dma_async_tx_descriptor *tx;
	u64 phys_complete;
	struct ioat_ring_ent *desc;
	u32 err_handled = 0;
	u32 chanerr_int;
	u32 chanerr;

	/* cleanup so tail points to descriptor that caused the error */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
		__func__, chanerr, chanerr_int);

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	hw = desc->hw;
	dump_desc_dbg(ioat_chan, desc);

	switch (hw->ctl_f.op) {
	case IOAT_OP_XOR_VAL:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		break;
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
			*desc->result |= SUM_CHECK_Q_RESULT;
			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
		}
		break;
	}

	/* fault on unhandled error or spurious halt */
	if (chanerr ^ err_handled || chanerr == 0) {
		dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
			__func__, chanerr, err_handled);
		BUG();
	} else { /* cleanup the faulty descriptor */
		tx = &desc->txd;
		if (tx->cookie) {
			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}
	}

	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

	/* mark faulting descriptor as complete */
	*ioat_chan->completion = desc->txd.phys;

	spin_lock_bh(&ioat_chan->prep_lock);
	ioat_restart_channel(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}
static void check_active(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_active(ioat_chan)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
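
/*
 * ioat_timer_event - watchdog for the channel: reaps stalled completions,
 * detects halted channels, and forces a restart when no progress has been
 * made since the last acknowledged completion.
 */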
void ioat_timer_event(unsigned long data)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(ioat_chan);

	/* when halted due to errors check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		if (test_bit(IOAT_RUN, &ioat_chan->state))
			BUG_ON(is_ioat_bug(chanerr));
		else /* we never got off the ground */
			return;
	}

	spin_lock_bh(&ioat_chan->cleanup_lock);

	/* handle the no-actives case */
	if (!ioat_ring_active(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		check_active(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);
	else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
		dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
			 status, chanerr);
		dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n",
			 ioat_ring_active(ioat_chan));

		spin_lock_bh(&ioat_chan->prep_lock);
		ioat_restart_channel(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	} else
		set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);

	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
	spin_unlock_bh(&ioat_chan->cleanup_lock);
}
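
/*
 * ioat_tx_status - dmaengine tx_status hook: report cookie state, running a
 * cleanup pass first if the descriptor has not yet been reaped.
 */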
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	       struct dma_tx_state *txstate)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	ioat_cleanup(ioat_chan);

	return dma_cookie_status(c, cookie, txstate);
}
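
/*
 * ioat_reset_hw - full channel reset with the IOAT v3 errata workarounds
 * (clearing stale CHANERR state and preserving the MSI-X shadow registers
 * on parts flagged by is_bwd_ioat() across the reset).
 */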
int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
{
	/* throw away whatever the channel was doing and get it
	 * initialized, with ioat3 specific workarounds
	 */
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	u32 chanerr;
	u16 dev_id;
	int err;

	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	if (ioat_dma->version < IOAT_VER_3_3) {
		/* clear any pending errors */
		err = pci_read_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
		if (err) {
			dev_err(&pdev->dev,
				"channel error register unreachable\n");
			return err;
		}
		pci_write_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
		 * (workaround for spurious config parity error after restart)
		 */
		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
			pci_write_config_dword(pdev,
					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
					       0x10);
		}
	}

	if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
		ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
		ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
		ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
	}

	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
	if (!err) {
		if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
			writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
			writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
			writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
		}
	}

	if (err)
		dev_err(&pdev->dev, "Failed to reset: %d\n", err);

	return err;
}