2 * Copyright(c) 2015, 2016 Intel Corporation.
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 #include <linux/spinlock.h>
49 #include <linux/seqlock.h>
50 #include <linux/netdevice.h>
51 #include <linux/moduleparam.h>
52 #include <linux/bitops.h>
53 #include <linux/timer.h>
54 #include <linux/vmalloc.h>
55 #include <linux/highmem.h>
64 /* must be a power of 2 >= 64 <= 32768 */
65 #define SDMA_DESCQ_CNT 2048
66 #define SDMA_DESC_INTR 64
67 #define INVALID_TAIL 0xffff
69 static uint sdma_descq_cnt
= SDMA_DESCQ_CNT
;
70 module_param(sdma_descq_cnt
, uint
, S_IRUGO
);
71 MODULE_PARM_DESC(sdma_descq_cnt
, "Number of SDMA descq entries");
73 static uint sdma_idle_cnt
= 250;
74 module_param(sdma_idle_cnt
, uint
, S_IRUGO
);
75 MODULE_PARM_DESC(sdma_idle_cnt
, "sdma interrupt idle delay (ns,default 250)");
78 module_param_named(num_sdma
, mod_num_sdma
, uint
, S_IRUGO
);
79 MODULE_PARM_DESC(num_sdma
, "Set max number SDMA engines to use");
81 static uint sdma_desct_intr
= SDMA_DESC_INTR
;
82 module_param_named(desct_intr
, sdma_desct_intr
, uint
, S_IRUGO
| S_IWUSR
);
83 MODULE_PARM_DESC(desct_intr
, "Number of SDMA descriptor before interrupt");
85 #define SDMA_WAIT_BATCH_SIZE 20
86 /* max wait time for a SDMA engine to indicate it has halted */
87 #define SDMA_ERR_HALT_TIMEOUT 10 /* ms */
88 /* all SDMA engine errors that cause a halt */
90 #define SD(name) SEND_DMA_##name
91 #define ALL_SDMA_ENG_HALT_ERRS \
92 (SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
93 | SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
94 | SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
95 | SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
96 | SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
97 | SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
98 | SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
99 | SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
100 | SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
101 | SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
102 | SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
103 | SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
104 | SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
105 | SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
106 | SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
107 | SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
108 | SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
109 | SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))
111 /* sdma_sendctrl operations */
112 #define SDMA_SENDCTRL_OP_ENABLE BIT(0)
113 #define SDMA_SENDCTRL_OP_INTENABLE BIT(1)
114 #define SDMA_SENDCTRL_OP_HALT BIT(2)
115 #define SDMA_SENDCTRL_OP_CLEANUP BIT(3)
117 /* handle long defines */
118 #define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
119 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
120 #define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
121 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT
123 static const char * const sdma_state_names
[] = {
124 [sdma_state_s00_hw_down
] = "s00_HwDown",
125 [sdma_state_s10_hw_start_up_halt_wait
] = "s10_HwStartUpHaltWait",
126 [sdma_state_s15_hw_start_up_clean_wait
] = "s15_HwStartUpCleanWait",
127 [sdma_state_s20_idle
] = "s20_Idle",
128 [sdma_state_s30_sw_clean_up_wait
] = "s30_SwCleanUpWait",
129 [sdma_state_s40_hw_clean_up_wait
] = "s40_HwCleanUpWait",
130 [sdma_state_s50_hw_halt_wait
] = "s50_HwHaltWait",
131 [sdma_state_s60_idle_halt_wait
] = "s60_IdleHaltWait",
132 [sdma_state_s80_hw_freeze
] = "s80_HwFreeze",
133 [sdma_state_s82_freeze_sw_clean
] = "s82_FreezeSwClean",
134 [sdma_state_s99_running
] = "s99_Running",
137 #ifdef CONFIG_SDMA_VERBOSITY
138 static const char * const sdma_event_names
[] = {
139 [sdma_event_e00_go_hw_down
] = "e00_GoHwDown",
140 [sdma_event_e10_go_hw_start
] = "e10_GoHwStart",
141 [sdma_event_e15_hw_halt_done
] = "e15_HwHaltDone",
142 [sdma_event_e25_hw_clean_up_done
] = "e25_HwCleanUpDone",
143 [sdma_event_e30_go_running
] = "e30_GoRunning",
144 [sdma_event_e40_sw_cleaned
] = "e40_SwCleaned",
145 [sdma_event_e50_hw_cleaned
] = "e50_HwCleaned",
146 [sdma_event_e60_hw_halted
] = "e60_HwHalted",
147 [sdma_event_e70_go_idle
] = "e70_GoIdle",
148 [sdma_event_e80_hw_freeze
] = "e80_HwFreeze",
149 [sdma_event_e81_hw_frozen
] = "e81_HwFrozen",
150 [sdma_event_e82_hw_unfreeze
] = "e82_HwUnfreeze",
151 [sdma_event_e85_link_down
] = "e85_LinkDown",
152 [sdma_event_e90_sw_halted
] = "e90_SwHalted",
156 static const struct sdma_set_state_action sdma_action_table
[] = {
157 [sdma_state_s00_hw_down
] = {
158 .go_s99_running_tofalse
= 1,
164 [sdma_state_s10_hw_start_up_halt_wait
] = {
170 [sdma_state_s15_hw_start_up_clean_wait
] = {
176 [sdma_state_s20_idle
] = {
182 [sdma_state_s30_sw_clean_up_wait
] = {
188 [sdma_state_s40_hw_clean_up_wait
] = {
194 [sdma_state_s50_hw_halt_wait
] = {
200 [sdma_state_s60_idle_halt_wait
] = {
201 .go_s99_running_tofalse
= 1,
207 [sdma_state_s80_hw_freeze
] = {
213 [sdma_state_s82_freeze_sw_clean
] = {
219 [sdma_state_s99_running
] = {
224 .go_s99_running_totrue
= 1,
228 #define SDMA_TAIL_UPDATE_THRESH 0x1F
230 /* declare all statics here rather than keep sorting */
231 static void sdma_complete(struct kref
*);
232 static void sdma_finalput(struct sdma_state
*);
233 static void sdma_get(struct sdma_state
*);
234 static void sdma_hw_clean_up_task(unsigned long);
235 static void sdma_put(struct sdma_state
*);
236 static void sdma_set_state(struct sdma_engine
*, enum sdma_states
);
237 static void sdma_start_hw_clean_up(struct sdma_engine
*);
238 static void sdma_sw_clean_up_task(unsigned long);
239 static void sdma_sendctrl(struct sdma_engine
*, unsigned);
240 static void init_sdma_regs(struct sdma_engine
*, u32
, uint
);
241 static void sdma_process_event(
242 struct sdma_engine
*sde
,
243 enum sdma_events event
);
244 static void __sdma_process_event(
245 struct sdma_engine
*sde
,
246 enum sdma_events event
);
247 static void dump_sdma_state(struct sdma_engine
*sde
);
248 static void sdma_make_progress(struct sdma_engine
*sde
, u64 status
);
249 static void sdma_desc_avail(struct sdma_engine
*sde
, unsigned avail
);
250 static void sdma_flush_descq(struct sdma_engine
*sde
);
253 * sdma_state_name() - return state string from enum
256 static const char *sdma_state_name(enum sdma_states state
)
258 return sdma_state_names
[state
];
261 static void sdma_get(struct sdma_state
*ss
)
266 static void sdma_complete(struct kref
*kref
)
268 struct sdma_state
*ss
=
269 container_of(kref
, struct sdma_state
, kref
);
274 static void sdma_put(struct sdma_state
*ss
)
276 kref_put(&ss
->kref
, sdma_complete
);
279 static void sdma_finalput(struct sdma_state
*ss
)
282 wait_for_completion(&ss
->comp
);
285 static inline void write_sde_csr(
286 struct sdma_engine
*sde
,
290 write_kctxt_csr(sde
->dd
, sde
->this_idx
, offset0
, value
);
293 static inline u64
read_sde_csr(
294 struct sdma_engine
*sde
,
297 return read_kctxt_csr(sde
->dd
, sde
->this_idx
, offset0
);
301 * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
302 * sdma engine 'sde' to drop to 0.
304 static void sdma_wait_for_packet_egress(struct sdma_engine
*sde
,
307 u64 off
= 8 * sde
->this_idx
;
308 struct hfi1_devdata
*dd
= sde
->dd
;
315 reg
= read_csr(dd
, off
+ SEND_EGRESS_SEND_DMA_STATUS
);
317 reg
&= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
;
318 reg
>>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT
;
321 /* counter is reest if accupancy count changes */
325 /* timed out - bounce the link */
326 dd_dev_err(dd
, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
327 __func__
, sde
->this_idx
, (u32
)reg
);
328 queue_work(dd
->pport
->hfi1_wq
,
329 &dd
->pport
->link_bounce_work
);
337 * sdma_wait() - wait for packet egress to complete for all SDMA engines,
338 * and pause for credit return.
340 void sdma_wait(struct hfi1_devdata
*dd
)
344 for (i
= 0; i
< dd
->num_sdma
; i
++) {
345 struct sdma_engine
*sde
= &dd
->per_sdma
[i
];
347 sdma_wait_for_packet_egress(sde
, 0);
351 static inline void sdma_set_desc_cnt(struct sdma_engine
*sde
, unsigned cnt
)
355 if (!(sde
->dd
->flags
& HFI1_HAS_SDMA_TIMEOUT
))
358 reg
&= SD(DESC_CNT_CNT_MASK
);
359 reg
<<= SD(DESC_CNT_CNT_SHIFT
);
360 write_sde_csr(sde
, SD(DESC_CNT
), reg
);
363 static inline void complete_tx(struct sdma_engine
*sde
,
364 struct sdma_txreq
*tx
,
367 /* protect against complete modifying */
368 struct iowait
*wait
= tx
->wait
;
369 callback_t complete
= tx
->complete
;
371 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
372 trace_hfi1_sdma_out_sn(sde
, tx
->sn
);
373 if (WARN_ON_ONCE(sde
->head_sn
!= tx
->sn
))
374 dd_dev_err(sde
->dd
, "expected %llu got %llu\n",
375 sde
->head_sn
, tx
->sn
);
378 sdma_txclean(sde
->dd
, tx
);
380 (*complete
)(tx
, res
);
381 if (wait
&& iowait_sdma_dec(wait
))
382 iowait_drain_wakeup(wait
);
386 * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status
388 * Depending on timing there can be txreqs in two places:
389 * - in the descq ring
390 * - in the flush list
392 * To avoid ordering issues the descq ring needs to be flushed
393 * first followed by the flush list.
395 * This routine is called from two places
396 * - From a work queue item
397 * - Directly from the state machine just before setting the
400 * Must be called with head_lock held
403 static void sdma_flush(struct sdma_engine
*sde
)
405 struct sdma_txreq
*txp
, *txp_next
;
406 LIST_HEAD(flushlist
);
409 /* flush from head to tail */
410 sdma_flush_descq(sde
);
411 spin_lock_irqsave(&sde
->flushlist_lock
, flags
);
412 /* copy flush list */
413 list_for_each_entry_safe(txp
, txp_next
, &sde
->flushlist
, list
) {
414 list_del_init(&txp
->list
);
415 list_add_tail(&txp
->list
, &flushlist
);
417 spin_unlock_irqrestore(&sde
->flushlist_lock
, flags
);
418 /* flush from flush list */
419 list_for_each_entry_safe(txp
, txp_next
, &flushlist
, list
)
420 complete_tx(sde
, txp
, SDMA_TXREQ_S_ABORTED
);
424 * Fields a work request for flushing the descq ring
427 * If the engine has been brought to running during
428 * the scheduling delay, the flush is ignored, assuming
429 * that the process of bringing the engine to running
430 * would have done this flush prior to going to running.
433 static void sdma_field_flush(struct work_struct
*work
)
436 struct sdma_engine
*sde
=
437 container_of(work
, struct sdma_engine
, flush_worker
);
439 write_seqlock_irqsave(&sde
->head_lock
, flags
);
440 if (!__sdma_running(sde
))
442 write_sequnlock_irqrestore(&sde
->head_lock
, flags
);
445 static void sdma_err_halt_wait(struct work_struct
*work
)
447 struct sdma_engine
*sde
= container_of(work
, struct sdma_engine
,
450 unsigned long timeout
;
452 timeout
= jiffies
+ msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT
);
454 statuscsr
= read_sde_csr(sde
, SD(STATUS
));
455 statuscsr
&= SD(STATUS_ENG_HALTED_SMASK
);
458 if (time_after(jiffies
, timeout
)) {
460 "SDMA engine %d - timeout waiting for engine to halt\n",
463 * Continue anyway. This could happen if there was
464 * an uncorrectable error in the wrong spot.
468 usleep_range(80, 120);
471 sdma_process_event(sde
, sdma_event_e15_hw_halt_done
);
474 static void sdma_err_progress_check_schedule(struct sdma_engine
*sde
)
476 if (!is_bx(sde
->dd
) && HFI1_CAP_IS_KSET(SDMA_AHG
)) {
478 struct hfi1_devdata
*dd
= sde
->dd
;
480 for (index
= 0; index
< dd
->num_sdma
; index
++) {
481 struct sdma_engine
*curr_sdma
= &dd
->per_sdma
[index
];
483 if (curr_sdma
!= sde
)
484 curr_sdma
->progress_check_head
=
485 curr_sdma
->descq_head
;
488 "SDMA engine %d - check scheduled\n",
490 mod_timer(&sde
->err_progress_check_timer
, jiffies
+ 10);
494 static void sdma_err_progress_check(unsigned long data
)
497 struct sdma_engine
*sde
= (struct sdma_engine
*)data
;
499 dd_dev_err(sde
->dd
, "SDE progress check event\n");
500 for (index
= 0; index
< sde
->dd
->num_sdma
; index
++) {
501 struct sdma_engine
*curr_sde
= &sde
->dd
->per_sdma
[index
];
504 /* check progress on each engine except the current one */
508 * We must lock interrupts when acquiring sde->lock,
509 * to avoid a deadlock if interrupt triggers and spins on
510 * the same lock on same CPU
512 spin_lock_irqsave(&curr_sde
->tail_lock
, flags
);
513 write_seqlock(&curr_sde
->head_lock
);
515 /* skip non-running queues */
516 if (curr_sde
->state
.current_state
!= sdma_state_s99_running
) {
517 write_sequnlock(&curr_sde
->head_lock
);
518 spin_unlock_irqrestore(&curr_sde
->tail_lock
, flags
);
522 if ((curr_sde
->descq_head
!= curr_sde
->descq_tail
) &&
523 (curr_sde
->descq_head
==
524 curr_sde
->progress_check_head
))
525 __sdma_process_event(curr_sde
,
526 sdma_event_e90_sw_halted
);
527 write_sequnlock(&curr_sde
->head_lock
);
528 spin_unlock_irqrestore(&curr_sde
->tail_lock
, flags
);
530 schedule_work(&sde
->err_halt_worker
);
533 static void sdma_hw_clean_up_task(unsigned long opaque
)
535 struct sdma_engine
*sde
= (struct sdma_engine
*)opaque
;
539 #ifdef CONFIG_SDMA_VERBOSITY
540 dd_dev_err(sde
->dd
, "CONFIG SDMA(%u) %s:%d %s()\n",
541 sde
->this_idx
, slashstrip(__FILE__
), __LINE__
,
544 statuscsr
= read_sde_csr(sde
, SD(STATUS
));
545 statuscsr
&= SD(STATUS_ENG_CLEANED_UP_SMASK
);
551 sdma_process_event(sde
, sdma_event_e25_hw_clean_up_done
);
554 static inline struct sdma_txreq
*get_txhead(struct sdma_engine
*sde
)
556 smp_read_barrier_depends(); /* see sdma_update_tail() */
557 return sde
->tx_ring
[sde
->tx_head
& sde
->sdma_mask
];
561 * flush ring for recovery
563 static void sdma_flush_descq(struct sdma_engine
*sde
)
567 struct sdma_txreq
*txp
= get_txhead(sde
);
569 /* The reason for some of the complexity of this code is that
570 * not all descriptors have corresponding txps. So, we have to
571 * be able to skip over descs until we wander into the range of
572 * the next txp on the list.
574 head
= sde
->descq_head
& sde
->sdma_mask
;
575 tail
= sde
->descq_tail
& sde
->sdma_mask
;
576 while (head
!= tail
) {
577 /* advance head, wrap if needed */
578 head
= ++sde
->descq_head
& sde
->sdma_mask
;
579 /* if now past this txp's descs, do the callback */
580 if (txp
&& txp
->next_descq_idx
== head
) {
581 /* remove from list */
582 sde
->tx_ring
[sde
->tx_head
++ & sde
->sdma_mask
] = NULL
;
583 complete_tx(sde
, txp
, SDMA_TXREQ_S_ABORTED
);
584 trace_hfi1_sdma_progress(sde
, head
, tail
, txp
);
585 txp
= get_txhead(sde
);
590 sdma_desc_avail(sde
, sdma_descq_freecnt(sde
));
593 static void sdma_sw_clean_up_task(unsigned long opaque
)
595 struct sdma_engine
*sde
= (struct sdma_engine
*)opaque
;
598 spin_lock_irqsave(&sde
->tail_lock
, flags
);
599 write_seqlock(&sde
->head_lock
);
602 * At this point, the following should always be true:
603 * - We are halted, so no more descriptors are getting retired.
604 * - We are not running, so no one is submitting new work.
605 * - Only we can send the e40_sw_cleaned, so we can't start
606 * running again until we say so. So, the active list and
607 * descq are ours to play with.
611 * In the error clean up sequence, software clean must be called
612 * before the hardware clean so we can use the hardware head in
613 * the progress routine. A hardware clean or SPC unfreeze will
614 * reset the hardware head.
616 * Process all retired requests. The progress routine will use the
617 * latest physical hardware head - we are not running so speed does
620 sdma_make_progress(sde
, 0);
625 * Reset our notion of head and tail.
626 * Note that the HW registers have been reset via an earlier
631 sde
->desc_avail
= sdma_descq_freecnt(sde
);
634 __sdma_process_event(sde
, sdma_event_e40_sw_cleaned
);
636 write_sequnlock(&sde
->head_lock
);
637 spin_unlock_irqrestore(&sde
->tail_lock
, flags
);
640 static void sdma_sw_tear_down(struct sdma_engine
*sde
)
642 struct sdma_state
*ss
= &sde
->state
;
644 /* Releasing this reference means the state machine has stopped. */
647 /* stop waiting for all unfreeze events to complete */
648 atomic_set(&sde
->dd
->sdma_unfreeze_count
, -1);
649 wake_up_interruptible(&sde
->dd
->sdma_unfreeze_wq
);
652 static void sdma_start_hw_clean_up(struct sdma_engine
*sde
)
654 tasklet_hi_schedule(&sde
->sdma_hw_clean_up_task
);
657 static void sdma_set_state(struct sdma_engine
*sde
,
658 enum sdma_states next_state
)
660 struct sdma_state
*ss
= &sde
->state
;
661 const struct sdma_set_state_action
*action
= sdma_action_table
;
664 trace_hfi1_sdma_state(
666 sdma_state_names
[ss
->current_state
],
667 sdma_state_names
[next_state
]);
669 /* debugging bookkeeping */
670 ss
->previous_state
= ss
->current_state
;
671 ss
->previous_op
= ss
->current_op
;
672 ss
->current_state
= next_state
;
674 if (ss
->previous_state
!= sdma_state_s99_running
&&
675 next_state
== sdma_state_s99_running
)
678 if (action
[next_state
].op_enable
)
679 op
|= SDMA_SENDCTRL_OP_ENABLE
;
681 if (action
[next_state
].op_intenable
)
682 op
|= SDMA_SENDCTRL_OP_INTENABLE
;
684 if (action
[next_state
].op_halt
)
685 op
|= SDMA_SENDCTRL_OP_HALT
;
687 if (action
[next_state
].op_cleanup
)
688 op
|= SDMA_SENDCTRL_OP_CLEANUP
;
690 if (action
[next_state
].go_s99_running_tofalse
)
691 ss
->go_s99_running
= 0;
693 if (action
[next_state
].go_s99_running_totrue
)
694 ss
->go_s99_running
= 1;
697 sdma_sendctrl(sde
, ss
->current_op
);
701 * sdma_get_descq_cnt() - called when device probed
703 * Return a validated descq count.
705 * This is currently only used in the verbs initialization to build the tx
708 * This will probably be deleted in favor of a more scalable approach to
712 u16
sdma_get_descq_cnt(void)
714 u16 count
= sdma_descq_cnt
;
717 return SDMA_DESCQ_CNT
;
718 /* count must be a power of 2 greater than 64 and less than
719 * 32768. Otherwise return default.
721 if (!is_power_of_2(count
))
722 return SDMA_DESCQ_CNT
;
723 if (count
< 64 || count
> 32768)
724 return SDMA_DESCQ_CNT
;
729 * sdma_select_engine_vl() - select sdma engine
731 * @selector: a spreading factor
735 * This function returns an engine based on the selector and a vl. The
736 * mapping fields are protected by RCU.
738 struct sdma_engine
*sdma_select_engine_vl(
739 struct hfi1_devdata
*dd
,
743 struct sdma_vl_map
*m
;
744 struct sdma_map_elem
*e
;
745 struct sdma_engine
*rval
;
747 /* NOTE This should only happen if SC->VL changed after the initial
748 * checks on the QP/AH
749 * Default will return engine 0 below
757 m
= rcu_dereference(dd
->sdma_map
);
760 return &dd
->per_sdma
[0];
762 e
= m
->map
[vl
& m
->mask
];
763 rval
= e
->sde
[selector
& e
->mask
];
767 rval
= !rval
? &dd
->per_sdma
[0] : rval
;
768 trace_hfi1_sdma_engine_select(dd
, selector
, vl
, rval
->this_idx
);
773 * sdma_select_engine_sc() - select sdma engine
775 * @selector: a spreading factor
779 * This function returns an engine based on the selector and an sc.
781 struct sdma_engine
*sdma_select_engine_sc(
782 struct hfi1_devdata
*dd
,
786 u8 vl
= sc_to_vlt(dd
, sc5
);
788 return sdma_select_engine_vl(dd
, selector
, vl
);
792 * Free the indicated map struct
794 static void sdma_map_free(struct sdma_vl_map
*m
)
798 for (i
= 0; m
&& i
< m
->actual_vls
; i
++)
804 * Handle RCU callback
806 static void sdma_map_rcu_callback(struct rcu_head
*list
)
808 struct sdma_vl_map
*m
= container_of(list
, struct sdma_vl_map
, list
);
814 * sdma_map_init - called when # vls change
817 * @num_vls: number of vls
818 * @vl_engines: per vl engine mapping (optional)
820 * This routine changes the mapping based on the number of vls.
822 * vl_engines is used to specify a non-uniform vl/engine loading. NULL
823 * implies auto computing the loading and giving each VLs a uniform
824 * distribution of engines per VL.
826 * The auto algorithm computes the sde_per_vl and the number of extra
827 * engines. Any extra engines are added from the last VL on down.
829 * rcu locking is used here to control access to the mapping fields.
831 * If either the num_vls or num_sdma are non-power of 2, the array sizes
832 * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
833 * up to the next highest power of 2 and the first entry is reused
834 * in a round robin fashion.
836 * If an error occurs the map change is not done and the mapping is
840 int sdma_map_init(struct hfi1_devdata
*dd
, u8 port
, u8 num_vls
, u8
*vl_engines
)
843 int extra
, sde_per_vl
;
845 u8 lvl_engines
[OPA_MAX_VLS
];
846 struct sdma_vl_map
*oldmap
, *newmap
;
848 if (!(dd
->flags
& HFI1_HAS_SEND_DMA
))
852 /* truncate divide */
853 sde_per_vl
= dd
->num_sdma
/ num_vls
;
855 extra
= dd
->num_sdma
% num_vls
;
856 vl_engines
= lvl_engines
;
857 /* add extras from last vl down */
858 for (i
= num_vls
- 1; i
>= 0; i
--, extra
--)
859 vl_engines
[i
] = sde_per_vl
+ (extra
> 0 ? 1 : 0);
863 sizeof(struct sdma_vl_map
) +
864 roundup_pow_of_two(num_vls
) *
865 sizeof(struct sdma_map_elem
*),
869 newmap
->actual_vls
= num_vls
;
870 newmap
->vls
= roundup_pow_of_two(num_vls
);
871 newmap
->mask
= (1 << ilog2(newmap
->vls
)) - 1;
872 /* initialize back-map */
873 for (i
= 0; i
< TXE_NUM_SDMA_ENGINES
; i
++)
874 newmap
->engine_to_vl
[i
] = -1;
875 for (i
= 0; i
< newmap
->vls
; i
++) {
876 /* save for wrap around */
877 int first_engine
= engine
;
879 if (i
< newmap
->actual_vls
) {
880 int sz
= roundup_pow_of_two(vl_engines
[i
]);
882 /* only allocate once */
883 newmap
->map
[i
] = kzalloc(
884 sizeof(struct sdma_map_elem
) +
885 sz
* sizeof(struct sdma_engine
*),
889 newmap
->map
[i
]->mask
= (1 << ilog2(sz
)) - 1;
891 for (j
= 0; j
< sz
; j
++) {
892 newmap
->map
[i
]->sde
[j
] =
893 &dd
->per_sdma
[engine
];
894 if (++engine
>= first_engine
+ vl_engines
[i
])
895 /* wrap back to first engine */
896 engine
= first_engine
;
898 /* assign back-map */
899 for (j
= 0; j
< vl_engines
[i
]; j
++)
900 newmap
->engine_to_vl
[first_engine
+ j
] = i
;
902 /* just re-use entry without allocating */
903 newmap
->map
[i
] = newmap
->map
[i
% num_vls
];
905 engine
= first_engine
+ vl_engines
[i
];
907 /* newmap in hand, save old map */
908 spin_lock_irq(&dd
->sde_map_lock
);
909 oldmap
= rcu_dereference_protected(dd
->sdma_map
,
910 lockdep_is_held(&dd
->sde_map_lock
));
913 rcu_assign_pointer(dd
->sdma_map
, newmap
);
915 spin_unlock_irq(&dd
->sde_map_lock
);
916 /* success, free any old map after grace period */
918 call_rcu(&oldmap
->list
, sdma_map_rcu_callback
);
921 /* free any partial allocation */
922 sdma_map_free(newmap
);
927 * Clean up allocated memory.
929 * This routine is can be called regardless of the success of sdma_init()
932 static void sdma_clean(struct hfi1_devdata
*dd
, size_t num_engines
)
935 struct sdma_engine
*sde
;
937 if (dd
->sdma_pad_dma
) {
938 dma_free_coherent(&dd
->pcidev
->dev
, 4,
939 (void *)dd
->sdma_pad_dma
,
941 dd
->sdma_pad_dma
= NULL
;
942 dd
->sdma_pad_phys
= 0;
944 if (dd
->sdma_heads_dma
) {
945 dma_free_coherent(&dd
->pcidev
->dev
, dd
->sdma_heads_size
,
946 (void *)dd
->sdma_heads_dma
,
947 dd
->sdma_heads_phys
);
948 dd
->sdma_heads_dma
= NULL
;
949 dd
->sdma_heads_phys
= 0;
951 for (i
= 0; dd
->per_sdma
&& i
< num_engines
; ++i
) {
952 sde
= &dd
->per_sdma
[i
];
954 sde
->head_dma
= NULL
;
960 sde
->descq_cnt
* sizeof(u64
[2]),
967 kvfree(sde
->tx_ring
);
970 spin_lock_irq(&dd
->sde_map_lock
);
971 sdma_map_free(rcu_access_pointer(dd
->sdma_map
));
972 RCU_INIT_POINTER(dd
->sdma_map
, NULL
);
973 spin_unlock_irq(&dd
->sde_map_lock
);
980 * sdma_init() - called when device probed
982 * @port: port number (currently only zero)
984 * sdma_init initializes the specified number of engines.
986 * The code initializes each sde, its csrs. Interrupts
987 * are not required to be enabled.
990 * 0 - success, -errno on failure
992 int sdma_init(struct hfi1_devdata
*dd
, u8 port
)
995 struct sdma_engine
*sde
;
998 struct hfi1_pportdata
*ppd
= dd
->pport
+ port
;
999 u32 per_sdma_credits
;
1000 uint idle_cnt
= sdma_idle_cnt
;
1001 size_t num_engines
= dd
->chip_sdma_engines
;
1003 if (!HFI1_CAP_IS_KSET(SDMA
)) {
1004 HFI1_CAP_CLEAR(SDMA_AHG
);
1008 /* can't exceed chip support */
1009 mod_num_sdma
<= dd
->chip_sdma_engines
&&
1010 /* count must be >= vls */
1011 mod_num_sdma
>= num_vls
)
1012 num_engines
= mod_num_sdma
;
1014 dd_dev_info(dd
, "SDMA mod_num_sdma: %u\n", mod_num_sdma
);
1015 dd_dev_info(dd
, "SDMA chip_sdma_engines: %u\n", dd
->chip_sdma_engines
);
1016 dd_dev_info(dd
, "SDMA chip_sdma_mem_size: %u\n",
1017 dd
->chip_sdma_mem_size
);
1020 dd
->chip_sdma_mem_size
/ (num_engines
* SDMA_BLOCK_SIZE
);
1022 /* set up freeze waitqueue */
1023 init_waitqueue_head(&dd
->sdma_unfreeze_wq
);
1024 atomic_set(&dd
->sdma_unfreeze_count
, 0);
1026 descq_cnt
= sdma_get_descq_cnt();
1027 dd_dev_info(dd
, "SDMA engines %zu descq_cnt %u\n",
1028 num_engines
, descq_cnt
);
1030 /* alloc memory for array of send engines */
1031 dd
->per_sdma
= kcalloc(num_engines
, sizeof(*dd
->per_sdma
), GFP_KERNEL
);
1035 idle_cnt
= ns_to_cclock(dd
, idle_cnt
);
1036 if (!sdma_desct_intr
)
1037 sdma_desct_intr
= SDMA_DESC_INTR
;
1039 /* Allocate memory for SendDMA descriptor FIFOs */
1040 for (this_idx
= 0; this_idx
< num_engines
; ++this_idx
) {
1041 sde
= &dd
->per_sdma
[this_idx
];
1044 sde
->this_idx
= this_idx
;
1045 sde
->descq_cnt
= descq_cnt
;
1046 sde
->desc_avail
= sdma_descq_freecnt(sde
);
1047 sde
->sdma_shift
= ilog2(descq_cnt
);
1048 sde
->sdma_mask
= (1 << sde
->sdma_shift
) - 1;
1050 /* Create a mask specifically for each interrupt source */
1051 sde
->int_mask
= (u64
)1 << (0 * TXE_NUM_SDMA_ENGINES
+
1053 sde
->progress_mask
= (u64
)1 << (1 * TXE_NUM_SDMA_ENGINES
+
1055 sde
->idle_mask
= (u64
)1 << (2 * TXE_NUM_SDMA_ENGINES
+
1057 /* Create a combined mask to cover all 3 interrupt sources */
1058 sde
->imask
= sde
->int_mask
| sde
->progress_mask
|
1061 spin_lock_init(&sde
->tail_lock
);
1062 seqlock_init(&sde
->head_lock
);
1063 spin_lock_init(&sde
->senddmactrl_lock
);
1064 spin_lock_init(&sde
->flushlist_lock
);
1065 /* insure there is always a zero bit */
1066 sde
->ahg_bits
= 0xfffffffe00000000ULL
;
1068 sdma_set_state(sde
, sdma_state_s00_hw_down
);
1070 /* set up reference counting */
1071 kref_init(&sde
->state
.kref
);
1072 init_completion(&sde
->state
.comp
);
1074 INIT_LIST_HEAD(&sde
->flushlist
);
1075 INIT_LIST_HEAD(&sde
->dmawait
);
1078 get_kctxt_csr_addr(dd
, this_idx
, SD(TAIL
));
1082 SDMA_DESC1_HEAD_TO_HOST_FLAG
;
1085 SDMA_DESC1_INT_REQ_FLAG
;
1087 tasklet_init(&sde
->sdma_hw_clean_up_task
, sdma_hw_clean_up_task
,
1088 (unsigned long)sde
);
1090 tasklet_init(&sde
->sdma_sw_clean_up_task
, sdma_sw_clean_up_task
,
1091 (unsigned long)sde
);
1092 INIT_WORK(&sde
->err_halt_worker
, sdma_err_halt_wait
);
1093 INIT_WORK(&sde
->flush_worker
, sdma_field_flush
);
1095 sde
->progress_check_head
= 0;
1097 setup_timer(&sde
->err_progress_check_timer
,
1098 sdma_err_progress_check
, (unsigned long)sde
);
1100 sde
->descq
= dma_zalloc_coherent(
1102 descq_cnt
* sizeof(u64
[2]),
1109 kcalloc(descq_cnt
, sizeof(struct sdma_txreq
*),
1114 sizeof(struct sdma_txreq
*) *
1120 dd
->sdma_heads_size
= L1_CACHE_BYTES
* num_engines
;
1121 /* Allocate memory for DMA of head registers to memory */
1122 dd
->sdma_heads_dma
= dma_zalloc_coherent(
1124 dd
->sdma_heads_size
,
1125 &dd
->sdma_heads_phys
,
1128 if (!dd
->sdma_heads_dma
) {
1129 dd_dev_err(dd
, "failed to allocate SendDMA head memory\n");
1133 /* Allocate memory for pad */
1134 dd
->sdma_pad_dma
= dma_zalloc_coherent(
1140 if (!dd
->sdma_pad_dma
) {
1141 dd_dev_err(dd
, "failed to allocate SendDMA pad memory\n");
1145 /* assign each engine to different cacheline and init registers */
1146 curr_head
= (void *)dd
->sdma_heads_dma
;
1147 for (this_idx
= 0; this_idx
< num_engines
; ++this_idx
) {
1148 unsigned long phys_offset
;
1150 sde
= &dd
->per_sdma
[this_idx
];
1152 sde
->head_dma
= curr_head
;
1153 curr_head
+= L1_CACHE_BYTES
;
1154 phys_offset
= (unsigned long)sde
->head_dma
-
1155 (unsigned long)dd
->sdma_heads_dma
;
1156 sde
->head_phys
= dd
->sdma_heads_phys
+ phys_offset
;
1157 init_sdma_regs(sde
, per_sdma_credits
, idle_cnt
);
1159 dd
->flags
|= HFI1_HAS_SEND_DMA
;
1160 dd
->flags
|= idle_cnt
? HFI1_HAS_SDMA_TIMEOUT
: 0;
1161 dd
->num_sdma
= num_engines
;
1162 if (sdma_map_init(dd
, port
, ppd
->vls_operational
, NULL
))
1164 dd_dev_info(dd
, "SDMA num_sdma: %u\n", dd
->num_sdma
);
1168 sdma_clean(dd
, num_engines
);
1173 * sdma_all_running() - called when the link goes up
1176 * This routine moves all engines to the running state.
1178 void sdma_all_running(struct hfi1_devdata
*dd
)
1180 struct sdma_engine
*sde
;
1183 /* move all engines to running */
1184 for (i
= 0; i
< dd
->num_sdma
; ++i
) {
1185 sde
= &dd
->per_sdma
[i
];
1186 sdma_process_event(sde
, sdma_event_e30_go_running
);
1191 * sdma_all_idle() - called when the link goes down
1194 * This routine moves all engines to the idle state.
1196 void sdma_all_idle(struct hfi1_devdata
*dd
)
1198 struct sdma_engine
*sde
;
1201 /* idle all engines */
1202 for (i
= 0; i
< dd
->num_sdma
; ++i
) {
1203 sde
= &dd
->per_sdma
[i
];
1204 sdma_process_event(sde
, sdma_event_e70_go_idle
);
1209 * sdma_start() - called to kick off state processing for all engines
1212 * This routine is for kicking off the state processing for all required
1213 * sdma engines. Interrupts need to be working at this point.
1216 void sdma_start(struct hfi1_devdata
*dd
)
1219 struct sdma_engine
*sde
;
1221 /* kick off the engines state processing */
1222 for (i
= 0; i
< dd
->num_sdma
; ++i
) {
1223 sde
= &dd
->per_sdma
[i
];
1224 sdma_process_event(sde
, sdma_event_e10_go_hw_start
);
1229 * sdma_exit() - used when module is removed
1232 void sdma_exit(struct hfi1_devdata
*dd
)
1235 struct sdma_engine
*sde
;
1237 for (this_idx
= 0; dd
->per_sdma
&& this_idx
< dd
->num_sdma
;
1239 sde
= &dd
->per_sdma
[this_idx
];
1240 if (!list_empty(&sde
->dmawait
))
1241 dd_dev_err(dd
, "sde %u: dmawait list not empty!\n",
1243 sdma_process_event(sde
, sdma_event_e00_go_hw_down
);
1245 del_timer_sync(&sde
->err_progress_check_timer
);
1248 * This waits for the state machine to exit so it is not
1249 * necessary to kill the sdma_sw_clean_up_task to make sure
1250 * it is not running.
1252 sdma_finalput(&sde
->state
);
1254 sdma_clean(dd
, dd
->num_sdma
);
1258 * unmap the indicated descriptor
1260 static inline void sdma_unmap_desc(
1261 struct hfi1_devdata
*dd
,
1262 struct sdma_desc
*descp
)
1264 switch (sdma_mapping_type(descp
)) {
1265 case SDMA_MAP_SINGLE
:
1268 sdma_mapping_addr(descp
),
1269 sdma_mapping_len(descp
),
1275 sdma_mapping_addr(descp
),
1276 sdma_mapping_len(descp
),
1283 * return the mode as indicated by the first
1284 * descriptor in the tx.
1286 static inline u8
ahg_mode(struct sdma_txreq
*tx
)
1288 return (tx
->descp
[0].qw
[1] & SDMA_DESC1_HEADER_MODE_SMASK
)
1289 >> SDMA_DESC1_HEADER_MODE_SHIFT
;
1293 * sdma_txclean() - clean tx of mappings, descp *kmalloc's
1294 * @dd: hfi1_devdata for unmapping
1295 * @tx: tx request to clean
1297 * This is used in the progress routine to clean the tx or
1298 * by the ULP to toss an in-process tx build.
1300 * The code can be called multiple times without issue.
1304 struct hfi1_devdata
*dd
,
1305 struct sdma_txreq
*tx
)
1310 u8 skip
= 0, mode
= ahg_mode(tx
);
1313 sdma_unmap_desc(dd
, &tx
->descp
[0]);
1314 /* determine number of AHG descriptors to skip */
1315 if (mode
> SDMA_AHG_APPLY_UPDATE1
)
1317 for (i
= 1 + skip
; i
< tx
->num_desc
; i
++)
1318 sdma_unmap_desc(dd
, &tx
->descp
[i
]);
1321 kfree(tx
->coalesce_buf
);
1322 tx
->coalesce_buf
= NULL
;
1323 /* kmalloc'ed descp */
1324 if (unlikely(tx
->desc_limit
> ARRAY_SIZE(tx
->descs
))) {
1325 tx
->desc_limit
= ARRAY_SIZE(tx
->descs
);
1330 static inline u16
sdma_gethead(struct sdma_engine
*sde
)
1332 struct hfi1_devdata
*dd
= sde
->dd
;
1336 #ifdef CONFIG_SDMA_VERBOSITY
1337 dd_dev_err(sde
->dd
, "CONFIG SDMA(%u) %s:%d %s()\n",
1338 sde
->this_idx
, slashstrip(__FILE__
), __LINE__
, __func__
);
1342 use_dmahead
= HFI1_CAP_IS_KSET(USE_SDMA_HEAD
) && __sdma_running(sde
) &&
1343 (dd
->flags
& HFI1_HAS_SDMA_TIMEOUT
);
1344 hwhead
= use_dmahead
?
1345 (u16
)le64_to_cpu(*sde
->head_dma
) :
1346 (u16
)read_sde_csr(sde
, SD(HEAD
));
1348 if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK
))) {
1354 swhead
= sde
->descq_head
& sde
->sdma_mask
;
1355 /* this code is really bad for cache line trading */
1356 swtail
= ACCESS_ONCE(sde
->descq_tail
) & sde
->sdma_mask
;
1357 cnt
= sde
->descq_cnt
;
1359 if (swhead
< swtail
)
1361 sane
= (hwhead
>= swhead
) & (hwhead
<= swtail
);
1362 else if (swhead
> swtail
)
1363 /* wrapped around */
1364 sane
= ((hwhead
>= swhead
) && (hwhead
< cnt
)) ||
1368 sane
= (hwhead
== swhead
);
1370 if (unlikely(!sane
)) {
1371 dd_dev_err(dd
, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
1373 use_dmahead
? "dma" : "kreg",
1374 hwhead
, swhead
, swtail
, cnt
);
1376 /* try one more time, using csr */
1380 /* proceed as if no progress */
1388 * This is called when there are send DMA descriptors that might be
1391 * This is called with head_lock held.
1393 static void sdma_desc_avail(struct sdma_engine
*sde
, unsigned avail
)
1395 struct iowait
*wait
, *nw
;
1396 struct iowait
*waits
[SDMA_WAIT_BATCH_SIZE
];
1397 unsigned i
, n
= 0, seq
;
1398 struct sdma_txreq
*stx
;
1399 struct hfi1_ibdev
*dev
= &sde
->dd
->verbs_dev
;
1401 #ifdef CONFIG_SDMA_VERBOSITY
1402 dd_dev_err(sde
->dd
, "CONFIG SDMA(%u) %s:%d %s()\n", sde
->this_idx
,
1403 slashstrip(__FILE__
), __LINE__
, __func__
);
1404 dd_dev_err(sde
->dd
, "avail: %u\n", avail
);
1408 seq
= read_seqbegin(&dev
->iowait_lock
);
1409 if (!list_empty(&sde
->dmawait
)) {
1410 /* at least one item */
1411 write_seqlock(&dev
->iowait_lock
);
1412 /* Harvest waiters wanting DMA descriptors */
1413 list_for_each_entry_safe(
1422 if (n
== ARRAY_SIZE(waits
))
1424 if (!list_empty(&wait
->tx_head
)) {
1425 stx
= list_first_entry(
1429 num_desc
= stx
->num_desc
;
1431 if (num_desc
> avail
)
1434 list_del_init(&wait
->list
);
1437 write_sequnlock(&dev
->iowait_lock
);
1440 } while (read_seqretry(&dev
->iowait_lock
, seq
));
1442 for (i
= 0; i
< n
; i
++)
1443 waits
[i
]->wakeup(waits
[i
], SDMA_AVAIL_REASON
);
1446 /* head_lock must be held */
1447 static void sdma_make_progress(struct sdma_engine
*sde
, u64 status
)
1449 struct sdma_txreq
*txp
= NULL
;
1452 int idle_check_done
= 0;
1454 hwhead
= sdma_gethead(sde
);
1456 /* The reason for some of the complexity of this code is that
1457 * not all descriptors have corresponding txps. So, we have to
1458 * be able to skip over descs until we wander into the range of
1459 * the next txp on the list.
1463 txp
= get_txhead(sde
);
1464 swhead
= sde
->descq_head
& sde
->sdma_mask
;
1465 trace_hfi1_sdma_progress(sde
, hwhead
, swhead
, txp
);
1466 while (swhead
!= hwhead
) {
1467 /* advance head, wrap if needed */
1468 swhead
= ++sde
->descq_head
& sde
->sdma_mask
;
1470 /* if now past this txp's descs, do the callback */
1471 if (txp
&& txp
->next_descq_idx
== swhead
) {
1472 /* remove from list */
1473 sde
->tx_ring
[sde
->tx_head
++ & sde
->sdma_mask
] = NULL
;
1474 complete_tx(sde
, txp
, SDMA_TXREQ_S_OK
);
1475 /* see if there is another txp */
1476 txp
= get_txhead(sde
);
1478 trace_hfi1_sdma_progress(sde
, hwhead
, swhead
, txp
);
1483 * The SDMA idle interrupt is not guaranteed to be ordered with respect
1484 * to updates to the the dma_head location in host memory. The head
1485 * value read might not be fully up to date. If there are pending
1486 * descriptors and the SDMA idle interrupt fired then read from the
1487 * CSR SDMA head instead to get the latest value from the hardware.
1488 * The hardware SDMA head should be read at most once in this invocation
1489 * of sdma_make_progress(..) which is ensured by idle_check_done flag
1491 if ((status
& sde
->idle_mask
) && !idle_check_done
) {
1494 swtail
= ACCESS_ONCE(sde
->descq_tail
) & sde
->sdma_mask
;
1495 if (swtail
!= hwhead
) {
1496 hwhead
= (u16
)read_sde_csr(sde
, SD(HEAD
));
1497 idle_check_done
= 1;
1502 sde
->last_status
= status
;
1504 sdma_desc_avail(sde
, sdma_descq_freecnt(sde
));
1508 * sdma_engine_interrupt() - interrupt handler for engine
1510 * @status: sdma interrupt reason
1512 * Status is a mask of the 3 possible interrupts for this engine. It will
1513 * contain bits _only_ for this SDMA engine. It will contain at least one
1514 * bit, it may contain more.
1516 void sdma_engine_interrupt(struct sdma_engine
*sde
, u64 status
)
1518 trace_hfi1_sdma_engine_interrupt(sde
, status
);
1519 write_seqlock(&sde
->head_lock
);
1520 sdma_set_desc_cnt(sde
, sdma_desct_intr
);
1521 if (status
& sde
->idle_mask
)
1522 sde
->idle_int_cnt
++;
1523 else if (status
& sde
->progress_mask
)
1524 sde
->progress_int_cnt
++;
1525 else if (status
& sde
->int_mask
)
1526 sde
->sdma_int_cnt
++;
1527 sdma_make_progress(sde
, status
);
1528 write_sequnlock(&sde
->head_lock
);
1532 * sdma_engine_error() - error handler for engine
1534 * @status: sdma interrupt reason
1536 void sdma_engine_error(struct sdma_engine
*sde
, u64 status
)
1538 unsigned long flags
;
1540 #ifdef CONFIG_SDMA_VERBOSITY
1541 dd_dev_err(sde
->dd
, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
1543 (unsigned long long)status
,
1544 sdma_state_names
[sde
->state
.current_state
]);
1546 spin_lock_irqsave(&sde
->tail_lock
, flags
);
1547 write_seqlock(&sde
->head_lock
);
1548 if (status
& ALL_SDMA_ENG_HALT_ERRS
)
1549 __sdma_process_event(sde
, sdma_event_e60_hw_halted
);
1550 if (status
& ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK
)) {
1552 "SDMA (%u) engine error: 0x%llx state %s\n",
1554 (unsigned long long)status
,
1555 sdma_state_names
[sde
->state
.current_state
]);
1556 dump_sdma_state(sde
);
1558 write_sequnlock(&sde
->head_lock
);
1559 spin_unlock_irqrestore(&sde
->tail_lock
, flags
);
1562 static void sdma_sendctrl(struct sdma_engine
*sde
, unsigned op
)
1564 u64 set_senddmactrl
= 0;
1565 u64 clr_senddmactrl
= 0;
1566 unsigned long flags
;
1568 #ifdef CONFIG_SDMA_VERBOSITY
1569 dd_dev_err(sde
->dd
, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
1571 (op
& SDMA_SENDCTRL_OP_ENABLE
) ? 1 : 0,
1572 (op
& SDMA_SENDCTRL_OP_INTENABLE
) ? 1 : 0,
1573 (op
& SDMA_SENDCTRL_OP_HALT
) ? 1 : 0,
1574 (op
& SDMA_SENDCTRL_OP_CLEANUP
) ? 1 : 0);
1577 if (op
& SDMA_SENDCTRL_OP_ENABLE
)
1578 set_senddmactrl
|= SD(CTRL_SDMA_ENABLE_SMASK
);
1580 clr_senddmactrl
|= SD(CTRL_SDMA_ENABLE_SMASK
);
1582 if (op
& SDMA_SENDCTRL_OP_INTENABLE
)
1583 set_senddmactrl
|= SD(CTRL_SDMA_INT_ENABLE_SMASK
);
1585 clr_senddmactrl
|= SD(CTRL_SDMA_INT_ENABLE_SMASK
);
1587 if (op
& SDMA_SENDCTRL_OP_HALT
)
1588 set_senddmactrl
|= SD(CTRL_SDMA_HALT_SMASK
);
1590 clr_senddmactrl
|= SD(CTRL_SDMA_HALT_SMASK
);
1592 spin_lock_irqsave(&sde
->senddmactrl_lock
, flags
);
1594 sde
->p_senddmactrl
|= set_senddmactrl
;
1595 sde
->p_senddmactrl
&= ~clr_senddmactrl
;
1597 if (op
& SDMA_SENDCTRL_OP_CLEANUP
)
1598 write_sde_csr(sde
, SD(CTRL
),
1599 sde
->p_senddmactrl
|
1600 SD(CTRL_SDMA_CLEANUP_SMASK
));
1602 write_sde_csr(sde
, SD(CTRL
), sde
->p_senddmactrl
);
1604 spin_unlock_irqrestore(&sde
->senddmactrl_lock
, flags
);
1606 #ifdef CONFIG_SDMA_VERBOSITY
1607 sdma_dumpstate(sde
);
1611 static void sdma_setlengen(struct sdma_engine
*sde
)
1613 #ifdef CONFIG_SDMA_VERBOSITY
1614 dd_dev_err(sde
->dd
, "CONFIG SDMA(%u) %s:%d %s()\n",
1615 sde
->this_idx
, slashstrip(__FILE__
), __LINE__
, __func__
);
1619 * Set SendDmaLenGen and clear-then-set the MSB of the generation
1620 * count to enable generation checking and load the internal
1621 * generation counter.
1623 write_sde_csr(sde
, SD(LEN_GEN
),
1624 (sde
->descq_cnt
/ 64) << SD(LEN_GEN_LENGTH_SHIFT
));
1625 write_sde_csr(sde
, SD(LEN_GEN
),
1626 ((sde
->descq_cnt
/ 64) << SD(LEN_GEN_LENGTH_SHIFT
)) |
1627 (4ULL << SD(LEN_GEN_GENERATION_SHIFT
)));
1630 static inline void sdma_update_tail(struct sdma_engine
*sde
, u16 tail
)
1632 /* Commit writes to memory and advance the tail on the chip */
1633 smp_wmb(); /* see get_txhead() */
1634 writeq(tail
, sde
->tail_csr
);
1638 * This is called when changing to state s10_hw_start_up_halt_wait as
1639 * a result of send buffer errors or send DMA descriptor errors.
1641 static void sdma_hw_start_up(struct sdma_engine
*sde
)
1645 #ifdef CONFIG_SDMA_VERBOSITY
1646 dd_dev_err(sde
->dd
, "CONFIG SDMA(%u) %s:%d %s()\n",
1647 sde
->this_idx
, slashstrip(__FILE__
), __LINE__
, __func__
);
1650 sdma_setlengen(sde
);
1651 sdma_update_tail(sde
, 0); /* Set SendDmaTail */
1654 reg
= SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK
) <<
1655 SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT
);
1656 write_sde_csr(sde
, SD(ENG_ERR_CLEAR
), reg
);
1659 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
1660 (r &= ~SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
1662 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
1663 (r |= SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
1665 * set_sdma_integrity
1667 * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'.
1669 static void set_sdma_integrity(struct sdma_engine
*sde
)
1671 struct hfi1_devdata
*dd
= sde
->dd
;
1674 if (unlikely(HFI1_CAP_IS_KSET(NO_INTEGRITY
)))
1677 reg
= hfi1_pkt_base_sdma_integrity(dd
);
1679 if (HFI1_CAP_IS_KSET(STATIC_RATE_CTRL
))
1680 CLEAR_STATIC_RATE_CONTROL_SMASK(reg
);
1682 SET_STATIC_RATE_CONTROL_SMASK(reg
);
1684 write_sde_csr(sde
, SD(CHECK_ENABLE
), reg
);
1687 static void init_sdma_regs(
1688 struct sdma_engine
*sde
,
1693 #ifdef CONFIG_SDMA_VERBOSITY
1694 struct hfi1_devdata
*dd
= sde
->dd
;
1696 dd_dev_err(dd
, "CONFIG SDMA(%u) %s:%d %s()\n",
1697 sde
->this_idx
, slashstrip(__FILE__
), __LINE__
, __func__
);
1700 write_sde_csr(sde
, SD(BASE_ADDR
), sde
->descq_phys
);
1701 sdma_setlengen(sde
);
1702 sdma_update_tail(sde
, 0); /* Set SendDmaTail */
1703 write_sde_csr(sde
, SD(RELOAD_CNT
), idle_cnt
);
1704 write_sde_csr(sde
, SD(DESC_CNT
), 0);
1705 write_sde_csr(sde
, SD(HEAD_ADDR
), sde
->head_phys
);
1706 write_sde_csr(sde
, SD(MEMORY
),
1707 ((u64
)credits
<< SD(MEMORY_SDMA_MEMORY_CNT_SHIFT
)) |
1708 ((u64
)(credits
* sde
->this_idx
) <<
1709 SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT
)));
1710 write_sde_csr(sde
, SD(ENG_ERR_MASK
), ~0ull);
1711 set_sdma_integrity(sde
);
1712 opmask
= OPCODE_CHECK_MASK_DISABLED
;
1713 opval
= OPCODE_CHECK_VAL_DISABLED
;
1714 write_sde_csr(sde
, SD(CHECK_OPCODE
),
1715 (opmask
<< SEND_CTXT_CHECK_OPCODE_MASK_SHIFT
) |
1716 (opval
<< SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT
));
1719 #ifdef CONFIG_SDMA_VERBOSITY
1721 #define sdma_dumpstate_helper0(reg) do { \
1722 csr = read_csr(sde->dd, reg); \
1723 dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \
1726 #define sdma_dumpstate_helper(reg) do { \
1727 csr = read_sde_csr(sde, reg); \
1728 dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
1729 #reg, sde->this_idx, csr); \
1732 #define sdma_dumpstate_helper2(reg) do { \
1733 csr = read_csr(sde->dd, reg + (8 * i)); \
1734 dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \
1738 void sdma_dumpstate(struct sdma_engine
*sde
)
1743 sdma_dumpstate_helper(SD(CTRL
));
1744 sdma_dumpstate_helper(SD(STATUS
));
1745 sdma_dumpstate_helper0(SD(ERR_STATUS
));
1746 sdma_dumpstate_helper0(SD(ERR_MASK
));
1747 sdma_dumpstate_helper(SD(ENG_ERR_STATUS
));
1748 sdma_dumpstate_helper(SD(ENG_ERR_MASK
));
1750 for (i
= 0; i
< CCE_NUM_INT_CSRS
; ++i
) {
1751 sdma_dumpstate_helper2(CCE_INT_STATUS
);
1752 sdma_dumpstate_helper2(CCE_INT_MASK
);
1753 sdma_dumpstate_helper2(CCE_INT_BLOCKED
);
1756 sdma_dumpstate_helper(SD(TAIL
));
1757 sdma_dumpstate_helper(SD(HEAD
));
1758 sdma_dumpstate_helper(SD(PRIORITY_THLD
));
1759 sdma_dumpstate_helper(SD(IDLE_CNT
));
1760 sdma_dumpstate_helper(SD(RELOAD_CNT
));
1761 sdma_dumpstate_helper(SD(DESC_CNT
));
1762 sdma_dumpstate_helper(SD(DESC_FETCHED_CNT
));
1763 sdma_dumpstate_helper(SD(MEMORY
));
1764 sdma_dumpstate_helper0(SD(ENGINES
));
1765 sdma_dumpstate_helper0(SD(MEM_SIZE
));
1766 /* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */
1767 sdma_dumpstate_helper(SD(BASE_ADDR
));
1768 sdma_dumpstate_helper(SD(LEN_GEN
));
1769 sdma_dumpstate_helper(SD(HEAD_ADDR
));
1770 sdma_dumpstate_helper(SD(CHECK_ENABLE
));
1771 sdma_dumpstate_helper(SD(CHECK_VL
));
1772 sdma_dumpstate_helper(SD(CHECK_JOB_KEY
));
1773 sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY
));
1774 sdma_dumpstate_helper(SD(CHECK_SLID
));
1775 sdma_dumpstate_helper(SD(CHECK_OPCODE
));
1779 static void dump_sdma_state(struct sdma_engine
*sde
)
1781 struct hw_sdma_desc
*descq
;
1782 struct hw_sdma_desc
*descqp
;
1787 u16 head
, tail
, cnt
;
1789 head
= sde
->descq_head
& sde
->sdma_mask
;
1790 tail
= sde
->descq_tail
& sde
->sdma_mask
;
1791 cnt
= sdma_descq_freecnt(sde
);
1795 "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
1796 sde
->this_idx
, head
, tail
, cnt
,
1797 !list_empty(&sde
->flushlist
));
1799 /* print info for each entry in the descriptor queue */
1800 while (head
!= tail
) {
1801 char flags
[6] = { 'x', 'x', 'x', 'x', 0 };
1803 descqp
= &sde
->descq
[head
];
1804 desc
[0] = le64_to_cpu(descqp
->qw
[0]);
1805 desc
[1] = le64_to_cpu(descqp
->qw
[1]);
1806 flags
[0] = (desc
[1] & SDMA_DESC1_INT_REQ_FLAG
) ? 'I' : '-';
1807 flags
[1] = (desc
[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG
) ?
1809 flags
[2] = (desc
[0] & SDMA_DESC0_FIRST_DESC_FLAG
) ? 'F' : '-';
1810 flags
[3] = (desc
[0] & SDMA_DESC0_LAST_DESC_FLAG
) ? 'L' : '-';
1811 addr
= (desc
[0] >> SDMA_DESC0_PHY_ADDR_SHIFT
)
1812 & SDMA_DESC0_PHY_ADDR_MASK
;
1813 gen
= (desc
[1] >> SDMA_DESC1_GENERATION_SHIFT
)
1814 & SDMA_DESC1_GENERATION_MASK
;
1815 len
= (desc
[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT
)
1816 & SDMA_DESC0_BYTE_COUNT_MASK
;
1818 "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
1819 head
, flags
, addr
, gen
, len
);
1821 "\tdesc0:0x%016llx desc1 0x%016llx\n",
1823 if (desc
[0] & SDMA_DESC0_FIRST_DESC_FLAG
)
1825 "\taidx: %u amode: %u alen: %u\n",
1827 SDMA_DESC1_HEADER_INDEX_SMASK
) >>
1828 SDMA_DESC1_HEADER_INDEX_SHIFT
),
1830 SDMA_DESC1_HEADER_MODE_SMASK
) >>
1831 SDMA_DESC1_HEADER_MODE_SHIFT
),
1833 SDMA_DESC1_HEADER_DWS_SMASK
) >>
1834 SDMA_DESC1_HEADER_DWS_SHIFT
));
1836 head
&= sde
->sdma_mask
;
1841 "SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
1843 * sdma_seqfile_dump_sde() - debugfs dump of sde
1845 * @sde: send dma engine to dump
1847 * This routine dumps the sde to the indicated seq file.
1849 void sdma_seqfile_dump_sde(struct seq_file
*s
, struct sdma_engine
*sde
)
1852 struct hw_sdma_desc
*descqp
;
1858 head
= sde
->descq_head
& sde
->sdma_mask
;
1859 tail
= ACCESS_ONCE(sde
->descq_tail
) & sde
->sdma_mask
;
1860 seq_printf(s
, SDE_FMT
, sde
->this_idx
,
1862 sdma_state_name(sde
->state
.current_state
),
1863 (unsigned long long)read_sde_csr(sde
, SD(CTRL
)),
1864 (unsigned long long)read_sde_csr(sde
, SD(STATUS
)),
1865 (unsigned long long)read_sde_csr(sde
, SD(ENG_ERR_STATUS
)),
1866 (unsigned long long)read_sde_csr(sde
, SD(TAIL
)), tail
,
1867 (unsigned long long)read_sde_csr(sde
, SD(HEAD
)), head
,
1868 (unsigned long long)le64_to_cpu(*sde
->head_dma
),
1869 (unsigned long long)read_sde_csr(sde
, SD(MEMORY
)),
1870 (unsigned long long)read_sde_csr(sde
, SD(LEN_GEN
)),
1871 (unsigned long long)read_sde_csr(sde
, SD(RELOAD_CNT
)),
1872 (unsigned long long)sde
->last_status
,
1873 (unsigned long long)sde
->ahg_bits
,
1878 !list_empty(&sde
->flushlist
),
1879 sde
->descq_full_count
,
1880 (unsigned long long)read_sde_csr(sde
, SEND_DMA_CHECK_SLID
));
1882 /* print info for each entry in the descriptor queue */
1883 while (head
!= tail
) {
1884 char flags
[6] = { 'x', 'x', 'x', 'x', 0 };
1886 descqp
= &sde
->descq
[head
];
1887 desc
[0] = le64_to_cpu(descqp
->qw
[0]);
1888 desc
[1] = le64_to_cpu(descqp
->qw
[1]);
1889 flags
[0] = (desc
[1] & SDMA_DESC1_INT_REQ_FLAG
) ? 'I' : '-';
1890 flags
[1] = (desc
[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG
) ?
1892 flags
[2] = (desc
[0] & SDMA_DESC0_FIRST_DESC_FLAG
) ? 'F' : '-';
1893 flags
[3] = (desc
[0] & SDMA_DESC0_LAST_DESC_FLAG
) ? 'L' : '-';
1894 addr
= (desc
[0] >> SDMA_DESC0_PHY_ADDR_SHIFT
)
1895 & SDMA_DESC0_PHY_ADDR_MASK
;
1896 gen
= (desc
[1] >> SDMA_DESC1_GENERATION_SHIFT
)
1897 & SDMA_DESC1_GENERATION_MASK
;
1898 len
= (desc
[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT
)
1899 & SDMA_DESC0_BYTE_COUNT_MASK
;
1901 "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
1902 head
, flags
, addr
, gen
, len
);
1903 if (desc
[0] & SDMA_DESC0_FIRST_DESC_FLAG
)
1904 seq_printf(s
, "\t\tahgidx: %u ahgmode: %u\n",
1906 SDMA_DESC1_HEADER_INDEX_SMASK
) >>
1907 SDMA_DESC1_HEADER_INDEX_SHIFT
),
1909 SDMA_DESC1_HEADER_MODE_SMASK
) >>
1910 SDMA_DESC1_HEADER_MODE_SHIFT
));
1911 head
= (head
+ 1) & sde
->sdma_mask
;
1916 * add the generation number into
1917 * the qw1 and return
1919 static inline u64
add_gen(struct sdma_engine
*sde
, u64 qw1
)
1921 u8 generation
= (sde
->descq_tail
>> sde
->sdma_shift
) & 3;
1923 qw1
&= ~SDMA_DESC1_GENERATION_SMASK
;
1924 qw1
|= ((u64
)generation
& SDMA_DESC1_GENERATION_MASK
)
1925 << SDMA_DESC1_GENERATION_SHIFT
;
1930 * This routine submits the indicated tx
1932 * Space has already been guaranteed and
1933 * tail side of ring is locked.
1935 * The hardware tail update is done
1936 * in the caller and that is facilitated
1937 * by returning the new tail.
1939 * There is special case logic for ahg
1940 * to not add the generation number for
1941 * up to 2 descriptors that follow the
1945 static inline u16
submit_tx(struct sdma_engine
*sde
, struct sdma_txreq
*tx
)
1949 struct sdma_desc
*descp
= tx
->descp
;
1950 u8 skip
= 0, mode
= ahg_mode(tx
);
1952 tail
= sde
->descq_tail
& sde
->sdma_mask
;
1953 sde
->descq
[tail
].qw
[0] = cpu_to_le64(descp
->qw
[0]);
1954 sde
->descq
[tail
].qw
[1] = cpu_to_le64(add_gen(sde
, descp
->qw
[1]));
1955 trace_hfi1_sdma_descriptor(sde
, descp
->qw
[0], descp
->qw
[1],
1956 tail
, &sde
->descq
[tail
]);
1957 tail
= ++sde
->descq_tail
& sde
->sdma_mask
;
1959 if (mode
> SDMA_AHG_APPLY_UPDATE1
)
1961 for (i
= 1; i
< tx
->num_desc
; i
++, descp
++) {
1964 sde
->descq
[tail
].qw
[0] = cpu_to_le64(descp
->qw
[0]);
1966 /* edits don't have generation */
1970 /* replace generation with real one for non-edits */
1971 qw1
= add_gen(sde
, descp
->qw
[1]);
1973 sde
->descq
[tail
].qw
[1] = cpu_to_le64(qw1
);
1974 trace_hfi1_sdma_descriptor(sde
, descp
->qw
[0], qw1
,
1975 tail
, &sde
->descq
[tail
]);
1976 tail
= ++sde
->descq_tail
& sde
->sdma_mask
;
1978 tx
->next_descq_idx
= tail
;
1979 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
1980 tx
->sn
= sde
->tail_sn
++;
1981 trace_hfi1_sdma_in_sn(sde
, tx
->sn
);
1982 WARN_ON_ONCE(sde
->tx_ring
[sde
->tx_tail
& sde
->sdma_mask
]);
1984 sde
->tx_ring
[sde
->tx_tail
++ & sde
->sdma_mask
] = tx
;
1985 sde
->desc_avail
-= tx
->num_desc
;
1990 * Check for progress
1992 static int sdma_check_progress(
1993 struct sdma_engine
*sde
,
1994 struct iowait
*wait
,
1995 struct sdma_txreq
*tx
)
1999 sde
->desc_avail
= sdma_descq_freecnt(sde
);
2000 if (tx
->num_desc
<= sde
->desc_avail
)
2002 /* pulse the head_lock */
2003 if (wait
&& wait
->sleep
) {
2006 seq
= raw_seqcount_begin(
2007 (const seqcount_t
*)&sde
->head_lock
.seqcount
);
2008 ret
= wait
->sleep(sde
, wait
, tx
, seq
);
2010 sde
->desc_avail
= sdma_descq_freecnt(sde
);
2018 * sdma_send_txreq() - submit a tx req to ring
2019 * @sde: sdma engine to use
2020 * @wait: wait structure to use when full (may be NULL)
2021 * @tx: sdma_txreq to submit
2023 * The call submits the tx into the ring. If a iowait structure is non-NULL
2024 * the packet will be queued to the list in wait.
2027 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in
2028 * ring (wait == NULL)
2029 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
2031 int sdma_send_txreq(struct sdma_engine
*sde
,
2032 struct iowait
*wait
,
2033 struct sdma_txreq
*tx
)
2037 unsigned long flags
;
2039 /* user should have supplied entire packet */
2040 if (unlikely(tx
->tlen
))
2043 spin_lock_irqsave(&sde
->tail_lock
, flags
);
2045 if (unlikely(!__sdma_running(sde
)))
2047 if (unlikely(tx
->num_desc
> sde
->desc_avail
))
2049 tail
= submit_tx(sde
, tx
);
2051 iowait_sdma_inc(wait
);
2052 sdma_update_tail(sde
, tail
);
2054 spin_unlock_irqrestore(&sde
->tail_lock
, flags
);
2058 iowait_sdma_inc(wait
);
2059 tx
->next_descq_idx
= 0;
2060 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2061 tx
->sn
= sde
->tail_sn
++;
2062 trace_hfi1_sdma_in_sn(sde
, tx
->sn
);
2064 spin_lock(&sde
->flushlist_lock
);
2065 list_add_tail(&tx
->list
, &sde
->flushlist
);
2066 spin_unlock(&sde
->flushlist_lock
);
2069 wait
->count
+= tx
->num_desc
;
2071 schedule_work(&sde
->flush_worker
);
2075 ret
= sdma_check_progress(sde
, wait
, tx
);
2076 if (ret
== -EAGAIN
) {
2080 sde
->descq_full_count
++;
/**
 * sdma_send_txlist() - submit a list of tx req to ring
 * @sde: sdma engine to use
 * @wait: wait structure to use when full (may be NULL)
 * @tx_list: list of sdma_txreqs to submit
 *
 * The call submits the list into the ring.
 *
 * If the iowait structure is non-NULL and not equal to the iowait list
 * the unprocessed part of the list will be appended to the list in wait.
 *
 * In all cases, the tx_list will be updated so the head of the tx_list is
 * the list of descriptors that have yet to be transmitted.
 *
 * The intent of this call is to provide a more efficient
 * way of submitting multiple packets to SDMA while holding the tail
 * side lock.
 *
 * Return:
 * > 0 - Success (value is number of sdma_txreq's submitted),
 * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
 * -EIOCBQUEUED - tx queued to iowait, -ECOMM - bad sdma state
 */
int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
		     struct list_head *tx_list)
{
	struct sdma_txreq *tx, *tx_next;
	int ret = 0;
	unsigned long flags;
	u16 tail = INVALID_TAIL;
	int count = 0;

	spin_lock_irqsave(&sde->tail_lock, flags);
retry:
	list_for_each_entry_safe(tx, tx_next, tx_list, list) {
		tx->wait = wait;
		if (unlikely(!__sdma_running(sde)))
			goto unlock_noconn;
		if (unlikely(tx->num_desc > sde->desc_avail))
			goto nodesc;
		if (unlikely(tx->tlen)) {
			ret = -EINVAL;
			goto update_tail;
		}
		list_del_init(&tx->list);
		tail = submit_tx(sde, tx);
		count++;
		if (tail != INVALID_TAIL &&
		    (count & SDMA_TAIL_UPDATE_THRESH) == 0) {
			sdma_update_tail(sde, tail);
			tail = INVALID_TAIL;
		}
	}
update_tail:
	if (wait)
		iowait_sdma_add(wait, count);
	if (tail != INVALID_TAIL)
		sdma_update_tail(sde, tail);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
	return ret == 0 ? count : ret;
unlock_noconn:
	/* engine not running: move the whole list to the flushlist */
	spin_lock(&sde->flushlist_lock);
	list_for_each_entry_safe(tx, tx_next, tx_list, list) {
		tx->wait = wait;
		list_del_init(&tx->list);
		if (wait)
			iowait_sdma_inc(wait);
		tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
		tx->sn = sde->tail_sn++;
		trace_hfi1_sdma_in_sn(sde, tx->sn);
#endif
		list_add_tail(&tx->list, &sde->flushlist);
		if (wait) {
			wait->tx_count++;
			wait->count += tx->num_desc;
		}
	}
	spin_unlock(&sde->flushlist_lock);
	schedule_work(&sde->flush_worker);
	ret = -ECOMM;
	goto update_tail;
nodesc:
	ret = sdma_check_progress(sde, wait, tx);
	if (ret == -EAGAIN) {
		ret = 0;
		goto retry;
	}
	sde->descq_full_count++;
	goto update_tail;
}
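
/*
 * Usage sketch (illustrative only): batching prebuilt requests through
 * sdma_send_txlist() writes the tail CSR at most once per
 * SDMA_TAIL_UPDATE_THRESH submissions rather than once per packet.
 * The batch list, priv and build_pkt() names are hypothetical.
 *
 *	LIST_HEAD(batch);
 *	for (i = 0; i < npkts; i++)
 *		list_add_tail(&build_pkt(i)->txreq.list, &batch);
 *	ret = sdma_send_txlist(sde, &priv->s_iowait, &batch);
 *	// ret > 0: that many were submitted; &batch holds any remainder
 */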
static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event)
{
	unsigned long flags;

	/* lock order: tail_lock, then head_lock */
	spin_lock_irqsave(&sde->tail_lock, flags);
	write_seqlock(&sde->head_lock);

	__sdma_process_event(sde, event);

	if (sde->state.current_state == sdma_state_s99_running)
		sdma_desc_avail(sde, sdma_descq_freecnt(sde));

	write_sequnlock(&sde->head_lock);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
}
static void __sdma_process_event(struct sdma_engine *sde,
				 enum sdma_events event)
{
	struct sdma_state *ss = &sde->state;
	int need_progress = 0;

	/* CONFIG SDMA temporary */
#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx,
		   sdma_state_names[ss->current_state],
		   sdma_event_names[event]);
#endif

	switch (ss->current_state) {
	case sdma_state_s00_hw_down:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			break;
		case sdma_event_e30_go_running:
			/*
			 * If down, but running requested (usually the
			 * result of a link up), then we need to start up.
			 * This can happen when hw down is requested while
			 * bringing the link up with traffic active.
			 */
			ss->go_s99_running = 1;
			/* fall through and start dma engine */
		case sdma_event_e10_go_hw_start:
			/* This reference means the state machine is started */
			sdma_get(&sde->state);
			sdma_set_state(sde,
				       sdma_state_s10_hw_start_up_halt_wait);
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e40_sw_cleaned:
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s10_hw_start_up_halt_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde,
				       sdma_state_s15_hw_start_up_clean_wait);
			sdma_start_hw_clean_up(sde);
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s15_hw_start_up_clean_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s20_idle:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			sdma_sw_tear_down(sde);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			sdma_set_state(sde, sdma_state_s99_running);
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			break;
		case sdma_event_e85_link_down:
			/* fall through */
		case sdma_event_e80_hw_freeze:
			sdma_set_state(sde, sdma_state_s80_hw_freeze);
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s30_sw_clean_up_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait);
			sdma_start_hw_clean_up(sde);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s40_hw_clean_up_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s50_hw_halt_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s60_idle_halt_wait:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s80_hw_freeze:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s82_freeze_sw_clean:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			ss->go_s99_running = 1;
			break;
		case sdma_event_e40_sw_cleaned:
			/* notify caller this engine is done cleaning */
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			break;
		case sdma_event_e70_go_idle:
			ss->go_s99_running = 0;
			break;
		case sdma_event_e80_hw_freeze:
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			sdma_hw_start_up(sde);
			sdma_set_state(sde, ss->go_s99_running ?
				       sdma_state_s99_running :
				       sdma_state_s20_idle);
			break;
		case sdma_event_e85_link_down:
			break;
		case sdma_event_e90_sw_halted:
			break;
		}
		break;

	case sdma_state_s99_running:
		switch (event) {
		case sdma_event_e00_go_hw_down:
			sdma_set_state(sde, sdma_state_s00_hw_down);
			tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
			break;
		case sdma_event_e10_go_hw_start:
			break;
		case sdma_event_e15_hw_halt_done:
			break;
		case sdma_event_e25_hw_clean_up_done:
			break;
		case sdma_event_e30_go_running:
			break;
		case sdma_event_e40_sw_cleaned:
			break;
		case sdma_event_e50_hw_cleaned:
			break;
		case sdma_event_e60_hw_halted:
			need_progress = 1;
			sdma_err_progress_check_schedule(sde);
			/* fall through */
		case sdma_event_e90_sw_halted:
			/*
			 * SW initiated halt does not perform the engine
			 * progress check
			 */
			sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
			schedule_work(&sde->err_halt_worker);
			break;
		case sdma_event_e70_go_idle:
			sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
			break;
		case sdma_event_e85_link_down:
			ss->go_s99_running = 0;
			/* fall through */
		case sdma_event_e80_hw_freeze:
			sdma_set_state(sde, sdma_state_s80_hw_freeze);
			atomic_dec(&sde->dd->sdma_unfreeze_count);
			wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
			break;
		case sdma_event_e81_hw_frozen:
			break;
		case sdma_event_e82_hw_unfreeze:
			break;
		}
		break;
	}

	ss->last_event = event;
	if (need_progress)
		sdma_make_progress(sde, 0);
}
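
/*
 * Worked example (derived from the switch above, not normative): the
 * recovery path after a hardware halt while running is
 *
 *	e60_hw_halted:       s99_running -> s50_hw_halt_wait
 *	                     (progress check + err_halt_worker scheduled)
 *	e15_hw_halt_done:    s50 -> s30_sw_clean_up_wait (sw clean tasklet)
 *	e40_sw_cleaned:      s30 -> s40_hw_clean_up_wait (hw clean started)
 *	e25_hw_clean_up_done: s40 -> s99_running or s20_idle, depending on
 *	                     ss->go_s99_running
 *
 * Events not listed under a state are ignored (empty case arms).
 */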
/*
 * _extend_sdma_tx_descs() - helper to extend txreq
 *
 * This is called once the initial nominal allocation
 * of descriptors in the sdma_txreq is exhausted.
 *
 * The code will bump the allocation up to the max
 * of MAX_DESC (64) descriptors.  There doesn't seem to be
 * much point in an interim step.  The last descriptor
 * is reserved for the coalesce buffer in order to support
 * cases where an input packet has >MAX_DESC iovecs.
 */
static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	int i;

	/* Handle last descriptor */
	if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
		/* if tlen is 0, it is for padding, release last descriptor */
		if (!tx->tlen) {
			tx->desc_limit = MAX_DESC;
		} else if (!tx->coalesce_buf) {
			/* allocate coalesce buffer with space for padding */
			tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32),
						   GFP_ATOMIC);
			if (!tx->coalesce_buf)
				goto enomem;
			tx->coalesce_idx = 0;
		}
		return 0;
	}

	if (unlikely(tx->num_desc == MAX_DESC))
		goto enomem;

	tx->descp = kmalloc_array(
			MAX_DESC,
			sizeof(struct sdma_desc),
			GFP_ATOMIC);
	if (!tx->descp)
		goto enomem;

	/* reserve last descriptor for coalescing */
	tx->desc_limit = MAX_DESC - 1;
	/* copy ones already built */
	for (i = 0; i < tx->num_desc; i++)
		tx->descp[i] = tx->descs[i];
	return 0;
enomem:
	sdma_txclean(dd, tx);
	return -ENOMEM;
}
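
/*
 * Sketch of the growth path (hypothetical values, derived from the
 * function above): a txreq starts on its small inline tx->descs[]
 * array; the first add that exhausts it lands here, after which:
 *
 *	rval = _extend_sdma_tx_descs(dd, tx);
 *	// tx->descp      -> kmalloc_array(64, sizeof(struct sdma_desc))
 *	// tx->desc_limit == 63, slot 63 held back for the coalesce buffer
 *	// tx->descp[0..num_desc-1] copied from tx->descs[]
 */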
/*
 * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors
 *
 * This is called once the initial nominal allocation of descriptors
 * in the sdma_txreq is exhausted.
 *
 * This function calls _extend_sdma_tx_descs to extend the descriptor
 * array or allocate the coalesce buffer.  If there is an allocated
 * coalesce buffer, it copies the input packet data into it.  It also
 * adds the coalesce buffer descriptor once the whole packet has been
 * received.
 *
 * Return:
 * <0 - error
 * 0 - coalescing, don't populate descriptor
 * 1 - continue with populating descriptor
 */
int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
			   int type, void *kvaddr, struct page *page,
			   unsigned long offset, u16 len)
{
	int pad_len, rval;
	dma_addr_t addr;

	rval = _extend_sdma_tx_descs(dd, tx);
	if (rval) {
		sdma_txclean(dd, tx);
		return rval;
	}

	/* If coalesce buffer is allocated, copy data into it */
	if (tx->coalesce_buf) {
		if (type == SDMA_MAP_NONE) {
			sdma_txclean(dd, tx);
			return -EINVAL;
		}

		if (type == SDMA_MAP_PAGE) {
			kvaddr = kmap(page);
			kvaddr += offset;
		} else if (WARN_ON(!kvaddr)) {
			sdma_txclean(dd, tx);
			return -EINVAL;
		}

		memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len);
		tx->coalesce_idx += len;
		if (type == SDMA_MAP_PAGE)
			kunmap(page);

		/* If there is more data, return */
		if (tx->tlen - tx->coalesce_idx)
			return 0;

		/* Whole packet is received; add any padding */
		pad_len = tx->packet_len & (sizeof(u32) - 1);
		if (pad_len) {
			pad_len = sizeof(u32) - pad_len;
			memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len);
			/* padding is taken care of for coalescing case */
			tx->packet_len += pad_len;
			tx->tlen += pad_len;
		}

		/* dma map the coalesce buffer */
		addr = dma_map_single(&dd->pcidev->dev,
				      tx->coalesce_buf,
				      tx->tlen,
				      DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
			sdma_txclean(dd, tx);
			return -ENOSPC;
		}

		/* Add descriptor for coalesce buffer */
		tx->desc_limit = MAX_DESC;
		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
					 addr, tx->tlen);
	}

	return 1;
}
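
/*
 * Worked example of the padding arithmetic above (hypothetical
 * numbers): for packet_len == 4097, pad_len = 4097 & 3 = 1, so
 * pad_len becomes 4 - 1 = 3; three zero bytes are appended and both
 * packet_len and tlen grow to 4100, a dword-aligned length for the
 * single coalesce-buffer descriptor.
 */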
/* Update sdes when the lmc changes */
void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
{
	struct sdma_engine *sde;
	int i;
	u64 sreg;

	sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) <<
		SD(CHECK_SLID_MASK_SHIFT)) |
		(((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
		 SD(CHECK_SLID_VALUE_SHIFT));

	for (i = 0; i < dd->num_sdma; i++) {
		hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x",
			  i, (u32)sreg);
		sde = &dd->per_sdma[i];
		write_sde_csr(sde, SD(CHECK_SLID), sreg);
	}
}
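
/*
 * Worked example (hypothetical values): with an LMC of 2 the low two
 * lid bits float, so the caller passes mask == ~0x3.  For lid 0x1234
 * the CSR gets that mask in CHECK_SLID_MASK and (lid & mask) == 0x1234
 * in CHECK_SLID_VALUE; any source LID in 0x1234..0x1237 then passes
 * the engine's SLID check.
 */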
/* tx not dword sized - pad */
int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	int rval = 0;

	tx->num_desc++;
	if ((unlikely(tx->num_desc == tx->desc_limit))) {
		rval = _extend_sdma_tx_descs(dd, tx);
		if (rval) {
			sdma_txclean(dd, tx);
			return rval;
		}
	}
	/* finish the one just added */
	make_tx_sdma_desc(
		tx,
		SDMA_MAP_NONE,
		dd->sdma_pad_phys,
		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
	_sdma_close_tx(dd, tx);
	return rval;
}
/*
 * Add ahg to the sdma_txreq
 *
 * The logic will consume up to 3
 * descriptors at the beginning of
 * the tx.
 */
void _sdma_txreq_ahgadd(
	struct sdma_txreq *tx,
	u8 num_ahg,
	u8 ahg_entry,
	u32 *ahg,
	u8 ahg_hlen)
{
	u32 i, shift = 0, desc = 0;
	u8 mode;

	WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4);
	/* compute mode */
	if (num_ahg == 1)
		mode = SDMA_AHG_APPLY_UPDATE1;
	else if (num_ahg <= 5)
		mode = SDMA_AHG_APPLY_UPDATE2;
	else
		mode = SDMA_AHG_APPLY_UPDATE3;
	tx->num_desc++;
	/* initialize the consumed descriptors to zero */
	switch (mode) {
	case SDMA_AHG_APPLY_UPDATE3:
		tx->num_desc++;
		tx->descs[2].qw[0] = 0;
		tx->descs[2].qw[1] = 0;
		/* fall through */
	case SDMA_AHG_APPLY_UPDATE2:
		tx->num_desc++;
		tx->descs[1].qw[0] = 0;
		tx->descs[1].qw[1] = 0;
		break;
	}
	ahg_hlen >>= 2;
	tx->descs[0].qw[1] |=
		(((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
			<< SDMA_DESC1_HEADER_INDEX_SHIFT) |
		(((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK)
			<< SDMA_DESC1_HEADER_DWS_SHIFT) |
		(((u64)mode & SDMA_DESC1_HEADER_MODE_MASK)
			<< SDMA_DESC1_HEADER_MODE_SHIFT) |
		(((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK)
			<< SDMA_DESC1_HEADER_UPDATE1_SHIFT);
	for (i = 0; i < (num_ahg - 1); i++) {
		if (!shift && !(i & 2))
			desc++;
		tx->descs[desc].qw[!!(i & 2)] |=
			(((u64)ahg[i + 1]) << shift);
		shift = (shift + 32) & 63;
	}
}
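
/*
 * Worked example of the mode selection above: num_ahg == 1 fits
 * entirely in descriptor 0 (UPDATE1); 2 to 5 update words select
 * UPDATE2 and also consume descriptor 1; 6 to 9 select UPDATE3 and
 * consume descriptors 1 and 2, hence the "up to 3 descriptors" note.
 * The loop then packs each remaining ahg[] word 32 bits at a time
 * into the two qws of each consumed descriptor.
 */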
/**
 * sdma_ahg_alloc - allocate an AHG entry
 * @sde: engine to allocate from
 *
 * Return:
 * 0-31 when successful, -EOPNOTSUPP if AHG is not enabled,
 * -ENOSPC if an entry is not available
 */
int sdma_ahg_alloc(struct sdma_engine *sde)
{
	int nr;
	int oldbit;

	if (!sde) {
		trace_hfi1_ahg_allocate(sde, -EINVAL);
		return -EINVAL;
	}
	while (1) {
		nr = ffz(ACCESS_ONCE(sde->ahg_bits));
		if (nr > 31) {
			trace_hfi1_ahg_allocate(sde, -ENOSPC);
			return -ENOSPC;
		}
		oldbit = test_and_set_bit(nr, &sde->ahg_bits);
		if (!oldbit)
			break;
		cpu_relax();
	}
	trace_hfi1_ahg_allocate(sde, nr);
	return nr;
}
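
/*
 * Usage sketch (illustrative only): an AHG index is claimed per send
 * context and returned when header generation is done.  The priv
 * structure and fall_back_to_full_headers() are hypothetical.
 *
 *	priv->ahg_idx = sdma_ahg_alloc(sde);
 *	if (priv->ahg_idx < 0)
 *		fall_back_to_full_headers();
 *	...
 *	sdma_ahg_free(sde, priv->ahg_idx);
 */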
/**
 * sdma_ahg_free - free an AHG entry
 * @sde: engine to return AHG entry
 * @ahg_index: index to free
 *
 * This routine frees the indicated AHG entry.
 */
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
{
	if (!sde)
		return;
	trace_hfi1_ahg_deallocate(sde, ahg_index);
	if (ahg_index < 0 || ahg_index > 31)
		return;
	clear_bit(ahg_index, &sde->ahg_bits);
}
/*
 * SPC freeze handling for SDMA engines.  Called when the driver knows
 * the SPC is going into a freeze but before the freeze is fully
 * settled.  Generally an error interrupt.
 *
 * This event will pull the engine out of running so no more entries can be
 * added to the engine's queue.
 */
void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down)
{
	int i;
	enum sdma_events event = link_down ? sdma_event_e85_link_down :
					     sdma_event_e80_hw_freeze;

	/* set up the wait but do not wait here */
	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);

	/* tell all engines to stop running and wait */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i], event);

	/* sdma_freeze() will wait for all engines to have stopped */
}
/*
 * SPC freeze handling for SDMA engines.  Called when the driver knows
 * the SPC is fully frozen.
 */
void sdma_freeze(struct hfi1_devdata *dd)
{
	int i;
	int ret;

	/*
	 * Make sure all engines have moved out of the running state before
	 * continuing.
	 */
	ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
				       atomic_read(&dd->sdma_unfreeze_count) <=
				       0);
	/* interrupted or count is negative, then unloading - just exit */
	if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
		return;

	/* set up the count for the next wait */
	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);

	/* tell all engines that the SPC is frozen, they can start cleaning */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen);

	/*
	 * Wait for everyone to finish software clean before exiting.  The
	 * software clean will read engine CSRs, so must be completed before
	 * the next step, which will clear the engine CSRs.
	 */
	(void)wait_event_interruptible(dd->sdma_unfreeze_wq,
				       atomic_read(&dd->sdma_unfreeze_count)
				       <= 0);
	/* no need to check results - done no matter what */
}
/*
 * SPC freeze handling for the SDMA engines.  Called after the SPC is unfrozen.
 *
 * The SPC freeze acts like an SDMA halt and a hardware clean combined.  All
 * that is left is a software clean.  We could do it after the SPC is fully
 * frozen, but then we'd have to add another state to wait for the unfreeze.
 * Instead, just defer the software clean until the unfreeze step.
 */
void sdma_unfreeze(struct hfi1_devdata *dd)
{
	int i;

	/* tell all engines to start freeze clean up */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i],
				   sdma_event_e82_hw_unfreeze);
}
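
/*
 * Sketch of the overall SPC freeze flow the three entry points above
 * implement (illustrative; the caller is the chip error-handling
 * code):
 *
 *	sdma_freeze_notify(dd, link_down); // e80/e85: stop all engines
 *	sdma_freeze(dd);    // wait for halt, then e81: software clean
 *	...                 // SPC unfrozen by the caller
 *	sdma_unfreeze(dd);  // e82: restart engines to s99 or s20
 */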
/**
 * _sdma_engine_progress_schedule() - schedule progress on engine
 * @sde: sdma_engine to schedule progress
 *
 */
void _sdma_engine_progress_schedule(
	struct sdma_engine *sde)
{
	trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
	/* assume we have selected a good cpu */
	write_csr(sde->dd,
		  CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)),
		  sde->progress_mask);
}
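
/*
 * Note on the register arithmetic above (inferred from the code, not
 * normative): interrupt sources appear to be grouped 64 to a CSR, each
 * CSR 8 bytes apart, so CCE_INT_FORCE + 8 * (IS_SDMA_START / 64)
 * addresses the force CSR holding the SDMA sources, and
 * sde->progress_mask carries the single bit for this engine within
 * that CSR.  Writing it forces this engine's progress interrupt.
 */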