/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
/***************************************************************************
* Structures & Definitions
***************************************************************************/
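/* SPQ_HIGH_PRI_RESERVE_DEFAULT - ring elements kept in reserve when posting
 * the pending list, so high-priority ramrods can always be submitted.
 * SPQ_BLOCK_SLEEP_LENGTH - number of polling iterations qed_spq_block()
 * performs while waiting for a blocking ramrod to complete.
 */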
#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)
#define SPQ_BLOCK_SLEEP_LENGTH		(1000)
/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
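/* For BLOCK/EBLOCK ramrods the callback below only records the firmware
 * return code and raises the 'done' flag; qed_spq_block() polls that flag,
 * requesting an MCP drain and retrying if the ramrod appears stuck.
 */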
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data,
				u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->done			= 0x1;
	comp_done->fw_return_code	= fw_return_code;

	/* make update visible to waiting thread */
	smp_wmb();
}
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret)
{
	int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	struct qed_spq_comp_done *comp_done;
	int rc;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc != 0)
		DP_NOTICE(p_hwfn, "MCP drain failed\n");

	/* Retry after drain */
	sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}

	DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");
	return -EBUSY;
}
/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int
qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
		   struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}
/***************************************************************************
* HSI access
***************************************************************************/
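/* Program the SPQ connection context: DQ/consolidation flags, the QM
 * physical queue, and the ring/consolidation queue base addresses that
 * firmware uses to fetch slow path elements for this CID.
 */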
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	u16				pq;
	struct qed_cxt_info		cxt_info;
	struct core_conn_context	*p_cxt;
	union qed_qm_pq_params		pq_params;
	int				rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq,
			   struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element	*elem;
	struct core_db_data		db;

	p_ent->elem.hdr.echo	= cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	memset(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

	/* validate producer is up to-date */
	rmb();

	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* do not reorder */
	barrier();

	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure doorbell is rang */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
		   p_spq->cid, db.params, db.agg_flags,
		   qed_chain_get_prod_idx(p_chain));

	return 0;
}
/***************************************************************************
* Asynchronous events
***************************************************************************/
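/* Asynchronous EQ entries are not matched against a pending SPQ entry.
 * Currently only PROTOCOLID_COMMON events are expected here; they are
 * forwarded to the SR-IOV event handler.
 */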
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
	case PROTOCOLID_COMMON:
		return qed_sriov_eqe_event(p_hwfn,
					   p_eqe->opcode,
					   p_eqe->echo, &p_eqe->data);
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}
/***************************************************************************
* EQ API
***************************************************************************/
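/* The event queue is produced by firmware and consumed by the driver;
 * qed_eq_prod_update() writes the driver's chain producer index to the
 * USTORM EQE consumer offset so firmware knows how far we have consumed.
 */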
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
			u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	mmiowb();
}
int qed_eq_completion(struct qed_hwfn *p_hwfn,
		      void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	return rc;
}
struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
			    u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
		return NULL;
	}

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
		goto eq_allocate_fail;
	}

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn,
			    qed_eq_completion,
			    p_eq,
			    &p_eq->eq_sb_index,
			    &p_eq->p_fw_cons);

	return p_eq;

eq_allocate_fail:
	qed_eq_free(p_hwfn, p_eq);
	return NULL;
}
void qed_eq_setup(struct qed_hwfn *p_hwfn,
		  struct qed_eq *p_eq)
{
	qed_chain_reset(&p_eq->chain);
}
void qed_eq_free(struct qed_hwfn *p_hwfn,
		 struct qed_eq *p_eq)
{
	if (!p_eq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_eq->chain);
	kfree(p_eq);
}
/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(
	struct qed_hwfn *p_hwfn,
	struct eth_slow_path_rx_cqe *cqe,
	enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}
int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);

	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}
/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
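/* SPQ entry lifecycle: entries start in free_pool, move to pending (or to
 * unlimited_pending once the pool is exhausted) when a ramrod is queued,
 * sit on completion_pending after being doorbelled, and return to free_pool
 * when the EQ reports their completion.
 */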
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq		*p_spq	= p_hwfn->p_spq;
	struct qed_spq_entry	*p_virt = NULL;
	dma_addr_t		p_phys	= 0;
	unsigned int		i	= 0;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys	= p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt	= p_spq->p_virt;

	for (i = 0; i < p_spq->chain.capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count		= 0;
	p_spq->comp_count		= 0;
	p_spq->comp_sent_count		= 0;
	p_spq->unlimited_pending_count	= 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);
}
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq		*p_spq	= NULL;
	dma_addr_t		p_phys	= 0;
	struct qed_spq_entry	*p_virt = NULL;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
		return -ENOMEM;
	}

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    0,	/* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
		goto spq_allocate_fail;
	}

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    p_spq->chain.capacity *
				    sizeof(struct qed_spq_entry),
				    &p_phys,
				    GFP_KERNEL);

	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}
void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (!p_spq)
		return;

	if (p_spq->p_virt)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_spq->chain.capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt,
				  p_spq->p_phys);

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
}
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
		  struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry,
					 list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}
/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}
void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
			  struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}
/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int
qed_spq_add_entry(struct qed_hwfn *p_hwfn,
		  struct qed_spq_entry *p_ent,
		  enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry,
						 list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			kfree(p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}
/***************************************************************************
* Posting new Ramrods
***************************************************************************/
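/* Post entries from a pending list to the hardware ring, stopping once only
 * 'keep_reserve' ring elements remain so high-priority ramrods can still be
 * posted.
 */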
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head,
			     u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);

		list_del(&p_ent->list);
		list_add_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}
static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry,
					 list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent,
		 u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq		*p_spq;
	struct qed_spq_entry	*p_ent = NULL;
	struct qed_spq_entry	*tmp;
	struct qed_spq_entry	*found = NULL;
	int			rc;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
				 list) {
		if (p_ent->elem.hdr.echo == echo) {
			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

			list_del(&p_ent->list);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
			bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);

			while (test_bit(p_spq->comp_bitmap_idx,
					p_spq->p_comp_bitmap)) {
				bitmap_clear(p_spq->p_comp_bitmap,
					     p_spq->comp_bitmap_idx,
					     SPQ_RING_SIZE);
				p_spq->comp_bitmap_idx++;
				qed_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE completes\n");
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n",
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);

	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
		/* EBLOCK is responsible for freeing its own entry */
		qed_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	spin_lock_bh(&p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}
struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
		return NULL;
	}

	/* Allocate and initialize ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80,
			    &p_consq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
		goto consq_allocate_fail;
	}

	return p_consq;

consq_allocate_fail:
	qed_consq_free(p_hwfn, p_consq);
	return NULL;
}
void qed_consq_setup(struct qed_hwfn *p_hwfn,
		     struct qed_consq *p_consq)
{
	qed_chain_reset(&p_consq->chain);
}
void qed_consq_free(struct qed_hwfn *p_hwfn,
		    struct qed_consq *p_consq)
{
	if (!p_consq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_consq->chain);
	kfree(p_consq);
}