/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
/***************************************************************************
* Structures & Definitions
***************************************************************************/
#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
#define SPQ_BLOCK_SLEEP_LENGTH          (1000)
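
/* SPQ_HIGH_PRI_RESERVE_DEFAULT - number of ring elements kept in reserve
 * for high-priority ramrods when posting from the pending list.
 * SPQ_BLOCK_SLEEP_LENGTH - number of polling iterations qed_spq_block()
 * performs while waiting for a blocking ramrod to complete.
 */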
/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
                                void *cookie,
                                union event_ring_data *data,
                                u8 fw_return_code)
{
        struct qed_spq_comp_done *comp_done;

        comp_done = (struct qed_spq_comp_done *)cookie;

        comp_done->done = 0x1;
        comp_done->fw_return_code = fw_return_code;

        /* make update visible to waiting thread */
        smp_wmb();
}
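
/* Poll for the completion flag set by qed_spq_blocking_cb(). If the ramrod
 * does not complete within SPQ_BLOCK_SLEEP_LENGTH iterations, ask the MCP
 * to drain pending messages and retry once before giving up.
 */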
static int qed_spq_block(struct qed_hwfn *p_hwfn,
                         struct qed_spq_entry *p_ent,
                         u8 *p_fw_ret)
{
        int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        struct qed_spq_comp_done *comp_done;
        int rc;

        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
        while (sleep_count) {
                /* validate we receive completion update */
                smp_rmb();
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }
                usleep_range(5000, 10000);
                sleep_count--;
        }

        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
        if (rc != 0)
                DP_NOTICE(p_hwfn, "MCP drain failed\n");

        /* Retry after drain */
        sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        while (sleep_count) {
                /* validate we receive completion update */
                smp_rmb();
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }
                usleep_range(5000, 10000);
                sleep_count--;
        }

        if (comp_done->done == 1) {
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
                return 0;
        }

        DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");

        return -EBUSY;
}
/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int
qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
                   struct qed_spq_entry *p_ent)
{
        p_ent->flags = 0;

        switch (p_ent->comp_mode) {
        case QED_SPQ_MODE_EBLOCK:
        case QED_SPQ_MODE_BLOCK:
                p_ent->comp_cb.function = qed_spq_blocking_cb;
                break;
        case QED_SPQ_MODE_CB:
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
                   p_ent->elem.hdr.cid,
                   p_ent->elem.hdr.cmd_id,
                   p_ent->elem.hdr.protocol_id,
                   p_ent->elem.data_ptr.hi,
                   p_ent->elem.data_ptr.lo,
                   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
                           QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        return 0;
}
/***************************************************************************
* HSI access
***************************************************************************/
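/* Program the XSTORM connection context for the SPQ: enable the relevant
 * aggregation flags, assign the QM physical queue, and point the context at
 * the SPQ chain and the ConsQ consolidation base address.
 */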
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
                                  struct qed_spq *p_spq)
{
        struct qed_cxt_info cxt_info;
        struct core_conn_context *p_cxt;
        union qed_qm_pq_params pq_params;
        u16 pq;
        int rc;

        cxt_info.iid = p_spq->cid;

        rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
        if (rc < 0) {
                DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
                          p_spq->cid);
                return;
        }

        p_cxt = cxt_info.p_cxt;

        SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags1,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags9,
                  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

        /* QM physical queue */
        memset(&pq_params, 0, sizeof(pq_params));
        pq_params.core.tc = LB_TC;
        pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
        p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

        p_cxt->xstorm_st_context.spq_base_lo =
                DMA_LO_LE(p_spq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.spq_base_hi =
                DMA_HI_LE(p_spq->chain.p_phys_addr);

        DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
                       p_hwfn->p_consq->chain.p_phys_addr);
}
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
                           struct qed_spq *p_spq,
                           struct qed_spq_entry *p_ent)
{
        struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
        u16 echo = qed_chain_get_prod_idx(p_chain);
        struct slow_path_element *elem;
        struct core_db_data db;

        p_ent->elem.hdr.echo = cpu_to_le16(echo);
        elem = qed_chain_produce(p_chain);
        if (!elem) {
                DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
                return -EINVAL;
        }

        *elem = p_ent->elem; /* struct assignment */

        /* send a doorbell on the slow hwfn session */
        memset(&db, 0, sizeof(db));
        SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
        db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

        /* make sure the SPQE is updated before the doorbell */
        wmb();

        DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

        /* make sure the doorbell was rung */
        wmb();

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
                   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
                   p_spq->cid, db.params, db.agg_flags,
                   qed_chain_get_prod_idx(p_chain));

        return 0;
}
/***************************************************************************
* Asynchronous events
***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
                           struct event_ring_entry *p_eqe)
{
        switch (p_eqe->protocol_id) {
        case PROTOCOLID_COMMON:
                return qed_sriov_eqe_event(p_hwfn,
                                           p_eqe->opcode,
                                           p_eqe->echo, &p_eqe->data);
        default:
                DP_NOTICE(p_hwfn,
                          "Unknown Async completion for protocol: %d\n",
                          p_eqe->protocol_id);
                return -EINVAL;
        }
}
/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
                        u16 prod)
{
        u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
                   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        mmiowb();
}
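
/* Service the event queue: walk all EQ elements between the driver's
 * consumer index and the firmware's consumer snapshot, dispatching each
 * entry either to the async handler or to qed_spq_completion().
 */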
int qed_eq_completion(struct qed_hwfn *p_hwfn,
                      void *cookie)
{
        struct qed_eq *p_eq = cookie;
        struct qed_chain *p_chain = &p_eq->chain;
        int rc = 0;

        /* take a snapshot of the FW consumer */
        u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

        /* Need to guarantee the fw_cons index we use points to a usable
         * element (to comply with our chain), so our macros would comply
         */
        if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
            qed_chain_get_usable_per_page(p_chain))
                fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

        /* Complete current segment of eq entries */
        while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
                struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

                if (!p_eqe) {
                        rc = -EINVAL;
                        break;
                }

                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
                           p_eqe->opcode,
                           p_eqe->protocol_id,
                           p_eqe->reserved0,
                           le16_to_cpu(p_eqe->echo),
                           p_eqe->fw_return_code,
                           p_eqe->flags);

                if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
                        if (qed_async_event_completion(p_hwfn, p_eqe))
                                rc = -EINVAL;
                } else if (qed_spq_completion(p_hwfn,
                                              p_eqe->echo,
                                              p_eqe->fw_return_code,
                                              &p_eqe->data)) {
                        rc = -EINVAL;
                }

                qed_chain_recycle_consumed(p_chain);
        }

        qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

        return rc;
}
struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
                            u16 num_elem)
{
        struct qed_eq *p_eq;

        /* Allocate EQ struct */
        p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
        if (!p_eq) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
                return NULL;
        }

        /* Allocate and initialize EQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            num_elem,
                            sizeof(union event_ring_element),
                            &p_eq->chain)) {
                DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
                goto eq_allocate_fail;
        }

        /* register EQ completion on the SP SB */
        qed_int_register_cb(p_hwfn,
                            qed_eq_completion,
                            p_eq,
                            &p_eq->eq_sb_index,
                            &p_eq->p_fw_cons);

        return p_eq;

eq_allocate_fail:
        qed_eq_free(p_hwfn, p_eq);
        return NULL;
}
void qed_eq_setup(struct qed_hwfn *p_hwfn,
                  struct qed_eq *p_eq)
{
        qed_chain_reset(&p_eq->chain);
}
void qed_eq_free(struct qed_hwfn *p_hwfn,
                 struct qed_eq *p_eq)
{
        if (!p_eq)
                return;
        qed_chain_free(p_hwfn->cdev, &p_eq->chain);
        kfree(p_eq);
}
/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
                              struct eth_slow_path_rx_cqe *cqe,
                              enum protocol_type protocol)
{
        if (IS_VF(p_hwfn->cdev))
                return 0;

        /* @@@tmp - it's possible we'll eventually want to handle some
         * actual commands that can arrive here, but for now this is only
         * used to complete the ramrod using the echo value on the cqe
         */
        return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}
int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
                           struct eth_slow_path_rx_cqe *cqe)
{
        int rc;

        rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
        if (rc)
                DP_NOTICE(p_hwfn,
                          "Failed to handle RXQ CQE [cmd 0x%02x]\n",
                          cqe->ramrod_cmd_id);

        return rc;
}
/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
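/* Initialize the SPQ bookkeeping: the pending/completion lists, the free
 * pool of pre-allocated entries (each pointing at its DMA-able ramrod data),
 * the completion bitmap, and the CID/context used by the hardware.
 */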
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_virt = NULL;
        dma_addr_t p_phys = 0;
        unsigned int i;

        INIT_LIST_HEAD(&p_spq->pending);
        INIT_LIST_HEAD(&p_spq->completion_pending);
        INIT_LIST_HEAD(&p_spq->free_pool);
        INIT_LIST_HEAD(&p_spq->unlimited_pending);
        spin_lock_init(&p_spq->lock);

        /* SPQ empty pool */
        p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
        p_virt = p_spq->p_virt;

        for (i = 0; i < p_spq->chain.capacity; i++) {
                DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

                list_add_tail(&p_virt->list, &p_spq->free_pool);

                p_virt++;
                p_phys += sizeof(struct qed_spq_entry);
        }

        /* Statistics */
        p_spq->normal_count = 0;
        p_spq->comp_count = 0;
        p_spq->comp_sent_count = 0;
        p_spq->unlimited_pending_count = 0;

        bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
        p_spq->comp_bitmap_idx = 0;

        /* SPQ cid, cannot fail */
        qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
        qed_spq_hw_initialize(p_hwfn, p_spq);

        /* reset the chain itself */
        qed_chain_reset(&p_spq->chain);
}
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = NULL;
        dma_addr_t p_phys = 0;
        struct qed_spq_entry *p_virt = NULL;

        /* SPQ struct */
        p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
        if (!p_spq) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
                return -ENOMEM;
        }

        /* SPQ ring */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_SINGLE,
                            0, /* N/A when the mode is SINGLE */
                            sizeof(struct slow_path_element),
                            &p_spq->chain)) {
                DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
                goto spq_allocate_fail;
        }

        /* allocate and fill the SPQ elements (incl. ramrod data list) */
        p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    p_spq->chain.capacity *
                                    sizeof(struct qed_spq_entry),
                                    &p_phys, GFP_KERNEL);
        if (!p_virt)
                goto spq_allocate_fail;

        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;
        p_hwfn->p_spq = p_spq;

        return 0;

spq_allocate_fail:
        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
        return -ENOMEM;
}
void qed_spq_free(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;

        if (!p_spq)
                return;

        if (p_spq->p_virt)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_spq->chain.capacity *
                                  sizeof(struct qed_spq_entry),
                                  p_spq->p_virt, p_spq->p_phys);

        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
}
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
                  struct qed_spq_entry **pp_ent)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;
        int rc = 0;

        spin_lock_bh(&p_spq->lock);

        if (list_empty(&p_spq->free_pool)) {
                p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
                if (!p_ent) {
                        rc = -ENOMEM;
                        goto out_unlock;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
                p_ent = list_first_entry(&p_spq->free_pool,
                                         struct qed_spq_entry,
                                         list);
                list_del(&p_ent->list);
                p_ent->queue = &p_spq->pending;
        }

        *pp_ent = p_ent;

out_unlock:
        spin_unlock_bh(&p_spq->lock);
        return rc;
}
/* Locked variant; should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
                                   struct qed_spq_entry *p_ent)
{
        list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}
void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
                          struct qed_spq_entry *p_ent)
{
        spin_lock_bh(&p_hwfn->p_spq->lock);
        __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_hwfn->p_spq->lock);
}
/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int
qed_spq_add_entry(struct qed_hwfn *p_hwfn,
                  struct qed_spq_entry *p_ent,
                  enum spq_priority priority)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;

        if (p_ent->queue == &p_spq->unlimited_pending) {
                if (list_empty(&p_spq->free_pool)) {
                        list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
                        p_spq->unlimited_pending_count++;

                        return 0;
                } else {
                        struct qed_spq_entry *p_en2;

                        p_en2 = list_first_entry(&p_spq->free_pool,
                                                 struct qed_spq_entry,
                                                 list);
                        list_del(&p_en2->list);

                        /* Copy the ring element physical pointer to the new
                         * entry, since we are about to override the entire ring
                         * entry and don't want to lose the pointer.
                         */
                        p_ent->elem.data_ptr = p_en2->elem.data_ptr;

                        *p_en2 = *p_ent;

                        /* EBLOCK is responsible to free the allocated p_ent */
                        if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
                                kfree(p_ent);

                        p_ent = p_en2;
                }
        }

        /* entry is to be placed in 'pending' queue */
        switch (priority) {
        case QED_SPQ_PRIORITY_NORMAL:
                list_add_tail(&p_ent->list, &p_spq->pending);
                p_spq->normal_count++;
                break;
        case QED_SPQ_PRIORITY_HIGH:
                list_add(&p_ent->list, &p_spq->pending);
                p_spq->high_count++;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->p_spq)
                return 0xffffffff;      /* illegal */
        return p_hwfn->p_spq->cid;
}
/***************************************************************************
* Posting new Ramrods
***************************************************************************/
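/* Typical caller flow (a minimal sketch; real callers live outside this
 * file, usually behind higher-level helpers, and fill in more fields):
 *
 *      struct qed_spq_entry *p_ent = NULL;
 *      u8 fw_ret;
 *      int rc;
 *
 *      rc = qed_spq_get_entry(p_hwfn, &p_ent);
 *      if (rc)
 *              return rc;
 *      p_ent->comp_mode = QED_SPQ_MODE_EBLOCK;
 *      p_ent->priority = QED_SPQ_PRIORITY_NORMAL;
 *      ... fill p_ent->elem.hdr and the ramrod data ...
 *      rc = qed_spq_post(p_hwfn, p_ent, &fw_ret);
 */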
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
                             struct list_head *head,
                             u32 keep_reserve)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        int rc;

        /* Post entries until only 'keep_reserve' ring elements remain free */
        while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
               !list_empty(head)) {
                struct qed_spq_entry *p_ent =
                        list_first_entry(head, struct qed_spq_entry, list);
                list_del(&p_ent->list);
                list_add_tail(&p_ent->list, &p_spq->completion_pending);
                p_spq->comp_sent_count++;

                rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
                if (rc) {
                        list_del(&p_ent->list);
                        __qed_spq_return_entry(p_hwfn, p_ent);
                        return rc;
                }
        }

        return 0;
}
static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;

        while (!list_empty(&p_spq->free_pool)) {
                if (list_empty(&p_spq->unlimited_pending))
                        break;

                p_ent = list_first_entry(&p_spq->unlimited_pending,
                                         struct qed_spq_entry,
                                         list);
                if (!p_ent)
                        return -EINVAL;

                list_del(&p_ent->list);

                qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        }

        return qed_spq_post_list(p_hwfn, &p_spq->pending,
                                 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
int qed_spq_post(struct qed_hwfn *p_hwfn,
                 struct qed_spq_entry *p_ent,
                 u8 *fw_return_code)
{
        int rc = 0;
        struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
        bool b_ret_ent = true;

        if (!p_hwfn)
                return -EINVAL;

        if (!p_ent) {
                DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
                return -EINVAL;
        }

        /* Complete the entry */
        rc = qed_spq_fill_entry(p_hwfn, p_ent);

        spin_lock_bh(&p_spq->lock);

        /* Check return value after LOCK is taken for cleaner error flow */
        if (rc)
                goto spq_post_fail;

        /* Add the request to the pending queue */
        rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        if (rc)
                goto spq_post_fail;

        rc = qed_spq_pend_post(p_hwfn);
        if (rc) {
                /* Since it's possible that pending failed for a different
                 * entry [although unlikely], the failed entry was already
                 * dealt with; No need to return it here.
                 */
                b_ret_ent = false;
                goto spq_post_fail;
        }

        spin_unlock_bh(&p_spq->lock);

        if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
                /* For entries in QED BLOCK mode, the completion code cannot
                 * perform the necessary cleanup - if it did, we couldn't
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);

                if (p_ent->queue == &p_spq->unlimited_pending) {
                        /* This is an allocated p_ent which does not need to
                         * return to pool.
                         */
                        kfree(p_ent);
                        return rc;
                }

                if (rc)
                        goto spq_post_fail2;

                /* return to pool */
                qed_spq_return_entry(p_hwfn, p_ent);
        }

        return rc;

spq_post_fail2:
        spin_lock_bh(&p_spq->lock);
        list_del(&p_ent->list);
        qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
        /* return to the free pool */
        if (b_ret_ent)
                __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}
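
/* Match an EQ completion (by its 'echo' value) against the
 * completion_pending list, run the entry's completion callback, and try to
 * post any ramrods that were waiting for room in the ring.
 */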
int qed_spq_completion(struct qed_hwfn *p_hwfn,
                       __le16 echo,
                       u8 fw_return_code,
                       union event_ring_data *p_data)
{
        struct qed_spq *p_spq;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_spq_entry *tmp;
        struct qed_spq_entry *found = NULL;
        int rc;

        if (!p_hwfn)
                return -EINVAL;

        p_spq = p_hwfn->p_spq;
        if (!p_spq)
                return -EINVAL;

        spin_lock_bh(&p_spq->lock);
        list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
                                 list) {
                if (p_ent->elem.hdr.echo == echo) {
                        u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

                        list_del(&p_ent->list);

                        /* Avoid overriding of SPQ entries when getting
                         * out-of-order completions, by marking the completions
                         * in a bitmap and increasing the chain consumer only
                         * for the first successive completed entries.
                         */
                        bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);

                        while (test_bit(p_spq->comp_bitmap_idx,
                                        p_spq->p_comp_bitmap)) {
                                bitmap_clear(p_spq->p_comp_bitmap,
                                             p_spq->comp_bitmap_idx,
                                             SPQ_RING_SIZE);
                                p_spq->comp_bitmap_idx++;
                                qed_chain_return_produced(&p_spq->chain);
                        }

                        p_spq->comp_count++;
                        found = p_ent;
                        break;
                }

                /* This is relatively uncommon - depends on scenarios
                 * which have multiple per-PF sent ramrods.
                 */
                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
                           le16_to_cpu(echo),
                           le16_to_cpu(p_ent->elem.hdr.echo));
        }

        /* Release lock before callback, as callback may post
         * an additional ramrod.
         */
        spin_unlock_bh(&p_spq->lock);

        if (!found) {
                DP_NOTICE(p_hwfn,
                          "Failed to find an entry this EQE completes\n");
                return -EEXIST;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p\n",
                   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);

        if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
            (found->queue == &p_spq->unlimited_pending))
                /* EBLOCK is responsible for returning its own entry into the
                 * free list, unless it originally added the entry into the
                 * unlimited pending list.
                 */
                qed_spq_return_entry(p_hwfn, found);

        /* Attempt to post pending requests */
        spin_lock_bh(&p_spq->lock);
        rc = qed_spq_pend_post(p_hwfn);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}
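
/* ConsQ - the consolidation queue whose base address is programmed into the
 * connection context by qed_spq_hw_initialize(); only allocation, reset and
 * teardown are handled here.
 */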
struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_consq *p_consq;

        /* Allocate ConsQ struct */
        p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
        if (!p_consq) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
                return NULL;
        }

        /* Allocate and initialize ConsQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            QED_CHAIN_PAGE_SIZE / 0x80,
                            0x80,
                            &p_consq->chain)) {
                DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
                goto consq_allocate_fail;
        }

        return p_consq;

consq_allocate_fail:
        qed_consq_free(p_hwfn, p_consq);
        return NULL;
}
void qed_consq_setup(struct qed_hwfn *p_hwfn,
                     struct qed_consq *p_consq)
{
        qed_chain_reset(&p_consq->chain);
}
void qed_consq_free(struct qed_hwfn *p_hwfn,
                    struct qed_consq *p_consq)
{
        if (!p_consq)
                return;
        qed_chain_free(p_hwfn->cdev, &p_consq->chain);
        kfree(p_consq);
}