/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
#define SPQ_BLOCK_SLEEP_LENGTH          (1000)

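/* SPQ_HIGH_PRI_RESERVE_DEFAULT keeps one ring element in reserve so that a
 * high-priority ramrod can always be posted (see the keep_reserve argument
 * of qed_spq_post_list() below). SPQ_BLOCK_SLEEP_LENGTH is the number of
 * 5-10ms polling iterations qed_spq_block() performs before declaring a
 * ramrod stuck.
 */
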
/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
                                void *cookie,
                                union event_ring_data *data,
                                u8 fw_return_code)
{
        struct qed_spq_comp_done *comp_done;

        comp_done = (struct qed_spq_comp_done *)cookie;

        comp_done->done = 0x1;
        comp_done->fw_return_code = fw_return_code;

        /* make update visible to waiting thread */
        smp_wmb();
}

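/* qed_spq_block() implements the BLOCK/EBLOCK completion modes: it polls the
 * comp_done cookie that qed_spq_blocking_cb() fills in from EQ context,
 * sleeping 5-10ms between samples. If the first polling pass times out, it
 * requests a slowpath drain from the management FW via qed_mcp_drain() and
 * polls one more full pass before giving up with -EBUSY.
 */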
static int qed_spq_block(struct qed_hwfn *p_hwfn,
                         struct qed_spq_entry *p_ent,
                         u8 *p_fw_ret)
{
        int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        struct qed_spq_comp_done *comp_done;
        int rc;

        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
        while (sleep_count) {
                /* validate we receive completion update */
                smp_rmb();
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }
                usleep_range(5000, 10000);
                sleep_count--;
        }

        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
        if (rc != 0)
                DP_NOTICE(p_hwfn, "MCP drain failed\n");

        /* Retry after drain */
        sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        while (sleep_count) {
                /* validate we receive completion update */
                smp_rmb();
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }
                usleep_range(5000, 10000);
                sleep_count--;
        }

        if (comp_done->done == 1) {
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
                return 0;
        }

        DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");

        return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int
qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
                   struct qed_spq_entry *p_ent)
{
        p_ent->flags = 0;

        switch (p_ent->comp_mode) {
        case QED_SPQ_MODE_EBLOCK:
        case QED_SPQ_MODE_BLOCK:
                p_ent->comp_cb.function = qed_spq_blocking_cb;
                break;
        case QED_SPQ_MODE_CB:
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
                   p_ent->elem.hdr.cid,
                   p_ent->elem.hdr.cmd_id,
                   p_ent->elem.hdr.protocol_id,
                   p_ent->elem.data_ptr.hi,
                   p_ent->elem.data_ptr.lo,
                   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
                           QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
                                  struct qed_spq *p_spq)
{
        u16 pq;
        struct qed_cxt_info cxt_info;
        struct core_conn_context *p_cxt;
        union qed_qm_pq_params pq_params;
        int rc;

        cxt_info.iid = p_spq->cid;

        rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

        if (rc < 0) {
                DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
                          p_spq->cid);
                return;
        }

        p_cxt = cxt_info.p_cxt;

        SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags1,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags9,
                  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

        /* QM physical queue */
        memset(&pq_params, 0, sizeof(pq_params));
        pq_params.core.tc = LB_TC;
        pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
        p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

        p_cxt->xstorm_st_context.spq_base_lo =
                DMA_LO_LE(p_spq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.spq_base_hi =
                DMA_HI_LE(p_spq->chain.p_phys_addr);

        DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
                       p_hwfn->p_consq->chain.p_phys_addr);
}

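/* qed_spq_hw_post() copies a prepared entry into the next free ring element
 * and rings the XCM doorbell for the SPQ connection. The producer index at
 * the time of posting doubles as the 'echo' value, which is how a later EQ
 * completion is matched back to this entry in qed_spq_completion().
 */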
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
                           struct qed_spq *p_spq,
                           struct qed_spq_entry *p_ent)
{
        struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
        u16 echo = qed_chain_get_prod_idx(p_chain);
        struct slow_path_element *elem;
        struct core_db_data db;

        p_ent->elem.hdr.echo = cpu_to_le16(echo);
        elem = qed_chain_produce(p_chain);
        if (!elem) {
                DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
                return -EINVAL;
        }

        *elem = p_ent->elem; /* struct assignment */

        /* send a doorbell on the slow hwfn session */
        memset(&db, 0, sizeof(db));
        SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
        db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

        /* make sure the SPQE is updated before the doorbell */
        wmb();

        DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

        /* make sure doorbell was rung */
        wmb();

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
                   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
                   p_spq->cid, db.params, db.agg_flags,
                   qed_chain_get_prod_idx(p_chain));

        return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
                           struct event_ring_entry *p_eqe)
{
        switch (p_eqe->protocol_id) {
        case PROTOCOLID_COMMON:
                return qed_sriov_eqe_event(p_hwfn,
                                           p_eqe->opcode,
                                           p_eqe->echo, &p_eqe->data);
        default:
                DP_NOTICE(p_hwfn,
                          "Unknown Async completion for protocol: %d\n",
                          p_eqe->protocol_id);
                return -EINVAL;
        }
}

/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
                        u16 prod)
{
        u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
                   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        mmiowb();
}

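/* EQ completion handler, registered on the slowpath status block. It
 * snapshots the FW consumer index and walks the chain up to it, dispatching
 * each EQE either to the async handler above or to qed_spq_completion().
 */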
int qed_eq_completion(struct qed_hwfn *p_hwfn,
                      void *cookie)
{
        struct qed_eq *p_eq = cookie;
        struct qed_chain *p_chain = &p_eq->chain;
        int rc = 0;

        /* take a snapshot of the FW consumer */
        u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

        /* Need to guarantee the fw_cons index we use points to a usable
         * element (to comply with our chain), so our macros would comply
         */
        if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
            qed_chain_get_usable_per_page(p_chain))
                fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

        /* Complete current segment of eq entries */
        while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
                struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

                if (!p_eqe) {
                        rc = -EINVAL;
                        break;
                }

                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
                           p_eqe->opcode,
                           p_eqe->protocol_id,
                           p_eqe->reserved0,
                           le16_to_cpu(p_eqe->echo),
                           p_eqe->fw_return_code,
                           p_eqe->flags);

                if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
                        if (qed_async_event_completion(p_hwfn, p_eqe))
                                rc = -EINVAL;
                } else if (qed_spq_completion(p_hwfn,
                                              p_eqe->echo,
                                              p_eqe->fw_return_code,
                                              &p_eqe->data)) {
                        rc = -EINVAL;
                }

                qed_chain_recycle_consumed(p_chain);
        }

        qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

        return rc;
}

struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
                            u16 num_elem)
{
        struct qed_eq *p_eq;

        /* Allocate EQ struct */
        p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
        if (!p_eq) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
                return NULL;
        }

        /* Allocate and initialize EQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            QED_CHAIN_CNT_TYPE_U16,
                            num_elem,
                            sizeof(union event_ring_element),
                            &p_eq->chain)) {
                DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
                goto eq_allocate_fail;
        }

        /* register EQ completion on the SP SB */
        qed_int_register_cb(p_hwfn,
                            qed_eq_completion,
                            p_eq,
                            &p_eq->eq_sb_index,
                            &p_eq->p_fw_cons);

        return p_eq;

eq_allocate_fail:
        qed_eq_free(p_hwfn, p_eq);
        return NULL;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn,
                  struct qed_eq *p_eq)
{
        qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn,
                 struct qed_eq *p_eq)
{
        if (!p_eq)
                return;
        qed_chain_free(p_hwfn->cdev, &p_eq->chain);
        kfree(p_eq);
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
                              struct eth_slow_path_rx_cqe *cqe,
                              enum protocol_type protocol)
{
        if (IS_VF(p_hwfn->cdev))
                return 0;

        /* @@@tmp - it's possible we'll eventually want to handle some
         * actual commands that can arrive here, but for now this is only
         * used to complete the ramrod using the echo value on the cqe
         */
        return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
                           struct eth_slow_path_rx_cqe *cqe)
{
        int rc;

        rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
        if (rc)
                DP_NOTICE(p_hwfn,
                          "Failed to handle RXQ CQE [cmd 0x%02x]\n",
                          cqe->ramrod_cmd_id);

        return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_virt = NULL;
        dma_addr_t p_phys = 0;
        u32 i, capacity;

        INIT_LIST_HEAD(&p_spq->pending);
        INIT_LIST_HEAD(&p_spq->completion_pending);
        INIT_LIST_HEAD(&p_spq->free_pool);
        INIT_LIST_HEAD(&p_spq->unlimited_pending);
        spin_lock_init(&p_spq->lock);

        /* SPQ empty pool */
        p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
        p_virt = p_spq->p_virt;

        capacity = qed_chain_get_capacity(&p_spq->chain);
        for (i = 0; i < capacity; i++) {
                DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

                list_add_tail(&p_virt->list, &p_spq->free_pool);

                p_virt++;
                p_phys += sizeof(struct qed_spq_entry);
        }

        /* Statistics */
        p_spq->normal_count = 0;
        p_spq->comp_count = 0;
        p_spq->comp_sent_count = 0;
        p_spq->unlimited_pending_count = 0;

        bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
        p_spq->comp_bitmap_idx = 0;

        /* SPQ cid, cannot fail */
        qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
        qed_spq_hw_initialize(p_hwfn, p_spq);

        /* reset the chain itself */
        qed_chain_reset(&p_spq->chain);
}

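/* Note the split lifecycle: qed_spq_alloc() below only allocates the ring
 * and the DMA-coherent entry array, while qed_spq_setup() above rebuilds the
 * lists, re-acquires the CID and reprograms the context. This appears to be
 * so setup can be repeated across hw-stop/hw-start cycles without
 * reallocation.
 */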
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_virt = NULL;
        struct qed_spq *p_spq = NULL;
        dma_addr_t p_phys = 0;
        u32 capacity;

        /* SPQ struct */
        p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
        if (!p_spq) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
                return -ENOMEM;
        }

        /* SPQ ring */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_SINGLE,
                            QED_CHAIN_CNT_TYPE_U16,
                            0,   /* N/A when the mode is SINGLE */
                            sizeof(struct slow_path_element),
                            &p_spq->chain)) {
                DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
                goto spq_allocate_fail;
        }

        /* allocate and fill the SPQ elements (incl. ramrod data list) */
        capacity = qed_chain_get_capacity(&p_spq->chain);
        p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    capacity *
                                    sizeof(struct qed_spq_entry),
                                    &p_phys, GFP_KERNEL);
        if (!p_virt)
                goto spq_allocate_fail;

        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;
        p_hwfn->p_spq = p_spq;

        return 0;

spq_allocate_fail:
        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
        return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        u32 capacity;

        if (!p_spq)
                return;

        if (p_spq->p_virt) {
                capacity = qed_chain_get_capacity(&p_spq->chain);
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  capacity *
                                  sizeof(struct qed_spq_entry),
                                  p_spq->p_virt, p_spq->p_phys);
        }

        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
}

int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
                  struct qed_spq_entry **pp_ent)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;
        int rc = 0;

        spin_lock_bh(&p_spq->lock);

        if (list_empty(&p_spq->free_pool)) {
                p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
                if (!p_ent) {
                        rc = -ENOMEM;
                        goto out_unlock;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
                p_ent = list_first_entry(&p_spq->free_pool,
                                         struct qed_spq_entry,
                                         list);
                list_del(&p_ent->list);
                p_ent->queue = &p_spq->pending;
        }

        *pp_ent = p_ent;

out_unlock:
        spin_unlock_bh(&p_spq->lock);
        return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
                                   struct qed_spq_entry *p_ent)
{
        list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
                          struct qed_spq_entry *p_ent)
{
        spin_lock_bh(&p_hwfn->p_spq->lock);
        __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int
qed_spq_add_entry(struct qed_hwfn *p_hwfn,
                  struct qed_spq_entry *p_ent,
                  enum spq_priority priority)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;

        if (p_ent->queue == &p_spq->unlimited_pending) {
                if (list_empty(&p_spq->free_pool)) {
                        list_add_tail(&p_ent->list,
                                      &p_spq->unlimited_pending);
                        p_spq->unlimited_pending_count++;

                        return 0;
                } else {
                        struct qed_spq_entry *p_en2;

                        p_en2 = list_first_entry(&p_spq->free_pool,
                                                 struct qed_spq_entry,
                                                 list);
                        list_del(&p_en2->list);

                        /* Copy the ring element physical pointer to the new
                         * entry, since we are about to override the entire
                         * ring entry and don't want to lose the pointer.
                         */
                        p_ent->elem.data_ptr = p_en2->elem.data_ptr;

                        *p_en2 = *p_ent;

                        /* EBLOCK responsible to free the allocated p_ent */
                        if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
                                kfree(p_ent);

                        p_ent = p_en2;
                }
        }

        /* entry is to be placed in 'pending' queue */
        switch (priority) {
        case QED_SPQ_PRIORITY_NORMAL:
                list_add_tail(&p_ent->list, &p_spq->pending);
                p_spq->normal_count++;
                break;
        case QED_SPQ_PRIORITY_HIGH:
                list_add(&p_ent->list, &p_spq->pending);
                p_spq->high_count++;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->p_spq)
                return 0xffffffff;      /* illegal */
        return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
                             struct list_head *head,
                             u32 keep_reserve)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        int rc;

        while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
               !list_empty(head)) {
                struct qed_spq_entry *p_ent =
                        list_first_entry(head, struct qed_spq_entry, list);

                list_del(&p_ent->list);
                list_add_tail(&p_ent->list, &p_spq->completion_pending);
                p_spq->comp_sent_count++;

                rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
                if (rc) {
                        list_del(&p_ent->list);
                        __qed_spq_return_entry(p_hwfn, p_ent);
                        return rc;
                }
        }

        return 0;
}

static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;

        while (!list_empty(&p_spq->free_pool)) {
                if (list_empty(&p_spq->unlimited_pending))
                        break;

                p_ent = list_first_entry(&p_spq->unlimited_pending,
                                         struct qed_spq_entry,
                                         list);
                if (!p_ent)
                        return -EINVAL;

                list_del(&p_ent->list);

                qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        }

        return qed_spq_post_list(p_hwfn, &p_spq->pending,
                                 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

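/* Typical caller flow (sketch; qed_sp_init_request() in qed_sp_commands.c is
 * the usual wrapper around this):
 *
 *      struct qed_spq_entry *p_ent = NULL;
 *      u8 fw_return_code;
 *
 *      rc = qed_spq_get_entry(p_hwfn, &p_ent);
 *      (fill p_ent->elem.hdr and the ramrod data, pick a comp_mode)
 *      rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
 */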
int qed_spq_post(struct qed_hwfn *p_hwfn,
                 struct qed_spq_entry *p_ent,
                 u8 *fw_return_code)
{
        int rc = 0;
        struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
        bool b_ret_ent = true;

        if (!p_hwfn)
                return -EINVAL;

        if (!p_ent) {
                DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
                return -EINVAL;
        }

        /* Complete the entry */
        rc = qed_spq_fill_entry(p_hwfn, p_ent);

        spin_lock_bh(&p_spq->lock);

        /* Check return value after LOCK is taken for cleaner error flow */
        if (rc)
                goto spq_post_fail;

        /* Add the request to the pending queue */
        rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        if (rc)
                goto spq_post_fail;

        rc = qed_spq_pend_post(p_hwfn);
        if (rc) {
                /* Since it's possible that pending failed for a different
                 * entry [although unlikely], the failed entry was already
                 * dealt with; No need to return it here.
                 */
                b_ret_ent = false;
                goto spq_post_fail;
        }

        spin_unlock_bh(&p_spq->lock);

        if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
                /* For entries in QED BLOCK mode, the completion code cannot
                 * perform the necessary cleanup - if it did, we couldn't
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);

                if (p_ent->queue == &p_spq->unlimited_pending) {
                        /* This is an allocated p_ent which does not need to
                         * return to pool.
                         */
                        kfree(p_ent);
                        return rc;
                }

                if (rc)
                        goto spq_post_fail2;

                /* return to pool */
                qed_spq_return_entry(p_hwfn, p_ent);
        }

        return rc;

spq_post_fail2:
        spin_lock_bh(&p_spq->lock);
        list_del(&p_ent->list);
        qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
        /* return to the free pool */
        if (b_ret_ent)
                __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}

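/* Completion path: an EQE's echo is matched against the entries on the
 * completion_pending list. Because completions can arrive out of order,
 * a per-ring bitmap records which positions have completed, and chain
 * producer credit is only returned for the longest contiguous run of
 * completed entries starting at comp_bitmap_idx.
 */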
int qed_spq_completion(struct qed_hwfn *p_hwfn,
                       __le16 echo,
                       u8 fw_return_code,
                       union event_ring_data *p_data)
{
        struct qed_spq *p_spq;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_spq_entry *tmp;
        struct qed_spq_entry *found = NULL;
        int rc;

        if (!p_hwfn)
                return -EINVAL;

        p_spq = p_hwfn->p_spq;
        if (!p_spq)
                return -EINVAL;

        spin_lock_bh(&p_spq->lock);
        list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
                                 list) {
                if (p_ent->elem.hdr.echo == echo) {
                        u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

                        list_del(&p_ent->list);

                        /* Avoid overriding of SPQ entries when getting
                         * out-of-order completions, by marking the completions
                         * in a bitmap and increasing the chain consumer only
                         * for the first successive completed entries.
                         */
                        __set_bit(pos, p_spq->p_comp_bitmap);

                        while (test_bit(p_spq->comp_bitmap_idx,
                                        p_spq->p_comp_bitmap)) {
                                __clear_bit(p_spq->comp_bitmap_idx,
                                            p_spq->p_comp_bitmap);
                                p_spq->comp_bitmap_idx++;
                                qed_chain_return_produced(&p_spq->chain);
                        }

                        p_spq->comp_count++;
                        found = p_ent;
                        break;
                }

                /* This is relatively uncommon - depends on scenarios
                 * which have multiple per-PF sent ramrods.
                 */
                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
                           le16_to_cpu(echo),
                           le16_to_cpu(p_ent->elem.hdr.echo));
        }

        /* Release lock before callback, as callback may post
         * an additional ramrod.
         */
        spin_unlock_bh(&p_spq->lock);

        if (!found) {
                DP_NOTICE(p_hwfn,
                          "Failed to find an entry this EQE completes\n");
                return -EEXIST;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n",
                   found->comp_cb.function, found->comp_cb.cookie);
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);

        if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
            (found->queue == &p_spq->unlimited_pending))
                /* EBLOCK is responsible for returning its own entry into the
                 * free list, unless it originally added the entry into the
                 * unlimited pending list.
                 */
                qed_spq_return_entry(p_hwfn, found);

        /* Attempt to post pending requests */
        spin_lock_bh(&p_spq->lock);
        rc = qed_spq_pend_post(p_hwfn);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}

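/* The ConsQ (consolidation queue) below is a companion ring consumed by the
 * FW; its base address is programmed into the SPQ context by
 * qed_spq_hw_initialize() (consolid_base_addr). This file only allocates
 * and resets it, never produces to it directly.
 */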
struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_consq *p_consq;

        /* Allocate ConsQ struct */
        p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
        if (!p_consq) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
                return NULL;
        }

        /* Allocate and initialize ConsQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            QED_CHAIN_CNT_TYPE_U16,
                            QED_CHAIN_PAGE_SIZE / 0x80,
                            0x80, &p_consq->chain)) {
                DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
                goto consq_allocate_fail;
        }

        return p_consq;

consq_allocate_fail:
        qed_consq_free(p_hwfn, p_consq);
        return NULL;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn,
                     struct qed_consq *p_consq)
{
        qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn,
                    struct qed_consq *p_consq)
{
        if (!p_consq)
                return;
        qed_chain_free(p_hwfn->cdev, &p_consq->chain);
        kfree(p_consq);
}