/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
static spinlock_t qm_lock;
static bool qm_lock_init = false;
/* API common to all protocols */
enum BAR_ID {
	BAR_ID_0,	/* used for GRC */
	BAR_ID_1	/* Used for doorbells */
};
static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
			   enum BAR_ID bar_id)
{
	u32 bar_reg = (bar_id == BAR_ID_0 ?
		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
	u32 val;

	if (IS_VF(p_hwfn->cdev))
		return 1 << 17;

	val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
	if (val)
		return 1 << (val + 15);

	/* An old MFW initialized the above register only conditionally */
	if (p_hwfn->cdev->num_hwfns > 1) {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
		return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
	}

	DP_INFO(p_hwfn,
		"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
	return 512 * 1024;
}
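/* Reading the encoding above (illustrative, not an extra register
 * contract): the PGLUE BAR-size register holds log2(size) - 15, so a raw
 * value of 3 yields 1 << (3 + 15) = 256kB.
 */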
void qed_init_dp(struct qed_dev *cdev,
		 u32 dp_module, u8 dp_level)
{
	u32 i;

	cdev->dp_level = dp_level;
	cdev->dp_module = dp_module;
	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->dp_level = dp_level;
		p_hwfn->dp_module = dp_module;
	}
}
void qed_init_struct(struct qed_dev *cdev)
{
	u8 i;

	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->cdev = cdev;
		p_hwfn->my_id = i;
		p_hwfn->b_active = false;

		mutex_init(&p_hwfn->dmae_info.mutex);
	}

	/* hwfn 0 is always active */
	cdev->hwfns[0].b_active = true;

	/* set the default cache alignment to 128 */
	cdev->cache_shift = 7;
}
static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	kfree(qm_info->qm_pq_params);
	qm_info->qm_pq_params = NULL;
	kfree(qm_info->qm_vport_params);
	qm_info->qm_vport_params = NULL;
	kfree(qm_info->qm_port_params);
	qm_info->qm_port_params = NULL;
	kfree(qm_info->wfq_data);
	qm_info->wfq_data = NULL;
}
void qed_resc_free(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	kfree(cdev->fw_data);
	cdev->fw_data = NULL;

	kfree(cdev->reset_stats);

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		kfree(p_hwfn->p_tx_cids);
		p_hwfn->p_tx_cids = NULL;
		kfree(p_hwfn->p_rx_cids);
		p_hwfn->p_rx_cids = NULL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_free(p_hwfn);
		qed_qm_info_free(p_hwfn);
		qed_spq_free(p_hwfn);
		qed_eq_free(p_hwfn, p_hwfn->p_eq);
		qed_consq_free(p_hwfn, p_hwfn->p_consq);
		qed_int_free(p_hwfn);
		qed_iov_free(p_hwfn);
		qed_dmae_info_free(p_hwfn);
		qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
	}
}
static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
{
	u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct init_qm_port_params *p_qm_port;
	bool init_rdma_offload_pq = false;
	bool init_pure_ack_pq = false;
	bool init_ooo_pq = false;
	u16 num_pqs, multi_cos_tcs = 1;
	u8 pf_wfq = qm_info->pf_wfq;
	u32 pf_rl = qm_info->pf_rl;
	u16 num_vfs = 0;
	u32 num_pf_rls = 0;

#ifdef CONFIG_QED_SRIOV
	if (p_hwfn->cdev->p_iov_info)
		num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
#endif
	memset(qm_info, 0, sizeof(*qm_info));

	num_pqs = multi_cos_tcs + num_vfs + 1;	/* The '1' is for pure-LB */
	num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);

	if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
		num_pqs++;	/* for RoCE queue */
		init_rdma_offload_pq = true;
		/* we subtract num_vfs because each requires a rate limiter,
		 * and one default rate limiter
		 */
		if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn)
			num_pf_rls = RESC_NUM(p_hwfn, QED_RL) - num_vfs - 1;

		num_pqs += num_pf_rls;
		qm_info->num_pf_rls = (u8) num_pf_rls;
	}

	if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
		num_pqs += 2;	/* for iSCSI pure-ACK / OOO queue */
		init_pure_ack_pq = true;
		init_ooo_pq = true;
	}

	/* Sanity checking that setup requires legal number of resources */
	if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
		DP_ERR(p_hwfn,
		       "Need too many Physical queues - 0x%04x when only %04x are available\n",
		       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
		return -EINVAL;
	}

	/* PQs will be arranged as follows: First per-TC PQ, then pure-LB
	 * queue, then per-VF PQs.
	 */
	qm_info->qm_pq_params = kcalloc(num_pqs,
					sizeof(struct init_qm_pq_params),
					b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
	if (!qm_info->qm_pq_params)
		goto alloc_err;

	qm_info->qm_vport_params = kcalloc(num_vports,
					   sizeof(struct init_qm_vport_params),
					   b_sleepable ? GFP_KERNEL
						       : GFP_ATOMIC);
	if (!qm_info->qm_vport_params)
		goto alloc_err;

	qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
					  sizeof(struct init_qm_port_params),
					  b_sleepable ? GFP_KERNEL
						      : GFP_ATOMIC);
	if (!qm_info->qm_port_params)
		goto alloc_err;

	qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
				    b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
	if (!qm_info->wfq_data)
		goto alloc_err;

	vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);

	/* First init rate limited queues */
	for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) {
		qm_info->qm_pq_params[curr_queue].vport_id = vport_id++;
		qm_info->qm_pq_params[curr_queue].tc_id =
		    p_hwfn->hw_info.non_offload_tc;
		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
		qm_info->qm_pq_params[curr_queue].rl_valid = 1;
	}

	/* Then init per-TC PQs */
	for (i = 0; i < multi_cos_tcs; i++) {
		struct init_qm_pq_params *params =
		    &qm_info->qm_pq_params[curr_queue++];

		if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
		    p_hwfn->hw_info.personality == QED_PCI_ETH) {
			params->vport_id = vport_id;
			params->tc_id = p_hwfn->hw_info.non_offload_tc;
			params->wrr_group = 1;
		} else {
			params->vport_id = vport_id;
			params->tc_id = p_hwfn->hw_info.offload_tc;
			params->wrr_group = 1;
		}
	}

	/* Then init pure-LB PQ */
	qm_info->pure_lb_pq = curr_queue;
	qm_info->qm_pq_params[curr_queue].vport_id =
	    (u8) RESC_START(p_hwfn, QED_VPORT);
	qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
	qm_info->qm_pq_params[curr_queue].wrr_group = 1;
	curr_queue++;

	qm_info->offload_pq = 0;
	if (init_rdma_offload_pq) {
		qm_info->offload_pq = curr_queue;
		qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
		qm_info->qm_pq_params[curr_queue].tc_id =
		    p_hwfn->hw_info.offload_tc;
		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
		curr_queue++;
	}

	if (init_pure_ack_pq) {
		qm_info->pure_ack_pq = curr_queue;
		qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
		qm_info->qm_pq_params[curr_queue].tc_id =
		    p_hwfn->hw_info.offload_tc;
		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
		curr_queue++;
	}

	if (init_ooo_pq) {
		qm_info->ooo_pq = curr_queue;
		qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
		qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC;
		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
		curr_queue++;
	}

	/* Then init per-VF PQs */
	vf_offset = curr_queue;
	for (i = 0; i < num_vfs; i++) {
		/* First vport is used by the PF */
		qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
		qm_info->qm_pq_params[curr_queue].tc_id =
		    p_hwfn->hw_info.non_offload_tc;
		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
		qm_info->qm_pq_params[curr_queue].rl_valid = 1;
		curr_queue++;
	}

	qm_info->vf_queues_offset = vf_offset;
	qm_info->num_pqs = num_pqs;
	qm_info->num_vports = num_vports;

	/* Initialize qm port parameters */
	num_ports = p_hwfn->cdev->num_ports_in_engines;
	for (i = 0; i < num_ports; i++) {
		p_qm_port = &qm_info->qm_port_params[i];
		p_qm_port->active = 1;
		if (num_ports == 4)
			p_qm_port->active_phys_tcs = 0x7;
		else
			p_qm_port->active_phys_tcs = 0x9f;
		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
	}

	qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;

	qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);

	qm_info->num_vf_pqs = num_vfs;
	qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);

	for (i = 0; i < qm_info->num_vports; i++)
		qm_info->qm_vport_params[i].vport_wfq = 1;

	qm_info->vport_rl_en = 1;
	qm_info->vport_wfq_en = 1;
	qm_info->pf_rl = pf_rl;
	qm_info->pf_wfq = pf_wfq;

	return 0;

alloc_err:
	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
	qed_qm_info_free(p_hwfn);
	return -ENOMEM;
}
/* This function reconfigures the QM pf on the fly.
 * For this purpose we:
 * 1. reconfigure the QM database
 * 2. set new values to runtime array
 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
 * 4. activate init tool in QM_PF stage
 * 5. send an sdm_qm_cmd through rbc interface to release the QM
 */
int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	bool b_rc;
	int rc;

	/* qm_info is allocated in qed_init_qm_info() which is already called
	 * from qed_resc_alloc() or previous call of qed_qm_reconf().
	 * The allocated size may change each init, so we free it before next
	 * allocation.
	 */
	qed_qm_info_free(p_hwfn);

	/* initialize qed's qm data structure */
	rc = qed_init_qm_info(p_hwfn, false);
	if (rc)
		return rc;

	/* stop PF's qm queues */
	spin_lock_bh(&qm_lock);
	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
				    qm_info->start_pq, qm_info->num_pqs);
	spin_unlock_bh(&qm_lock);
	if (!b_rc)
		return -EINVAL;

	/* clear the QM_PF runtime phase leftovers from previous init */
	qed_init_clear_rt_data(p_hwfn);

	/* prepare QM portion of runtime array */
	qed_qm_init_pf(p_hwfn);

	/* activate init tool on runtime array */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
			  p_hwfn->hw_info.hw_mode);
	if (rc)
		return rc;

	/* start PF's qm queues */
	spin_lock_bh(&qm_lock);
	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
				    qm_info->start_pq, qm_info->num_pqs);
	spin_unlock_bh(&qm_lock);
	if (!b_rc)
		return -EINVAL;

	return 0;
}
int qed_resc_alloc(struct qed_dev *cdev)
{
	struct qed_consq *p_consq;
	struct qed_eq *p_eq;
	int i, rc = 0;

	if (IS_VF(cdev))
		return rc;

	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
	if (!cdev->fw_data)
		return -ENOMEM;

	/* Allocate Memory for the Queue->CID mapping */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		int tx_size = sizeof(struct qed_hw_cid_data) *
				     RESC_NUM(p_hwfn, QED_L2_QUEUE);
		int rx_size = sizeof(struct qed_hw_cid_data) *
				     RESC_NUM(p_hwfn, QED_L2_QUEUE);

		p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
		if (!p_hwfn->p_tx_cids) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for Tx Cids\n");
			rc = -ENOMEM;
			goto alloc_err;
		}

		p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
		if (!p_hwfn->p_rx_cids) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for Rx Cids\n");
			rc = -ENOMEM;
			goto alloc_err;
		}
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u32 n_eqes, num_cons;

		/* First allocate the context manager structure */
		rc = qed_cxt_mngr_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Set the HW cid/tid numbers (in the context manager)
		 * Must be done prior to any further computations.
		 */
		rc = qed_cxt_set_pf_params(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Prepare and process QM requirements */
		rc = qed_init_qm_info(p_hwfn, true);
		if (rc)
			goto alloc_err;

		/* Compute the ILT client partition */
		rc = qed_cxt_cfg_ilt_compute(p_hwfn);
		if (rc)
			goto alloc_err;

		/* CID map / ILT shadow table / T2
		 * The table sizes are determined by the computations above
		 */
		rc = qed_cxt_tables_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SPQ, must follow ILT because initializes SPQ context */
		rc = qed_spq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SP status block allocation */
		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
							 RESERVED_PTT_DPC);

		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			goto alloc_err;

		rc = qed_iov_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* EQ */
		n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
		if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
			num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
							       PROTOCOLID_ROCE,
							       0) * 2;
			n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
		} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			num_cons =
			    qed_cxt_get_proto_cid_count(p_hwfn,
							PROTOCOLID_ISCSI, 0);
			n_eqes += 2 * num_cons;
		}

		if (n_eqes > 0xFFFF) {
			DP_ERR(p_hwfn,
			       "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
			       n_eqes, 0xFFFF);
			rc = -EINVAL;
			goto alloc_err;
		}

		p_eq = qed_eq_alloc(p_hwfn, (u16) n_eqes);
		if (!p_eq)
			goto alloc_no_mem;
		p_hwfn->p_eq = p_eq;

		p_consq = qed_consq_alloc(p_hwfn);
		if (!p_consq)
			goto alloc_no_mem;
		p_hwfn->p_consq = p_consq;

		/* DMA info initialization */
		rc = qed_dmae_info_alloc(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for dmae_info structure\n");
			goto alloc_err;
		}

		/* DCBX initialization */
		rc = qed_dcbx_info_alloc(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for dcbx structure\n");
			goto alloc_err;
		}
	}

	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
	if (!cdev->reset_stats) {
		DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
		rc = -ENOMEM;
		goto alloc_err;
	}

	return rc;

alloc_no_mem:
	rc = -ENOMEM;
alloc_err:
	qed_resc_free(cdev);
	return rc;
}
void qed_resc_setup(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_setup(p_hwfn);
		qed_spq_setup(p_hwfn);
		qed_eq_setup(p_hwfn, p_hwfn->p_eq);
		qed_consq_setup(p_hwfn, p_hwfn->p_consq);

		/* Read shadow of current MFW mailbox */
		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
		memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
		       p_hwfn->mcp_info->mfw_mb_cur,
		       p_hwfn->mcp_info->mfw_mb_length);

		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);

		qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
	}
}
#define FINAL_CLEANUP_POLL_CNT          (100)
#define FINAL_CLEANUP_POLL_TIME         (10)
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u16 id, bool is_vf)
{
	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
	int rc = -EBUSY;

	addr = GTT_BAR0_MAP_REG_USDM_RAM +
		USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);

	if (is_vf)
		id += 0x10;

	command |= X_FINAL_CLEANUP_AGG_INT <<
		SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;

	/* Make sure notification is not set before initiating final cleanup */
	if (REG_RD(p_hwfn, addr)) {
		DP_NOTICE(p_hwfn,
			  "Unexpected; Found final cleanup notification before initiating final cleanup\n");
		REG_WR(p_hwfn, addr, 0);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
		   id, command);

	qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);

	/* Poll until completion */
	while (!REG_RD(p_hwfn, addr) && count--)
		msleep(FINAL_CLEANUP_POLL_TIME);

	if (REG_RD(p_hwfn, addr))
		rc = 0;
	else
		DP_NOTICE(p_hwfn,
			  "Failed to receive FW final cleanup notification\n");

	/* Cleanup afterwards */
	REG_WR(p_hwfn, addr, 0);

	return rc;
}
static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
	int hw_mode = 0;

	hw_mode = (1 << MODE_BB_B0);

	switch (p_hwfn->cdev->num_ports_in_engines) {
	case 1:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
		break;
	case 2:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
		break;
	case 4:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
		break;
	default:
		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
			  p_hwfn->cdev->num_ports_in_engines);
		return;
	}

	switch (p_hwfn->cdev->mf_mode) {
	case QED_MF_DEFAULT:
	case QED_MF_NPAR:
		hw_mode |= 1 << MODE_MF_SI;
		break;
	case QED_MF_OVLAN:
		hw_mode |= 1 << MODE_MF_SD;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
		hw_mode |= 1 << MODE_MF_SI;
	}

	hw_mode |= 1 << MODE_ASIC;

	if (p_hwfn->cdev->num_hwfns > 1)
		hw_mode |= 1 << MODE_100G;

	p_hwfn->hw_info.hw_mode = hw_mode;

	DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
		   "Configuring function for hw_mode: 0x%08x\n",
		   p_hwfn->hw_info.hw_mode);
}
/* Init run time data for all PFs on an engine. */
static void qed_init_cau_rt_data(struct qed_dev *cdev)
{
	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
	int i, sb_id;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_igu_info *p_igu_info;
		struct qed_igu_block *p_block;
		struct cau_sb_entry sb_entry;

		p_igu_info = p_hwfn->hw_info.p_igu_info;

		for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
		     sb_id++) {
			p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
			if (!p_block->is_pf)
				continue;

			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
					      p_block->function_id,
					      0, 0);
			STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
					 sb_entry);
		}
	}
}
static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      int hw_mode)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct qed_qm_common_rt_init_params params;
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 concrete_fid;
	u16 num_pfs, pf_id;
	u8 vf_id;
	int rc = 0;

	qed_init_cau_rt_data(cdev);

	/* Program GTT windows */
	qed_gtt_init(p_hwfn);

	if (p_hwfn->mcp_info) {
		if (p_hwfn->mcp_info->func_info.bandwidth_max)
			qm_info->pf_rl_en = 1;
		if (p_hwfn->mcp_info->func_info.bandwidth_min)
			qm_info->pf_wfq_en = 1;
	}

	memset(&params, 0, sizeof(params));
	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params.pf_rl_en = qm_info->pf_rl_en;
	params.pf_wfq_en = qm_info->pf_wfq_en;
	params.vport_rl_en = qm_info->vport_rl_en;
	params.vport_wfq_en = qm_info->vport_wfq_en;
	params.port_params = qm_info->qm_port_params;

	qed_qm_common_rt_init(p_hwfn, &params);

	qed_cxt_hw_init_common(p_hwfn);

	/* Close gate from NIG to BRB/Storm; By default they are open, but
	 * we close them to prevent NIG from passing data to reset blocks.
	 * Should have been done in the ENGINE phase, but init-tool lacks
	 * proper port-pretend capabilities.
	 */
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	qed_port_unpretend(p_hwfn, p_ptt);

	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
	if (rc)
		return rc;

	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

	if (QED_IS_BB(p_hwfn->cdev)) {
		num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev);
		for (pf_id = 0; pf_id < num_pfs; pf_id++) {
			qed_fid_pretend(p_hwfn, p_ptt, pf_id);
			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		}
		/* pretend to original PF */
		qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
	}

	for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
		concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
		qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
		qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
	}
	/* pretend to original PF */
	qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);

	return rc;
}
static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    int hw_mode)
{
	int rc = 0;

	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
	if (rc)
		return rc;

	if (hw_mode & (1 << MODE_MF_SI)) {
		u8 pf_id = 0;

		if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
			DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
				   "PF[%08x] is first eth on engine\n", pf_id);

			/* We should have configured BIT for ppfid, i.e., the
			 * relative function number in the port. But there's a
			 * bug in LLH in BB where the ppfid is actually engine
			 * based, so we need to take this into account.
			 */
			qed_wr(p_hwfn, p_ptt,
			       NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id);
		}

		/* Take the protocol-based hit vector if there is a hit,
		 * otherwise take the other vector.
		 */
		qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2);
	}
	return rc;
}
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_tunn_start_params *p_tunn,
			  int hw_mode,
			  bool b_hw_start,
			  enum qed_int_mode int_mode,
			  bool allow_npar_tx_switch)
{
	u8 rel_pf_id = p_hwfn->rel_pf_id;
	int rc = 0;

	if (p_hwfn->mcp_info) {
		struct qed_mcp_function_info *p_info;

		p_info = &p_hwfn->mcp_info->func_info;
		if (p_info->bandwidth_min)
			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;

		/* Update rate limit once we'll actually have a link */
		p_hwfn->qm_info.pf_rl = 100000;
	}

	qed_cxt_hw_init_pf(p_hwfn);

	qed_int_igu_init_rt(p_hwfn);

	/* Set VLAN in NIG if needed */
	if (hw_mode & (1 << MODE_MF_SD)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
			     p_hwfn->hw_info.ovlan);
	}

	/* Enable classification by MAC if needed */
	if (hw_mode & (1 << MODE_MF_SI)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Configuring TAGMAC_CLS_TYPE\n");
		STORE_RT_REG(p_hwfn,
			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
	}

	/* Protocol Configuration */
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
		     (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);

	/* Cleanup chip from previous driver if such remains exist */
	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
	if (rc)
		return rc;

	/* PF Init sequence */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* Pure runtime initializations - directly to the HW  */
	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);

	if (hw_mode & (1 << MODE_MF_SI)) {
		u8 pf_id = 0;
		u32 val = 0;

		if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
			if (p_hwfn->rel_pf_id == pf_id) {
				DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
					   "PF[%d] is first ETH on engine\n",
					   pf_id);
				val = 1;
			}
			qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val);
		}
	}

	if (b_hw_start) {
		/* enable interrupts */
		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);

		/* send function start command */
		rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
				     allow_npar_tx_switch);
		if (rc)
			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
	}
	return rc;
}
static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u8 enable)
{
	u32 delay_idx = 0, val, set_val = enable ? 1 : 0;

	/* Change PF in PXP */
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);

	/* wait until value is set - try for 1 second every 50us */
	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
		val = qed_rd(p_hwfn, p_ptt,
			     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
		if (val == set_val)
			break;

		usleep_range(50, 60);
	}

	if (val != set_val) {
		DP_NOTICE(p_hwfn,
			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
		return -EAGAIN;
	}

	return 0;
}
static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_main_ptt)
{
	/* Read shadow of current MFW mailbox */
	qed_mcp_read_mb(p_hwfn, p_main_ptt);
	memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
	       p_hwfn->mcp_info->mfw_mb_cur,
	       p_hwfn->mcp_info->mfw_mb_length);
}
int qed_hw_init(struct qed_dev *cdev,
		struct qed_tunn_start_params *p_tunn,
		bool b_hw_start,
		enum qed_int_mode int_mode,
		bool allow_npar_tx_switch,
		const u8 *bin_fw_data)
{
	u32 load_code, param;
	int rc, mfw_rc, i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	if (IS_PF(cdev)) {
		rc = qed_init_fw_data(cdev, bin_fw_data);
		if (rc != 0)
			return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (IS_VF(cdev)) {
			p_hwfn->b_int_enabled = 1;
			continue;
		}

		/* Enable DMAE in PXP */
		rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);

		qed_calc_hw_mode(p_hwfn);

		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
				      &load_code);
		if (rc) {
			DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
			return rc;
		}

		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
			   rc, load_code);

		p_hwfn->first_on_engine = (load_code ==
					   FW_MSG_CODE_DRV_LOAD_ENGINE);

		if (!qm_lock_init) {
			spin_lock_init(&qm_lock);
			qm_lock_init = true;
		}

		switch (load_code) {
		case FW_MSG_CODE_DRV_LOAD_ENGINE:
			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
						p_hwfn->hw_info.hw_mode);
			if (rc)
				break;
		/* Fall into */
		case FW_MSG_CODE_DRV_LOAD_PORT:
			rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
					      p_hwfn->hw_info.hw_mode);
			if (rc)
				break;

		/* Fall into */
		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
					    p_tunn, p_hwfn->hw_info.hw_mode,
					    b_hw_start, int_mode,
					    allow_npar_tx_switch);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		if (rc)
			DP_NOTICE(p_hwfn,
				  "init phase failed for loadcode 0x%x (rc %d)\n",
				  load_code, rc);

		/* ACK mfw regardless of success or failure of initialization */
		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				     DRV_MSG_CODE_LOAD_DONE,
				     0, &load_code, &param);
		if (rc)
			return rc;
		if (mfw_rc) {
			DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
			return mfw_rc;
		}

		/* send DCBX attention request command */
		DP_VERBOSE(p_hwfn, QED_MSG_DCB,
			   "sending phony dcbx set command to trigger DCBx attention handling\n");
		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				     DRV_MSG_CODE_SET_DCBX,
				     1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
				     &load_code, &param);
		if (mfw_rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to send DCBX attention request\n");
			return mfw_rc;
		}

		p_hwfn->hw_init_done = true;
	}

	return 0;
}
#define QED_HW_STOP_RETRY_LIMIT (10)
static inline void qed_hw_timers_stop(struct qed_dev *cdev,
				      struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt)
{
	int i;

	/* close timers */
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);

	for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
		if ((!qed_rd(p_hwfn, p_ptt,
			     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
		    (!qed_rd(p_hwfn, p_ptt,
			     TM_REG_PF_SCAN_ACTIVE_TASK)))
			break;

		/* Dependent on number of connection/tasks, possibly
		 * 1ms sleep is required between polls
		 */
		usleep_range(1000, 2000);
	}

	if (i < QED_HW_STOP_RETRY_LIMIT)
		return;

	DP_NOTICE(p_hwfn,
		  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
}
void qed_hw_timers_stop_all(struct qed_dev *cdev)
{
	int j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
	}
}
int qed_hw_stop(struct qed_dev *cdev)
{
	int rc = 0, t_rc;
	int j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");

		if (IS_VF(cdev)) {
			qed_vf_pf_int_cleanup(p_hwfn);
			continue;
		}

		/* mark the hw as uninitialized... */
		p_hwfn->hw_init_done = false;

		rc = qed_sp_pf_stop(p_hwfn);
		if (rc)
			DP_NOTICE(p_hwfn,
				  "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);

		/* Disable Attention Generation */
		qed_int_igu_disable_int(p_hwfn, p_ptt);

		qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
		qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
	}

	if (IS_PF(cdev)) {
		/* Disable DMAE in PXP - in CMT, this should only be done for
		 * first hw-function, and only after all transactions have
		 * stopped for all active hw-functions.
		 */
		t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
					   cdev->hwfns[0].p_main_ptt, false);
		if (t_rc != 0)
			rc = t_rc;
	}

	return rc;
}
void qed_hw_stop_fastpath(struct qed_dev *cdev)
{
	int j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		if (IS_VF(cdev)) {
			qed_vf_pf_int_cleanup(p_hwfn);
			continue;
		}

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN,
			   "Shutting down the fastpath\n");

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
	}
}
void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
	if (IS_VF(p_hwfn->cdev))
		return;

	/* Re-open incoming traffic */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
}
static int qed_reg_assert(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt, u32 reg,
			  bool expected)
{
	u32 assert_val = qed_rd(hwfn, ptt, reg);

	if (assert_val != expected) {
		DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
			  reg, expected);
		return -EINVAL;
	}

	return 0;
}
int qed_hw_reset(struct qed_dev *cdev)
{
	int rc = 0;
	u32 unload_resp, unload_param;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_reset(p_hwfn);
			if (rc)
				return rc;
			continue;
		}

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");

		/* Check for incorrect states */
		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
			       QM_REG_USG_CNT_PF_TX, 0);
		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
			       QM_REG_USG_CNT_PF_OTHER, 0);

		/* Disable PF in HW blocks */
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       TCFC_REG_STRONG_ENABLE_PF, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       CCFC_REG_STRONG_ENABLE_PF, 0);

		/* Send unload command to MCP */
		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_UNLOAD_REQ,
				 DRV_MB_PARAM_UNLOAD_WOL_MCP,
				 &unload_resp, &unload_param);
		if (rc) {
			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
			unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
		}

		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_UNLOAD_DONE,
				 0, &unload_resp, &unload_param);
		if (rc) {
			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
			return rc;
		}
	}

	return rc;
}
/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
{
	qed_ptt_pool_free(p_hwfn);
	kfree(p_hwfn->hw_info.p_igu_info);
}
/* Setup bar access */
static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
	/* clear indirect access */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);

	/* Clean previous errors if such exist */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
	       1 << p_hwfn->abs_pf_id);

	/* enable internal target-read */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
}
static void get_function_id(struct qed_hwfn *p_hwfn)
{
	/* ME Register */
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);

	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);

	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				      PXP_CONCRETE_FID_PFID);
	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				    PXP_CONCRETE_FID_PORT);
}
static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
	u32 *feat_num = p_hwfn->hw_info.feat_num;
	int num_features = 1;

	feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
						num_features,
					RESC_NUM(p_hwfn, QED_L2_QUEUE));
	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
		   feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
		   num_features);
}
static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
	u8 enabled_func_idx = p_hwfn->enabled_func_idx;
	u32 *resc_start = p_hwfn->hw_info.resc_start;
	u8 num_funcs = p_hwfn->num_funcs_on_engine;
	u32 *resc_num = p_hwfn->hw_info.resc_num;
	struct qed_sb_cnt_info sb_cnt_info;
	int i, max_vf_vlan_filters;

	memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));

#ifdef CONFIG_QED_SRIOV
	max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS;
#else
	max_vf_vlan_filters = 0;
#endif

	qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);

	resc_num[QED_SB] = min_t(u32,
				 (MAX_SB_PER_PATH_BB / num_funcs),
				 sb_cnt_info.sb_cnt);
	resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
	resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
	resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
	resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
	resc_num[QED_RL] = min_t(u32, 64, resc_num[QED_VPORT]);
	resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
	resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
			     num_funcs;
	resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs;

	for (i = 0; i < QED_MAX_RESC; i++)
		resc_start[i] = resc_num[i] * enabled_func_idx;

	/* Sanity for ILT */
	if (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB) {
		DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
			  RESC_START(p_hwfn, QED_ILT),
			  RESC_END(p_hwfn, QED_ILT) - 1);
		return -EINVAL;
	}

	qed_hw_set_feat(p_hwfn);

	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "The numbers for each resource are:\n"
		   "SB = %d start = %d\n"
		   "L2_QUEUE = %d start = %d\n"
		   "VPORT = %d start = %d\n"
		   "PQ = %d start = %d\n"
		   "RL = %d start = %d\n"
		   "MAC = %d start = %d\n"
		   "VLAN = %d start = %d\n"
		   "ILT = %d start = %d\n",
		   p_hwfn->hw_info.resc_num[QED_SB],
		   p_hwfn->hw_info.resc_start[QED_SB],
		   p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
		   p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
		   p_hwfn->hw_info.resc_num[QED_VPORT],
		   p_hwfn->hw_info.resc_start[QED_VPORT],
		   p_hwfn->hw_info.resc_num[QED_PQ],
		   p_hwfn->hw_info.resc_start[QED_PQ],
		   p_hwfn->hw_info.resc_num[QED_RL],
		   p_hwfn->hw_info.resc_start[QED_RL],
		   p_hwfn->hw_info.resc_num[QED_MAC],
		   p_hwfn->hw_info.resc_start[QED_MAC],
		   p_hwfn->hw_info.resc_num[QED_VLAN],
		   p_hwfn->hw_info.resc_start[QED_VLAN],
		   p_hwfn->hw_info.resc_num[QED_ILT],
		   p_hwfn->hw_info.resc_start[QED_ILT]);

	return 0;
}
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt)
{
	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
	struct qed_mcp_link_params *link;

	/* Read global nvm_cfg address */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);

	/* Verify MCP has initialized it */
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, core_cfg);

	core_cfg = qed_rd(p_hwfn, p_ptt, addr);

	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
			  core_cfg);
		break;
	}

	/* Read default link configuration */
	link = &p_hwfn->mcp_info->link_input;
	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
	link->speed.advertised_speeds =
		link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;

	p_hwfn->mcp_info->link_capabilities.speed_capabilities =
		link->speed.advertised_speeds;

	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, link_settings));
	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
		link->speed.autoneg = true;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
		link->speed.forced_speed = 1000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
		link->speed.forced_speed = 10000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
		link->speed.forced_speed = 25000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
		link->speed.forced_speed = 40000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
		link->speed.forced_speed = 50000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
		link->speed.forced_speed = 100000;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
			  link_temp);
	}

	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
	link->pause.autoneg = !!(link_temp &
				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
	link->pause.forced_rx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
	link->pause.forced_tx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
	link->loopback_mode = 0;

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
		   link->speed.forced_speed, link->speed.advertised_speeds,
		   link->speed.autoneg, link->pause.autoneg);

	/* Read Multi-function information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, generic_cont0);

	generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);

	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
		  NVM_CFG1_GLOB_MF_MODE_OFFSET;

	switch (mf_mode) {
	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
		p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
		break;
	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
		p_hwfn->cdev->mf_mode = QED_MF_NPAR;
		break;
	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
		p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
		break;
	}
	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
		p_hwfn->cdev->mf_mode);

	/* Read device capabilities information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, device_capabilities);

	device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
		__set_bit(QED_DEV_CAP_ETH,
			  &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
		__set_bit(QED_DEV_CAP_ISCSI,
			  &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
		__set_bit(QED_DEV_CAP_ROCE,
			  &p_hwfn->hw_info.device_capabilities);

	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
	u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;

	num_funcs = MAX_NUM_PFS_BB;

	/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
	 * in the other bits are selected.
	 * Bits 1-15 are for functions 1-15, respectively, and their value is
	 * '0' only for enabled functions (function 0 always exists and
	 * enabled).
	 * In case of CMT, only the "even" functions are enabled, and thus the
	 * number of functions for both hwfns is learnt from the same bits.
	 */
	reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);

	if (reg_function_hide & 0x1) {
		if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
			num_funcs = 0;
			eng_mask = 0xaaaa;
		} else {
			num_funcs = 1;
			eng_mask = 0x5554;
		}

		/* Get the number of the enabled functions on the engine */
		tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
		while (tmp) {
			if (tmp & 0x1)
				num_funcs++;
			tmp >>= 0x1;
		}

		/* Get the PF index within the enabled functions */
		low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
		tmp = reg_function_hide & eng_mask & low_pfs_mask;
		while (tmp) {
			if (tmp & 0x1)
				enabled_func_idx--;
			tmp >>= 0x1;
		}
	}

	p_hwfn->num_funcs_on_engine = num_funcs;
	p_hwfn->enabled_func_idx = enabled_func_idx;

	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
		   p_hwfn->rel_pf_id, p_hwfn->abs_pf_id,
		   p_hwfn->num_funcs_on_engine);
}
static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		enum qed_pci_personality personality)
{
	u32 port_mode;
	int rc;

	/* Since all information is common, only first hwfns should do this */
	if (IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_iov_hw_info(p_hwfn);
		if (rc)
			return rc;
	}

	/* Read the port mode */
	port_mode = qed_rd(p_hwfn, p_ptt,
			   CNIG_REG_NW_PORT_MODE_BB_B0);

	if (port_mode < 3) {
		p_hwfn->cdev->num_ports_in_engines = 1;
	} else if (port_mode <= 5) {
		p_hwfn->cdev->num_ports_in_engines = 2;
	} else {
		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
			  p_hwfn->cdev->num_ports_in_engines);

		/* Default num_ports_in_engines to something */
		p_hwfn->cdev->num_ports_in_engines = 1;
	}

	qed_hw_get_nvm_info(p_hwfn, p_ptt);

	rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
	if (rc)
		return rc;

	if (qed_mcp_is_init(p_hwfn))
		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
				p_hwfn->mcp_info->func_info.mac);
	else
		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);

	if (qed_mcp_is_init(p_hwfn)) {
		if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
			p_hwfn->hw_info.ovlan =
				p_hwfn->mcp_info->func_info.ovlan;

		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}

	if (qed_mcp_is_init(p_hwfn)) {
		enum qed_pci_personality protocol;

		protocol = p_hwfn->mcp_info->func_info.protocol;
		p_hwfn->hw_info.personality = protocol;
	}

	qed_get_num_funcs(p_hwfn, p_ptt);

	return qed_hw_get_resc(p_hwfn);
}
static int qed_get_dev_info(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	u32 tmp;

	/* Read Vendor Id / Device Id */
	pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
			     &cdev->vendor_id);
	pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
			     &cdev->device_id);
	cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				     MISCS_REG_CHIP_NUM);
	cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				     MISCS_REG_CHIP_REV);
	MASK_FIELD(CHIP_REV, cdev->chip_rev);

	cdev->type = QED_DEV_TYPE_BB;
	/* Learn number of HW-functions */
	tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
		     MISCS_REG_CMT_ENABLED_FOR_PAIR);

	if (tmp & (1 << p_hwfn->rel_pf_id)) {
		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
		cdev->num_hwfns = 2;
	} else {
		cdev->num_hwfns = 1;
	}

	cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				    MISCS_REG_CHIP_TEST_REG) >> 4;
	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
	cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				       MISCS_REG_CHIP_METAL);
	MASK_FIELD(CHIP_METAL, cdev->chip_metal);

	DP_INFO(cdev->hwfns,
		"Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
		cdev->chip_num, cdev->chip_rev,
		cdev->chip_bond_id, cdev->chip_metal);

	if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
		DP_NOTICE(cdev->hwfns,
			  "The chip type/rev (BB A0) is not supported!\n");
		return -EINVAL;
	}

	return 0;
}
static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
				 void __iomem *p_regview,
				 void __iomem *p_doorbells,
				 enum qed_pci_personality personality)
{
	int rc = 0;

	/* Split PCI bars evenly between hwfns */
	p_hwfn->regview = p_regview;
	p_hwfn->doorbells = p_doorbells;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_hw_prepare(p_hwfn);

	/* Validate that chip access is feasible */
	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
		DP_ERR(p_hwfn,
		       "Reading the ME register returns all Fs; Preventing further chip access\n");
		return -EINVAL;
	}

	get_function_id(p_hwfn);

	/* Allocate PTT pool */
	rc = qed_ptt_pool_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
		goto err0;
	}

	/* Allocate the main PTT */
	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);

	/* First hwfn learns basic information, e.g., number of hwfns */
	if (!p_hwfn->my_id) {
		rc = qed_get_dev_info(p_hwfn->cdev);
		if (rc != 0)
			goto err1;
	}

	qed_hw_hwfn_prepare(p_hwfn);

	/* Initialize MCP structure */
	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
		goto err1;
	}

	/* Read the device configuration information from the HW and SHMEM */
	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
		goto err2;
	}

	/* Allocate the init RT array and initialize the init-ops engine */
	rc = qed_init_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
		goto err2;
	}

	return rc;
err2:
	if (IS_LEAD_HWFN(p_hwfn))
		qed_iov_free_hw_info(p_hwfn->cdev);
	qed_mcp_free(p_hwfn);
err1:
	qed_hw_hwfn_free(p_hwfn);
err0:
	return rc;
}
int qed_hw_prepare(struct qed_dev *cdev,
		   int personality)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	int rc;

	/* Store the precompiled init data ptrs */
	if (IS_PF(cdev))
		qed_init_iro_array(cdev);

	/* Initialize the first hwfn - will learn number of hwfns */
	rc = qed_hw_prepare_single(p_hwfn,
				   cdev->regview,
				   cdev->doorbells, personality);
	if (rc)
		return rc;

	personality = p_hwfn->hw_info.personality;

	/* Initialize the rest of the hwfns */
	if (cdev->num_hwfns > 1) {
		void __iomem *p_regview, *p_doorbell;
		u8 __iomem *addr;

		/* adjust bar offset for second engine */
		addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
		p_regview = addr;

		/* adjust doorbell bar offset for second engine */
		addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
		p_doorbell = addr;

		/* prepare second hw function */
		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
					   p_doorbell, personality);

		/* in case of error, need to free the previously
		 * initialized hwfn 0.
		 */
		if (rc) {
			if (IS_PF(cdev)) {
				qed_init_free(p_hwfn);
				qed_mcp_free(p_hwfn);
				qed_hw_hwfn_free(p_hwfn);
			}
		}
	}

	return rc;
}
void qed_hw_remove(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (IS_VF(cdev)) {
			qed_vf_pf_release(p_hwfn);
			continue;
		}

		qed_init_free(p_hwfn);
		qed_hw_hwfn_free(p_hwfn);
		qed_mcp_free(p_hwfn);
	}

	qed_iov_free_hw_info(cdev);
}
static void qed_chain_free_next_ptr(struct qed_dev *cdev,
				    struct qed_chain *p_chain)
{
	void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL;
	dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
	struct qed_chain_next *p_next;
	u32 size, i;

	if (!p_virt)
		return;

	size = p_chain->elem_size * p_chain->usable_per_page;

	for (i = 0; i < p_chain->page_cnt; i++) {
		if (!p_virt)
			break;

		p_next = (struct qed_chain_next *)((u8 *)p_virt + size);
		p_virt_next = p_next->next_virt;
		p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);

		dma_free_coherent(&cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE, p_virt, p_phys);

		p_virt = p_virt_next;
		p_phys = p_phys_next;
	}
}
static void qed_chain_free_single(struct qed_dev *cdev,
				  struct qed_chain *p_chain)
{
	if (!p_chain->p_virt_addr)
		return;

	dma_free_coherent(&cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_chain->p_virt_addr, p_chain->p_phys_addr);
}
static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
	u32 page_cnt = p_chain->page_cnt, i, pbl_size;
	u8 *p_pbl_virt = p_chain->pbl.p_virt_table;

	if (!pp_virt_addr_tbl)
		return;

	if (!p_chain->pbl.p_virt_table)
		goto out;

	for (i = 0; i < page_cnt; i++) {
		if (!pp_virt_addr_tbl[i])
			break;

		dma_free_coherent(&cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  pp_virt_addr_tbl[i],
				  *(dma_addr_t *)p_pbl_virt);

		p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
	}

	pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
	dma_free_coherent(&cdev->pdev->dev,
			  pbl_size,
			  p_chain->pbl.p_virt_table, p_chain->pbl.p_phys_table);
out:
	vfree(p_chain->pbl.pp_virt_addr_tbl);
}
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		qed_chain_free_next_ptr(cdev, p_chain);
		break;
	case QED_CHAIN_MODE_SINGLE:
		qed_chain_free_single(cdev, p_chain);
		break;
	case QED_CHAIN_MODE_PBL:
		qed_chain_free_pbl(cdev, p_chain);
		break;
	}
}
static int
qed_chain_alloc_sanity_check(struct qed_dev *cdev,
			     enum qed_chain_cnt_type cnt_type,
			     size_t elem_size, u32 page_cnt)
{
	u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;

	/* The actual chain size can be larger than the maximal possible value
	 * after rounding up the requested elements number to pages, and after
	 * taking into account the unusable elements (next-ptr elements).
	 * The size of a "u16" chain can be (U16_MAX + 1) since the chain
	 * size/capacity fields are of a u32 type.
	 */
	if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
	     chain_size > 0x10000) ||
	    (cnt_type == QED_CHAIN_CNT_TYPE_U32 &&
	     chain_size > 0x100000000ULL)) {
		DP_NOTICE(cdev,
			  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
			  chain_size);
		return -EINVAL;
	}

	return 0;
}
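/* Worked example for the check above: with 8-byte elements and 4kB chain
 * pages, ELEMS_PER_PAGE(8) = 512, so a u16-counted chain may span at most
 * 0x10000 / 512 = 128 pages before its size exceeds the 16-bit count.
 * (Illustrative arithmetic only; the real limits follow the macros above.)
 */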
static int
qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	void *p_virt = NULL, *p_virt_prev = NULL;
	dma_addr_t p_phys = 0;
	u32 i;

	for (i = 0; i < p_chain->page_cnt; i++) {
		p_virt = dma_alloc_coherent(&cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_phys, GFP_KERNEL);
		if (!p_virt) {
			DP_NOTICE(cdev, "Failed to allocate chain memory\n");
			return -ENOMEM;
		}

		if (i == 0) {
			qed_chain_init_mem(p_chain, p_virt, p_phys);
			qed_chain_reset(p_chain);
		} else {
			qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
						     p_virt, p_phys);
		}

		p_virt_prev = p_virt;
	}
	/* Last page's next element should point to the beginning of the
	 * chain.
	 */
	qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
				     p_chain->p_virt_addr,
				     p_chain->p_phys_addr);

	return 0;
}
static int
qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	dma_addr_t p_phys = 0;
	void *p_virt = NULL;

	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
	if (!p_virt) {
		DP_NOTICE(cdev, "Failed to allocate chain memory\n");
		return -ENOMEM;
	}

	qed_chain_init_mem(p_chain, p_virt, p_phys);
	qed_chain_reset(p_chain);

	return 0;
}
static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	u32 page_cnt = p_chain->page_cnt, size, i;
	dma_addr_t p_phys = 0, p_pbl_phys = 0;
	void **pp_virt_addr_tbl = NULL;
	u8 *p_pbl_virt = NULL;
	void *p_virt = NULL;

	size = page_cnt * sizeof(*pp_virt_addr_tbl);
	pp_virt_addr_tbl = vmalloc(size);
	if (!pp_virt_addr_tbl) {
		DP_NOTICE(cdev,
			  "Failed to allocate memory for the chain virtual addresses table\n");
		return -ENOMEM;
	}
	memset(pp_virt_addr_tbl, 0, size);

	/* The allocation of the PBL table is done with its full size, since it
	 * is expected to be successive.
	 * qed_chain_init_pbl_mem() is called even in a case of an allocation
	 * failure, since pp_virt_addr_tbl was previously allocated, and it
	 * should be saved to allow its freeing during the error flow.
	 */
	size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
	p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
					size, &p_pbl_phys, GFP_KERNEL);
	qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
			       pp_virt_addr_tbl);
	if (!p_pbl_virt) {
		DP_NOTICE(cdev, "Failed to allocate chain pbl memory\n");
		return -ENOMEM;
	}

	for (i = 0; i < page_cnt; i++) {
		p_virt = dma_alloc_coherent(&cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_phys, GFP_KERNEL);
		if (!p_virt) {
			DP_NOTICE(cdev, "Failed to allocate chain memory\n");
			return -ENOMEM;
		}

		if (i == 0) {
			qed_chain_init_mem(p_chain, p_virt, p_phys);
			qed_chain_reset(p_chain);
		}

		/* Fill the PBL table with the physical address of the page */
		*(dma_addr_t *)p_pbl_virt = p_phys;
		/* Keep the virtual address of the page */
		p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;

		p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
	}

	return 0;
}
int qed_chain_alloc(struct qed_dev *cdev,
		    enum qed_chain_use_mode intended_use,
		    enum qed_chain_mode mode,
		    enum qed_chain_cnt_type cnt_type,
		    u32 num_elems, size_t elem_size, struct qed_chain *p_chain)
{
	u32 page_cnt;
	int rc = 0;

	if (mode == QED_CHAIN_MODE_SINGLE)
		page_cnt = 1;
	else
		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);

	rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
	if (rc) {
		DP_NOTICE(cdev,
			  "Cannot allocate a chain with the given arguments:\n"
			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
			  intended_use, mode, cnt_type, num_elems, elem_size);
		return rc;
	}

	qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use,
			      mode, cnt_type);

	switch (mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		rc = qed_chain_alloc_next_ptr(cdev, p_chain);
		break;
	case QED_CHAIN_MODE_SINGLE:
		rc = qed_chain_alloc_single(cdev, p_chain);
		break;
	case QED_CHAIN_MODE_PBL:
		rc = qed_chain_alloc_pbl(cdev, p_chain);
		break;
	}
	if (!rc)
		return 0;

	qed_chain_free(cdev, p_chain);
	return rc;
}
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
		u16 min, max;

		min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE);
		max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
		DP_NOTICE(p_hwfn,
			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;

	return 0;
}
int qed_fw_vport(struct qed_hwfn *p_hwfn,
		 u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, QED_VPORT);
		max = min + RESC_NUM(p_hwfn, QED_VPORT);
		DP_NOTICE(p_hwfn,
			  "vport id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;

	return 0;
}
int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
		   u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
		max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
		DP_NOTICE(p_hwfn,
			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;

	return 0;
}
static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			    u32 hw_addr, void *p_eth_qzone,
			    size_t eth_qzone_size, u8 timeset)
{
	struct coalescing_timeset *p_coal_timeset;

	if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) {
		DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n");
		return -EINVAL;
	}

	p_coal_timeset = p_eth_qzone;
	memset(p_coal_timeset, 0, eth_qzone_size);
	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
	qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);

	return 0;
}
int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u16 coalesce, u8 qid, u16 sb_id)
{
	struct ustorm_eth_queue_zone eth_qzone;
	u8 timeset, timer_res;
	u16 fw_qid = 0;
	u32 address;
	int rc;

	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
	if (coalesce <= 0x7F) {
		timer_res = 0;
	} else if (coalesce <= 0xFF) {
		timer_res = 1;
	} else if (coalesce <= 0x1FF) {
		timer_res = 2;
	} else {
		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
		return -EINVAL;
	}
	timeset = (u8)(coalesce >> timer_res);

	rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
	if (rc)
		return rc;

	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false);
	if (rc)
		goto out;

	address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);

	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
			      sizeof(struct ustorm_eth_queue_zone), timeset);
	if (rc)
		goto out;

	p_hwfn->cdev->rx_coalesce_usecs = coalesce;
out:
	return rc;
}
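/* Worked example: coalesce == 300 usec exceeds 0xFF but not 0x1FF, so
 * timer_res == 2 and timeset == 300 >> 2 == 75; the hardware reconstructs
 * 75 << 2 == 300 usec from the two values.
 */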
int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u16 coalesce, u8 qid, u16 sb_id)
{
	struct xstorm_eth_queue_zone eth_qzone;
	u8 timeset, timer_res;
	u16 fw_qid = 0;
	u32 address;
	int rc;

	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
	if (coalesce <= 0x7F) {
		timer_res = 0;
	} else if (coalesce <= 0xFF) {
		timer_res = 1;
	} else if (coalesce <= 0x1FF) {
		timer_res = 2;
	} else {
		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
		return -EINVAL;
	}
	timeset = (u8)(coalesce >> timer_res);

	rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
	if (rc)
		return rc;

	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true);
	if (rc)
		goto out;

	address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);

	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
			      sizeof(struct xstorm_eth_queue_zone), timeset);
	if (rc)
		goto out;

	p_hwfn->cdev->tx_coalesce_usecs = coalesce;
out:
	return rc;
}
/* Calculate final WFQ values for all vports and configure them.
 * After this configuration each vport will have
 * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
 */
static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
					     struct qed_ptt *p_ptt,
					     u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;

		vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
						min_pf_rate;
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
				   vport_params[i].vport_wfq);
	}
}
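/* Worked example: with QED_WFQ_UNIT == 100 and min_pf_rate == 10000 Mbps,
 * a vport whose min_speed is 2500 Mbps gets vport_wfq == 25, i.e. roughly
 * a 25% (2500 Mbps) share of the PF minimum.
 */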
static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
				       u32 min_pf_rate)
{
	int i;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
}
static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
					   struct qed_ptt *p_ptt,
					   u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		qed_init_wfq_default_param(p_hwfn, min_pf_rate);
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
				   vport_params[i].vport_wfq);
	}
}
/* This function performs several validations for WFQ
 * configuration and the required min rate for a given vport:
 * 1. req_rate must be greater than one percent of min_pf_rate.
 * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
 *    rates to get less than one percent of min_pf_rate.
 * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
 */
static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
			      u16 vport_id, u32 req_rate,
			      u32 min_pf_rate)
{
	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
	int non_requested_count = 0, req_count = 0, i, num_vports;

	num_vports = p_hwfn->qm_info.num_vports;

	/* Accounting for the vports which are configured for WFQ explicitly */
	for (i = 0; i < num_vports; i++) {
		u32 tmp_speed;

		if ((i != vport_id) &&
		    p_hwfn->qm_info.wfq_data[i].configured) {
			req_count++;
			tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
			total_req_min_rate += tmp_speed;
		}
	}

	/* Include current vport data as well */
	req_count++;
	total_req_min_rate += req_rate;
	non_requested_count = num_vports - req_count;

	if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
			   vport_id, req_rate, min_pf_rate);
		return -EINVAL;
	}

	if (num_vports > QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Number of vports is greater than %d\n",
			   QED_WFQ_UNIT);
		return -EINVAL;
	}

	if (total_req_min_rate > min_pf_rate) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
			   total_req_min_rate, min_pf_rate);
		return -EINVAL;
	}

	total_left_rate = min_pf_rate - total_req_min_rate;

	left_rate_per_vp = total_left_rate / non_requested_count;
	if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
			   left_rate_per_vp, min_pf_rate);
		return -EINVAL;
	}

	p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
	p_hwfn->qm_info.wfq_data[vport_id].configured = true;

	for (i = 0; i < num_vports; i++) {
		if (p_hwfn->qm_info.wfq_data[i].configured)
			continue;

		p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
	}

	return 0;
}
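/* Worked example (illustrative numbers): num_vports == 4, min_pf_rate ==
 * 10000 Mbps, vport 1 already configured for 2000 Mbps, and vport 0 now
 * requesting 6000 Mbps. The 8000 Mbps total stays under the PF minimum,
 * and the 2000 Mbps remainder gives each of the two unconfigured vports
 * 1000 Mbps, above the 1% (100 Mbps) floor, so the request is accepted.
 */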
static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
{
	struct qed_mcp_link_state *p_link;
	int rc = 0;

	p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;

	if (!p_link->min_pf_rate) {
		p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
		p_hwfn->qm_info.wfq_data[vp_id].configured = true;
		return rc;
	}

	rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);

	if (rc == 0)
		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
						 p_link->min_pf_rate);
	else
		DP_NOTICE(p_hwfn,
			  "Validation failed while configuring min rate\n");

	return rc;
}
static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
						 struct qed_ptt *p_ptt,
						 u32 min_pf_rate)
{
	bool use_wfq = false;
	int rc = 0;
	u16 i;

	/* Validate all pre configured vports for wfq */
	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 rate;

		if (!p_hwfn->qm_info.wfq_data[i].configured)
			continue;

		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
		use_wfq = true;

		rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "WFQ validation failed while configuring min rate\n");
			break;
		}
	}

	if (!rc && use_wfq)
		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
	else
		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);

	return rc;
}
/* Main API for qed clients to configure vport min rate.
 * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)]
 * rate - Speed in Mbps to be assigned to the given vport.
 */
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
{
	int i, rc = -EINVAL;

	/* Currently not supported; Might change in future */
	if (cdev->num_hwfns > 1) {
		DP_NOTICE(cdev,
			  "WFQ configuration is not supported for this device\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);

		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		qed_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}
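/* Usage sketch (editor's illustration, not part of the driver): a client
 * requesting a 2500 Mbps floor for vport 2 on a single-hwfn device.
 */
#if 0
static int example_set_vport_min_rate(struct qed_dev *cdev)
{
	return qed_configure_vport_wfq(cdev, 2, 2500);
}
#endif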
/* API to configure WFQ from mcp link change */
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
{
	int i;

	if (cdev->num_hwfns > 1) {
		DP_VERBOSE(cdev,
			   NETIF_MSG_LINK,
			   "WFQ configuration is not supported for this device\n");
		return;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		__qed_configure_vp_wfq_on_link_change(p_hwfn,
						      p_hwfn->p_dpc_ptt,
						      min_pf_rate);
	}
}
int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 max_bw)
{
	int rc = 0;

	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;

	if (!p_link->line_speed && (max_bw != 100))
		return rc;

	p_link->speed = (p_link->line_speed * max_bw) / 100;
	p_hwfn->qm_info.pf_rl = p_link->speed;

	/* Since the limiter also affects Tx-switched traffic, we don't want it
	 * to limit such traffic in case there's no actual limit.
	 * In that case, set limit to imaginary high boundary.
	 */
	if (max_bw == 100)
		p_hwfn->qm_info.pf_rl = 100000;

	rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			    p_hwfn->qm_info.pf_rl);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MAX bandwidth to be %08x Mb/sec\n",
		   p_link->speed);

	return rc;
}
/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
{
	int i, rc = -EINVAL;

	if (max_bw < 1 || max_bw > 100) {
		DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
		struct qed_mcp_link_state *p_link;
		struct qed_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
						      p_link, max_bw);

		qed_ptt_release(p_hwfn, p_ptt);

		if (rc)
			break;
	}

	return rc;
}
int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 min_bw)
{
	int rc = 0;

	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
	p_hwfn->qm_info.pf_wfq = min_bw;

	if (!p_link->line_speed)
		return rc;

	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;

	rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MIN bandwidth to be %d Mb/sec\n",
		   p_link->min_pf_rate);

	return rc;
}
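/* Worked example: line_speed == 25000 Mbps and min_bw == 20 yield a
 * min_pf_rate of 5000 Mbps, which the WFQ logic then divides among the
 * configured vports.
 */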
/* Main API to configure PF min bandwidth where bw range is [1-100] */
int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
{
	int i, rc = -EINVAL;

	if (min_bw < 1 || min_bw > 100) {
		DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
		struct qed_mcp_link_state *p_link;
		struct qed_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
						      p_link, min_bw);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		if (p_link->min_pf_rate) {
			u32 min_rate = p_link->min_pf_rate;

			rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
								   p_ptt,
								   min_rate);
		}

		qed_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}
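/* Usage sketch (editor's illustration, not part of the driver): capping a
 * PF at 80% of line speed while guaranteeing it 20%, using the two main
 * bandwidth APIs above.
 */
#if 0
static int example_set_pf_bandwidth(struct qed_dev *cdev)
{
	int rc;

	rc = qed_configure_pf_max_bandwidth(cdev, 80);
	if (rc)
		return rc;

	return qed_configure_pf_min_bandwidth(cdev, 20);
}
#endif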
void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_link_state *p_link;

	p_link = &p_hwfn->mcp_info->link_output;

	if (p_link->min_pf_rate)
		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
					       p_link->min_pf_rate);

	memset(p_hwfn->qm_info.wfq_data, 0,
	       sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
}