/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"

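/* Protects the QM stop/start commands issued while reconfiguring the queue
 * manager in qed_qm_reconf(); initialized lazily on the first PF load.
 */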
static spinlock_t qm_lock;
static bool qm_lock_init = false;

/* API common to all protocols */
enum BAR_ID {
	BAR_ID_0,	/* used for GRC */
	BAR_ID_1	/* Used for doorbells */
};

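/* Returns the size in bytes of the hw-function's slice of the given BAR, as
 * reported by the PGLUE_B per-PF BAR size registers; falls back to
 * conservative defaults when an old MFW has not configured those registers.
 */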
static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
			   enum BAR_ID bar_id)
{
	u32 bar_reg = (bar_id == BAR_ID_0 ?
		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
	u32 val;

	if (IS_VF(p_hwfn->cdev))
		return 1 << 17;

	val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
	if (val)
		return 1 << (val + 15);

	/* Old MFW initialized the above register only conditionally */
	if (p_hwfn->cdev->num_hwfns > 1) {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
		return (bar_id == BAR_ID_0) ? 256 * 1024 : 512 * 1024;
	} else {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
		return 512 * 1024;
	}
}

void qed_init_dp(struct qed_dev *cdev,
		 u32 dp_module, u8 dp_level)
{
	u32 i;

	cdev->dp_level = dp_level;
	cdev->dp_module = dp_module;
	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->dp_level = dp_level;
		p_hwfn->dp_module = dp_module;
	}
}

void qed_init_struct(struct qed_dev *cdev)
{
	u8 i;

	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->cdev = cdev;
		p_hwfn->my_id = i;
		p_hwfn->b_active = false;

		mutex_init(&p_hwfn->dmae_info.mutex);
	}

	/* hwfn 0 is always active */
	cdev->hwfns[0].b_active = true;

	/* set the default cache alignment to 128 */
	cdev->cache_shift = 7;
}

static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	kfree(qm_info->qm_pq_params);
	qm_info->qm_pq_params = NULL;
	kfree(qm_info->qm_vport_params);
	qm_info->qm_vport_params = NULL;
	kfree(qm_info->qm_port_params);
	qm_info->qm_port_params = NULL;
	kfree(qm_info->wfq_data);
	qm_info->wfq_data = NULL;
}

void qed_resc_free(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	kfree(cdev->fw_data);
	cdev->fw_data = NULL;

	kfree(cdev->reset_stats);

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		kfree(p_hwfn->p_tx_cids);
		p_hwfn->p_tx_cids = NULL;
		kfree(p_hwfn->p_rx_cids);
		p_hwfn->p_rx_cids = NULL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_free(p_hwfn);
		qed_qm_info_free(p_hwfn);
		qed_spq_free(p_hwfn);
		qed_eq_free(p_hwfn, p_hwfn->p_eq);
		qed_consq_free(p_hwfn, p_hwfn->p_consq);
		qed_int_free(p_hwfn);
		qed_iov_free(p_hwfn);
		qed_dmae_info_free(p_hwfn);
		qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
	}
}

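/* Derive the queue-manager configuration for this PF: physical queues are
 * laid out as the per-TC PQs first, then the pure loopback PQ, and finally
 * one PQ per VF. Allocation may run in atomic context, hence b_sleepable.
 */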
static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
{
	u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct init_qm_port_params *p_qm_port;
	u16 num_pqs, multi_cos_tcs = 1;
	u8 pf_wfq = qm_info->pf_wfq;
	u32 pf_rl = qm_info->pf_rl;
	u16 num_vfs = 0;

#ifdef CONFIG_QED_SRIOV
	if (p_hwfn->cdev->p_iov_info)
		num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
#endif
	memset(qm_info, 0, sizeof(*qm_info));

	num_pqs = multi_cos_tcs + num_vfs + 1;	/* The '1' is for pure-LB */
	num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);

	/* Sanity checking that setup requires legal number of resources */
	if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
		DP_ERR(p_hwfn,
		       "Need too many Physical queues - 0x%04x when only %04x are available\n",
		       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
		return -EINVAL;
	}

	/* PQs will be arranged as follows: First per-TC PQ then pure-LB queue.
	 */
	qm_info->qm_pq_params = kcalloc(num_pqs,
					sizeof(struct init_qm_pq_params),
					b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
	if (!qm_info->qm_pq_params)
		goto alloc_err;

	qm_info->qm_vport_params = kcalloc(num_vports,
					   sizeof(struct init_qm_vport_params),
					   b_sleepable ? GFP_KERNEL
						       : GFP_ATOMIC);
	if (!qm_info->qm_vport_params)
		goto alloc_err;

	qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
					  sizeof(struct init_qm_port_params),
					  b_sleepable ? GFP_KERNEL
						      : GFP_ATOMIC);
	if (!qm_info->qm_port_params)
		goto alloc_err;

	qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
				    b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
	if (!qm_info->wfq_data)
		goto alloc_err;

	vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);

	/* First init per-TC PQs */
	for (i = 0; i < multi_cos_tcs; i++) {
		struct init_qm_pq_params *params =
		    &qm_info->qm_pq_params[curr_queue++];

		if (p_hwfn->hw_info.personality == QED_PCI_ETH) {
			params->vport_id = vport_id;
			params->tc_id = p_hwfn->hw_info.non_offload_tc;
			params->wrr_group = 1;
		} else {
			params->vport_id = vport_id;
			params->tc_id = p_hwfn->hw_info.offload_tc;
			params->wrr_group = 1;
		}
	}

	/* Then init pure-LB PQ */
	qm_info->pure_lb_pq = curr_queue;
	qm_info->qm_pq_params[curr_queue].vport_id =
	    (u8) RESC_START(p_hwfn, QED_VPORT);
	qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
	qm_info->qm_pq_params[curr_queue].wrr_group = 1;
	curr_queue++;

	qm_info->offload_pq = 0;
	/* Then init per-VF PQs */
	vf_offset = curr_queue;
	for (i = 0; i < num_vfs; i++) {
		/* First vport is used by the PF */
		qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
		qm_info->qm_pq_params[curr_queue].tc_id =
		    p_hwfn->hw_info.non_offload_tc;
		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
		curr_queue++;
	}

	qm_info->vf_queues_offset = vf_offset;
	qm_info->num_pqs = num_pqs;
	qm_info->num_vports = num_vports;

	/* Initialize qm port parameters */
	num_ports = p_hwfn->cdev->num_ports_in_engines;
	for (i = 0; i < num_ports; i++) {
		p_qm_port = &qm_info->qm_port_params[i];
		p_qm_port->active = 1;
		p_qm_port->num_active_phys_tcs = 4;
		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
	}

	qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;

	qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);

	qm_info->num_vf_pqs = num_vfs;
	qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);

	for (i = 0; i < qm_info->num_vports; i++)
		qm_info->qm_vport_params[i].vport_wfq = 1;

	qm_info->vport_rl_en = 1;
	qm_info->vport_wfq_en = 1;
	qm_info->pf_rl = pf_rl;
	qm_info->pf_wfq = pf_wfq;

	return 0;

alloc_err:
	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
	qed_qm_info_free(p_hwfn);
	return -ENOMEM;
}

/* This function reconfigures the QM pf on the fly.
 * For this purpose we:
 * 1. reconfigure the QM database
 * 2. set new values to runtime array
 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
 * 4. activate init tool in QM_PF stage
 * 5. send an sdm_qm_cmd through rbc interface to release the QM
 */
int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	bool b_rc;
	int rc;

	/* qm_info is allocated in qed_init_qm_info() which is already called
	 * from qed_resc_alloc() or previous call of qed_qm_reconf().
	 * The allocated size may change each init, so we free it before next
	 * allocation.
	 */
	qed_qm_info_free(p_hwfn);

	/* initialize qed's qm data structure */
	rc = qed_init_qm_info(p_hwfn, false);
	if (rc)
		return rc;

	/* stop PF's qm queues */
	spin_lock_bh(&qm_lock);
	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
				    qm_info->start_pq, qm_info->num_pqs);
	spin_unlock_bh(&qm_lock);
	if (!b_rc)
		return -EINVAL;

	/* clear the QM_PF runtime phase leftovers from previous init */
	qed_init_clear_rt_data(p_hwfn);

	/* prepare QM portion of runtime array */
	qed_qm_init_pf(p_hwfn);

	/* activate init tool on runtime array */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
			  p_hwfn->hw_info.hw_mode);
	if (rc)
		return rc;

	/* start PF's qm queues */
	spin_lock_bh(&qm_lock);
	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
				    qm_info->start_pq, qm_info->num_pqs);
	spin_unlock_bh(&qm_lock);
	if (!b_rc)
		return -EINVAL;

	return 0;
}

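/* Allocate all software resources for every hw-function: queue->CID maps,
 * context manager, QM info, ILT tables, slowpath queue, event and consumer
 * queues, interrupt and IOV state, DMAE and DCBX info. Any failure unwinds
 * through qed_resc_free().
 */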
int qed_resc_alloc(struct qed_dev *cdev)
{
	struct qed_consq *p_consq;
	struct qed_eq *p_eq;
	int i, rc = 0;

	if (IS_VF(cdev))
		return rc;

	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
	if (!cdev->fw_data)
		return -ENOMEM;

	/* Allocate Memory for the Queue->CID mapping */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		int tx_size = sizeof(struct qed_hw_cid_data) *
				     RESC_NUM(p_hwfn, QED_L2_QUEUE);
		int rx_size = sizeof(struct qed_hw_cid_data) *
				     RESC_NUM(p_hwfn, QED_L2_QUEUE);

		p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
		if (!p_hwfn->p_tx_cids) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for Tx Cids\n");
			rc = -ENOMEM;
			goto alloc_err;
		}

		p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
		if (!p_hwfn->p_rx_cids) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for Rx Cids\n");
			rc = -ENOMEM;
			goto alloc_err;
		}
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* First allocate the context manager structure */
		rc = qed_cxt_mngr_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Set the HW cid/tid numbers (in the context manager)
		 * Must be done prior to any further computations.
		 */
		rc = qed_cxt_set_pf_params(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Prepare and process QM requirements */
		rc = qed_init_qm_info(p_hwfn, true);
		if (rc)
			goto alloc_err;

		/* Compute the ILT client partition */
		rc = qed_cxt_cfg_ilt_compute(p_hwfn);
		if (rc)
			goto alloc_err;

		/* CID map / ILT shadow table / T2
		 * The table sizes are determined by the computations above
		 */
		rc = qed_cxt_tables_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SPQ, must follow ILT because initializes SPQ context */
		rc = qed_spq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SP status block allocation */
		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
							 RESERVED_PTT_DPC);

		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			goto alloc_err;

		rc = qed_iov_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* EQ */
		p_eq = qed_eq_alloc(p_hwfn, 256);
		if (!p_eq) {
			rc = -ENOMEM;
			goto alloc_err;
		}
		p_hwfn->p_eq = p_eq;

		p_consq = qed_consq_alloc(p_hwfn);
		if (!p_consq) {
			rc = -ENOMEM;
			goto alloc_err;
		}
		p_hwfn->p_consq = p_consq;

		/* DMA info initialization */
		rc = qed_dmae_info_alloc(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for dmae_info structure\n");
			goto alloc_err;
		}

		/* DCBX initialization */
		rc = qed_dcbx_info_alloc(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for dcbx structure\n");
			goto alloc_err;
		}
	}

	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
	if (!cdev->reset_stats) {
		DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
		rc = -ENOMEM;
		goto alloc_err;
	}

	return 0;

alloc_err:
	qed_resc_free(cdev);
	return rc;
}

void qed_resc_setup(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_setup(p_hwfn);
		qed_spq_setup(p_hwfn);
		qed_eq_setup(p_hwfn, p_hwfn->p_eq);
		qed_consq_setup(p_hwfn, p_hwfn->p_consq);

		/* Read shadow of current MFW mailbox */
		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
		memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
		       p_hwfn->mcp_info->mfw_mb_cur,
		       p_hwfn->mcp_info->mfw_mb_length);

		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);

		qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
	}
}

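/* Final cleanup is requested from firmware by writing an SDM aggregated-
 * interrupt op-gen command to XSDM, then polling the USTORM ack offset in
 * GTT until the storm firmware signals completion (or the poll count ends).
 */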
#define FINAL_CLEANUP_POLL_CNT          (100)
#define FINAL_CLEANUP_POLL_TIME         (10)
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u16 id, bool is_vf)
{
	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
	int rc = -EBUSY;

	addr = GTT_BAR0_MAP_REG_USDM_RAM +
		USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);

	if (is_vf)
		id += 0x10;

	command |= X_FINAL_CLEANUP_AGG_INT <<
		SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;

	/* Make sure notification is not set before initiating final cleanup */
	if (REG_RD(p_hwfn, addr)) {
		DP_NOTICE(p_hwfn,
			  "Unexpected; Found final cleanup notification before initiating final cleanup\n");
		REG_WR(p_hwfn, addr, 0);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
		   id, command);

	qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);

	/* Poll until completion */
	while (!REG_RD(p_hwfn, addr) && count--)
		msleep(FINAL_CLEANUP_POLL_TIME);

	if (REG_RD(p_hwfn, addr))
		rc = 0;
	else
		DP_NOTICE(p_hwfn,
			  "Failed to receive FW final cleanup notification\n");

	/* Cleanup afterwards */
	REG_WR(p_hwfn, addr, 0);

	return rc;
}

static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
	int hw_mode = 0;

	hw_mode = (1 << MODE_BB_B0);

	switch (p_hwfn->cdev->num_ports_in_engines) {
	case 1:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
		break;
	case 2:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
		break;
	case 4:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
		break;
	default:
		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
			  p_hwfn->cdev->num_ports_in_engines);
		return;
	}

	switch (p_hwfn->cdev->mf_mode) {
	case QED_MF_DEFAULT:
	case QED_MF_NPAR:
		hw_mode |= 1 << MODE_MF_SI;
		break;
	case QED_MF_OVLAN:
		hw_mode |= 1 << MODE_MF_SD;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
		hw_mode |= 1 << MODE_MF_SI;
	}

	hw_mode |= 1 << MODE_ASIC;

	if (p_hwfn->cdev->num_hwfns > 1)
		hw_mode |= 1 << MODE_100G;

	p_hwfn->hw_info.hw_mode = hw_mode;

	DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
		   "Configuring function for hw_mode: 0x%08x\n",
		   p_hwfn->hw_info.hw_mode);
}

/* Init run time data for all PFs on an engine. */
static void qed_init_cau_rt_data(struct qed_dev *cdev)
{
	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
	int i, sb_id;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_igu_info *p_igu_info;
		struct qed_igu_block *p_block;
		struct cau_sb_entry sb_entry;

		p_igu_info = p_hwfn->hw_info.p_igu_info;

		for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
		     sb_id++) {
			p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
			if (!p_block->is_pf)
				continue;

			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
					      p_block->function_id,
					      0, 0);
			STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
					 sb_entry);
		}
	}
}

static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      int hw_mode)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct qed_qm_common_rt_init_params params;
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 concrete_fid;
	int rc = 0;
	u8 vf_id;

	qed_init_cau_rt_data(cdev);

	/* Program GTT windows */
	qed_gtt_init(p_hwfn);

	if (p_hwfn->mcp_info) {
		if (p_hwfn->mcp_info->func_info.bandwidth_max)
			qm_info->pf_rl_en = 1;
		if (p_hwfn->mcp_info->func_info.bandwidth_min)
			qm_info->pf_wfq_en = 1;
	}

	memset(&params, 0, sizeof(params));
	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params.pf_rl_en = qm_info->pf_rl_en;
	params.pf_wfq_en = qm_info->pf_wfq_en;
	params.vport_rl_en = qm_info->vport_rl_en;
	params.vport_wfq_en = qm_info->vport_wfq_en;
	params.port_params = qm_info->qm_port_params;

	qed_qm_common_rt_init(p_hwfn, &params);

	qed_cxt_hw_init_common(p_hwfn);

	/* Close gate from NIG to BRB/Storm; By default they are open, but
	 * we close them to prevent NIG from passing data to reset blocks.
	 * Should have been done in the ENGINE phase, but init-tool lacks
	 * proper port-pretend capabilities.
	 */
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	qed_port_unpretend(p_hwfn, p_ptt);

	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
	if (rc)
		return rc;

	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

	/* Disable relaxed ordering in the PCI config space */
	qed_wr(p_hwfn, p_ptt, 0x20b4,
	       qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);

	for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
		concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
		qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
		qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
	}
	/* pretend to original PF */
	qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);

	return rc;
}

static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    int hw_mode)
{
	int rc = 0;

	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
			  hw_mode);
	return rc;
}

static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_tunn_start_params *p_tunn,
			  int hw_mode,
			  bool b_hw_start,
			  enum qed_int_mode int_mode,
			  bool allow_npar_tx_switch)
{
	u8 rel_pf_id = p_hwfn->rel_pf_id;
	int rc = 0;

	if (p_hwfn->mcp_info) {
		struct qed_mcp_function_info *p_info;

		p_info = &p_hwfn->mcp_info->func_info;
		if (p_info->bandwidth_min)
			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;

		/* Update rate limit once we'll actually have a link */
		p_hwfn->qm_info.pf_rl = 100000;
	}

	qed_cxt_hw_init_pf(p_hwfn);

	qed_int_igu_init_rt(p_hwfn);

	/* Set VLAN in NIG if needed */
	if (hw_mode & (1 << MODE_MF_SD)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
			     p_hwfn->hw_info.ovlan);
	}

	/* Enable classification by MAC if needed */
	if (hw_mode & (1 << MODE_MF_SI)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Configuring TAGMAC_CLS_TYPE\n");
		STORE_RT_REG(p_hwfn,
			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
	}

	/* Protocol Configuration */
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);

	/* Cleanup chip from previous driver if such remains exist */
	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
	if (rc)
		return rc;

	/* PF Init sequence */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* Pure runtime initializations - directly to the HW  */
	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);

	if (b_hw_start) {
		/* enable interrupts */
		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);

		/* send function start command */
		rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
				     allow_npar_tx_switch);
		if (rc)
			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
	}
	return rc;
}

static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u8 enable)
{
	u32 delay_idx = 0, val, set_val = enable ? 1 : 0;

	/* Change PF in PXP */
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);

	/* wait until value is set - try for 1 second every 50us */
	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
		val = qed_rd(p_hwfn, p_ptt,
			     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
		if (val == set_val)
			break;

		usleep_range(50, 60);
	}

	if (val != set_val) {
		DP_NOTICE(p_hwfn,
			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
		return -EAGAIN;
	}

	return 0;
}

static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_main_ptt)
{
	/* Read shadow of current MFW mailbox */
	qed_mcp_read_mb(p_hwfn, p_main_ptt);
	memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
	       p_hwfn->mcp_info->mfw_mb_cur,
	       p_hwfn->mcp_info->mfw_mb_length);
}

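/* Top-level HW init: per hw-function, enable DMAE in PXP, issue LOAD_REQ to
 * the MFW and run the engine/port/function init phases implied by the load
 * code it returns, then acknowledge with LOAD_DONE and kick DCBx handling.
 */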
int qed_hw_init(struct qed_dev *cdev,
		struct qed_tunn_start_params *p_tunn,
		bool b_hw_start,
		enum qed_int_mode int_mode,
		bool allow_npar_tx_switch,
		const u8 *bin_fw_data)
{
	u32 load_code, param;
	int rc, mfw_rc, i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	if (IS_PF(cdev)) {
		rc = qed_init_fw_data(cdev, bin_fw_data);
		if (rc != 0)
			return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (IS_VF(cdev)) {
			p_hwfn->b_int_enabled = 1;
			continue;
		}

		/* Enable DMAE in PXP */
		rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);

		qed_calc_hw_mode(p_hwfn);

		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
				      &load_code);
		if (rc) {
			DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
			return rc;
		}

		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
			   rc, load_code);

		p_hwfn->first_on_engine = (load_code ==
					   FW_MSG_CODE_DRV_LOAD_ENGINE);

		if (!qm_lock_init) {
			spin_lock_init(&qm_lock);
			qm_lock_init = true;
		}

		switch (load_code) {
		case FW_MSG_CODE_DRV_LOAD_ENGINE:
			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
						p_hwfn->hw_info.hw_mode);
			if (rc)
				break;
		/* Fall into */
		case FW_MSG_CODE_DRV_LOAD_PORT:
			rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
					      p_hwfn->hw_info.hw_mode);
			if (rc)
				break;

		/* Fall into */
		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
					    p_tunn, p_hwfn->hw_info.hw_mode,
					    b_hw_start, int_mode,
					    allow_npar_tx_switch);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		if (rc)
			DP_NOTICE(p_hwfn,
				  "init phase failed for loadcode 0x%x (rc %d)\n",
				  load_code, rc);

		/* ACK mfw regardless of success or failure of initialization */
		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				     DRV_MSG_CODE_LOAD_DONE,
				     0, &load_code, &param);
		if (rc)
			return rc;
		if (mfw_rc) {
			DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
			return mfw_rc;
		}

		/* send DCBX attention request command */
		DP_VERBOSE(p_hwfn,
			   QED_MSG_DCB,
			   "sending phony dcbx set command to trigger DCBx attention handling\n");
		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				     DRV_MSG_CODE_SET_DCBX,
				     1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
				     &load_code, &param);
		if (mfw_rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to send DCBX attention request\n");
			return mfw_rc;
		}

		p_hwfn->hw_init_done = true;
	}

	return 0;
}

#define QED_HW_STOP_RETRY_LIMIT (10)
static inline void qed_hw_timers_stop(struct qed_dev *cdev,
				      struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt)
{
	int i;

	/* close timers */
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);

	for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
		if ((!qed_rd(p_hwfn, p_ptt,
			     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
		    (!qed_rd(p_hwfn, p_ptt,
			     TM_REG_PF_SCAN_ACTIVE_TASK)))
			break;

		/* Dependent on number of connection/tasks, possibly
		 * 1ms sleep is required between polls
		 */
		usleep_range(1000, 2000);
	}

	if (i < QED_HW_STOP_RETRY_LIMIT)
		return;

	DP_NOTICE(p_hwfn,
		  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
}

void qed_hw_timers_stop_all(struct qed_dev *cdev)
{
	int j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
	}
}

int qed_hw_stop(struct qed_dev *cdev)
{
	int rc = 0, t_rc;
	int j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");

		if (IS_VF(cdev)) {
			qed_vf_pf_int_cleanup(p_hwfn);
			continue;
		}

		/* mark the hw as uninitialized... */
		p_hwfn->hw_init_done = false;

		rc = qed_sp_pf_stop(p_hwfn);
		if (rc)
			DP_NOTICE(p_hwfn,
				  "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);

		/* Disable Attention Generation */
		qed_int_igu_disable_int(p_hwfn, p_ptt);

		qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
		qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
	}

	if (IS_PF(cdev)) {
		/* Disable DMAE in PXP - in CMT, this should only be done for
		 * first hw-function, and only after all transactions have
		 * stopped for all active hw-functions.
		 */
		t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
					   cdev->hwfns[0].p_main_ptt, false);
		if (t_rc != 0)
			rc = t_rc;
	}

	return rc;
}

void qed_hw_stop_fastpath(struct qed_dev *cdev)
{
	int j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		if (IS_VF(cdev)) {
			qed_vf_pf_int_cleanup(p_hwfn);
			continue;
		}

		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_IFDOWN,
			   "Shutting down the fastpath\n");

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
	}
}

void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
	if (IS_VF(p_hwfn->cdev))
		return;

	/* Re-open incoming traffic */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
}

static int qed_reg_assert(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt, u32 reg,
			  bool expected)
{
	u32 assert_val = qed_rd(hwfn, ptt, reg);

	if (assert_val != expected) {
		DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
			  reg, expected);
		return -EINVAL;
	}

	return 0;
}

int qed_hw_reset(struct qed_dev *cdev)
{
	int rc = 0;
	u32 unload_resp, unload_param;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_reset(p_hwfn);
			if (rc)
				return rc;
			continue;
		}

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");

		/* Check for incorrect states */
		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
			       QM_REG_USG_CNT_PF_TX, 0);
		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
			       QM_REG_USG_CNT_PF_OTHER, 0);

		/* Disable PF in HW blocks */
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       TCFC_REG_STRONG_ENABLE_PF, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       CCFC_REG_STRONG_ENABLE_PF, 0);

		/* Send unload command to MCP */
		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_UNLOAD_REQ,
				 DRV_MB_PARAM_UNLOAD_WOL_MCP,
				 &unload_resp, &unload_param);
		if (rc) {
			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
			unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
		}

		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_UNLOAD_DONE,
				 0, &unload_resp, &unload_param);
		if (rc) {
			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
			return rc;
		}
	}

	return rc;
}

/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
{
	qed_ptt_pool_free(p_hwfn);
	kfree(p_hwfn->hw_info.p_igu_info);
}

/* Setup bar access */
static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
	/* clear indirect access */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);

	/* Clean Previous errors if such exist */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
	       1 << p_hwfn->abs_pf_id);

	/* enable internal target-read */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
}

static void get_function_id(struct qed_hwfn *p_hwfn)
{
	/* ME Register */
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);

	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);

	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				      PXP_CONCRETE_FID_PFID);
	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				    PXP_CONCRETE_FID_PORT);
}

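/* Distribute the available status blocks and L2 queues between features */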
static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
	u32 *feat_num = p_hwfn->hw_info.feat_num;
	int num_features = 1;

	feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
						num_features,
					RESC_NUM(p_hwfn, QED_L2_QUEUE));
	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
		   feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
		   num_features);
}

static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
	u32 *resc_start = p_hwfn->hw_info.resc_start;
	u8 num_funcs = p_hwfn->num_funcs_on_engine;
	u32 *resc_num = p_hwfn->hw_info.resc_num;
	struct qed_sb_cnt_info sb_cnt_info;
	int i, max_vf_vlan_filters;

	memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));

#ifdef CONFIG_QED_SRIOV
	max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS;
#else
	max_vf_vlan_filters = 0;
#endif

	qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);

	resc_num[QED_SB] = min_t(u32,
				 (MAX_SB_PER_PATH_BB / num_funcs),
				 sb_cnt_info.sb_cnt);
	resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
	resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
	resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
	resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
	resc_num[QED_RL] = 8;
	resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
	resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
			     num_funcs;
	resc_num[QED_ILT] = 950;

	for (i = 0; i < QED_MAX_RESC; i++)
		resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;

	qed_hw_set_feat(p_hwfn);

	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "The numbers for each resource are:\n"
		   "SB = %d start = %d\n"
		   "L2_QUEUE = %d start = %d\n"
		   "VPORT = %d start = %d\n"
		   "PQ = %d start = %d\n"
		   "RL = %d start = %d\n"
		   "MAC = %d start = %d\n"
		   "VLAN = %d start = %d\n"
		   "ILT = %d start = %d\n",
		   p_hwfn->hw_info.resc_num[QED_SB],
		   p_hwfn->hw_info.resc_start[QED_SB],
		   p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
		   p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
		   p_hwfn->hw_info.resc_num[QED_VPORT],
		   p_hwfn->hw_info.resc_start[QED_VPORT],
		   p_hwfn->hw_info.resc_num[QED_PQ],
		   p_hwfn->hw_info.resc_start[QED_PQ],
		   p_hwfn->hw_info.resc_num[QED_RL],
		   p_hwfn->hw_info.resc_start[QED_RL],
		   p_hwfn->hw_info.resc_num[QED_MAC],
		   p_hwfn->hw_info.resc_start[QED_MAC],
		   p_hwfn->hw_info.resc_num[QED_VLAN],
		   p_hwfn->hw_info.resc_start[QED_VLAN],
		   p_hwfn->hw_info.resc_num[QED_ILT],
		   p_hwfn->hw_info.resc_start[QED_ILT]);
}

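/* Parse the nvm_cfg image exposed through the MCP scratchpad: port mode,
 * default link configuration, multi-function mode and device capabilities.
 */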
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt)
{
	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
	struct qed_mcp_link_params *link;

	/* Read global nvm_cfg address */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);

	/* Verify MCP has initialized it */
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read nvm_cfg1 (Notice this is just an offset, and not the offsize (TBD)) */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, core_cfg);

	core_cfg = qed_rd(p_hwfn, p_ptt, addr);

	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
			  core_cfg);
		break;
	}

	/* Read default link configuration */
	link = &p_hwfn->mcp_info->link_input;
	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
	link->speed.advertised_speeds =
		link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;

	p_hwfn->mcp_info->link_capabilities.speed_capabilities =
		link->speed.advertised_speeds;

	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, link_settings));
	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
		link->speed.autoneg = true;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
		link->speed.forced_speed = 1000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
		link->speed.forced_speed = 10000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
		link->speed.forced_speed = 25000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
		link->speed.forced_speed = 40000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
		link->speed.forced_speed = 50000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
		link->speed.forced_speed = 100000;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
			  link_temp);
	}

	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
	link->pause.autoneg = !!(link_temp &
				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
	link->pause.forced_rx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
	link->pause.forced_tx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
	link->loopback_mode = 0;

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
		   link->speed.forced_speed, link->speed.advertised_speeds,
		   link->speed.autoneg, link->pause.autoneg);

	/* Read Multi-function information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, generic_cont0);

	generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);

	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
		  NVM_CFG1_GLOB_MF_MODE_OFFSET;

	switch (mf_mode) {
	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
		p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
		break;
	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
		p_hwfn->cdev->mf_mode = QED_MF_NPAR;
		break;
	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
		p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
		break;
	}
	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
		p_hwfn->cdev->mf_mode);

	/* Read device capabilities information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
		offsetof(struct nvm_cfg1, glob) +
		offsetof(struct nvm_cfg1_glob, device_capabilities);

	device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
		__set_bit(QED_DEV_CAP_ETH,
			  &p_hwfn->hw_info.device_capabilities);

	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}

static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 reg_function_hide, tmp, eng_mask;
	u8 num_funcs;

	num_funcs = MAX_NUM_PFS_BB;

	/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
	 * in the other bits are selected.
	 * Bits 1-15 are for functions 1-15, respectively, and their value is
	 * '0' only for enabled functions (function 0 always exists and
	 * enabled).
	 * In case of CMT, only the "even" functions are enabled, and thus the
	 * number of functions for both hwfns is learnt from the same bits.
	 */
	reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);

	if (reg_function_hide & 0x1) {
		if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
			num_funcs = 0;
			eng_mask = 0xaaaa;
		} else {
			num_funcs = 1;
			eng_mask = 0x5554;
		}

		/* Get the number of the enabled functions on the engine */
		tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
		while (tmp) {
			if (tmp & 0x1)
				num_funcs++;
			tmp >>= 0x1;
		}
	}

	p_hwfn->num_funcs_on_engine = num_funcs;

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_PROBE,
		   "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
		   p_hwfn->rel_pf_id,
		   p_hwfn->abs_pf_id,
		   p_hwfn->num_funcs_on_engine);
}

static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		enum qed_pci_personality personality)
{
	u32 port_mode;
	int rc;

	/* Since all information is common, only first hwfns should do this */
	if (IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_iov_hw_info(p_hwfn);
		if (rc)
			return rc;
	}

	/* Read the port mode */
	port_mode = qed_rd(p_hwfn, p_ptt,
			   CNIG_REG_NW_PORT_MODE_BB_B0);

	if (port_mode < 3) {
		p_hwfn->cdev->num_ports_in_engines = 1;
	} else if (port_mode <= 5) {
		p_hwfn->cdev->num_ports_in_engines = 2;
	} else {
		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
			  p_hwfn->cdev->num_ports_in_engines);

		/* Default num_ports_in_engines to something */
		p_hwfn->cdev->num_ports_in_engines = 1;
	}

	qed_hw_get_nvm_info(p_hwfn, p_ptt);

	rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
	if (rc)
		return rc;

	if (qed_mcp_is_init(p_hwfn))
		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
				p_hwfn->mcp_info->func_info.mac);
	else
		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);

	if (qed_mcp_is_init(p_hwfn)) {
		if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
			p_hwfn->hw_info.ovlan =
				p_hwfn->mcp_info->func_info.ovlan;

		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}

	if (qed_mcp_is_init(p_hwfn)) {
		enum qed_pci_personality protocol;

		protocol = p_hwfn->mcp_info->func_info.protocol;
		p_hwfn->hw_info.personality = protocol;
	}

	qed_get_num_funcs(p_hwfn, p_ptt);

	qed_hw_get_resc(p_hwfn);

	return rc;
}

static int qed_get_dev_info(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	u32 tmp;

	/* Read Vendor Id / Device Id */
	pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
			     &cdev->vendor_id);
	pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
			     &cdev->device_id);
	cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				     MISCS_REG_CHIP_NUM);
	cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				     MISCS_REG_CHIP_REV);
	MASK_FIELD(CHIP_REV, cdev->chip_rev);

	cdev->type = QED_DEV_TYPE_BB;
	/* Learn number of HW-functions */
	tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
		     MISCS_REG_CMT_ENABLED_FOR_PAIR);

	if (tmp & (1 << p_hwfn->rel_pf_id)) {
		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
		cdev->num_hwfns = 2;
	} else {
		cdev->num_hwfns = 1;
	}

	cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				    MISCS_REG_CHIP_TEST_REG) >> 4;
	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
	cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
				       MISCS_REG_CHIP_METAL);
	MASK_FIELD(CHIP_METAL, cdev->chip_metal);

	DP_INFO(cdev->hwfns,
		"Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
		cdev->chip_num, cdev->chip_rev,
		cdev->chip_bond_id, cdev->chip_metal);

	if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
		DP_NOTICE(cdev->hwfns,
			  "The chip type/rev (BB A0) is not supported!\n");
		return -EINVAL;
	}

	return 0;
}

static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
				 void __iomem *p_regview,
				 void __iomem *p_doorbells,
				 enum qed_pci_personality personality)
{
	int rc = 0;

	/* Split PCI bars evenly between hwfns */
	p_hwfn->regview = p_regview;
	p_hwfn->doorbells = p_doorbells;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_hw_prepare(p_hwfn);

	/* Validate that chip access is feasible */
	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
		DP_ERR(p_hwfn,
		       "Reading the ME register returns all Fs; Preventing further chip access\n");
		return -EINVAL;
	}

	get_function_id(p_hwfn);

	/* Allocate PTT pool */
	rc = qed_ptt_pool_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
		goto err0;
	}

	/* Allocate the main PTT */
	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);

	/* First hwfn learns basic information, e.g., number of hwfns */
	if (!p_hwfn->my_id) {
		rc = qed_get_dev_info(p_hwfn->cdev);
		if (rc != 0)
			goto err1;
	}

	qed_hw_hwfn_prepare(p_hwfn);

	/* Initialize MCP structure */
	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
		goto err1;
	}

	/* Read the device configuration information from the HW and SHMEM */
	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
		goto err2;
	}

	/* Allocate the init RT array and initialize the init-ops engine */
	rc = qed_init_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
		goto err2;
	}

	return rc;
err2:
	if (IS_LEAD_HWFN(p_hwfn))
		qed_iov_free_hw_info(p_hwfn->cdev);
	qed_mcp_free(p_hwfn);
err1:
	qed_hw_hwfn_free(p_hwfn);
err0:
	return rc;
}

int qed_hw_prepare(struct qed_dev *cdev,
		   int personality)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	int rc;

	/* Store the precompiled init data ptrs */
	if (IS_PF(cdev))
		qed_init_iro_array(cdev);

	/* Initialize the first hwfn - will learn number of hwfns */
	rc = qed_hw_prepare_single(p_hwfn,
				   cdev->regview,
				   cdev->doorbells, personality);
	if (rc)
		return rc;

	personality = p_hwfn->hw_info.personality;

	/* Initialize the rest of the hwfns */
	if (cdev->num_hwfns > 1) {
		void __iomem *p_regview, *p_doorbell;
		u8 __iomem *addr;

		/* adjust bar offset for second engine */
		addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
		p_regview = addr;

		/* adjust doorbell bar offset for second engine */
		addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
		p_doorbell = addr;

		/* prepare second hw function */
		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
					   p_doorbell, personality);

		/* in case of error, need to free the previously
		 * initialized hwfn 0.
		 */
		if (rc) {
			if (IS_PF(cdev)) {
				qed_init_free(p_hwfn);
				qed_mcp_free(p_hwfn);
				qed_hw_hwfn_free(p_hwfn);
			}
		}
	}

	return rc;
}

void qed_hw_remove(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (IS_VF(cdev)) {
			qed_vf_pf_release(p_hwfn);
			continue;
		}

		qed_init_free(p_hwfn);
		qed_hw_hwfn_free(p_hwfn);
		qed_mcp_free(p_hwfn);
	}

	qed_iov_free_hw_info(cdev);
}

int qed_chain_alloc(struct qed_dev *cdev,
		    enum qed_chain_use_mode intended_use,
		    enum qed_chain_mode mode,
		    u16 num_elems,
		    size_t elem_size,
		    struct qed_chain *p_chain)
{
	dma_addr_t p_pbl_phys = 0;
	void *p_pbl_virt = NULL;
	dma_addr_t p_phys = 0;
	void *p_virt = NULL;
	u16 page_cnt = 0;
	size_t size;

	if (mode == QED_CHAIN_MODE_SINGLE)
		page_cnt = 1;
	else
		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);

	size = page_cnt * QED_CHAIN_PAGE_SIZE;
	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    size, &p_phys, GFP_KERNEL);
	if (!p_virt) {
		DP_NOTICE(cdev, "Failed to allocate chain mem\n");
		goto nomem;
	}

	if (mode == QED_CHAIN_MODE_PBL) {
		size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
		p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
						size, &p_pbl_phys,
						GFP_KERNEL);
		if (!p_pbl_virt) {
			DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
			goto nomem;
		}

		qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
				   (u8)elem_size, intended_use,
				   p_pbl_phys, p_pbl_virt);
	} else {
		qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
			       (u8)elem_size, intended_use, mode);
	}

	return 0;

nomem:
	dma_free_coherent(&cdev->pdev->dev,
			  page_cnt * QED_CHAIN_PAGE_SIZE,
			  p_virt, p_phys);
	dma_free_coherent(&cdev->pdev->dev,
			  page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
			  p_pbl_virt, p_pbl_phys);

	return -ENOMEM;
}

void qed_chain_free(struct qed_dev *cdev,
		    struct qed_chain *p_chain)
{
	size_t size;

	if (!p_chain->p_virt_addr)
		return;

	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
		size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
		dma_free_coherent(&cdev->pdev->dev, size,
				  p_chain->pbl.p_virt_table,
				  p_chain->pbl.p_phys_table);
	}

	size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
	dma_free_coherent(&cdev->pdev->dev, size,
			  p_chain->p_virt_addr,
			  p_chain->p_phys_addr);
}

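/* The qed_fw_* helpers below translate a function-relative resource index
 * into the absolute, engine-wide index programmed into firmware, after
 * range-checking it against this PF's allotment.
 */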
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
		    u16 src_id, u16 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
		u16 min, max;

		min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
		max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
		DP_NOTICE(p_hwfn,
			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;

	return 0;
}

int qed_fw_vport(struct qed_hwfn *p_hwfn,
		 u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, QED_VPORT);
		max = min + RESC_NUM(p_hwfn, QED_VPORT);
		DP_NOTICE(p_hwfn,
			  "vport id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;

	return 0;
}

int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
		   u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
		max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
		DP_NOTICE(p_hwfn,
			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;

	return 0;
}

/* Calculate final WFQ values for all vports and configure them.
 * After this configuration each vport will have
 * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
 */
static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
					     struct qed_ptt *p_ptt,
					     u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;

		vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
						min_pf_rate;
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
				   vport_params[i].vport_wfq);
	}
}

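/* Worked example (illustrative, assuming QED_WFQ_UNIT == 100): for a PF min
 * rate of 10000 Mbps, a vport whose requested min_speed is 2500 Mbps gets
 * vport_wfq = (2500 * 100) / 10000 = 25, i.e. roughly a quarter of the
 * guaranteed bandwidth.
 */
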
static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
				       u32 min_pf_rate)
{
	int i;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
}

static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
					   struct qed_ptt *p_ptt,
					   u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		qed_init_wfq_default_param(p_hwfn, min_pf_rate);
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
				   vport_params[i].vport_wfq);
	}
}

/* This function performs several validations for WFQ
 * configuration and required min rate for a given vport
 * 1. req_rate must be greater than one percent of min_pf_rate.
 * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
 *    rates to get less than one percent of min_pf_rate.
 * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
 */
static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
			      u16 vport_id, u32 req_rate,
			      u32 min_pf_rate)
{
	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
	int non_requested_count = 0, req_count = 0, i, num_vports;

	num_vports = p_hwfn->qm_info.num_vports;

	/* Accounting for the vports which are configured for WFQ explicitly */
	for (i = 0; i < num_vports; i++) {
		u32 tmp_speed;

		if ((i != vport_id) &&
		    p_hwfn->qm_info.wfq_data[i].configured) {
			req_count++;
			tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
			total_req_min_rate += tmp_speed;
		}
	}

	/* Include current vport data as well */
	req_count++;
	total_req_min_rate += req_rate;
	non_requested_count = num_vports - req_count;

	if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
			   vport_id, req_rate, min_pf_rate);
		return -EINVAL;
	}

	if (num_vports > QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Number of vports is greater than %d\n",
			   QED_WFQ_UNIT);
		return -EINVAL;
	}

	if (total_req_min_rate > min_pf_rate) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
			   total_req_min_rate, min_pf_rate);
		return -EINVAL;
	}

	total_left_rate = min_pf_rate - total_req_min_rate;

	left_rate_per_vp = total_left_rate / non_requested_count;
	if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
			   left_rate_per_vp, min_pf_rate);
		return -EINVAL;
	}

	p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
	p_hwfn->qm_info.wfq_data[vport_id].configured = true;

	for (i = 0; i < num_vports; i++) {
		if (p_hwfn->qm_info.wfq_data[i].configured)
			continue;

		p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
	}

	return 0;
}

static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
{
	struct qed_mcp_link_state *p_link;
	int rc = 0;

	p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;

	if (!p_link->min_pf_rate) {
		p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
		p_hwfn->qm_info.wfq_data[vp_id].configured = true;
		return rc;
	}

	rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);

	if (rc == 0)
		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
						 p_link->min_pf_rate);
	else
		DP_NOTICE(p_hwfn,
			  "Validation failed while configuring min rate\n");

	return rc;
}

static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
						 struct qed_ptt *p_ptt,
						 u32 min_pf_rate)
{
	bool use_wfq = false;
	int rc = 0;
	u16 i;

	/* Validate all pre configured vports for wfq */
	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 rate;

		if (!p_hwfn->qm_info.wfq_data[i].configured)
			continue;

		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
		use_wfq = true;

		rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "WFQ validation failed while configuring min rate\n");
			break;
		}
	}

	if (!rc && use_wfq)
		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
	else
		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);

	return rc;
}

/* Main API for qed clients to configure vport min rate.
 * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)]
 * rate - Speed in Mbps needs to be assigned to a given vport.
 */
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
{
	int i, rc = -EINVAL;

	/* Currently not supported; Might change in future */
	if (cdev->num_hwfns > 1) {
		DP_NOTICE(cdev,
			  "WFQ configuration is not supported for this device\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);

		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		qed_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}

/* API to configure WFQ from mcp link change */
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
{
	int i;

	if (cdev->num_hwfns > 1) {
		DP_VERBOSE(cdev,
			   NETIF_MSG_LINK,
			   "WFQ configuration is not supported for this device\n");
		return;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		__qed_configure_vp_wfq_on_link_change(p_hwfn,
						      p_hwfn->p_dpc_ptt,
						      min_pf_rate);
	}
}

*p_hwfn
,
2125 struct qed_ptt
*p_ptt
,
2126 struct qed_mcp_link_state
*p_link
,
2131 p_hwfn
->mcp_info
->func_info
.bandwidth_max
= max_bw
;
2133 if (!p_link
->line_speed
&& (max_bw
!= 100))
2136 p_link
->speed
= (p_link
->line_speed
* max_bw
) / 100;
2137 p_hwfn
->qm_info
.pf_rl
= p_link
->speed
;
2139 /* Since the limiter also affects Tx-switched traffic, we don't want it
2140 * to limit such traffic in case there's no actual limit.
2141 * In that case, set limit to imaginary high boundary.
2144 p_hwfn
->qm_info
.pf_rl
= 100000;
2146 rc
= qed_init_pf_rl(p_hwfn
, p_ptt
, p_hwfn
->rel_pf_id
,
2147 p_hwfn
->qm_info
.pf_rl
);
2149 DP_VERBOSE(p_hwfn
, NETIF_MSG_LINK
,
2150 "Configured MAX bandwidth to be %08x Mb/sec\n",
2156 /* Main API to configure PF max bandwidth where bw range is [1 - 100] */
2157 int qed_configure_pf_max_bandwidth(struct qed_dev
*cdev
, u8 max_bw
)
2159 int i
, rc
= -EINVAL
;
2161 if (max_bw
< 1 || max_bw
> 100) {
2162 DP_NOTICE(cdev
, "PF max bw valid range is [1-100]\n");
2166 for_each_hwfn(cdev
, i
) {
2167 struct qed_hwfn
*p_hwfn
= &cdev
->hwfns
[i
];
2168 struct qed_hwfn
*p_lead
= QED_LEADING_HWFN(cdev
);
2169 struct qed_mcp_link_state
*p_link
;
2170 struct qed_ptt
*p_ptt
;
2172 p_link
= &p_lead
->mcp_info
->link_output
;
2174 p_ptt
= qed_ptt_acquire(p_hwfn
);
2178 rc
= __qed_configure_pf_max_bandwidth(p_hwfn
, p_ptt
,
2181 qed_ptt_release(p_hwfn
, p_ptt
);
int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 min_bw)
{
	int rc = 0;

	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
	p_hwfn->qm_info.pf_wfq = min_bw;

	if (!p_link->line_speed)
		return rc;

	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;

	rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MIN bandwidth to be %d Mb/sec\n",
		   p_link->min_pf_rate);

	return rc;
}

/* Main API to configure PF min bandwidth where bw range is [1-100] */
int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
{
	int i, rc = -EINVAL;

	if (min_bw < 1 || min_bw > 100) {
		DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
		struct qed_mcp_link_state *p_link;
		struct qed_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
						      p_link, min_bw);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		if (p_link->min_pf_rate) {
			u32 min_rate = p_link->min_pf_rate;

			rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
								   p_ptt,
								   min_rate);
		}

		qed_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}

void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_link_state *p_link;

	p_link = &p_hwfn->mcp_info->link_output;

	if (p_link->min_pf_rate)
		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
					       p_link->min_pf_rate);

	memset(p_hwfn->qm_info.wfq_data, 0,
	       sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
}