/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
9 #include <linux/types.h>
10 #include <linux/bitops.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/errno.h>
13 #include <linux/kernel.h>
14 #include <linux/list.h>
15 #include <linux/log2.h>
16 #include <linux/pci.h>
17 #include <linux/slab.h>
18 #include <linux/string.h>
19 #include <linux/bitops.h>
22 #include "qed_dev_api.h"
25 #include "qed_init_ops.h"
26 #include "qed_reg_addr.h"
/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES		PROTOCOLID_COMMON
#define NUM_TASK_TYPES		2
#define NUM_TASK_PF_SEGMENTS	4

/* QM constants */
#define QM_PQ_ELEMENT_SIZE	4 /* in bytes */

/* Doorbell-Queue constants */
#define DQ_RANGE_SHIFT		4
#define DQ_RANGE_ALIGN		BIT(DQ_RANGE_SHIFT)

/* ILT constants */
#define ILT_DEFAULT_HW_P_SIZE	3
#define ILT_PAGE_IN_BYTES(hw_p_size)	(1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg)	PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET

/* ILT entry structure */
#define ILT_ENTRY_PHY_ADDR_MASK		0x000FFFFFFFFFFFULL
#define ILT_ENTRY_PHY_ADDR_SHIFT	0
#define ILT_ENTRY_VALID_MASK		0x1ULL
#define ILT_ENTRY_VALID_SHIFT		52
#define ILT_ENTRY_IN_REGS		2
#define ILT_REG_SIZE_IN_BYTES		4
53 /* connection context union */
55 struct core_conn_context core_ctx
;
56 struct eth_conn_context eth_ctx
;
59 #define CONN_CXT_SIZE(p_hwfn) \
60 ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
62 /* PF per protocl configuration object */
63 struct qed_conn_type_cfg
{
68 /* ILT Client configuration, Per connection type (protocol) resources. */
69 #define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
83 struct qed_ilt_cli_blk
{
84 u32 total_size
; /* 0 means not active */
85 u32 real_size_in_page
;
89 struct qed_ilt_client_cfg
{
93 struct ilt_cfg_pair first
;
94 struct ilt_cfg_pair last
;
95 struct ilt_cfg_pair p_size
;
97 /* ILT client blocks for PF */
98 struct qed_ilt_cli_blk pf_blks
[ILT_CLI_PF_BLOCKS
];
104 * Protocol acquired CID lists
105 * PF start line in ILT
113 struct qed_cid_acquired_map
{
116 unsigned long *cid_map
;
119 struct qed_cxt_mngr
{
120 /* Per protocl configuration */
121 struct qed_conn_type_cfg conn_cfg
[MAX_CONN_TYPES
];
123 /* computed ILT structure */
124 struct qed_ilt_client_cfg clients
[ILT_CLI_MAX
];
127 struct qed_cid_acquired_map acquired
[MAX_CONN_TYPES
];
129 /* ILT shadow table */
130 struct qed_dma_mem
*ilt_shadow
;
134 static u32
qed_cxt_cdu_iids(struct qed_cxt_mngr
*p_mngr
)
136 u32 type
, pf_cids
= 0;
138 for (type
= 0; type
< MAX_CONN_TYPES
; type
++)
139 pf_cids
+= p_mngr
->conn_cfg
[type
].cid_count
;
144 static void qed_cxt_qm_iids(struct qed_hwfn
*p_hwfn
,
145 struct qed_qm_iids
*iids
)
147 struct qed_cxt_mngr
*p_mngr
= p_hwfn
->p_cxt_mngr
;
150 for (type
= 0; type
< MAX_CONN_TYPES
; type
++)
151 iids
->cids
+= p_mngr
->conn_cfg
[type
].cid_count
;
153 DP_VERBOSE(p_hwfn
, QED_MSG_ILT
, "iids: CIDS %08x\n", iids
->cids
);
156 /* set the iids count per protocol */
157 static void qed_cxt_set_proto_cid_count(struct qed_hwfn
*p_hwfn
,
158 enum protocol_type type
,
161 struct qed_cxt_mngr
*p_mgr
= p_hwfn
->p_cxt_mngr
;
162 struct qed_conn_type_cfg
*p_conn
= &p_mgr
->conn_cfg
[type
];
164 p_conn
->cid_count
= roundup(cid_count
, DQ_RANGE_ALIGN
);
167 static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg
*p_cli
,
168 struct qed_ilt_cli_blk
*p_blk
,
169 u32 start_line
, u32 total_size
,
172 u32 ilt_size
= ILT_PAGE_IN_BYTES(p_cli
->p_size
.val
);
174 /* verify thatits called only once for each block */
175 if (p_blk
->total_size
)
178 p_blk
->total_size
= total_size
;
179 p_blk
->real_size_in_page
= 0;
181 p_blk
->real_size_in_page
= (ilt_size
/ elem_size
) * elem_size
;
182 p_blk
->start_line
= start_line
;
185 static void qed_ilt_cli_adv_line(struct qed_hwfn
*p_hwfn
,
186 struct qed_ilt_client_cfg
*p_cli
,
187 struct qed_ilt_cli_blk
*p_blk
,
188 u32
*p_line
, enum ilt_clients client_id
)
190 if (!p_blk
->total_size
)
194 p_cli
->first
.val
= *p_line
;
196 p_cli
->active
= true;
197 *p_line
+= DIV_ROUND_UP(p_blk
->total_size
,
198 p_blk
->real_size_in_page
);
199 p_cli
->last
.val
= *p_line
- 1;
201 DP_VERBOSE(p_hwfn
, QED_MSG_ILT
,
202 "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
203 client_id
, p_cli
->first
.val
,
204 p_cli
->last
.val
, p_blk
->total_size
,
205 p_blk
->real_size_in_page
, p_blk
->start_line
);
208 int qed_cxt_cfg_ilt_compute(struct qed_hwfn
*p_hwfn
)
210 struct qed_cxt_mngr
*p_mngr
= p_hwfn
->p_cxt_mngr
;
211 struct qed_ilt_client_cfg
*p_cli
;
212 struct qed_ilt_cli_blk
*p_blk
;
213 u32 curr_line
, total
, pf_cids
;
214 struct qed_qm_iids qm_iids
;
216 memset(&qm_iids
, 0, sizeof(qm_iids
));
218 p_mngr
->pf_start_line
= RESC_START(p_hwfn
, QED_ILT
);
220 DP_VERBOSE(p_hwfn
, QED_MSG_ILT
,
221 "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
222 p_hwfn
->my_id
, p_hwfn
->p_cxt_mngr
->pf_start_line
);
225 p_cli
= &p_mngr
->clients
[ILT_CLI_CDUC
];
226 curr_line
= p_mngr
->pf_start_line
;
227 p_cli
->pf_total_lines
= 0;
229 /* get the counters for the CDUC and QM clients */
230 pf_cids
= qed_cxt_cdu_iids(p_mngr
);
232 p_blk
= &p_cli
->pf_blks
[CDUC_BLK
];
234 total
= pf_cids
* CONN_CXT_SIZE(p_hwfn
);
236 qed_ilt_cli_blk_fill(p_cli
, p_blk
, curr_line
,
237 total
, CONN_CXT_SIZE(p_hwfn
));
239 qed_ilt_cli_adv_line(p_hwfn
, p_cli
, p_blk
, &curr_line
, ILT_CLI_CDUC
);
240 p_cli
->pf_total_lines
= curr_line
- p_blk
->start_line
;
243 p_cli
= &p_mngr
->clients
[ILT_CLI_QM
];
244 p_blk
= &p_cli
->pf_blks
[0];
246 qed_cxt_qm_iids(p_hwfn
, &qm_iids
);
247 total
= qed_qm_pf_mem_size(p_hwfn
->rel_pf_id
, qm_iids
.cids
, 0, 0,
248 p_hwfn
->qm_info
.num_pqs
, 0);
250 DP_VERBOSE(p_hwfn
, QED_MSG_ILT
,
251 "QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n",
252 qm_iids
.cids
, p_hwfn
->qm_info
.num_pqs
, total
);
254 qed_ilt_cli_blk_fill(p_cli
, p_blk
,
255 curr_line
, total
* 0x1000,
258 qed_ilt_cli_adv_line(p_hwfn
, p_cli
, p_blk
, &curr_line
, ILT_CLI_QM
);
259 p_cli
->pf_total_lines
= curr_line
- p_blk
->start_line
;
261 if (curr_line
- p_hwfn
->p_cxt_mngr
->pf_start_line
>
262 RESC_NUM(p_hwfn
, QED_ILT
)) {
263 DP_ERR(p_hwfn
, "too many ilt lines...#lines=%d\n",
264 curr_line
- p_hwfn
->p_cxt_mngr
->pf_start_line
);
/* Iterate over all ILT client slots; callers skip inactive clients */
#define for_each_ilt_valid_client(pos, clients)	\
	for (pos = 0; pos < ILT_CLI_MAX; pos++)
274 /* Total number of ILT lines used by this PF */
275 static u32
qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg
*ilt_clients
)
280 for_each_ilt_valid_client(i
, ilt_clients
) {
281 if (!ilt_clients
[i
].active
)
283 size
+= (ilt_clients
[i
].last
.val
-
284 ilt_clients
[i
].first
.val
+ 1);
290 static void qed_ilt_shadow_free(struct qed_hwfn
*p_hwfn
)
292 struct qed_ilt_client_cfg
*p_cli
= p_hwfn
->p_cxt_mngr
->clients
;
293 struct qed_cxt_mngr
*p_mngr
= p_hwfn
->p_cxt_mngr
;
296 ilt_size
= qed_cxt_ilt_shadow_size(p_cli
);
298 for (i
= 0; p_mngr
->ilt_shadow
&& i
< ilt_size
; i
++) {
299 struct qed_dma_mem
*p_dma
= &p_mngr
->ilt_shadow
[i
];
302 dma_free_coherent(&p_hwfn
->cdev
->pdev
->dev
,
303 p_dma
->size
, p_dma
->p_virt
,
305 p_dma
->p_virt
= NULL
;
307 kfree(p_mngr
->ilt_shadow
);
310 static int qed_ilt_blk_alloc(struct qed_hwfn
*p_hwfn
,
311 struct qed_ilt_cli_blk
*p_blk
,
312 enum ilt_clients ilt_client
,
313 u32 start_line_offset
)
315 struct qed_dma_mem
*ilt_shadow
= p_hwfn
->p_cxt_mngr
->ilt_shadow
;
316 u32 lines
, line
, sz_left
;
318 if (!p_blk
->total_size
)
321 sz_left
= p_blk
->total_size
;
322 lines
= DIV_ROUND_UP(sz_left
, p_blk
->real_size_in_page
);
323 line
= p_blk
->start_line
+ start_line_offset
-
324 p_hwfn
->p_cxt_mngr
->pf_start_line
;
326 for (; lines
; lines
--) {
331 size
= min_t(u32
, sz_left
,
332 p_blk
->real_size_in_page
);
333 p_virt
= dma_alloc_coherent(&p_hwfn
->cdev
->pdev
->dev
,
339 memset(p_virt
, 0, size
);
341 ilt_shadow
[line
].p_phys
= p_phys
;
342 ilt_shadow
[line
].p_virt
= p_virt
;
343 ilt_shadow
[line
].size
= size
;
345 DP_VERBOSE(p_hwfn
, QED_MSG_ILT
,
346 "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
347 line
, (u64
)p_phys
, p_virt
, size
);
356 static int qed_ilt_shadow_alloc(struct qed_hwfn
*p_hwfn
)
358 struct qed_cxt_mngr
*p_mngr
= p_hwfn
->p_cxt_mngr
;
359 struct qed_ilt_client_cfg
*clients
= p_mngr
->clients
;
360 struct qed_ilt_cli_blk
*p_blk
;
364 size
= qed_cxt_ilt_shadow_size(clients
);
365 p_mngr
->ilt_shadow
= kcalloc(size
, sizeof(struct qed_dma_mem
),
367 if (!p_mngr
->ilt_shadow
) {
368 DP_NOTICE(p_hwfn
, "Failed to allocate ilt shadow table\n");
370 goto ilt_shadow_fail
;
373 DP_VERBOSE(p_hwfn
, QED_MSG_ILT
,
374 "Allocated 0x%x bytes for ilt shadow\n",
375 (u32
)(size
* sizeof(struct qed_dma_mem
)));
377 for_each_ilt_valid_client(i
, clients
) {
378 if (!clients
[i
].active
)
380 for (j
= 0; j
< ILT_CLI_PF_BLOCKS
; j
++) {
381 p_blk
= &clients
[i
].pf_blks
[j
];
382 rc
= qed_ilt_blk_alloc(p_hwfn
, p_blk
, i
, 0);
384 goto ilt_shadow_fail
;
391 qed_ilt_shadow_free(p_hwfn
);
395 static void qed_cid_map_free(struct qed_hwfn
*p_hwfn
)
397 struct qed_cxt_mngr
*p_mngr
= p_hwfn
->p_cxt_mngr
;
400 for (type
= 0; type
< MAX_CONN_TYPES
; type
++) {
401 kfree(p_mngr
->acquired
[type
].cid_map
);
402 p_mngr
->acquired
[type
].max_count
= 0;
403 p_mngr
->acquired
[type
].start_cid
= 0;
407 static int qed_cid_map_alloc(struct qed_hwfn
*p_hwfn
)
409 struct qed_cxt_mngr
*p_mngr
= p_hwfn
->p_cxt_mngr
;
413 for (type
= 0; type
< MAX_CONN_TYPES
; type
++) {
414 u32 cid_cnt
= p_hwfn
->p_cxt_mngr
->conn_cfg
[type
].cid_count
;
420 size
= DIV_ROUND_UP(cid_cnt
,
421 sizeof(unsigned long) * BITS_PER_BYTE
) *
422 sizeof(unsigned long);
423 p_mngr
->acquired
[type
].cid_map
= kzalloc(size
, GFP_KERNEL
);
424 if (!p_mngr
->acquired
[type
].cid_map
)
427 p_mngr
->acquired
[type
].max_count
= cid_cnt
;
428 p_mngr
->acquired
[type
].start_cid
= start_cid
;
430 p_hwfn
->p_cxt_mngr
->conn_cfg
[type
].cid_start
= start_cid
;
432 DP_VERBOSE(p_hwfn
, QED_MSG_CXT
,
433 "Type %08x start: %08x count %08x\n",
434 type
, p_mngr
->acquired
[type
].start_cid
,
435 p_mngr
->acquired
[type
].max_count
);
436 start_cid
+= cid_cnt
;
442 qed_cid_map_free(p_hwfn
);
446 int qed_cxt_mngr_alloc(struct qed_hwfn
*p_hwfn
)
448 struct qed_cxt_mngr
*p_mngr
;
451 p_mngr
= kzalloc(sizeof(*p_mngr
), GFP_ATOMIC
);
453 DP_NOTICE(p_hwfn
, "Failed to allocate `struct qed_cxt_mngr'\n");
457 /* Initialize ILT client registers */
458 p_mngr
->clients
[ILT_CLI_CDUC
].first
.reg
= ILT_CFG_REG(CDUC
, FIRST_ILT
);
459 p_mngr
->clients
[ILT_CLI_CDUC
].last
.reg
= ILT_CFG_REG(CDUC
, LAST_ILT
);
460 p_mngr
->clients
[ILT_CLI_CDUC
].p_size
.reg
= ILT_CFG_REG(CDUC
, P_SIZE
);
462 p_mngr
->clients
[ILT_CLI_QM
].first
.reg
= ILT_CFG_REG(QM
, FIRST_ILT
);
463 p_mngr
->clients
[ILT_CLI_QM
].last
.reg
= ILT_CFG_REG(QM
, LAST_ILT
);
464 p_mngr
->clients
[ILT_CLI_QM
].p_size
.reg
= ILT_CFG_REG(QM
, P_SIZE
);
466 /* default ILT page size for all clients is 32K */
467 for (i
= 0; i
< ILT_CLI_MAX
; i
++)
468 p_mngr
->clients
[i
].p_size
.val
= ILT_DEFAULT_HW_P_SIZE
;
470 /* Set the cxt mangr pointer priori to further allocations */
471 p_hwfn
->p_cxt_mngr
= p_mngr
;
/* Allocate the ILT shadow table and the per-protocol CID bitmaps.
 * On failure the whole context manager is torn down.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
{
	int rc;

	/* Allocate the ILT shadow table */
	rc = qed_ilt_shadow_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n");
		goto tables_alloc_fail;
	}

	/* Allocate and initialize the acquired cids bitmaps */
	rc = qed_cid_map_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n");
		goto tables_alloc_fail;
	}

	return 0;

tables_alloc_fail:
	qed_cxt_mngr_free(p_hwfn);
	return rc;
}
501 void qed_cxt_mngr_free(struct qed_hwfn
*p_hwfn
)
503 if (!p_hwfn
->p_cxt_mngr
)
506 qed_cid_map_free(p_hwfn
);
507 qed_ilt_shadow_free(p_hwfn
);
508 kfree(p_hwfn
->p_cxt_mngr
);
510 p_hwfn
->p_cxt_mngr
= NULL
;
513 void qed_cxt_mngr_setup(struct qed_hwfn
*p_hwfn
)
515 struct qed_cxt_mngr
*p_mngr
= p_hwfn
->p_cxt_mngr
;
518 /* Reset acquired cids */
519 for (type
= 0; type
< MAX_CONN_TYPES
; type
++) {
520 u32 cid_cnt
= p_hwfn
->p_cxt_mngr
->conn_cfg
[type
].cid_count
;
525 memset(p_mngr
->acquired
[type
].cid_map
, 0,
526 DIV_ROUND_UP(cid_cnt
,
527 sizeof(unsigned long) * BITS_PER_BYTE
) *
528 sizeof(unsigned long));
/* CDU Common: SET_FIELD-compatible mask/shift pairs for the CDUC fields of
 * the CDU_REG_CID_ADDR_PARAMS register.
 */
#define CDUC_CXT_SIZE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT

#define CDUC_CXT_SIZE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)

#define CDUC_BLOCK_WASTE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT

#define CDUC_BLOCK_WASTE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)

#define CDUC_NCIB_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT

#define CDUC_NCIB_MASK \
	(CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
551 static void qed_cdu_init_common(struct qed_hwfn
*p_hwfn
)
553 u32 page_sz
, elems_per_page
, block_waste
, cxt_size
, cdu_params
= 0;
555 /* CDUC - connection configuration */
556 page_sz
= p_hwfn
->p_cxt_mngr
->clients
[ILT_CLI_CDUC
].p_size
.val
;
557 cxt_size
= CONN_CXT_SIZE(p_hwfn
);
558 elems_per_page
= ILT_PAGE_IN_BYTES(page_sz
) / cxt_size
;
559 block_waste
= ILT_PAGE_IN_BYTES(page_sz
) - elems_per_page
* cxt_size
;
561 SET_FIELD(cdu_params
, CDUC_CXT_SIZE
, cxt_size
);
562 SET_FIELD(cdu_params
, CDUC_BLOCK_WASTE
, block_waste
);
563 SET_FIELD(cdu_params
, CDUC_NCIB
, elems_per_page
);
564 STORE_RT_REG(p_hwfn
, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET
, cdu_params
);
567 void qed_qm_init_pf(struct qed_hwfn
*p_hwfn
)
569 struct qed_qm_pf_rt_init_params params
;
570 struct qed_qm_info
*qm_info
= &p_hwfn
->qm_info
;
571 struct qed_qm_iids iids
;
573 memset(&iids
, 0, sizeof(iids
));
574 qed_cxt_qm_iids(p_hwfn
, &iids
);
576 memset(¶ms
, 0, sizeof(params
));
577 params
.port_id
= p_hwfn
->port_id
;
578 params
.pf_id
= p_hwfn
->rel_pf_id
;
579 params
.max_phys_tcs_per_port
= qm_info
->max_phys_tcs_per_port
;
580 params
.is_first_pf
= p_hwfn
->first_on_engine
;
581 params
.num_pf_cids
= iids
.cids
;
582 params
.start_pq
= qm_info
->start_pq
;
583 params
.num_pf_pqs
= qm_info
->num_pqs
;
584 params
.start_vport
= qm_info
->num_vports
;
585 params
.pf_wfq
= qm_info
->pf_wfq
;
586 params
.pf_rl
= qm_info
->pf_rl
;
587 params
.pq_params
= qm_info
->qm_pq_params
;
588 params
.vport_params
= qm_info
->qm_vport_params
;
590 qed_qm_pf_rt_init(p_hwfn
, p_hwfn
->p_main_ptt
, ¶ms
);
594 static int qed_cm_init_pf(struct qed_hwfn
*p_hwfn
)
596 union qed_qm_pq_params pq_params
;
599 /* XCM pure-LB queue */
600 memset(&pq_params
, 0, sizeof(pq_params
));
601 pq_params
.core
.tc
= LB_TC
;
602 pq
= qed_get_qm_pq(p_hwfn
, PROTOCOLID_CORE
, &pq_params
);
603 STORE_RT_REG(p_hwfn
, XCM_REG_CON_PHY_Q3_RT_OFFSET
, pq
);
609 static void qed_dq_init_pf(struct qed_hwfn
*p_hwfn
)
611 struct qed_cxt_mngr
*p_mngr
= p_hwfn
->p_cxt_mngr
;
612 u32 dq_pf_max_cid
= 0;
614 dq_pf_max_cid
+= (p_mngr
->conn_cfg
[0].cid_count
>> DQ_RANGE_SHIFT
);
615 STORE_RT_REG(p_hwfn
, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET
, dq_pf_max_cid
);
617 dq_pf_max_cid
+= (p_mngr
->conn_cfg
[1].cid_count
>> DQ_RANGE_SHIFT
);
618 STORE_RT_REG(p_hwfn
, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET
, dq_pf_max_cid
);
620 dq_pf_max_cid
+= (p_mngr
->conn_cfg
[2].cid_count
>> DQ_RANGE_SHIFT
);
621 STORE_RT_REG(p_hwfn
, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET
, dq_pf_max_cid
);
623 dq_pf_max_cid
+= (p_mngr
->conn_cfg
[3].cid_count
>> DQ_RANGE_SHIFT
);
624 STORE_RT_REG(p_hwfn
, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET
, dq_pf_max_cid
);
626 dq_pf_max_cid
+= (p_mngr
->conn_cfg
[4].cid_count
>> DQ_RANGE_SHIFT
);
627 STORE_RT_REG(p_hwfn
, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET
, dq_pf_max_cid
);
630 dq_pf_max_cid
+= (p_mngr
->conn_cfg
[5].cid_count
>> DQ_RANGE_SHIFT
);
631 STORE_RT_REG(p_hwfn
, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET
, dq_pf_max_cid
);
634 static void qed_ilt_bounds_init(struct qed_hwfn
*p_hwfn
)
636 struct qed_ilt_client_cfg
*ilt_clients
;
639 ilt_clients
= p_hwfn
->p_cxt_mngr
->clients
;
640 for_each_ilt_valid_client(i
, ilt_clients
) {
641 if (!ilt_clients
[i
].active
)
644 ilt_clients
[i
].first
.reg
,
645 ilt_clients
[i
].first
.val
);
647 ilt_clients
[i
].last
.reg
,
648 ilt_clients
[i
].last
.val
);
650 ilt_clients
[i
].p_size
.reg
,
651 ilt_clients
[i
].p_size
.val
);
655 /* ILT (PSWRQ2) PF */
656 static void qed_ilt_init_pf(struct qed_hwfn
*p_hwfn
)
658 struct qed_ilt_client_cfg
*clients
;
659 struct qed_cxt_mngr
*p_mngr
;
660 struct qed_dma_mem
*p_shdw
;
661 u32 line
, rt_offst
, i
;
663 qed_ilt_bounds_init(p_hwfn
);
665 p_mngr
= p_hwfn
->p_cxt_mngr
;
666 p_shdw
= p_mngr
->ilt_shadow
;
667 clients
= p_hwfn
->p_cxt_mngr
->clients
;
669 for_each_ilt_valid_client(i
, clients
) {
670 if (!clients
[i
].active
)
673 /** Client's 1st val and RT array are absolute, ILT shadows'
674 * lines are relative.
676 line
= clients
[i
].first
.val
- p_mngr
->pf_start_line
;
677 rt_offst
= PSWRQ2_REG_ILT_MEMORY_RT_OFFSET
+
678 clients
[i
].first
.val
* ILT_ENTRY_IN_REGS
;
680 for (; line
<= clients
[i
].last
.val
- p_mngr
->pf_start_line
;
681 line
++, rt_offst
+= ILT_ENTRY_IN_REGS
) {
682 u64 ilt_hw_entry
= 0;
684 /** p_virt could be NULL incase of dynamic
687 if (p_shdw
[line
].p_virt
) {
688 SET_FIELD(ilt_hw_entry
, ILT_ENTRY_VALID
, 1ULL);
689 SET_FIELD(ilt_hw_entry
, ILT_ENTRY_PHY_ADDR
,
690 (p_shdw
[line
].p_phys
>> 12));
692 DP_VERBOSE(p_hwfn
, QED_MSG_ILT
,
693 "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
695 (u64
)(p_shdw
[line
].p_phys
>> 12));
698 STORE_RT_REG_AGG(p_hwfn
, rt_offst
, ilt_hw_entry
);
/* Engine-common context HW init: currently only CDU configuration */
void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
{
	qed_cdu_init_common(p_hwfn);
}
/* Per-PF context HW init: QM, CM, DQ and ILT runtime configuration */
void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
{
	qed_qm_init_pf(p_hwfn);
	qed_cm_init_pf(p_hwfn);
	qed_dq_init_pf(p_hwfn);
	qed_ilt_init_pf(p_hwfn);
}
716 int qed_cxt_acquire_cid(struct qed_hwfn
*p_hwfn
,
717 enum protocol_type type
,
720 struct qed_cxt_mngr
*p_mngr
= p_hwfn
->p_cxt_mngr
;
723 if (type
>= MAX_CONN_TYPES
|| !p_mngr
->acquired
[type
].cid_map
) {
724 DP_NOTICE(p_hwfn
, "Invalid protocol type %d", type
);
728 rel_cid
= find_first_zero_bit(p_mngr
->acquired
[type
].cid_map
,
729 p_mngr
->acquired
[type
].max_count
);
731 if (rel_cid
>= p_mngr
->acquired
[type
].max_count
) {
732 DP_NOTICE(p_hwfn
, "no CID available for protocol %d\n",
737 __set_bit(rel_cid
, p_mngr
->acquired
[type
].cid_map
);
739 *p_cid
= rel_cid
+ p_mngr
->acquired
[type
].start_cid
;
744 static bool qed_cxt_test_cid_acquired(struct qed_hwfn
*p_hwfn
,
746 enum protocol_type
*p_type
)
748 struct qed_cxt_mngr
*p_mngr
= p_hwfn
->p_cxt_mngr
;
749 struct qed_cid_acquired_map
*p_map
;
750 enum protocol_type p
;
753 /* Iterate over protocols and find matching cid range */
754 for (p
= 0; p
< MAX_CONN_TYPES
; p
++) {
755 p_map
= &p_mngr
->acquired
[p
];
759 if (cid
>= p_map
->start_cid
&&
760 cid
< p_map
->start_cid
+ p_map
->max_count
)
765 if (p
== MAX_CONN_TYPES
) {
766 DP_NOTICE(p_hwfn
, "Invalid CID %d", cid
);
770 rel_cid
= cid
- p_map
->start_cid
;
771 if (!test_bit(rel_cid
, p_map
->cid_map
)) {
772 DP_NOTICE(p_hwfn
, "CID %d not acquired", cid
);
778 void qed_cxt_release_cid(struct qed_hwfn
*p_hwfn
,
781 struct qed_cxt_mngr
*p_mngr
= p_hwfn
->p_cxt_mngr
;
782 enum protocol_type type
;
786 /* Test acquired and find matching per-protocol map */
787 b_acquired
= qed_cxt_test_cid_acquired(p_hwfn
, cid
, &type
);
792 rel_cid
= cid
- p_mngr
->acquired
[type
].start_cid
;
793 __clear_bit(rel_cid
, p_mngr
->acquired
[type
].cid_map
);
796 int qed_cxt_get_cid_info(struct qed_hwfn
*p_hwfn
,
797 struct qed_cxt_info
*p_info
)
799 struct qed_cxt_mngr
*p_mngr
= p_hwfn
->p_cxt_mngr
;
800 u32 conn_cxt_size
, hw_p_size
, cxts_per_p
, line
;
801 enum protocol_type type
;
804 /* Test acquired and find matching per-protocol map */
805 b_acquired
= qed_cxt_test_cid_acquired(p_hwfn
, p_info
->iid
, &type
);
810 /* set the protocl type */
813 /* compute context virtual pointer */
814 hw_p_size
= p_hwfn
->p_cxt_mngr
->clients
[ILT_CLI_CDUC
].p_size
.val
;
816 conn_cxt_size
= CONN_CXT_SIZE(p_hwfn
);
817 cxts_per_p
= ILT_PAGE_IN_BYTES(hw_p_size
) / conn_cxt_size
;
818 line
= p_info
->iid
/ cxts_per_p
;
820 /* Make sure context is allocated (dynamic allocation) */
821 if (!p_mngr
->ilt_shadow
[line
].p_virt
)
824 p_info
->p_cxt
= p_mngr
->ilt_shadow
[line
].p_virt
+
825 p_info
->iid
% cxts_per_p
* conn_cxt_size
;
827 DP_VERBOSE(p_hwfn
, (QED_MSG_ILT
| QED_MSG_CXT
),
828 "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
829 p_info
->iid
/ cxts_per_p
, p_info
->p_cxt
, p_info
->iid
);
834 int qed_cxt_set_pf_params(struct qed_hwfn
*p_hwfn
)
836 struct qed_eth_pf_params
*p_params
= &p_hwfn
->pf_params
.eth_pf_params
;
838 /* Set the number of required CORE connections */
839 u32 core_cids
= 1; /* SPQ */
841 qed_cxt_set_proto_cid_count(p_hwfn
, PROTOCOLID_CORE
, core_cids
);
843 qed_cxt_set_proto_cid_count(p_hwfn
, PROTOCOLID_ETH
,