1 /* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
12 #include <linux/types.h>
13 #include <asm/byteorder.h>
14 #include <linux/kernel.h>
15 #include <linux/list.h>
16 #include <linux/slab.h>
17 #include <linux/qed/common_hsi.h>
/* How consecutive chain pages are linked together */
enum qed_chain_mode {
	/* Each Page contains a next pointer at its end */
	QED_CHAIN_MODE_NEXT_PTR,

	/* Chain is a single page (next ptr) is unrequired */
	QED_CHAIN_MODE_SINGLE,

	/* Page pointers are located in a side list */
	QED_CHAIN_MODE_PBL,
};
/* Which side(s) of the chain the driver drives */
enum qed_chain_use_mode {
	QED_CHAIN_USE_TO_PRODUCE,		/* Chain starts empty */
	QED_CHAIN_USE_TO_CONSUME,		/* Chain starts full */
	QED_CHAIN_USE_TO_CONSUME_PRODUCE,	/* Chain starts empty */
};
enum qed_chain_cnt_type {
	/* The chain's size/prod/cons are kept in 16-bit variables */
	QED_CHAIN_CNT_TYPE_U16,

	/* The chain's size/prod/cons are kept in 32-bit variables */
	QED_CHAIN_CNT_TYPE_U32,
};
44 struct qed_chain_next
{
45 struct regpair next_phys
;
49 struct qed_chain_pbl_u16
{
54 struct qed_chain_pbl_u32
{
59 struct qed_chain_pbl
{
60 /* Base address of a pre-allocated buffer for pbl */
61 dma_addr_t p_phys_table
;
64 /* Table for keeping the virtual addresses of the chain pages,
65 * respectively to the physical addresses in the pbl table.
67 void **pp_virt_addr_tbl
;
69 /* Index to current used page by producer/consumer */
71 struct qed_chain_pbl_u16 pbl16
;
72 struct qed_chain_pbl_u32 pbl32
;
76 struct qed_chain_u16
{
77 /* Cyclic index of next element to produce/consme */
82 struct qed_chain_u32
{
83 /* Cyclic index of next element to produce/consme */
90 dma_addr_t p_phys_addr
;
94 enum qed_chain_mode mode
;
95 enum qed_chain_use_mode intended_use
; /* used to produce/consume */
96 enum qed_chain_cnt_type cnt_type
;
99 struct qed_chain_u16 chain16
;
100 struct qed_chain_u32 chain32
;
105 /* Number of elements - capacity is for usable elements only,
106 * while size will contain total number of elements [for entire chain].
111 /* Elements information for fast calculations */
113 u16 elem_per_page_mask
;
118 struct qed_chain_pbl pbl
;
#define QED_CHAIN_PBL_ENTRY_SIZE        (8)
#define QED_CHAIN_PAGE_SIZE             (0x1000)
#define ELEMS_PER_PAGE(elem_size)       (QED_CHAIN_PAGE_SIZE / (elem_size))

/* In NEXT_PTR mode the trailing struct qed_chain_next steals the space of
 * one or more elements at the end of each page; other modes lose nothing.
 */
#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)	 \
	((mode == QED_CHAIN_MODE_NEXT_PTR) ?		 \
	 (1 + ((sizeof(struct qed_chain_next) - 1) /	 \
	       (elem_size))) : 0)

#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
	((u32)(ELEMS_PER_PAGE(elem_size) - \
	       UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))

#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
	DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))

#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
141 static inline u16
qed_chain_get_prod_idx(struct qed_chain
*p_chain
)
143 return p_chain
->u
.chain16
.prod_idx
;
146 static inline u16
qed_chain_get_cons_idx(struct qed_chain
*p_chain
)
148 return p_chain
->u
.chain16
.cons_idx
;
151 static inline u32
qed_chain_get_cons_idx_u32(struct qed_chain
*p_chain
)
153 return p_chain
->u
.chain32
.cons_idx
;
156 static inline u16
qed_chain_get_elem_left(struct qed_chain
*p_chain
)
160 used
= (u16
) (((u32
)0x10000 +
161 (u32
)p_chain
->u
.chain16
.prod_idx
) -
162 (u32
)p_chain
->u
.chain16
.cons_idx
);
163 if (p_chain
->mode
== QED_CHAIN_MODE_NEXT_PTR
)
164 used
-= p_chain
->u
.chain16
.prod_idx
/ p_chain
->elem_per_page
-
165 p_chain
->u
.chain16
.cons_idx
/ p_chain
->elem_per_page
;
167 return (u16
)(p_chain
->capacity
- used
);
170 static inline u32
qed_chain_get_elem_left_u32(struct qed_chain
*p_chain
)
174 used
= (u32
) (((u64
)0x100000000ULL
+
175 (u64
)p_chain
->u
.chain32
.prod_idx
) -
176 (u64
)p_chain
->u
.chain32
.cons_idx
);
177 if (p_chain
->mode
== QED_CHAIN_MODE_NEXT_PTR
)
178 used
-= p_chain
->u
.chain32
.prod_idx
/ p_chain
->elem_per_page
-
179 p_chain
->u
.chain32
.cons_idx
/ p_chain
->elem_per_page
;
181 return p_chain
->capacity
- used
;
184 static inline u16
qed_chain_get_usable_per_page(struct qed_chain
*p_chain
)
186 return p_chain
->usable_per_page
;
189 static inline u16
qed_chain_get_unusable_per_page(struct qed_chain
*p_chain
)
191 return p_chain
->elem_unusable
;
194 static inline u32
qed_chain_get_page_cnt(struct qed_chain
*p_chain
)
196 return p_chain
->page_cnt
;
199 static inline dma_addr_t
qed_chain_get_pbl_phys(struct qed_chain
*p_chain
)
201 return p_chain
->pbl
.p_phys_table
;
205 * @brief qed_chain_advance_page -
207 * Advance the next element accros pages for a linked chain
215 qed_chain_advance_page(struct qed_chain
*p_chain
,
216 void **p_next_elem
, void *idx_to_inc
, void *page_to_inc
)
219 struct qed_chain_next
*p_next
= NULL
;
221 switch (p_chain
->mode
) {
222 case QED_CHAIN_MODE_NEXT_PTR
:
223 p_next
= *p_next_elem
;
224 *p_next_elem
= p_next
->next_virt
;
225 if (is_chain_u16(p_chain
))
226 *(u16
*)idx_to_inc
+= p_chain
->elem_unusable
;
228 *(u32
*)idx_to_inc
+= p_chain
->elem_unusable
;
230 case QED_CHAIN_MODE_SINGLE
:
231 *p_next_elem
= p_chain
->p_virt_addr
;
234 case QED_CHAIN_MODE_PBL
:
235 if (is_chain_u16(p_chain
)) {
236 if (++(*(u16
*)page_to_inc
) == p_chain
->page_cnt
)
237 *(u16
*)page_to_inc
= 0;
238 page_index
= *(u16
*)page_to_inc
;
240 if (++(*(u32
*)page_to_inc
) == p_chain
->page_cnt
)
241 *(u32
*)page_to_inc
= 0;
242 page_index
= *(u32
*)page_to_inc
;
244 *p_next_elem
= p_chain
->pbl
.pp_virt_addr_tbl
[page_index
];
/* True when idx sits exactly on a page's unusable (next-pointer) region */
#define is_unusable_idx(p, idx)	\
	(((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_idx_u32(p, idx) \
	(((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
#define is_unusable_next_idx(p, idx)				 \
	((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

#define is_unusable_next_idx_u32(p, idx)			 \
	((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

/* If idx landed on a page's unusable region, jump it past those elements */
#define test_and_skip(p, idx)						   \
	do {								   \
		if (is_chain_u16(p)) {					   \
			if (is_unusable_idx(p, idx))			   \
				(p)->u.chain16.idx += (p)->elem_unusable;  \
		} else {						   \
			if (is_unusable_idx_u32(p, idx))		   \
				(p)->u.chain32.idx += (p)->elem_unusable;  \
		}							   \
	} while (0)
273 * @brief qed_chain_return_produced -
275 * A chain in which the driver "Produces" elements should use this API
276 * to indicate previous produced elements are now consumed.
280 static inline void qed_chain_return_produced(struct qed_chain
*p_chain
)
282 if (is_chain_u16(p_chain
))
283 p_chain
->u
.chain16
.cons_idx
++;
285 p_chain
->u
.chain32
.cons_idx
++;
286 test_and_skip(p_chain
, cons_idx
);
290 * @brief qed_chain_produce -
292 * A chain in which the driver "Produces" elements should use this to get
293 * a pointer to the next element which can be "Produced". It's driver
294 * responsibility to validate that the chain has room for new element.
298 * @return void*, a pointer to next element
300 static inline void *qed_chain_produce(struct qed_chain
*p_chain
)
302 void *p_ret
= NULL
, *p_prod_idx
, *p_prod_page_idx
;
304 if (is_chain_u16(p_chain
)) {
305 if ((p_chain
->u
.chain16
.prod_idx
&
306 p_chain
->elem_per_page_mask
) == p_chain
->next_page_mask
) {
307 p_prod_idx
= &p_chain
->u
.chain16
.prod_idx
;
308 p_prod_page_idx
= &p_chain
->pbl
.u
.pbl16
.prod_page_idx
;
309 qed_chain_advance_page(p_chain
, &p_chain
->p_prod_elem
,
310 p_prod_idx
, p_prod_page_idx
);
312 p_chain
->u
.chain16
.prod_idx
++;
314 if ((p_chain
->u
.chain32
.prod_idx
&
315 p_chain
->elem_per_page_mask
) == p_chain
->next_page_mask
) {
316 p_prod_idx
= &p_chain
->u
.chain32
.prod_idx
;
317 p_prod_page_idx
= &p_chain
->pbl
.u
.pbl32
.prod_page_idx
;
318 qed_chain_advance_page(p_chain
, &p_chain
->p_prod_elem
,
319 p_prod_idx
, p_prod_page_idx
);
321 p_chain
->u
.chain32
.prod_idx
++;
324 p_ret
= p_chain
->p_prod_elem
;
325 p_chain
->p_prod_elem
= (void *)(((u8
*)p_chain
->p_prod_elem
) +
332 * @brief qed_chain_get_capacity -
334 * Get the maximum number of BDs in chain
339 * @return number of unusable BDs
341 static inline u32
qed_chain_get_capacity(struct qed_chain
*p_chain
)
343 return p_chain
->capacity
;
347 * @brief qed_chain_recycle_consumed -
349 * Returns an element which was previously consumed;
350 * Increments producers so they could be written to FW.
354 static inline void qed_chain_recycle_consumed(struct qed_chain
*p_chain
)
356 test_and_skip(p_chain
, prod_idx
);
357 if (is_chain_u16(p_chain
))
358 p_chain
->u
.chain16
.prod_idx
++;
360 p_chain
->u
.chain32
.prod_idx
++;
364 * @brief qed_chain_consume -
366 * A Chain in which the driver utilizes data written by a different source
367 * (i.e., FW) should use this to access passed buffers.
371 * @return void*, a pointer to the next buffer written
373 static inline void *qed_chain_consume(struct qed_chain
*p_chain
)
375 void *p_ret
= NULL
, *p_cons_idx
, *p_cons_page_idx
;
377 if (is_chain_u16(p_chain
)) {
378 if ((p_chain
->u
.chain16
.cons_idx
&
379 p_chain
->elem_per_page_mask
) == p_chain
->next_page_mask
) {
380 p_cons_idx
= &p_chain
->u
.chain16
.cons_idx
;
381 p_cons_page_idx
= &p_chain
->pbl
.u
.pbl16
.cons_page_idx
;
382 qed_chain_advance_page(p_chain
, &p_chain
->p_cons_elem
,
383 p_cons_idx
, p_cons_page_idx
);
385 p_chain
->u
.chain16
.cons_idx
++;
387 if ((p_chain
->u
.chain32
.cons_idx
&
388 p_chain
->elem_per_page_mask
) == p_chain
->next_page_mask
) {
389 p_cons_idx
= &p_chain
->u
.chain32
.cons_idx
;
390 p_cons_page_idx
= &p_chain
->pbl
.u
.pbl32
.cons_page_idx
;
391 qed_chain_advance_page(p_chain
, &p_chain
->p_cons_elem
,
392 p_cons_idx
, p_cons_page_idx
);
394 p_chain
->u
.chain32
.cons_idx
++;
397 p_ret
= p_chain
->p_cons_elem
;
398 p_chain
->p_cons_elem
= (void *)(((u8
*)p_chain
->p_cons_elem
) +
405 * @brief qed_chain_reset - Resets the chain to its start state
407 * @param p_chain pointer to a previously allocted chain
409 static inline void qed_chain_reset(struct qed_chain
*p_chain
)
413 if (is_chain_u16(p_chain
)) {
414 p_chain
->u
.chain16
.prod_idx
= 0;
415 p_chain
->u
.chain16
.cons_idx
= 0;
417 p_chain
->u
.chain32
.prod_idx
= 0;
418 p_chain
->u
.chain32
.cons_idx
= 0;
420 p_chain
->p_cons_elem
= p_chain
->p_virt_addr
;
421 p_chain
->p_prod_elem
= p_chain
->p_virt_addr
;
423 if (p_chain
->mode
== QED_CHAIN_MODE_PBL
) {
424 /* Use (page_cnt - 1) as a reset value for the prod/cons page's
425 * indices, to avoid unnecessary page advancing on the first
426 * call to qed_chain_produce/consume. Instead, the indices
427 * will be advanced to page_cnt and then will be wrapped to 0.
429 u32 reset_val
= p_chain
->page_cnt
- 1;
431 if (is_chain_u16(p_chain
)) {
432 p_chain
->pbl
.u
.pbl16
.prod_page_idx
= (u16
)reset_val
;
433 p_chain
->pbl
.u
.pbl16
.cons_page_idx
= (u16
)reset_val
;
435 p_chain
->pbl
.u
.pbl32
.prod_page_idx
= reset_val
;
436 p_chain
->pbl
.u
.pbl32
.cons_page_idx
= reset_val
;
440 switch (p_chain
->intended_use
) {
441 case QED_CHAIN_USE_TO_CONSUME_PRODUCE
:
442 case QED_CHAIN_USE_TO_PRODUCE
:
446 case QED_CHAIN_USE_TO_CONSUME
:
447 /* produce empty elements */
448 for (i
= 0; i
< p_chain
->capacity
; i
++)
449 qed_chain_recycle_consumed(p_chain
);
455 * @brief qed_chain_init - Initalizes a basic chain struct
459 * @param p_phys_addr physical address of allocated buffer's beginning
460 * @param page_cnt number of pages in the allocated buffer
461 * @param elem_size size of each element in the chain
462 * @param intended_use
465 static inline void qed_chain_init_params(struct qed_chain
*p_chain
,
468 enum qed_chain_use_mode intended_use
,
469 enum qed_chain_mode mode
,
470 enum qed_chain_cnt_type cnt_type
)
472 /* chain fixed parameters */
473 p_chain
->p_virt_addr
= NULL
;
474 p_chain
->p_phys_addr
= 0;
475 p_chain
->elem_size
= elem_size
;
476 p_chain
->intended_use
= intended_use
;
477 p_chain
->mode
= mode
;
478 p_chain
->cnt_type
= cnt_type
;
480 p_chain
->elem_per_page
= ELEMS_PER_PAGE(elem_size
);
481 p_chain
->usable_per_page
= USABLE_ELEMS_PER_PAGE(elem_size
, mode
);
482 p_chain
->elem_per_page_mask
= p_chain
->elem_per_page
- 1;
483 p_chain
->elem_unusable
= UNUSABLE_ELEMS_PER_PAGE(elem_size
, mode
);
484 p_chain
->next_page_mask
= (p_chain
->usable_per_page
&
485 p_chain
->elem_per_page_mask
);
487 p_chain
->page_cnt
= page_cnt
;
488 p_chain
->capacity
= p_chain
->usable_per_page
* page_cnt
;
489 p_chain
->size
= p_chain
->elem_per_page
* page_cnt
;
491 p_chain
->pbl
.p_phys_table
= 0;
492 p_chain
->pbl
.p_virt_table
= NULL
;
493 p_chain
->pbl
.pp_virt_addr_tbl
= NULL
;
497 * @brief qed_chain_init_mem -
499 * Initalizes a basic chain struct with its chain buffers
502 * @param p_virt_addr virtual address of allocated buffer's beginning
503 * @param p_phys_addr physical address of allocated buffer's beginning
506 static inline void qed_chain_init_mem(struct qed_chain
*p_chain
,
507 void *p_virt_addr
, dma_addr_t p_phys_addr
)
509 p_chain
->p_virt_addr
= p_virt_addr
;
510 p_chain
->p_phys_addr
= p_phys_addr
;
514 * @brief qed_chain_init_pbl_mem -
516 * Initalizes a basic chain struct with its pbl buffers
519 * @param p_virt_pbl pointer to a pre allocated side table which will hold
520 * virtual page addresses.
521 * @param p_phys_pbl pointer to a pre-allocated side table which will hold
522 * physical page addresses.
523 * @param pp_virt_addr_tbl
524 * pointer to a pre-allocated side table which will hold
525 * the virtual addresses of the chain pages.
528 static inline void qed_chain_init_pbl_mem(struct qed_chain
*p_chain
,
530 dma_addr_t p_phys_pbl
,
531 void **pp_virt_addr_tbl
)
533 p_chain
->pbl
.p_phys_table
= p_phys_pbl
;
534 p_chain
->pbl
.p_virt_table
= p_virt_pbl
;
535 p_chain
->pbl
.pp_virt_addr_tbl
= pp_virt_addr_tbl
;
539 * @brief qed_chain_init_next_ptr_elem -
541 * Initalizes a next pointer element
544 * @param p_virt_curr virtual address of a chain page of which the next
545 * pointer element is initialized
546 * @param p_virt_next virtual address of the next chain page
547 * @param p_phys_next physical address of the next chain page
551 qed_chain_init_next_ptr_elem(struct qed_chain
*p_chain
,
553 void *p_virt_next
, dma_addr_t p_phys_next
)
555 struct qed_chain_next
*p_next
;
558 size
= p_chain
->elem_size
* p_chain
->usable_per_page
;
559 p_next
= (struct qed_chain_next
*)((u8
*)p_virt_curr
+ size
);
561 DMA_REGPAIR_LE(p_next
->next_phys
, p_phys_next
);
563 p_next
->next_virt
= p_virt_next
;
567 * @brief qed_chain_get_last_elem -
569 * Returns a pointer to the last element of the chain
575 static inline void *qed_chain_get_last_elem(struct qed_chain
*p_chain
)
577 struct qed_chain_next
*p_next
= NULL
;
578 void *p_virt_addr
= NULL
;
579 u32 size
, last_page_idx
;
581 if (!p_chain
->p_virt_addr
)
584 switch (p_chain
->mode
) {
585 case QED_CHAIN_MODE_NEXT_PTR
:
586 size
= p_chain
->elem_size
* p_chain
->usable_per_page
;
587 p_virt_addr
= p_chain
->p_virt_addr
;
588 p_next
= (struct qed_chain_next
*)((u8
*)p_virt_addr
+ size
);
589 while (p_next
->next_virt
!= p_chain
->p_virt_addr
) {
590 p_virt_addr
= p_next
->next_virt
;
591 p_next
= (struct qed_chain_next
*)((u8
*)p_virt_addr
+
595 case QED_CHAIN_MODE_SINGLE
:
596 p_virt_addr
= p_chain
->p_virt_addr
;
598 case QED_CHAIN_MODE_PBL
:
599 last_page_idx
= p_chain
->page_cnt
- 1;
600 p_virt_addr
= p_chain
->pbl
.pp_virt_addr_tbl
[last_page_idx
];
603 /* p_virt_addr points at this stage to the last page of the chain */
604 size
= p_chain
->elem_size
* (p_chain
->usable_per_page
- 1);
605 p_virt_addr
= (u8
*)p_virt_addr
+ size
;
611 * @brief qed_chain_set_prod - sets the prod to the given value
616 static inline void qed_chain_set_prod(struct qed_chain
*p_chain
,
617 u32 prod_idx
, void *p_prod_elem
)
619 if (is_chain_u16(p_chain
))
620 p_chain
->u
.chain16
.prod_idx
= (u16
) prod_idx
;
622 p_chain
->u
.chain32
.prod_idx
= prod_idx
;
623 p_chain
->p_prod_elem
= p_prod_elem
;
627 * @brief qed_chain_pbl_zero_mem - set chain memory to 0
631 static inline void qed_chain_pbl_zero_mem(struct qed_chain
*p_chain
)
635 if (p_chain
->mode
!= QED_CHAIN_MODE_PBL
)
638 page_cnt
= qed_chain_get_page_cnt(p_chain
);
640 for (i
= 0; i
< page_cnt
; i
++)
641 memset(p_chain
->pbl
.pp_virt_addr_tbl
[i
], 0,
642 QED_CHAIN_PAGE_SIZE
);