1 /**********************************************************************
4 * Contact: support@cavium.com
5 * Please include "LiquidIO" in the subject.
7 * Copyright (c) 2003-2015 Cavium, Inc.
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
19 * This file may also be available under a different license from Cavium.
20 * Contact Cavium, Inc. for more information
21 **********************************************************************/
22 #include <linux/pci.h>
23 #include <linux/netdevice.h>
24 #include <linux/vmalloc.h>
25 #include "liquidio_common.h"
26 #include "octeon_droq.h"
27 #include "octeon_iq.h"
28 #include "response_manager.h"
29 #include "octeon_device.h"
30 #include "octeon_main.h"
31 #include "octeon_network.h"
32 #include "cn66xx_regs.h"
33 #include "cn66xx_device.h"
/* NOTE(review): this extraction is missing lines throughout (the embedded
 * original line numbers have gaps) — verify every block against the
 * authoritative source before building.
 *
 * Min/max helper macros. Arguments are evaluated twice; do not pass
 * expressions with side effects.
 */
35 #define CVM_MIN(d1, d2) (((d1) < (d2)) ? (d1) : (d2))
36 #define CVM_MAX(d1, d2) (((d1) > (d2)) ? (d1) : (d2))
/* NOTE(review): the struct headers for the members below are not visible in
 * this extraction. From the members, the second group appears to be the
 * deferred-dispatch entry used with droq->dispatch_list (list node, the
 * recv info to deliver, and the dispatch callback) — confirm the enclosing
 * struct declarations upstream.
 */
39 struct list_head list
;
44 struct list_head list
;
45 struct octeon_recv_info
*rinfo
;
46 octeon_dispatch_fn_t disp_fn
;
49 /** Get the argument that the user set when registering dispatch
50 * function for a given opcode/subcode.
51 * @param octeon_dev - the octeon device pointer.
52 * @param opcode - the opcode for which the dispatch argument
54 * @param subcode - the subcode for which the dispatch argument
56 * @return Success: void * (argument to the dispatch function)
57 * @return Failure: NULL
60 static inline void *octeon_get_dispatch_arg(struct octeon_device
*octeon_dev
,
61 u16 opcode
, u16 subcode
)
64 struct list_head
*dispatch
;
/* Combine opcode/subcode and mask it into an index of the dispatch table. */
66 u16 combined_opcode
= OPCODE_SUBCODE(opcode
, subcode
);
68 idx
= combined_opcode
& OCTEON_OPCODE_MASK
;
/* The dispatch table is protected by a bottom-half-disabling spinlock. */
70 spin_lock_bh(&octeon_dev
->dispatch
.lock
);
/* No handlers registered at all: drop the lock and bail out.
 * NOTE(review): the early-return statement itself is not visible in this
 * extraction — confirm it returns NULL here.
 */
72 if (octeon_dev
->dispatch
.count
== 0) {
73 spin_unlock_bh(&octeon_dev
->dispatch
.lock
);
/* Fast path: the entry stored directly in the table slot matches. */
77 if (octeon_dev
->dispatch
.dlist
[idx
].opcode
== combined_opcode
) {
78 fn_arg
= octeon_dev
->dispatch
.dlist
[idx
].arg
;
/* Slow path: walk the overflow list hanging off this table slot looking
 * for a matching opcode (hash-collision chain).
 */
80 list_for_each(dispatch
,
81 &octeon_dev
->dispatch
.dlist
[idx
].list
) {
82 if (((struct octeon_dispatch
*)dispatch
)->opcode
==
84 fn_arg
= ((struct octeon_dispatch
*)
91 spin_unlock_bh(&octeon_dev
->dispatch
.lock
);
95 /** Check for packets on Droq. This function should be called with
97 * @param droq - Droq on which count is checked.
98 * @return Returns packet count.
100 u32
octeon_droq_check_hw_for_pkts(struct octeon_droq
*droq
)
/* Read the hardware's count of newly sent packets for this DROQ. */
104 pkt_count
= readl(droq
->pkts_sent_reg
);
/* Fold the new packets into the software pending counter. */
106 atomic_add(pkt_count
, &droq
->pkts_pending
);
/* Write the count back to acknowledge/clear those packets in hardware.
 * NOTE(review): presumed write-to-decrement semantics of the pkts_sent
 * CSR — verify against the CN66XX register documentation.
 */
107 writel(pkt_count
, droq
->pkts_sent_reg
);
113 static void octeon_droq_compute_max_packet_bufs(struct octeon_droq
*droq
)
117 /* max_empty_descs is the max. no. of descs that can have no buffers.
118 * If the empty desc count goes beyond this value, we cannot safely
119 * read in a 64K packet sent by Octeon
120 * (64K is max pkt size from Octeon)
/* Count how many buffer_size-sized descriptors are needed to cover 64K.
 * NOTE(review): the opening of the do { } loop is not visible in this
 * extraction.
 */
122 droq
->max_empty_descs
= 0;
125 droq
->max_empty_descs
++;
126 count
+= droq
->buffer_size
;
127 } while (count
< (64 * 1024));
/* Convert "descs needed for 64K" into "max descs allowed to sit empty". */
129 droq
->max_empty_descs
= droq
->max_count
- droq
->max_empty_descs
;
/* Reset the DROQ's software ring state: refill index/count and the
 * pending-packet counter. (The read index reset is not visible in this
 * extraction — confirm upstream.)
 */
132 static void octeon_droq_reset_indices(struct octeon_droq
*droq
)
136 droq
->refill_idx
= 0;
137 droq
->refill_count
= 0;
138 atomic_set(&droq
->pkts_pending
, 0);
/* Tear down every receive buffer in the ring: unmap the DMA mappings,
 * free the page/skb backing, and clear the buffer pointers, then reset
 * the ring indices.
 */
142 octeon_droq_destroy_ring_buffers(struct octeon_device
*oct
,
143 struct octeon_droq
*droq
)
146 struct octeon_skb_page_info
*pg_info
;
148 for (i
= 0; i
< droq
->max_count
; i
++) {
149 pg_info
= &droq
->recv_buf_list
[i
].pg_info
;
/* Unmap the data buffer and release it (free-path arguments are not
 * fully visible in this extraction).
 */
152 lio_unmap_ring(oct
->pci_dev
,
157 recv_buffer_destroy(droq
->recv_buf_list
[i
].buffer
,
/* The per-descriptor info area has its own DMA mapping to undo. */
160 if (droq
->desc_ring
&& droq
->desc_ring
[i
].info_ptr
)
161 lio_unmap_ring_info(oct
->pci_dev
,
163 desc_ring
[i
].info_ptr
,
165 droq
->recv_buf_list
[i
].buffer
= NULL
;
168 octeon_droq_reset_indices(droq
);
/* Populate the ring: allocate a receive buffer per descriptor, record it
 * in recv_buf_list, and program the descriptor's info/buffer DMA pointers.
 * On allocation failure the rx_alloc_failure stat is bumped (the error
 * return itself is not visible in this extraction).
 */
172 octeon_droq_setup_ring_buffers(struct octeon_device
*oct
,
173 struct octeon_droq
*droq
)
177 struct octeon_droq_desc
*desc_ring
= droq
->desc_ring
;
179 for (i
= 0; i
< droq
->max_count
; i
++) {
180 buf
= recv_buffer_alloc(oct
, &droq
->recv_buf_list
[i
].pg_info
);
183 dev_err(&oct
->pci_dev
->dev
, "%s buffer alloc failed\n",
185 droq
->stats
.rx_alloc_failure
++;
189 droq
->recv_buf_list
[i
].buffer
= buf
;
190 droq
->recv_buf_list
[i
].data
= get_rbd(buf
);
/* Clear any stale length so hardware-written lengths are unambiguous. */
191 droq
->info_list
[i
].length
= 0;
193 /* map ring buffers into memory */
194 desc_ring
[i
].info_ptr
= lio_map_ring_info(droq
, i
);
195 desc_ring
[i
].buffer_ptr
=
196 lio_map_ring(droq
->recv_buf_list
[i
].buffer
);
199 octeon_droq_reset_indices(droq
);
201 octeon_droq_compute_max_packet_bufs(droq
);
/* Destroy DROQ q_no: free ring buffers, the recv_buf_list vmalloc'ed
 * array, the aligned info-list DMA area, and the descriptor-ring DMA
 * memory, then zero the droq structure.
 */
206 int octeon_delete_droq(struct octeon_device
*oct
, u32 q_no
)
208 struct octeon_droq
*droq
= oct
->droq
[q_no
];
210 dev_dbg(&oct
->pci_dev
->dev
, "%s[%d]\n", __func__
, q_no
);
212 octeon_droq_destroy_ring_buffers(oct
, droq
);
213 vfree(droq
->recv_buf_list
);
/* info_base_addr doubles as "info list was allocated" flag. */
215 if (droq
->info_base_addr
)
216 cnnic_free_aligned_dma(oct
->pci_dev
, droq
->info_list
,
217 droq
->info_alloc_size
,
218 droq
->info_base_addr
,
219 droq
->info_list_dma
);
222 lio_dma_free(oct
, (droq
->max_count
* OCT_DROQ_DESC_SIZE
),
223 droq
->desc_ring
, droq
->desc_ring_dma
);
/* Leave the structure zeroed so a later re-init starts clean. */
225 memset(droq
, 0, OCT_DROQ_SIZE
);
/* Initialize DROQ q_no: size it from the chip config, allocate the
 * descriptor ring / info list / recv-buffer list (preferring the local
 * NUMA node, falling back to any node), set up the ring buffers, and
 * program the per-queue registers. Error paths tear down via
 * octeon_delete_droq(). Several error-return lines are missing from this
 * extraction — verify the cleanup ordering upstream.
 */
230 int octeon_init_droq(struct octeon_device
*oct
,
236 struct octeon_droq
*droq
;
237 u32 desc_ring_size
= 0, c_num_descs
= 0, c_buf_size
= 0;
238 u32 c_pkts_per_intr
= 0, c_refill_threshold
= 0;
239 int orig_node
= dev_to_node(&oct
->pci_dev
->dev
);
240 int numa_node
= cpu_to_node(q_no
% num_online_cpus());
242 dev_dbg(&oct
->pci_dev
->dev
, "%s[%d]\n", __func__
, q_no
);
244 droq
= oct
->droq
[q_no
];
245 memset(droq
, 0, OCT_DROQ_SIZE
);
/* Caller-supplied context, falling back to the queue number itself. */
250 droq
->app_ctx
= app_ctx
;
252 droq
->app_ctx
= (void *)(size_t)q_no
;
254 c_num_descs
= num_descs
;
255 c_buf_size
= desc_size
;
/* Pull interrupt coalescing / refill tuning from the CN6XXX config. */
256 if (OCTEON_CN6XXX(oct
)) {
257 struct octeon_config
*conf6x
= CHIP_FIELD(oct
, cn6xxx
, conf
);
259 c_pkts_per_intr
= (u32
)CFG_GET_OQ_PKTS_PER_INTR(conf6x
);
261 (u32
)CFG_GET_OQ_REFILL_THRESHOLD(conf6x
);
266 droq
->max_count
= c_num_descs
;
267 droq
->buffer_size
= c_buf_size
;
/* Descriptor ring: try the queue's NUMA node first, then any node. */
269 desc_ring_size
= droq
->max_count
* OCT_DROQ_DESC_SIZE
;
270 set_dev_node(&oct
->pci_dev
->dev
, numa_node
);
271 droq
->desc_ring
= lio_dma_alloc(oct
, desc_ring_size
,
272 (dma_addr_t
*)&droq
->desc_ring_dma
);
273 set_dev_node(&oct
->pci_dev
->dev
, orig_node
);
274 if (!droq
->desc_ring
)
275 droq
->desc_ring
= lio_dma_alloc(oct
, desc_ring_size
,
276 (dma_addr_t
*)&droq
->desc_ring_dma
);
278 if (!droq
->desc_ring
) {
279 dev_err(&oct
->pci_dev
->dev
,
280 "Output queue %d ring alloc failed\n", q_no
);
284 dev_dbg(&oct
->pci_dev
->dev
, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
285 q_no
, droq
->desc_ring
, droq
->desc_ring_dma
);
286 dev_dbg(&oct
->pci_dev
->dev
, "droq[%d]: num_desc: %d\n", q_no
,
/* Aligned DMA area for the per-descriptor info entries. */
290 cnnic_numa_alloc_aligned_dma((droq
->max_count
*
292 &droq
->info_alloc_size
,
293 &droq
->info_base_addr
,
295 if (!droq
->info_list
) {
296 dev_err(&oct
->pci_dev
->dev
, "Cannot allocate memory for info list.\n");
297 lio_dma_free(oct
, (droq
->max_count
* OCT_DROQ_DESC_SIZE
),
298 droq
->desc_ring
, droq
->desc_ring_dma
);
/* Software-side recv buffer bookkeeping; NUMA-local with fallback. */
302 droq
->recv_buf_list
= (struct octeon_recv_buffer
*)
303 vmalloc_node(droq
->max_count
*
304 OCT_DROQ_RECVBUF_SIZE
,
306 if (!droq
->recv_buf_list
)
307 droq
->recv_buf_list
= (struct octeon_recv_buffer
*)
308 vmalloc(droq
->max_count
*
309 OCT_DROQ_RECVBUF_SIZE
);
310 if (!droq
->recv_buf_list
) {
311 dev_err(&oct
->pci_dev
->dev
, "Output queue recv buf list alloc failed\n");
315 if (octeon_droq_setup_ring_buffers(oct
, droq
))
318 droq
->pkts_per_intr
= c_pkts_per_intr
;
319 droq
->refill_threshold
= c_refill_threshold
;
321 dev_dbg(&oct
->pci_dev
->dev
, "DROQ INIT: max_empty_descs: %d\n",
322 droq
->max_empty_descs
);
324 spin_lock_init(&droq
->lock
);
326 INIT_LIST_HEAD(&droq
->dispatch_list
);
328 /* For 56xx Pass1, this function won't be called, so no checks. */
329 oct
->fn_list
.setup_oq_regs(oct
, q_no
);
/* Mark this output queue active in the device's queue mask. */
331 oct
->io_qmask
.oq
|= (1ULL << q_no
);
/* Error path: undo everything allocated above. */
336 octeon_delete_droq(oct
, q_no
);
340 /* octeon_create_recv_info
342 * octeon_dev - pointer to the octeon device structure
343 * droq - droq in which the packet arrived.
344 * buf_cnt - no. of buffers used by the packet.
345 * idx - index in the descriptor for the first buffer in the packet.
347 * Allocates a recv_info_t and copies the buffer addresses for packet data
348 * into the recv_pkt space which starts at an 8B offset from recv_info_t.
349 * Flags the descriptors for refill later. If available descriptors go
350 * below the threshold to receive a 64K pkt, new buffers are first allocated
351 * before the recv_pkt_t is created.
352 * This routine will be called in interrupt context.
354 * Success: Pointer to recv_info_t
357 * The droq->lock is held when this routine is called.
359 static inline struct octeon_recv_info
*octeon_create_recv_info(
360 struct octeon_device
*octeon_dev
,
361 struct octeon_droq
*droq
,
365 struct octeon_droq_info
*info
;
366 struct octeon_recv_pkt
*recv_pkt
;
367 struct octeon_recv_info
*recv_info
;
369 struct octeon_skb_page_info
*pg_info
;
/* Info entry for the first descriptor of this packet. */
371 info
= &droq
->info_list
[idx
];
/* Allocate recv_info with room for a trailing struct __dispatch. */
373 recv_info
= octeon_alloc_recv_info(sizeof(struct __dispatch
));
/* Copy header and packet metadata into the recv_pkt. */
377 recv_pkt
= recv_info
->recv_pkt
;
378 recv_pkt
->rh
= info
->rh
;
379 recv_pkt
->length
= (u32
)info
->length
;
380 recv_pkt
->buffer_count
= (u16
)buf_cnt
;
381 recv_pkt
->octeon_id
= (u16
)octeon_dev
->octeon_id
;
384 bytes_left
= (u32
)info
->length
;
/* For each descriptor the packet spans: unmap the buffer, hand its
 * pointer over to recv_pkt, and clear the ring slot so the refill path
 * knows it must allocate a replacement. (The loop header is not visible
 * in this extraction.)
 */
388 pg_info
= &droq
->recv_buf_list
[idx
].pg_info
;
390 lio_unmap_ring(octeon_dev
->pci_dev
,
392 pg_info
->page
= NULL
;
396 recv_pkt
->buffer_size
[i
] =
398 droq
->buffer_size
) ? droq
->buffer_size
: bytes_left
;
400 recv_pkt
->buffer_ptr
[i
] = droq
->recv_buf_list
[idx
].buffer
;
401 droq
->recv_buf_list
[idx
].buffer
= NULL
;
403 INCR_INDEX_BY1(idx
, droq
->max_count
);
404 bytes_left
-= droq
->buffer_size
;
412 /* If we were not able to refill all buffers, try to move around
413 * the buffers that were not dispatched.
416 octeon_droq_refill_pullup_descs(struct octeon_droq
*droq
,
417 struct octeon_droq_desc
*desc_ring
)
419 u32 desc_refilled
= 0;
421 u32 refill_index
= droq
->refill_idx
;
/* Scan from refill_idx to read_idx; any slot that still owns a buffer
 * (its packet was never dispatched) is moved down to the current
 * refill position, closing holes so the ring stays contiguous.
 */
423 while (refill_index
!= droq
->read_idx
) {
424 if (droq
->recv_buf_list
[refill_index
].buffer
) {
425 droq
->recv_buf_list
[droq
->refill_idx
].buffer
=
426 droq
->recv_buf_list
[refill_index
].buffer
;
427 droq
->recv_buf_list
[droq
->refill_idx
].data
=
428 droq
->recv_buf_list
[refill_index
].data
;
429 desc_ring
[droq
->refill_idx
].buffer_ptr
=
430 desc_ring
[refill_index
].buffer_ptr
;
/* Source slot is now empty; clear both SW and HW pointers. */
431 droq
->recv_buf_list
[refill_index
].buffer
= NULL
;
432 desc_ring
[refill_index
].buffer_ptr
= 0;
/* Advance refill_idx past slots that now hold buffers.
 * NOTE(review): the do { } body here is truncated in this
 * extraction — desc_refilled increment is not visible.
 */
434 INCR_INDEX_BY1(droq
->refill_idx
,
437 droq
->refill_count
--;
438 } while (droq
->recv_buf_list
[droq
->refill_idx
].
441 INCR_INDEX_BY1(refill_index
, droq
->max_count
);
443 return desc_refilled
;
446 /* octeon_droq_refill
448 * droq - droq in which descriptors require new buffers.
450 * Called during normal DROQ processing in interrupt mode or by the poll
451 * thread to refill the descriptors from which buffers were dispatched
452 * to upper layers. Attempts to allocate new buffers. If that fails, moves
453 * up buffers (that were not dispatched) to form a contiguous ring.
455 * No of descriptors refilled.
457 * This routine is called with droq->lock held.
460 octeon_droq_refill(struct octeon_device
*octeon_dev
, struct octeon_droq
*droq
)
462 struct octeon_droq_desc
*desc_ring
;
465 u32 desc_refilled
= 0;
466 struct octeon_skb_page_info
*pg_info
;
468 desc_ring
= droq
->desc_ring
;
/* Refill empty slots starting at refill_idx, at most max_count total. */
470 while (droq
->refill_count
&& (desc_refilled
< droq
->max_count
)) {
471 /* If a valid buffer exists (happens if there is no dispatch),
473 * the buffer, else allocate.
475 if (!droq
->recv_buf_list
[droq
->refill_idx
].buffer
) {
477 &droq
->recv_buf_list
[droq
->refill_idx
].pg_info
;
478 /* Either recycle the existing pages or go for
/* Reuse the page half if possible, otherwise allocate afresh. */
482 buf
= recv_buffer_reuse(octeon_dev
, pg_info
);
484 buf
= recv_buffer_alloc(octeon_dev
, pg_info
);
485 /* If a buffer could not be allocated, no point in
489 droq
->stats
.rx_alloc_failure
++;
492 droq
->recv_buf_list
[droq
->refill_idx
].buffer
=
496 data
= get_rbd(droq
->recv_buf_list
497 [droq
->refill_idx
].buffer
);
500 droq
->recv_buf_list
[droq
->refill_idx
].data
= data
;
/* Re-arm the descriptor with the (re)mapped buffer address. */
502 desc_ring
[droq
->refill_idx
].buffer_ptr
=
503 lio_map_ring(droq
->recv_buf_list
[droq
->
505 /* Reset any previous values in the length field. */
506 droq
->info_list
[droq
->refill_idx
].length
= 0;
508 INCR_INDEX_BY1(droq
->refill_idx
, droq
->max_count
);
510 droq
->refill_count
--;
/* Pass two: if allocation failures left gaps, compact the ring by
 * moving undispatched buffers forward.
 */
513 if (droq
->refill_count
)
515 octeon_droq_refill_pullup_descs(droq
, desc_ring
);
517 /* if droq->refill_count
518 * The refill count would not change in pass two. We only moved buffers
519 * to close the gap in the ring, but we would still have the same no. of
522 return desc_refilled
;
/* Number of ring buffers of size buf_size needed to hold total_len bytes.
 * NOTE(review): buf_cnt's initialization, increment, and the return are
 * not visible in this extraction — presumably a simple count-up loop.
 */
526 octeon_droq_get_bufcount(u32 buf_size
, u32 total_len
)
530 while (total_len
> (buf_size
* buf_cnt
))
536 octeon_droq_dispatch_pkt(struct octeon_device
*oct
,
537 struct octeon_droq
*droq
,
539 struct octeon_droq_info
*info
)
542 octeon_dispatch_fn_t disp_fn
;
543 struct octeon_recv_info
*rinfo
;
/* How many ring buffers this packet spans. */
545 cnt
= octeon_droq_get_bufcount(droq
->buffer_size
, (u32
)info
->length
);
/* Look up the registered handler for this opcode/subcode. */
547 disp_fn
= octeon_get_dispatch(oct
, (u16
)rh
->r
.opcode
,
/* Build a recv_info and queue a deferred dispatch entry on the droq's
 * dispatch_list; the actual callback runs later, outside the droq lock.
 */
550 rinfo
= octeon_create_recv_info(oct
, droq
, cnt
, droq
->read_idx
);
552 struct __dispatch
*rdisp
= rinfo
->rsvd
;
554 rdisp
->rinfo
= rinfo
;
555 rdisp
->disp_fn
= disp_fn
;
556 rinfo
->recv_pkt
->rh
= *rh
;
557 list_add_tail(&rdisp
->list
,
558 &droq
->dispatch_list
);
/* recv_info allocation failed. */
560 droq
->stats
.dropped_nomem
++;
/* No handler registered for this opcode/subcode: count the drop. */
563 dev_err(&oct
->pci_dev
->dev
, "DROQ: No dispatch function (opcode %u/%u)\n",
564 (unsigned int)rh
->r
.opcode
,
565 (unsigned int)rh
->r
.subcode
);
566 droq
->stats
.dropped_nodispatch
++;
567 } /* else (dispatch_fn ... */
/* Drop cnt packets from the ring without delivering them: account their
 * bytes, advance read_idx past each packet's buffers, and mark those
 * buffers for refill.
 */
572 static inline void octeon_droq_drop_packets(struct octeon_device
*oct
,
573 struct octeon_droq
*droq
,
577 struct octeon_droq_info
*info
;
579 for (i
= 0; i
< cnt
; i
++) {
580 info
= &droq
->info_list
[droq
->read_idx
];
/* Info entries are written big-endian by the chip; byte-swap in place. */
581 octeon_swap_8B_data((u64
*)info
, 2);
/* Response header length is included in the reported data length. */
584 info
->length
-= OCT_RH_SIZE
;
585 droq
->stats
.bytes_received
+= info
->length
;
586 buf_cnt
= octeon_droq_get_bufcount(droq
->buffer_size
,
589 dev_err(&oct
->pci_dev
->dev
, "DROQ: In drop: pkt with len 0\n");
593 INCR_INDEX(droq
->read_idx
, buf_cnt
, droq
->max_count
);
594 droq
->refill_count
+= buf_cnt
;
/* Core RX loop: consume up to pkts_to_process packets from the ring.
 * Slow-path opcodes are deferred via octeon_droq_dispatch_pkt(); fast-path
 * packets are turned into skbs (single-buffer packets recycle the page in
 * place, multi-buffer packets are copied/chained) and handed to the
 * registered droq->ops.fptr callback. Refills the ring and credits the
 * hardware when refill_threshold is crossed. Caller holds droq->lock.
 * NOTE(review): several branch/else lines are missing from this
 * extraction — verify control flow upstream before modifying.
 */
599 octeon_droq_fast_process_packets(struct octeon_device
*oct
,
600 struct octeon_droq
*droq
,
603 struct octeon_droq_info
*info
;
605 u32 pkt
, total_len
= 0, pkt_count
;
607 pkt_count
= pkts_to_process
;
609 for (pkt
= 0; pkt
< pkt_count
; pkt
++) {
611 struct sk_buff
*nicbuf
= NULL
;
612 struct octeon_skb_page_info
*pg_info
;
615 info
= &droq
->info_list
[droq
->read_idx
];
/* Info entries are device-endian; swap before reading length/rh. */
616 octeon_swap_8B_data((u64
*)info
, 2);
/* Zero-length entry: log and hex-dump the info for diagnosis. */
619 dev_err(&oct
->pci_dev
->dev
,
620 "DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
621 droq
->q_no
, droq
->read_idx
, pkt_count
);
622 print_hex_dump_bytes("", DUMP_PREFIX_ADDRESS
,
628 /* Len of resp hdr in included in the received data len. */
629 info
->length
-= OCT_RH_SIZE
;
632 total_len
+= (u32
)info
->length
;
/* Slow path: queue for deferred dispatch and skip the skb path. */
633 if (OPCODE_SLOW_PATH(rh
)) {
636 buf_cnt
= octeon_droq_dispatch_pkt(oct
, droq
, rh
, info
);
637 INCR_INDEX(droq
->read_idx
, buf_cnt
, droq
->max_count
);
638 droq
->refill_count
+= buf_cnt
;
/* Fast path, single-buffer packet: take the buffer as the skb and
 * try to recycle its backing page for the ring slot.
 */
640 if (info
->length
<= droq
->buffer_size
) {
641 pkt_len
= (u32
)info
->length
;
642 nicbuf
= droq
->recv_buf_list
[
643 droq
->read_idx
].buffer
;
644 pg_info
= &droq
->recv_buf_list
[
645 droq
->read_idx
].pg_info
;
646 if (recv_buffer_recycle(oct
, pg_info
))
647 pg_info
->page
= NULL
;
648 droq
->recv_buf_list
[droq
->read_idx
].buffer
=
651 INCR_INDEX_BY1(droq
->read_idx
, droq
->max_count
);
652 droq
->refill_count
++;
/* Multi-buffer packet: allocate one skb and gather the pieces. */
654 nicbuf
= octeon_fast_packet_alloc((u32
)
657 /* nicbuf allocation can fail. We'll handle it
660 while (pkt_len
< info
->length
) {
661 int cpy_len
, idx
= droq
->read_idx
;
/* Copy either a full buffer or the remaining tail. */
663 cpy_len
= ((pkt_len
+ droq
->buffer_size
)
665 ((u32
)info
->length
- pkt_len
) :
669 octeon_fast_packet_next(droq
,
673 buf
= droq
->recv_buf_list
[idx
].
675 recv_buffer_fast_free(buf
);
676 droq
->recv_buf_list
[idx
].buffer
679 droq
->stats
.rx_alloc_failure
++;
683 INCR_INDEX_BY1(droq
->read_idx
,
685 droq
->refill_count
++;
/* Deliver the skb to the registered receiver, else free it. */
690 if (droq
->ops
.fptr
) {
691 droq
->ops
.fptr(oct
->octeon_id
,
696 recv_buffer_free(nicbuf
);
/* Refill and credit the hardware once enough slots are empty. */
701 if (droq
->refill_count
>= droq
->refill_threshold
) {
702 int desc_refilled
= octeon_droq_refill(oct
, droq
);
704 /* Flush the droq descriptor data to memory to be sure
705 * that when we update the credits the data in memory
709 writel((desc_refilled
), droq
->pkts_credit_reg
);
710 /* make sure mmio write completes */
714 } /* for (each packet)... */
716 /* Increment refill_count by the number of buffers processed. */
717 droq
->stats
.pkts_received
+= pkt
;
718 droq
->stats
.bytes_received
+= total_len
;
/* drop_on_max: discard whatever the budget did not allow us to process. */
720 if ((droq
->ops
.drop_on_max
) && (pkts_to_process
- pkt
)) {
721 octeon_droq_drop_packets(oct
, droq
, (pkts_to_process
- pkt
));
723 droq
->stats
.dropped_toomany
+= (pkts_to_process
- pkt
);
724 return pkts_to_process
;
/* Process up to budget pending packets under the droq lock, then run any
 * deferred slow-path dispatch callbacks outside the lock. Returns with
 * rescheduling hinted if packets remain pending.
 */
731 octeon_droq_process_packets(struct octeon_device
*oct
,
732 struct octeon_droq
*droq
,
735 u32 pkt_count
= 0, pkts_processed
= 0;
736 struct list_head
*tmp
, *tmp2
;
738 pkt_count
= atomic_read(&droq
->pkts_pending
);
/* Clamp the work to the caller's budget. */
742 if (pkt_count
> budget
)
745 /* Grab the droq lock */
746 spin_lock(&droq
->lock
);
748 pkts_processed
= octeon_droq_fast_process_packets(oct
, droq
, pkt_count
);
750 atomic_sub(pkts_processed
, &droq
->pkts_pending
);
752 /* Release the spin lock */
753 spin_unlock(&droq
->lock
);
/* Deliver deferred dispatch entries; safe iteration since each entry is
 * consumed (the list_del is not visible in this extraction — confirm).
 */
755 list_for_each_safe(tmp
, tmp2
, &droq
->dispatch_list
) {
756 struct __dispatch
*rdisp
= (struct __dispatch
*)tmp
;
759 rdisp
->disp_fn(rdisp
->rinfo
,
760 octeon_get_dispatch_arg
762 (u16
)rdisp
->rinfo
->recv_pkt
->rh
.r
.opcode
,
763 (u16
)rdisp
->rinfo
->recv_pkt
->rh
.r
.subcode
));
766 /* If there are packets pending. schedule tasklet again */
767 if (atomic_read(&droq
->pkts_pending
))
774 * Utility function to poll for packets. check_hw_for_packets must be
775 * called before calling this routine.
779 octeon_droq_process_poll_pkts(struct octeon_device
*oct
,
780 struct octeon_droq
*droq
, u32 budget
)
782 struct list_head
*tmp
, *tmp2
;
783 u32 pkts_available
= 0, pkts_processed
= 0;
784 u32 total_pkts_processed
= 0;
/* The budget can never exceed the ring size. */
786 if (budget
> droq
->max_count
)
787 budget
= droq
->max_count
;
789 spin_lock(&droq
->lock
);
/* Keep draining: process what is pending, re-poll the hardware for
 * newly arrived packets, and stop when the budget is spent or the
 * queue goes empty.
 */
791 while (total_pkts_processed
< budget
) {
793 CVM_MIN((budget
- total_pkts_processed
),
794 (u32
)(atomic_read(&droq
->pkts_pending
)));
796 if (pkts_available
== 0)
800 octeon_droq_fast_process_packets(oct
, droq
,
803 atomic_sub(pkts_processed
, &droq
->pkts_pending
);
805 total_pkts_processed
+= pkts_processed
;
/* Pick up packets that arrived while we were processing. */
807 octeon_droq_check_hw_for_pkts(droq
);
810 spin_unlock(&droq
->lock
);
/* Run deferred slow-path dispatches outside the droq lock. */
812 list_for_each_safe(tmp
, tmp2
, &droq
->dispatch_list
) {
813 struct __dispatch
*rdisp
= (struct __dispatch
*)tmp
;
816 rdisp
->disp_fn(rdisp
->rinfo
,
817 octeon_get_dispatch_arg
819 (u16
)rdisp
->rinfo
->recv_pkt
->rh
.r
.opcode
,
820 (u16
)rdisp
->rinfo
->recv_pkt
->rh
.r
.subcode
));
823 return total_pkts_processed
;
/* Command multiplexer for the poll path: process a budget of packets,
 * process all currently pending packets, or re-enable the queue's
 * time/count interrupts (CN6XXX CSR writes under the shared interrupt
 * enable register lock). Unknown commands are logged.
 */
827 octeon_process_droq_poll_cmd(struct octeon_device
*oct
, u32 q_no
, int cmd
,
830 struct octeon_droq
*droq
;
832 droq
= oct
->droq
[q_no
];
834 if (cmd
== POLL_EVENT_PROCESS_PKTS
)
835 return octeon_droq_process_poll_pkts(oct
, droq
, arg
);
837 if (cmd
== POLL_EVENT_PENDING_PKTS
) {
838 u32 pkt_cnt
= atomic_read(&droq
->pkts_pending
);
840 return octeon_droq_process_packets(oct
, droq
, pkt_cnt
);
843 if (cmd
== POLL_EVENT_ENABLE_INTR
) {
847 /* Enable Pkt Interrupt */
848 switch (oct
->chip_id
) {
850 case OCTEON_CN68XX
: {
851 struct octeon_cn6xxx
*cn6xxx
=
852 (struct octeon_cn6xxx
*)oct
->chip
;
/* Read-modify-write of the shared INT_ENB CSRs must be serialized. */
854 (&cn6xxx
->lock_for_droq_int_enb_reg
, flags
);
857 CN6XXX_SLI_PKT_TIME_INT_ENB
);
858 value
|= (1 << q_no
);
859 octeon_write_csr(oct
,
860 CN6XXX_SLI_PKT_TIME_INT_ENB
,
864 CN6XXX_SLI_PKT_CNT_INT_ENB
);
865 value
|= (1 << q_no
);
866 octeon_write_csr(oct
,
867 CN6XXX_SLI_PKT_CNT_INT_ENB
,
870 /* don't bother flushing the enables */
872 spin_unlock_irqrestore
873 (&cn6xxx
->lock_for_droq_int_enb_reg
, flags
);
882 dev_err(&oct
->pci_dev
->dev
, "%s Unknown command: %d\n", __func__
, cmd
);
/* Install the caller's droq_ops (rx callback, drop policy, ...) on queue
 * q_no after validating the ops pointer and the queue id against the
 * device configuration. The copy is done under the droq lock with IRQs
 * disabled.
 */
886 int octeon_register_droq_ops(struct octeon_device
*oct
, u32 q_no
,
887 struct octeon_droq_ops
*ops
)
889 struct octeon_droq
*droq
;
891 struct octeon_config
*oct_cfg
= NULL
;
893 oct_cfg
= octeon_get_conf(oct
);
899 dev_err(&oct
->pci_dev
->dev
, "%s: droq_ops pointer is NULL\n",
904 if (q_no
>= CFG_GET_OQ_MAX_Q(oct_cfg
)) {
905 dev_err(&oct
->pci_dev
->dev
, "%s: droq id (%d) exceeds MAX (%d)\n",
906 __func__
, q_no
, (oct
->num_oqs
- 1));
910 droq
= oct
->droq
[q_no
];
912 spin_lock_irqsave(&droq
->lock
, flags
);
914 memcpy(&droq
->ops
, ops
, sizeof(struct octeon_droq_ops
));
916 spin_unlock_irqrestore(&droq
->lock
, flags
);
/* Clear the droq_ops of queue q_no (callback, callback arg, drop policy)
 * under the droq lock, after validating the queue id and that the queue
 * actually exists.
 */
921 int octeon_unregister_droq_ops(struct octeon_device
*oct
, u32 q_no
)
924 struct octeon_droq
*droq
;
925 struct octeon_config
*oct_cfg
= NULL
;
927 oct_cfg
= octeon_get_conf(oct
);
932 if (q_no
>= CFG_GET_OQ_MAX_Q(oct_cfg
)) {
933 dev_err(&oct
->pci_dev
->dev
, "%s: droq id (%d) exceeds MAX (%d)\n",
934 __func__
, q_no
, oct
->num_oqs
- 1);
938 droq
= oct
->droq
[q_no
];
/* Queue was never created (droq NULL) — nothing to unregister. */
941 dev_info(&oct
->pci_dev
->dev
,
942 "Droq id (%d) not available.\n", q_no
);
946 spin_lock_irqsave(&droq
->lock
, flags
);
948 droq
->ops
.fptr
= NULL
;
949 droq
->ops
.farg
= NULL
;
950 droq
->ops
.drop_on_max
= 0;
952 spin_unlock_irqrestore(&droq
->lock
, flags
);
/* Create DROQ q_no: allocate the droq structure (NUMA-local vmalloc with
 * fallback), disable the queue's packet output, register it in
 * oct->droq[], and initialize it via octeon_init_droq(). The failure
 * label tears the queue back down with octeon_delete_droq().
 */
957 int octeon_create_droq(struct octeon_device
*oct
,
958 u32 q_no
, u32 num_descs
,
959 u32 desc_size
, void *app_ctx
)
961 struct octeon_droq
*droq
;
962 int numa_node
= cpu_to_node(q_no
% num_online_cpus());
/* Refuse to create the same queue twice. */
964 if (oct
->droq
[q_no
]) {
965 dev_dbg(&oct
->pci_dev
->dev
, "Droq already in use. Cannot create droq %d again\n",
970 /* Allocate the DS for the new droq. */
971 droq
= vmalloc_node(sizeof(*droq
), numa_node
);
973 droq
= vmalloc(sizeof(*droq
));
975 goto create_droq_fail
;
976 memset(droq
, 0, sizeof(struct octeon_droq
));
978 /*Disable the pkt o/p for this Q */
979 octeon_set_droq_pkt_op(oct
, q_no
, 0);
980 oct
->droq
[q_no
] = droq
;
982 /* Initialize the Droq */
983 octeon_init_droq(oct
, q_no
, num_descs
, desc_size
, app_ctx
);
987 dev_dbg(&oct
->pci_dev
->dev
, "%s: Total number of OQ: %d\n", __func__
,
990 /* Global Droq register settings */
992 /* As of now not required, as setting are done for all 32 Droqs at
/* Failure path: free everything allocated for this queue. */
998 octeon_delete_droq(oct
, q_no
);