/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#ifndef NICVF_QUEUES_H
#define NICVF_QUEUES_H

#include <linux/netdevice.h>
#include "q_struct.h"

#define MAX_QUEUE_SET			128
#define MAX_RCV_QUEUES_PER_QS		8
#define MAX_RCV_BUF_DESC_RINGS_PER_QS	2
#define MAX_SND_QUEUES_PER_QS		8
#define MAX_CMP_QUEUES_PER_QS		8

/* VF's queue interrupt ranges */
#define NICVF_INTR_ID_CQ	0
#define NICVF_INTR_ID_SQ	8
#define NICVF_INTR_ID_RBDR	16
#define NICVF_INTR_ID_MISC	18
#define NICVF_INTR_ID_QS_ERR	19

#define for_each_cq_irq(irq)	\
	for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++)
#define for_each_sq_irq(irq)	\
	for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++)
#define for_each_rbdr_irq(irq)	\
	for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++)
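
/* Usage sketch for the iteration macros above: each expands to a plain
 * for-loop over a contiguous range of the VF's interrupt vector IDs, so the
 * caller supplies the loop variable. NICVF_INTR_CQ is assumed to be the
 * interrupt-type constant from nic.h; subtracting the range base converts a
 * vector ID back to a queue index.
 *
 *	int irq;
 *
 *	for_each_cq_irq(irq)
 *		nicvf_enable_intr(nic, NICVF_INTR_CQ, irq - NICVF_INTR_ID_CQ);
 */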

#define RBDR_SIZE0	0ULL /* 8K entries */
#define RBDR_SIZE1	1ULL /* 16K entries */
#define RBDR_SIZE2	2ULL /* 32K entries */
#define RBDR_SIZE3	3ULL /* 64K entries */
#define RBDR_SIZE4	4ULL /* 128K entries */
#define RBDR_SIZE5	5ULL /* 256K entries */
#define RBDR_SIZE6	6ULL /* 512K entries */

#define SND_QUEUE_SIZE0	0ULL /* 1K entries */
#define SND_QUEUE_SIZE1	1ULL /* 2K entries */
#define SND_QUEUE_SIZE2	2ULL /* 4K entries */
#define SND_QUEUE_SIZE3	3ULL /* 8K entries */
#define SND_QUEUE_SIZE4	4ULL /* 16K entries */
#define SND_QUEUE_SIZE5	5ULL /* 32K entries */
#define SND_QUEUE_SIZE6	6ULL /* 64K entries */

#define CMP_QUEUE_SIZE0	0ULL /* 1K entries */
#define CMP_QUEUE_SIZE1	1ULL /* 2K entries */
#define CMP_QUEUE_SIZE2	2ULL /* 4K entries */
#define CMP_QUEUE_SIZE3	3ULL /* 8K entries */
#define CMP_QUEUE_SIZE4	4ULL /* 16K entries */
#define CMP_QUEUE_SIZE5	5ULL /* 32K entries */
#define CMP_QUEUE_SIZE6	6ULL /* 64K entries */
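
/* Note: the *_SIZE* values above are a 3-bit hardware encoding, not entry
 * counts. SQ/CQ entries = 1K << SIZE and RBDR entries = 8K << SIZE, which is
 * exactly what the (1ULL << (SIZE + 10)) and (1ULL << (SIZE + 13)) length
 * formulas below compute. E.g. SND_QUEUE_SIZE2 = 2 encodes 1K << 2 = 4K
 * entries.
 */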

/* Default queue count per QS, its lengths and threshold values */
#define DEFAULT_RBDR_CNT		1

#define SND_QSIZE			SND_QUEUE_SIZE2
#define SND_QUEUE_LEN			(1ULL << (SND_QSIZE + 10))
#define MAX_SND_QUEUE_LEN		(1ULL << (SND_QUEUE_SIZE6 + 10))
#define SND_QUEUE_THRESH		2ULL
#define MIN_SQ_DESC_PER_PKT_XMIT	2
/* Timestamping is not enabled; with it, this would be 2 */
#define MAX_CQE_PER_PKT_XMIT		1

/* Keep CQ and SQ sizes the same; if timestamping
 * is enabled, this equation will change.
 */
#define CMP_QSIZE		CMP_QUEUE_SIZE2
#define CMP_QUEUE_LEN		(1ULL << (CMP_QSIZE + 10))
#define CMP_QUEUE_CQE_THRESH	(NAPI_POLL_WEIGHT / 2)
#define CMP_QUEUE_TIMER_THRESH	80 /* ~2usec */
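
/* A rough reading of the two numbers above: 80 timer ticks ~= 2 usec implies
 * a CQ timer granularity of about 25 ns per tick. This is inferred from the
 * comment, not from a documented clock rate.
 */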

#define RBDR_SIZE		RBDR_SIZE0
#define RCV_BUF_COUNT		(1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT	(1ULL << (RBDR_SIZE6 + 13))
#define RBDR_THRESH		(RCV_BUF_COUNT / 2)
#define DMA_BUFFER_LEN		2048 /* In multiples of 128 bytes */
#define RCV_FRAG_LEN	(SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
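
/* RCV_FRAG_LEN follows the standard sizing convention used with build_skb():
 * the DMA-able packet area (plus NET_SKB_PAD headroom) followed by room for
 * the struct skb_shared_info tail, each rounded up with SKB_DATA_ALIGN so
 * the shared info lands on an aligned boundary.
 */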

#define MAX_CQES_FOR_TX		((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
				 MAX_CQE_PER_PKT_XMIT)
/* Calculate the number of CQEs to reserve for all SQEs.
 * The drop level is in units of 1/256th of the CQ size.
 * '+ 1' to account for pipelining.
 */
#define RQ_CQ_DROP		((256 / (CMP_QUEUE_LEN / \
				 (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
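
/* Worked example with the defaults above: SND_QUEUE_LEN = CMP_QUEUE_LEN =
 * 4096, MAX_CQES_FOR_TX = (4096 / 2) * 1 = 2048, so
 * RQ_CQ_DROP = (256 / (4096 / (4096 - 2048))) + 1 = (256 / 2) + 1 = 129.
 */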

/* Descriptor size in bytes */
#define SND_QUEUE_DESC_SIZE	16
#define CMP_QUEUE_DESC_SIZE	512

/* Buffer / descriptor alignments */
#define NICVF_RCV_BUF_ALIGN		7
#define NICVF_RCV_BUF_ALIGN_BYTES	(1ULL << NICVF_RCV_BUF_ALIGN)
#define NICVF_CQ_BASE_ALIGN_BYTES	512 /* 9 bits */
#define NICVF_SQ_BASE_ALIGN_BYTES	128 /* 7 bits */

#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES)	ALIGN(ADDR, ALIGN_BYTES)
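
/* Example: NICVF_ALIGNED_ADDR(addr, NICVF_CQ_BASE_ALIGN_BYTES) rounds a
 * descriptor-ring base address up to the next 512-byte boundary, via the
 * kernel's ALIGN() macro.
 */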

/* Queue enable/disable */
#define NICVF_SQ_EN		BIT_ULL(19)

/* Queue reset */
#define NICVF_CQ_RESET		BIT_ULL(41)
#define NICVF_SQ_RESET		BIT_ULL(17)
#define NICVF_RBDR_RESET	BIT_ULL(43)
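
/* These are single bits in the per-queue CFG registers; a plausible use is a
 * read-modify-write through the queue register helpers declared below. The
 * register offset symbol here is assumed from the driver's register
 * definitions, not declared in this header:
 *
 *	u64 cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
 *	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx,
 *			      cfg | NICVF_SQ_RESET);
 */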

enum CQ_RX_ERRLVL_E {
	CQ_ERRLVL_MAC,
	CQ_ERRLVL_L2,
	CQ_ERRLVL_L3,
	CQ_ERRLVL_L4,
};

enum CQ_RX_ERROP_E {
	CQ_RX_ERROP_RE_NONE = 0x0,
	CQ_RX_ERROP_RE_PARTIAL = 0x1,
	CQ_RX_ERROP_RE_JABBER = 0x2,
	CQ_RX_ERROP_RE_FCS = 0x7,
	CQ_RX_ERROP_RE_TERMINATE = 0x9,
	CQ_RX_ERROP_RE_RX_CTL = 0xb,
	CQ_RX_ERROP_PREL2_ERR = 0x1f,
	CQ_RX_ERROP_L2_FRAGMENT = 0x20,
	CQ_RX_ERROP_L2_OVERRUN = 0x21,
	CQ_RX_ERROP_L2_PFCS = 0x22,
	CQ_RX_ERROP_L2_PUNY = 0x23,
	CQ_RX_ERROP_L2_MAL = 0x24,
	CQ_RX_ERROP_L2_OVERSIZE = 0x25,
	CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
	CQ_RX_ERROP_L2_LENMISM = 0x27,
	CQ_RX_ERROP_L2_PCLP = 0x28,
	CQ_RX_ERROP_IP_NOT = 0x41,
	CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
	CQ_RX_ERROP_IP_MAL = 0x43,
	CQ_RX_ERROP_IP_MALD = 0x44,
	CQ_RX_ERROP_IP_HOP = 0x45,
	CQ_RX_ERROP_L3_ICRC = 0x46,
	CQ_RX_ERROP_L3_PCLP = 0x47,
	CQ_RX_ERROP_L4_MAL = 0x61,
	CQ_RX_ERROP_L4_CHK = 0x62,
	CQ_RX_ERROP_UDP_LEN = 0x63,
	CQ_RX_ERROP_L4_PORT = 0x64,
	CQ_RX_ERROP_TCP_FLAG = 0x65,
	CQ_RX_ERROP_TCP_OFFSET = 0x66,
	CQ_RX_ERROP_L4_PCLP = 0x67,
	CQ_RX_ERROP_RBDR_TRUNC = 0x70,
};

enum CQ_TX_ERROP_E {
	CQ_TX_ERROP_GOOD = 0x0,
	CQ_TX_ERROP_DESC_FAULT = 0x10,
	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
	CQ_TX_ERROP_SUBDC_ERR = 0x12,
	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
	CQ_TX_ERROP_LOCK_VIOL = 0x83,
	CQ_TX_ERROP_DATA_FAULT = 0x84,
	CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
	CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
	CQ_TX_ERROP_MEM_FAULT = 0x87,
	CQ_TX_ERROP_CK_OVERLAP = 0x88,
	CQ_TX_ERROP_CK_OFLOW = 0x89,
	CQ_TX_ERROP_ENUM_LAST = 0x8a,
};

struct cmp_queue_stats {
	struct tx_stats {
		u64 good;
		u64 desc_fault;
		u64 hdr_cons_err;
		u64 subdesc_err;
		u64 imm_size_oflow;
		u64 data_seq_err;
		u64 mem_seq_err;
		u64 lock_viol;
		u64 data_fault;
		u64 tstmp_conflict;
		u64 tstmp_timeout;
		u64 mem_fault;
		u64 csum_overlap;
		u64 csum_overflow;
	} tx;
} ____cacheline_aligned_in_smp;

struct rx_tx_queue_stats {
	u64	bytes;
	u64	pkts;
} ____cacheline_aligned_in_smp;

struct q_desc_mem {
	dma_addr_t	dma;
	u64		size;
	u16		q_len;
	dma_addr_t	phys_base;
	void		*base;
	void		*unalign_base;
};

struct rbdr {
	bool		enable;
	u32		dma_size;
	u32		frag_len;
	u32		thresh;	/* Threshold level for interrupt */
	void		*desc;
	u32		head;
	u32		tail;
	struct q_desc_mem	dmem;
} ____cacheline_aligned_in_smp;

struct rcv_queue {
	bool		enable;
	struct rbdr	*rbdr_start;
	struct rbdr	*rbdr_cont;
	bool		en_tcp_reassembly;
	u8		cq_qs;			/* CQ's QS to which this RQ is assigned */
	u8		cq_idx;			/* CQ index (0 to 7) in the QS */
	u8		cont_rbdr_qs;		/* Continue buffer ptrs - QS num */
	u8		cont_qs_rbdr_idx;	/* RBDR idx in the cont QS */
	u8		start_rbdr_qs;		/* First buffer ptrs - QS num */
	u8		start_qs_rbdr_idx;	/* RBDR idx in the above QS */
	struct rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;

struct cmp_queue {
	bool		enable;
	u16		thresh;
	spinlock_t	lock;	/* lock to serialize processing CQEs */
	void		*desc;
	struct q_desc_mem	dmem;
	struct cmp_queue_stats	stats;
	int		irq;
} ____cacheline_aligned_in_smp;

struct snd_queue {
	bool		enable;
	u8		cq_qs;	/* CQ's QS to which this SQ is pointing */
	u8		cq_idx;	/* CQ index (0 to 7) in the above QS */
	u16		thresh;
	atomic_t	free_cnt;
	u32		head;
	u32		tail;
	u64		*skbuff;
	void		*desc;

#define	TSO_HEADER_SIZE	128
	/* DMA buffer to hold each TSO segment's header */
	void		*tso_hdrs;
	dma_addr_t	tso_hdrs_phys;

	cpumask_t	affinity_mask;
	struct q_desc_mem	dmem;
	struct rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;

struct queue_set {
	bool		enable;
	bool		be_en;
	u8		vnic_id;
	u8		rq_cnt;
	u8		cq_cnt;
	u64		cq_len;
	u8		sq_cnt;
	u64		sq_len;
	u8		rbdr_cnt;
	u64		rbdr_len;
	struct rcv_queue	rq[MAX_RCV_QUEUES_PER_QS];
	struct cmp_queue	cq[MAX_CMP_QUEUES_PER_QS];
	struct snd_queue	sq[MAX_SND_QUEUES_PER_QS];
	struct rbdr		rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
} ____cacheline_aligned_in_smp;

#define GET_RBDR_DESC(RING, idx) \
	(&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
#define GET_SQ_DESC(RING, idx) \
	(&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
#define GET_CQ_DESC(RING, idx) \
	(&(((union cq_desc_t *)((RING)->desc))[idx]))
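
/* Usage sketch: the GET_*_DESC() helpers simply index the ring's contiguous
 * descriptor memory. E.g. fetching CQE number cqe_head, where cqe_head would
 * come from the CQ head-pointer register:
 *
 *	union cq_desc_t *cq_desc = GET_CQ_DESC(cq, cqe_head);
 */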

/* CQ status bits */
#define CQ_WR_FULL	BIT(26)
#define CQ_WR_DISABLE	BIT(25)
#define CQ_WR_FAULT	BIT(24)
#define CQ_CQE_COUNT	(0xFFFF << 0)

#define CQ_ERR_MASK	(CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
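
/* Sketch of decoding a CQ status word with the masks above; the status
 * register symbol (NIC_QSET_CQ_0_7_STATUS) is assumed from the driver's
 * register definitions, not declared here:
 *
 *	u64 status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, qidx);
 *	if (status & CQ_ERR_MASK)
 *		return -EIO;			// CQ full/disabled/faulted
 *	cqe_count = status & CQ_CQE_COUNT;	// pending CQE count
 */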

void nicvf_config_vlan_stripping(struct nicvf *nic,
				 netdev_features_t features);
int nicvf_set_qset_resources(struct nicvf *nic);
int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
void nicvf_qset_config(struct nicvf *nic, bool enable);
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable);

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
void nicvf_sq_disable(struct nicvf *nic, int qidx);
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
void nicvf_sq_free_used_descs(struct net_device *netdev,
			      struct snd_queue *sq, int qidx);
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb);

struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
void nicvf_rbdr_task(unsigned long data);
void nicvf_rbdr_work(struct work_struct *work);

void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);

/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val);
u64 nicvf_queue_reg_read(struct nicvf *nic,
			 u64 offset, u64 qidx);

/* Stats */
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx);

#endif /* NICVF_QUEUES_H */