/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

static void nicvf_get_page(struct nicvf *nic)
{
	if (!nic->rb_pageref || !nic->rb_page)
		return;

	page_ref_add(nic->rb_page, nic->rb_pageref);
	nic->rb_pageref = 0;
}
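
/*
 * Receive buffers are carved out of a single (possibly compound) page,
 * see nicvf_alloc_rcv_buffer() below.  Rather than taking a page
 * reference per buffer, the pending count is accumulated in
 * nic->rb_pageref and applied with one page_ref_add() here, when the
 * driver switches to a new page or finishes a refill cycle.
 */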

/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
			  u64 reg, int bit_pos, int bits, int val)
{
	u64 bit_mask;
	u64 reg_val;
	int timeout = 10;

	bit_mask = (1ULL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
	return 1;
}

/* Allocate memory for a queue's descriptors */
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
				  int q_len, int desc_size, int align_bytes)
{
	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len) + align_bytes;
	/* Save address, need it while freeing */
	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
						 &dmem->dma, GFP_KERNEL);
	if (!dmem->unalign_base)
		return -ENOMEM;

	/* Align memory address for 'align_bytes' */
	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
	return 0;
}
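
/*
 * Worked example of the alignment above (assuming NICVF_ALIGNED_ADDR()
 * rounds up to the next 'align_bytes' boundary): the area is
 * over-allocated by align_bytes, so if dma_zalloc_coherent() returns
 * dma = 0x...0140 and align_bytes = 512, then phys_base = 0x...0200 and
 * 'base' points 0xc0 bytes into 'unalign_base'; HW is always programmed
 * with the aligned phys_base.
 */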

/* Free queue's descriptor memory */
static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	if (!dmem)
		return;

	dma_free_coherent(&nic->pdev->dev, dmem->size,
			  dmem->unalign_base, dmem->dma);
	dmem->unalign_base = NULL;
	dmem->base = NULL;
}

/* Allocate buffer for packet reception
 * HW returns memory address where packet is DMA'ed but not a pointer
 * into RBDR ring, so save buffer address at the start of fragment and
 * align the start address to a cache aligned address
 */
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
					 u32 buf_len, u64 **rbuf)
{
	int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0;

	/* Check if request can be accommodated in previously allocated page */
	if (nic->rb_page &&
	    ((nic->rb_page_offset + buf_len) < (PAGE_SIZE << order))) {
		nic->rb_pageref++;
		goto ret;
	}

	nicvf_get_page(nic);
	nic->rb_page = NULL;

	/* Allocate a new page */
	if (!nic->rb_page) {
		nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
					   order);
		if (!nic->rb_page) {
			nic->drv_stats.rcv_buffer_alloc_failures++;
			return -ENOMEM;
		}
		nic->rb_page_offset = 0;
	}

ret:
	*rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);
	nic->rb_page_offset += buf_len;

	return 0;
}

/* Build skb around receive buffer */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
					   u64 rb_ptr, int len)
{
	void *data;
	struct sk_buff *skb;

	data = phys_to_virt(rb_ptr);

	/* Now build an skb to give to stack */
	skb = build_skb(data, RCV_FRAG_LEN);
	if (!skb) {
		put_page(virt_to_page(data));
		return NULL;
	}

	prefetch(skb->data);
	return skb;
}

/* Allocate RBDR ring and populate receive buffers */
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
			   int ring_len, int buf_size)
{
	int idx;
	u64 *rbuf;
	struct rbdr_entry_t *desc;
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
				     sizeof(struct rbdr_entry_t),
				     NICVF_RCV_BUF_ALIGN_BYTES);
	if (err)
		return err;

	rbdr->desc = rbdr->dmem.base;
	/* Buffer size has to be in multiples of 128 bytes */
	rbdr->dma_size = buf_size;
	rbdr->enable = true;
	rbdr->thresh = RBDR_THRESH;

	nic->rb_page = NULL;
	for (idx = 0; idx < ring_len; idx++) {
		err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
					     &rbuf);
		if (err)
			return err;

		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
	}

	nicvf_get_page(nic);

	return 0;
}

/* Free RBDR ring and its receive buffers */
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	int head, tail;
	u64 buf_addr;
	struct rbdr_entry_t *desc;

	if (!rbdr)
		return;

	rbdr->enable = false;
	if (!rbdr->dmem.base)
		return;

	head = rbdr->head;
	tail = rbdr->tail;

	/* Release page references held by receive buffers */
	while (head != tail) {
		desc = GET_RBDR_DESC(rbdr, head);
		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
		put_page(virt_to_page(phys_to_virt(buf_addr)));
		head++;
		head &= (rbdr->dmem.q_len - 1);
	}
	/* Release buffer of tail desc */
	desc = GET_RBDR_DESC(rbdr, tail);
	buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
	put_page(virt_to_page(phys_to_virt(buf_addr)));

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}

/* Refill receive buffer descriptors with new buffers */
static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
{
	struct queue_set *qs = nic->qs;
	int rbdr_idx = qs->rbdr_cnt;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr *rbdr;
	struct rbdr_entry_t *desc;
	u64 *rbuf;
	int new_rb = 0;

refill:
	if (!rbdr_idx)
		return;
	rbdr_idx--;
	rbdr = &qs->rbdr[rbdr_idx];
	/* Check if it's enabled */
	if (!rbdr->enable)
		goto next_rbdr;

	/* Get the number of descriptors to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be rung with a max of ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1))
		goto next_rbdr;
	else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
			break;

		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
		refill_rb_cnt--;
		new_rb++;
	}

	nicvf_get_page(nic);

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt)
		nic->rb_alloc_fail = true;
	else
		nic->rb_alloc_fail = false;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      rbdr_idx, new_rb);
next_rbdr:
	/* Re-enable RBDR interrupts only if buffer allocation succeeded */
	if (!nic->rb_alloc_fail && rbdr->enable)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

	if (rbdr_idx)
		goto refill;
}
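
/*
 * Refill protocol, as used above: HW reports the count of unused
 * descriptors in STATUS0, SW appends buffers starting one past the
 * current tail, and the doorbell write tells HW how many descriptors
 * were added.  Since the doorbell can be rung with at most ring size
 * minus 1, the ring is never made completely full.
 */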

/* Alloc rcv buffers in non-atomic mode for better success */
void nicvf_rbdr_work(struct work_struct *work)
{
	struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);

	nicvf_refill_rbdr(nic, GFP_KERNEL);
	if (nic->rb_alloc_fail)
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	else
		nic->rb_work_scheduled = false;
}

/* In softirq context, alloc rcv buffers in atomic mode */
void nicvf_rbdr_task(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;

	nicvf_refill_rbdr(nic, GFP_ATOMIC);
	if (nic->rb_alloc_fail) {
		nic->rb_work_scheduled = true;
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	}
}

/* Initialize completion queue */
static int nicvf_init_cmp_queue(struct nicvf *nic,
				struct cmp_queue *cq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
				     NICVF_CQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	cq->desc = cq->dmem.base;
	cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	return 0;
}

static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{
	if (!cq)
		return;
	if (!cq->dmem.base)
		return;

	nicvf_free_q_desc_mem(nic, &cq->dmem);
}

/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
				struct snd_queue *sq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
				     NICVF_SQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	sq->desc = sq->dmem.base;
	sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
	if (!sq->skbuff)
		return -ENOMEM;
	sq->head = 0;
	sq->tail = 0;
	atomic_set(&sq->free_cnt, q_len - 1);
	sq->thresh = SND_QUEUE_THRESH;

	/* Preallocate memory for TSO segment's header */
	sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
					  q_len * TSO_HEADER_SIZE,
					  &sq->tso_hdrs_phys, GFP_KERNEL);
	if (!sq->tso_hdrs)
		return -ENOMEM;

	return 0;
}

static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	if (!sq)
		return;
	if (!sq->dmem.base)
		return;

	if (sq->tso_hdrs)
		dma_free_coherent(&nic->pdev->dev,
				  sq->dmem.q_len * TSO_HEADER_SIZE,
				  sq->tso_hdrs, sq->tso_hdrs_phys);

	kfree(sq->skbuff);
	nicvf_free_q_desc_mem(nic, &sq->dmem);
}

static void nicvf_reclaim_snd_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}

static void nicvf_reclaim_rbdr(struct nicvf *nic,
			       struct rbdr *rbdr, int qidx)
{
	u64 tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_HEAD,
					  qidx) >> 3;
	rbdr->tail = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_TAIL,
					  qidx) >> 3;

	/* If RBDR FIFO is in 'FAIL' state then do a reset first
	 * before reclaiming.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3)
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
				      qidx, NICVF_RBDR_RESET);

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
					   qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;
		usleep_range(1000, 2000);
		timeout--;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}

void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
{
	u64 rq_cfg;
	int sqs;

	rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);

	/* Enable first VLAN stripping */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		rq_cfg |= (1ULL << 25);
	else
		rq_cfg &= ~(1ULL << 25);
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);

	/* Configure Secondary Qsets, if any */
	for (sqs = 0; sqs < nic->sqs_count; sqs++)
		if (nic->snicvf[sqs])
			nicvf_queue_reg_write(nic->snicvf[sqs],
					      NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
}

static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	/* Reset all RXQ's stats */
	mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
	mbx.reset_stat.rq_stat_mask = 0xFFFF;
	nicvf_send_msg_to_pf(nic, &mbx);
}

/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		return;
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* All writes of RBDR data are to be loaded into L2 cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
		     (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
		     (rq->cont_qs_rbdr_idx << 8) |
		     (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/* RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
	if (!nic->sqs_mode)
		nicvf_config_vlan_stripping(nic, nic->netdev->features);

	/* Enable Receive queue */
	memset(&rq_cfg, 0, sizeof(struct rq_cfg));
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
}

/* Configures completion queue */
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	if (!cq->enable)
		return;

	spin_lock_init(&cq->lock);
	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
			      qidx, (u64)(cq->dmem.phys_base));

	/* Enable Completion queue */
	memset(&cq_cfg, 0, sizeof(struct cq_cfg));
	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	cq_cfg.caching = 0;
	cq_cfg.qsize = CMP_QSIZE;
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
			      qidx, CMP_QUEUE_TIMER_THRESH);
}

/* Configures transmit queue */
static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.sqs_mode = nic->sqs_mode;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
			      qidx, (u64)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	memset(&sq_cfg, 0, sizeof(struct sq_cfg));
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = SND_QSIZE;
	sq_cfg.tstmp_bgx_intf = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);

	/* Set queue:cpu affinity for better load distribution */
	if (cpu_online(qidx)) {
		cpumask_set_cpu(qidx, &sq->affinity_mask);
		netif_set_xps_queue(nic->netdev,
				    &sq->affinity_mask, qidx);
	}
}

/* Configures receive buffer descriptor ring */
static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
			      int qidx, bool enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
			      qidx, (u64)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, *(u64 *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      qidx, qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
			      qidx, rbdr->thresh - 1);
}

/* Requests PF to assign and enable Qset */
void nicvf_qset_config(struct nicvf *nic, bool enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs = nic->qs;
	struct qs_cfg *qs_cfg;

	if (!qs) {
		netdev_warn(nic->netdev,
			    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;
	mbx.qs.sqs_count = nic->sqs_count;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
#ifdef __BIG_ENDIAN
		qs_cfg->be = 1;
#endif
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

static int nicvf_alloc_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
				    DMA_BUFFER_LEN))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
			goto alloc_fail;
	}

	return 0;
alloc_fail:
	nicvf_free_resources(nic);
	return -ENOMEM;
}

int nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = DEFAULT_RBDR_CNT;
	qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus());
	qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus());
	qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt);

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;

	nic->rx_queues = qs->rq_cnt;
	nic->tx_queues = qs->sq_cnt;

	return 0;
}

int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
	bool disable = false;
	struct queue_set *qs = nic->qs;
	int qidx;

	if (!qs)
		return 0;

	if (enable) {
		if (nicvf_alloc_resources(nic))
			return -ENOMEM;

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}

	/* Reset RXQ's stats.
	 * SQ's stats will get reset automatically once SQ is reset.
	 */
	nicvf_reset_rcv_queue_stats(nic);

	return 0;
}

/* Get a free descriptor from SQ
 * returns descriptor pointer & descriptor number
 */
static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

	qentry = sq->tail;
	atomic_sub(desc_cnt, &sq->free_cnt);
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return qentry;
}
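
/*
 * Note: free_cnt is decremented here without a check; the caller
 * (nicvf_sq_append_skb()) verifies that enough descriptors are free
 * before reserving them.
 */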

/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	atomic_add(desc_cnt, &sq->free_cnt);
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}

static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return qentry;
}

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}

void nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}

void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
			      int qidx)
{
	u64 head, tail;
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	struct sq_hdr_subdesc *hdr;

	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		if (skb)
			dev_kfree_skb_any(skb);
		atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
		atomic64_add(hdr->tot_len,
			     (atomic64_t *)&netdev->stats.tx_bytes);
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}

/* Calculate the number of SQ subdescriptors needed to transmit all
 * segments of this TSO packet.
 * Taken from 'Tilera network driver' with a minor modification.
 */
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	unsigned int data_len = skb->len - sh_len;
	unsigned int p_len = sh->gso_size;
	long f_id = -1;	/* id of the current fragment */
	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
	long f_used = 0;  /* bytes used from the current fragment */
	long n;		/* size of the current piece of payload */
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {
			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = skb_frag_size(&sh->frags[f_id]);
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
	return num_edescs + sh->gso_segs;
}
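
/*
 * Example: a TSO skb with gso_size = 1448 and three segments whose
 * entire payload sits in one page fragment (the linear area holds only
 * the headers) needs, per segment, one edesc for the header copy and
 * one for the payload piece, so num_edescs = 6; adding one
 * SQ_HDR_SUBDESC per segment gives a total of 9 subdescriptors.
 */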

#define POST_CQE_DESC_COUNT 2

/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;

	if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
		subdesc_cnt = nicvf_tso_count_subdescs(skb);
		return subdesc_cnt;
	}

	/* Dummy descriptors to get TSO pkt completion notification */
	if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
		subdesc_cnt += POST_CQE_DESC_COUNT;

	if (skb_shinfo(skb)->nr_frags)
		subdesc_cnt += skb_shinfo(skb)->nr_frags;

	return subdesc_cnt;
}

/* Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static inline void
nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
			 int subdesc_cnt, struct sk_buff *skb, int len)
{
	int proto;
	struct sq_hdr_subdesc *hdr;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;

	if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
		/* post_cqe = 0, to avoid HW posting a CQE for every TSO
		 * segment transmitted on 88xx.
		 */
		hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
	} else {
		sq->skbuff[qentry] = (u64)skb;
		/* Enable notification via CQE after processing SQE */
		hdr->post_cqe = 1;
		/* Number of subdescriptors following this one */
		hdr->subdesc_cnt = subdesc_cnt;
	}
	hdr->tot_len = len;

	/* Offload checksum calculation to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->csum_l3 = 1; /* Enable IP csum calculation */
		hdr->l3_offset = skb_network_offset(skb);
		hdr->l4_offset = skb_transport_offset(skb);

		proto = ip_hdr(skb)->protocol;
		switch (proto) {
		case IPPROTO_TCP:
			hdr->csum_l4 = SEND_L4_CSUM_TCP;
			break;
		case IPPROTO_UDP:
			hdr->csum_l4 = SEND_L4_CSUM_UDP;
			break;
		case IPPROTO_SCTP:
			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
			break;
		}
	}

	if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
		hdr->tso = 1;
		hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
		hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
		/* For non-tunneled pkts, point this to L2 ethertype */
		hdr->inner_l3_offset = skb_network_offset(skb) - 2;
		nic->drv_stats.tx_tso++;
	}
}

/* SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
					       int size, u64 data)
{
	struct sq_gather_subdesc *gather;

	qentry &= (sq->dmem.q_len - 1);
	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

	memset(gather, 0, SND_QUEUE_DESC_SIZE);
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
	gather->size = size;
	gather->addr = data;
}

/* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO
 * packet so that a CQE is posted as a notification for transmission of
 * TSO packet.
 */
static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
					    int tso_sqe, struct sk_buff *skb)
{
	struct sq_imm_subdesc *imm;
	struct sq_hdr_subdesc *hdr;

	sq->skbuff[qentry] = (u64)skb;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* There is no packet to transmit here */
	hdr->dont_send = 1;
	hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
	hdr->tot_len = 1;
	/* Actual TSO header SQE index, needed for cleanup */
	hdr->rsvd2 = tso_sqe;

	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
	memset(imm, 0, SND_QUEUE_DESC_SIZE);
	imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
	imm->len = 1;
}
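
/*
 * On 88xx with HW TSO the real TSO header descriptor is posted with
 * post_cqe = 0 (see nicvf_sq_add_hdr_subdesc()), so without this
 * HDR + IMMEDIATE pair the stack would never see a completion for the
 * packet.  The dummy HDR has dont_send = 1, so it only generates a
 * CQE, and hdr->rsvd2 carries the original TSO SQE index so that
 * cleanup can free the right range of descriptors.
 */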

/* Segment a TSO packet into 'gso_size' segments and append
 * them to SQ for transfer
 */
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
			       int sq_num, int qentry, struct sk_buff *skb)
{
	struct tso_t tso;
	int seg_subdescs = 0, desc_cnt = 0;
	int seg_len, total_len, data_left;
	int hdr_qentry = qentry;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso_start(skb, &tso);
	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		/* Save Qentry for adding HDR_SUBDESC at the end */
		hdr_qentry = qentry;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* Add segment's header */
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
					    sq->tso_hdrs_phys +
					    qentry * TSO_HEADER_SIZE);
		/* HDR_SUBDESC + GATHER */
		seg_subdescs = 2;
		seg_len = hdr_len;

		/* Add segment's payload fragments */
		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);

			qentry = nicvf_get_nxt_sqentry(sq, qentry);
			nicvf_sq_add_gather_subdesc(sq, qentry, size,
						    virt_to_phys(tso.data));
			seg_subdescs++;
			seg_len += size;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
		nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
					 seg_subdescs - 1, skb, seg_len);
		sq->skbuff[hdr_qentry] = (u64)NULL;
		qentry = nicvf_get_nxt_sqentry(sq, qentry);

		desc_cnt += seg_subdescs;
	}
	/* Save SKB in the last segment for freeing */
	sq->skbuff[hdr_qentry] = (u64)skb;

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit all TSO segments */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, desc_cnt);
	nic->drv_stats.tx_tso++;
	return 1;
}

/* Append an skb to a SQ for packet transfer. */
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
{
	int i, size;
	int subdesc_cnt, tso_sqe = 0;
	int sq_num, qentry;
	struct queue_set *qs;
	struct snd_queue *sq;

	sq_num = skb_get_queue_mapping(skb);
	if (sq_num >= MAX_SND_QUEUES_PER_QS) {
		/* Get secondary Qset's SQ structure */
		i = sq_num / MAX_SND_QUEUES_PER_QS;
		if (!nic->snicvf[i - 1]) {
			netdev_warn(nic->netdev,
				    "Secondary Qset#%d's ptr not initialized\n",
				    i - 1);
			return 1;
		}
		nic = (struct nicvf *)nic->snicvf[i - 1];
		sq_num = sq_num % MAX_SND_QUEUES_PER_QS;
	}

	qs = nic->qs;
	sq = &qs->sq[sq_num];

	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
	if (subdesc_cnt > atomic_read(&sq->free_cnt))
		goto append_fail;

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Check if it's a TSO packet */
	if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
		return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);

	/* Add SQ header subdesc */
	nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
				 skb, skb->len);
	tso_sqe = qentry;

	/* Add SQ gather subdescs */
	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));

	/* Check for scattered buffer */
	if (!skb_is_nonlinear(skb))
		goto doorbell;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];

		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		size = skb_frag_size(frag);
		nicvf_sq_add_gather_subdesc(sq, qentry, size,
					    virt_to_phys(
					    skb_frag_address(frag)));
	}

doorbell:
	if (nic->t88 && skb_shinfo(skb)->gso_size) {
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
	}

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit new packet */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, subdesc_cnt);
	return 1;

append_fail:
	/* Use original PCI dev for debug log */
	nic = nic->pnicvf;
	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
	return 0;
}

static inline unsigned frag_num(unsigned i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}
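
/*
 * The per-fragment lengths in the CQE are u16 lanes packed into u64
 * words; on a big-endian kernel the four lanes within each word appear
 * in reverse order, so frag_num() reverses the index within each group
 * of four (0,1,2,3 -> 3,2,1,0) to pick the correct length.
 */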

/* Returns SKB for a received packet */
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	int frag;
	int payload_len = 0;
	struct sk_buff *skb = NULL;
	struct page *page;
	int offset;
	u16 *rb_lens = NULL;
	u64 *rb_ptrs = NULL;

	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
	/* Except on 88xx pass1, CQE_RX2_S is appended to CQE_RX at word6
	 * on all chips, hence the buffer pointers move up by one word.
	 *
	 * Use the existing 'hw_tso' flag, which is set for all chips
	 * except 88xx pass1, instead of an additional cache line
	 * access (or miss) by using the pci dev's revision.
	 */
	if (!nic->hw_tso)
		rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
	else
		rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));

	netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
		   __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);

	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
		payload_len = rb_lens[frag_num(frag)];
		if (!frag) {
			/* First fragment */
			skb = nicvf_rb_ptr_to_skb(nic,
						  *rb_ptrs - cqe_rx->align_pad,
						  payload_len);
			if (!skb)
				return NULL;
			skb_reserve(skb, cqe_rx->align_pad);
			skb_put(skb, payload_len);
		} else {
			/* Add fragments */
			page = virt_to_page(phys_to_virt(*rb_ptrs));
			offset = phys_to_virt(*rb_ptrs) - page_address(page);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
					offset, payload_len, RCV_FRAG_LEN);
		}
		/* Next buffer pointer */
		rb_ptrs++;
	}
	return skb;
}

static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
{
	u64 reg_val;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		reg_val = 0;
	}

	return reg_val;
}

/* Enable interrupt */
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to enable interrupt: unknown type\n");
		return;
	}
	nicvf_reg_write(nic, NIC_VF_ENA_W1S,
			nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
}

/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to disable interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
}

/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to clear interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_INT, mask);
}

/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	/* If interrupt type is unknown, we treat it as disabled. */
	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to check interrupt enable: unknown type\n");
		return 0;
	}

	return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
}

void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
	struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	rq = &nic->qs->rq[rq_idx];
	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}

void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
	struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	sq = &nic->qs->sq[sq_idx];
	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}

/* Check for errors in the receive completion queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	struct nicvf_hw_stats *stats = &nic->hw_stats;

	if (!cqe_rx->err_level && !cqe_rx->err_opcode)
		return 0;

	if (netif_msg_rx_err(nic))
		netdev_err(nic->netdev,
			   "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
			   nic->netdev->name,
			   cqe_rx->err_level, cqe_rx->err_opcode);

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		stats->rx_bgx_truncated_pkts++;
		break;
	case CQ_RX_ERROP_RE_JABBER:
		stats->rx_jabber_errs++;
		break;
	case CQ_RX_ERROP_RE_FCS:
		stats->rx_fcs_errs++;
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		stats->rx_bgx_errs++;
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		stats->rx_prel2_errs++;
		break;
	case CQ_RX_ERROP_L2_MAL:
		stats->rx_l2_hdr_malformed++;
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		stats->rx_oversize++;
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		stats->rx_undersize++;
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		stats->rx_l2_len_mismatch++;
		break;
	case CQ_RX_ERROP_L2_PCLP:
		stats->rx_l2_pclp++;
		break;
	case CQ_RX_ERROP_IP_NOT:
		stats->rx_ip_ver_errs++;
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		stats->rx_ip_csum_errs++;
		break;
	case CQ_RX_ERROP_IP_MAL:
		stats->rx_ip_hdr_malformed++;
		break;
	case CQ_RX_ERROP_IP_MALD:
		stats->rx_ip_payload_malformed++;
		break;
	case CQ_RX_ERROP_IP_HOP:
		stats->rx_ip_ttl_errs++;
		break;
	case CQ_RX_ERROP_L3_PCLP:
		stats->rx_l3_pclp++;
		break;
	case CQ_RX_ERROP_L4_MAL:
		stats->rx_l4_malformed++;
		break;
	case CQ_RX_ERROP_L4_CHK:
		stats->rx_l4_csum_errs++;
		break;
	case CQ_RX_ERROP_UDP_LEN:
		stats->rx_udp_len_errs++;
		break;
	case CQ_RX_ERROP_L4_PORT:
		stats->rx_l4_port_errs++;
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		stats->rx_tcp_flag_errs++;
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		stats->rx_tcp_offset_errs++;
		break;
	case CQ_RX_ERROP_L4_PCLP:
		stats->rx_l4_pclp++;
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		stats->rx_truncated_pkts++;
		break;
	}

	return 1;
}

/* Check for errors in the send completion queue entry */
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_GOOD:
		stats->tx.good++;
		return 0;
	case CQ_TX_ERROP_DESC_FAULT:
		stats->tx.desc_fault++;
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		stats->tx.hdr_cons_err++;
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		stats->tx.subdesc_err++;
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		stats->tx.imm_size_oflow++;
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		stats->tx.data_seq_err++;
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		stats->tx.mem_seq_err++;
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		stats->tx.lock_viol++;
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		stats->tx.data_fault++;
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		stats->tx.tstmp_conflict++;
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		stats->tx.tstmp_timeout++;
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		stats->tx.mem_fault++;
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		stats->tx.csum_overlap++;
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		stats->tx.csum_overflow++;
		break;
	}

	return 1;
}