drivers/net/ethernet/intel/fm10k/fm10k_main.c
1/* Intel Ethernet Switch Host Interface Driver
2 * Copyright(c) 2013 - 2014 Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * The full GNU General Public License is included in this distribution in
14 * the file called "COPYING".
15 *
16 * Contact Information:
17 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
18 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
19 */
20
21#include <linux/types.h>
22#include <linux/module.h>
23#include <net/ipv6.h>
24#include <net/ip.h>
25#include <net/tcp.h>
26#include <linux/if_macvlan.h>
 27#include <linux/prefetch.h>
28
29#include "fm10k.h"
30
 31#define DRV_VERSION "0.15.2-k"
32const char fm10k_driver_version[] = DRV_VERSION;
33char fm10k_driver_name[] = "fm10k";
34static const char fm10k_driver_string[] =
35 "Intel(R) Ethernet Switch Host Interface Driver";
36static const char fm10k_copyright[] =
37 "Copyright (c) 2013 Intel Corporation.";
38
39MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
40MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver");
41MODULE_LICENSE("GPL");
42MODULE_VERSION(DRV_VERSION);
43
44/* single workqueue for entire fm10k driver */
45struct workqueue_struct *fm10k_workqueue = NULL;
46
47/**
48 * fm10k_init_module - Driver Registration Routine
49 *
50 * fm10k_init_module is the first routine called when the driver is
51 * loaded. All it does is register with the PCI subsystem.
52 **/
53static int __init fm10k_init_module(void)
54{
55 pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version);
56 pr_info("%s\n", fm10k_copyright);
57
58 /* create driver workqueue */
59 if (!fm10k_workqueue)
60 fm10k_workqueue = create_workqueue("fm10k");
61
62 fm10k_dbg_init();
63
64 return fm10k_register_pci_driver();
65}
66module_init(fm10k_init_module);
67
68/**
69 * fm10k_exit_module - Driver Exit Cleanup Routine
70 *
71 * fm10k_exit_module is called just before the driver is removed
72 * from memory.
73 **/
74static void __exit fm10k_exit_module(void)
75{
76 fm10k_unregister_pci_driver();
77
78 fm10k_dbg_exit();
79
80 /* destroy driver workqueue */
81 flush_workqueue(fm10k_workqueue);
82 destroy_workqueue(fm10k_workqueue);
83 fm10k_workqueue = NULL;
84}
85module_exit(fm10k_exit_module);
 86
87static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
88 struct fm10k_rx_buffer *bi)
89{
90 struct page *page = bi->page;
91 dma_addr_t dma;
92
93 /* Only page will be NULL if buffer was consumed */
94 if (likely(page))
95 return true;
96
97 /* alloc new page for storage */
 98	page = dev_alloc_page();
99 if (unlikely(!page)) {
100 rx_ring->rx_stats.alloc_failed++;
101 return false;
102 }
103
104 /* map page for use */
105 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
106
107 /* if mapping failed free memory back to system since
108 * there isn't much point in holding memory we can't use
109 */
110 if (dma_mapping_error(rx_ring->dev, dma)) {
111 __free_page(page);
112
113 rx_ring->rx_stats.alloc_failed++;
114 return false;
115 }
116
117 bi->dma = dma;
118 bi->page = page;
119 bi->page_offset = 0;
120
121 return true;
122}
123
124/**
125 * fm10k_alloc_rx_buffers - Replace used receive buffers
126 * @rx_ring: ring to place buffers on
127 * @cleaned_count: number of buffers to replace
128 **/
129void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
130{
131 union fm10k_rx_desc *rx_desc;
132 struct fm10k_rx_buffer *bi;
133 u16 i = rx_ring->next_to_use;
134
135 /* nothing to do */
136 if (!cleaned_count)
137 return;
138
139 rx_desc = FM10K_RX_DESC(rx_ring, i);
140 bi = &rx_ring->rx_buffer[i];
141 i -= rx_ring->count;
142
143 do {
144 if (!fm10k_alloc_mapped_page(rx_ring, bi))
145 break;
146
147 /* Refresh the desc even if buffer_addrs didn't change
148 * because each write-back erases this info.
149 */
150 rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
151
152 rx_desc++;
153 bi++;
154 i++;
155 if (unlikely(!i)) {
156 rx_desc = FM10K_RX_DESC(rx_ring, 0);
157 bi = rx_ring->rx_buffer;
158 i -= rx_ring->count;
159 }
160
161 /* clear the status bits for the next_to_use descriptor */
162 rx_desc->d.staterr = 0;
163
164 cleaned_count--;
165 } while (cleaned_count);
166
167 i += rx_ring->count;
168
169 if (rx_ring->next_to_use != i) {
170 /* record the next descriptor to use */
171 rx_ring->next_to_use = i;
172
173 /* update next to alloc since we have filled the ring */
174 rx_ring->next_to_alloc = i;
175
176 /* Force memory writes to complete before letting h/w
177 * know there are new descriptors to fetch. (Only
178 * applicable for weak-ordered memory model archs,
179 * such as IA-64).
180 */
181 wmb();
182
183 /* notify hardware of new descriptors */
184 writel(i, rx_ring->tail);
185 }
186}
187
188/**
189 * fm10k_reuse_rx_page - page flip buffer and store it back on the ring
190 * @rx_ring: rx descriptor ring to store buffers on
191 * @old_buff: donor buffer to have page reused
192 *
193 * Synchronizes page for reuse by the interface
194 **/
195static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
196 struct fm10k_rx_buffer *old_buff)
197{
198 struct fm10k_rx_buffer *new_buff;
199 u16 nta = rx_ring->next_to_alloc;
200
201 new_buff = &rx_ring->rx_buffer[nta];
202
203 /* update, and store next to alloc */
204 nta++;
205 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
206
207 /* transfer page from old buffer to new buffer */
 208	*new_buff = *old_buff;
209
210 /* sync the buffer for use by the device */
211 dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
212 old_buff->page_offset,
213 FM10K_RX_BUFSZ,
214 DMA_FROM_DEVICE);
215}
216
217static inline bool fm10k_page_is_reserved(struct page *page)
218{
219 return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
220}
221
222static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
223 struct page *page,
 224				    unsigned int __maybe_unused truesize)
225{
226 /* avoid re-using remote pages */
 227	if (unlikely(fm10k_page_is_reserved(page)))
228 return false;
229
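	/* Note: with PAGE_SIZE < 8192 each page holds two FM10K_RX_BUFSZ
	 * halves, so XORing the offset below simply ping-pongs between
	 * them; on larger pages the offset instead walks forward by
	 * truesize until the page runs out of room.
	 */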
230#if (PAGE_SIZE < 8192)
231 /* if we are only owner of page we can reuse it */
232 if (unlikely(page_count(page) != 1))
233 return false;
234
235 /* flip page offset to other buffer */
236 rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
237#else
238 /* move offset up to the next cache line */
239 rx_buffer->page_offset += truesize;
240
241 if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
242 return false;
243#endif
244
 245	/* Even if we own the page, we are not allowed to use atomic_set();
 246	 * that would break get_page_unless_zero() users.
247 */
248 atomic_inc(&page->_count);
249
250 return true;
251}
252
253/**
254 * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff
255 * @rx_buffer: buffer containing page to add
256 * @rx_desc: descriptor containing length of buffer written by hardware
257 * @skb: sk_buff to place the data into
258 *
259 * This function will add the data contained in rx_buffer->page to the skb.
260 * This is done either through a direct copy if the data in the buffer is
261 * less than the skb header size, otherwise it will just attach the page as
262 * a frag to the skb.
263 *
264 * The function will then update the page offset if necessary and return
265 * true if the buffer can be reused by the interface.
266 **/
 267static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
268 union fm10k_rx_desc *rx_desc,
269 struct sk_buff *skb)
270{
271 struct page *page = rx_buffer->page;
 272	unsigned char *va = page_address(page) + rx_buffer->page_offset;
273 unsigned int size = le16_to_cpu(rx_desc->w.length);
274#if (PAGE_SIZE < 8192)
275 unsigned int truesize = FM10K_RX_BUFSZ;
276#else
 277	unsigned int truesize = SKB_DATA_ALIGN(size);
 278#endif
 279	unsigned int pull_len;
 280
281 if (unlikely(skb_is_nonlinear(skb)))
282 goto add_tail_frag;
 283
 284	if (likely(size <= FM10K_RX_HDR_LEN)) {
285 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
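		/* the copy above is deliberately rounded up to a multiple of
		 * sizeof(long); size is at most FM10K_RX_HDR_LEN here and the
		 * skb was allocated with that much room, so the extra bytes
		 * land in unused tailroom and the aligned copy is cheaper
		 */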
286
287 /* page is not reserved, we can reuse buffer as-is */
288 if (likely(!fm10k_page_is_reserved(page)))
289 return true;
290
291 /* this page cannot be reused so discard it */
 292		__free_page(page);
293 return false;
294 }
295
296 /* we need the header to contain the greater of either ETH_HLEN or
297 * 60 bytes if the skb->len is less than 60 for skb_pad.
298 */
299 pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);
300
301 /* align pull length to size of long to optimize memcpy performance */
302 memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
303
304 /* update all of the pointers */
305 va += pull_len;
306 size -= pull_len;
307
308add_tail_frag:
 309	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 310			(unsigned long)va & ~PAGE_MASK, size, truesize);
311
312 return fm10k_can_reuse_rx_page(rx_buffer, page, truesize);
313}
314
315static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
316 union fm10k_rx_desc *rx_desc,
317 struct sk_buff *skb)
318{
319 struct fm10k_rx_buffer *rx_buffer;
320 struct page *page;
321
322 rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];
323 page = rx_buffer->page;
324 prefetchw(page);
325
326 if (likely(!skb)) {
327 void *page_addr = page_address(page) +
328 rx_buffer->page_offset;
329
330 /* prefetch first cache line of first page */
331 prefetch(page_addr);
332#if L1_CACHE_BYTES < 128
333 prefetch(page_addr + L1_CACHE_BYTES);
334#endif
335
336 /* allocate a skb to store the frags */
337 skb = napi_alloc_skb(&rx_ring->q_vector->napi,
338 FM10K_RX_HDR_LEN);
339 if (unlikely(!skb)) {
340 rx_ring->rx_stats.alloc_failed++;
341 return NULL;
342 }
343
344 /* we will be copying header into skb->data in
345 * pskb_may_pull so it is in our interest to prefetch
346 * it now to avoid a possible cache miss
347 */
348 prefetchw(skb->data);
349 }
350
351 /* we are reusing so sync this buffer for CPU use */
352 dma_sync_single_range_for_cpu(rx_ring->dev,
353 rx_buffer->dma,
354 rx_buffer->page_offset,
355 FM10K_RX_BUFSZ,
356 DMA_FROM_DEVICE);
357
358 /* pull page into skb */
 359	if (fm10k_add_rx_frag(rx_buffer, rx_desc, skb)) {
360 /* hand second half of page back to the ring */
361 fm10k_reuse_rx_page(rx_ring, rx_buffer);
362 } else {
363 /* we are not reusing the buffer so unmap it */
364 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
365 PAGE_SIZE, DMA_FROM_DEVICE);
366 }
367
368 /* clear contents of rx_buffer */
369 rx_buffer->page = NULL;
370
371 return skb;
372}
373
374static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
375 union fm10k_rx_desc *rx_desc,
376 struct sk_buff *skb)
377{
378 skb_checksum_none_assert(skb);
379
380 /* Rx checksum disabled via ethtool */
381 if (!(ring->netdev->features & NETIF_F_RXCSUM))
382 return;
383
384 /* TCP/UDP checksum error bit is set */
385 if (fm10k_test_staterr(rx_desc,
386 FM10K_RXD_STATUS_L4E |
387 FM10K_RXD_STATUS_L4E2 |
388 FM10K_RXD_STATUS_IPE |
389 FM10K_RXD_STATUS_IPE2)) {
390 ring->rx_stats.csum_err++;
391 return;
392 }
393
394 /* It must be a TCP or UDP packet with a valid checksum */
395 if (fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS2))
396 skb->encapsulation = true;
397 else if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS))
398 return;
399
400 skb->ip_summed = CHECKSUM_UNNECESSARY;
401}
402
403#define FM10K_RSS_L4_TYPES_MASK \
404 ((1ul << FM10K_RSSTYPE_IPV4_TCP) | \
405 (1ul << FM10K_RSSTYPE_IPV4_UDP) | \
406 (1ul << FM10K_RSSTYPE_IPV6_TCP) | \
407 (1ul << FM10K_RSSTYPE_IPV6_UDP))
408
409static inline void fm10k_rx_hash(struct fm10k_ring *ring,
410 union fm10k_rx_desc *rx_desc,
411 struct sk_buff *skb)
412{
413 u16 rss_type;
414
415 if (!(ring->netdev->features & NETIF_F_RXHASH))
416 return;
417
418 rss_type = le16_to_cpu(rx_desc->w.pkt_info) & FM10K_RXD_RSSTYPE_MASK;
419 if (!rss_type)
420 return;
421
422 skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss),
423 (FM10K_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
424 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
425}
426
427static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring,
428 union fm10k_rx_desc *rx_desc,
429 struct sk_buff *skb)
430{
431 struct fm10k_intfc *interface = rx_ring->q_vector->interface;
432
433 FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;
434
435 if (unlikely(interface->flags & FM10K_FLAG_RX_TS_ENABLED))
436 fm10k_systime_to_hwtstamp(interface, skb_hwtstamps(skb),
437 le64_to_cpu(rx_desc->q.timestamp));
438}
439
 440static void fm10k_type_trans(struct fm10k_ring *rx_ring,
 441			     union fm10k_rx_desc __maybe_unused *rx_desc,
442 struct sk_buff *skb)
443{
444 struct net_device *dev = rx_ring->netdev;
445 struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel);
446
447 /* check to see if DGLORT belongs to a MACVLAN */
448 if (l2_accel) {
449 u16 idx = le16_to_cpu(FM10K_CB(skb)->fi.w.dglort) - 1;
450
451 idx -= l2_accel->dglort;
452 if (idx < l2_accel->size && l2_accel->macvlan[idx])
453 dev = l2_accel->macvlan[idx];
454 else
455 l2_accel = NULL;
456 }
457
458 skb->protocol = eth_type_trans(skb, dev);
459
460 if (!l2_accel)
461 return;
462
463 /* update MACVLAN statistics */
464 macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, 1,
465 !!(rx_desc->w.hdr_info &
466 cpu_to_le16(FM10K_RXD_HDR_INFO_XC_MASK)));
467}
468
469/**
470 * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor
471 * @rx_ring: rx descriptor ring packet is being transacted on
472 * @rx_desc: pointer to the EOP Rx descriptor
473 * @skb: pointer to current skb being populated
474 *
475 * This function checks the ring, descriptor, and packet information in
476 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
477 * other fields within the skb.
478 **/
479static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
480 union fm10k_rx_desc *rx_desc,
481 struct sk_buff *skb)
482{
483 unsigned int len = skb->len;
484
485 fm10k_rx_hash(rx_ring, rx_desc, skb);
486
487 fm10k_rx_checksum(rx_ring, rx_desc, skb);
488
489 fm10k_rx_hwtstamp(rx_ring, rx_desc, skb);
490
491 FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;
492
493 skb_record_rx_queue(skb, rx_ring->queue_index);
494
495 FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;
496
497 if (rx_desc->w.vlan) {
498 u16 vid = le16_to_cpu(rx_desc->w.vlan);
499
500 if (vid != rx_ring->vid)
501 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
502 }
503
 504	fm10k_type_trans(rx_ring, rx_desc, skb);
505
506 return len;
507}
508
509/**
510 * fm10k_is_non_eop - process handling of non-EOP buffers
511 * @rx_ring: Rx ring being processed
512 * @rx_desc: Rx descriptor for current buffer
513 *
514 * This function updates next to clean. If the buffer is an EOP buffer
515 * this function exits returning false, otherwise it will place the
516 * sk_buff in the next buffer to be chained and return true indicating
517 * that this is in fact a non-EOP buffer.
518 **/
519static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
520 union fm10k_rx_desc *rx_desc)
521{
522 u32 ntc = rx_ring->next_to_clean + 1;
523
524 /* fetch, update, and store next to clean */
525 ntc = (ntc < rx_ring->count) ? ntc : 0;
526 rx_ring->next_to_clean = ntc;
527
528 prefetch(FM10K_RX_DESC(rx_ring, ntc));
529
530 if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP)))
531 return false;
532
533 return true;
534}
535
536/**
537 * fm10k_cleanup_headers - Correct corrupted or empty headers
538 * @rx_ring: rx descriptor ring packet is being transacted on
539 * @rx_desc: pointer to the EOP Rx descriptor
540 * @skb: pointer to current skb being fixed
541 *
542 * Address the case where we are pulling data in on pages only
543 * and as such no data is present in the skb header.
544 *
545 * In addition if skb is not at least 60 bytes we need to pad it so that
546 * it is large enough to qualify as a valid Ethernet frame.
547 *
548 * Returns true if an error was encountered and skb was freed.
549 **/
550static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
551 union fm10k_rx_desc *rx_desc,
552 struct sk_buff *skb)
553{
554 if (unlikely((fm10k_test_staterr(rx_desc,
555 FM10K_RXD_STATUS_RXE)))) {
556 dev_kfree_skb_any(skb);
557 rx_ring->rx_stats.errors++;
558 return true;
559 }
560
561 /* if eth_skb_pad returns an error the skb was freed */
562 if (eth_skb_pad(skb))
563 return true;
564
565 return false;
566}
567
568/**
569 * fm10k_receive_skb - helper function to handle rx indications
570 * @q_vector: structure containing interrupt and ring information
571 * @skb: packet to send up
572 **/
573static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
574 struct sk_buff *skb)
575{
576 napi_gro_receive(&q_vector->napi, skb);
577}
578
579static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
580 struct fm10k_ring *rx_ring,
581 int budget)
582{
583 struct sk_buff *skb = rx_ring->skb;
584 unsigned int total_bytes = 0, total_packets = 0;
585 u16 cleaned_count = fm10k_desc_unused(rx_ring);
586
 587	while (likely(total_packets < budget)) {
588 union fm10k_rx_desc *rx_desc;
589
590 /* return some buffers to hardware, one at a time is too slow */
591 if (cleaned_count >= FM10K_RX_BUFFER_WRITE) {
592 fm10k_alloc_rx_buffers(rx_ring, cleaned_count);
593 cleaned_count = 0;
594 }
595
596 rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);
597
 598		if (!rx_desc->d.staterr)
599 break;
600
601 /* This memory barrier is needed to keep us from reading
602 * any other fields out of the rx_desc until we know the
 603		 * descriptor has been written back
 604		 */
 605		dma_rmb();
606
607 /* retrieve a buffer from the ring */
608 skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);
609
610 /* exit if we failed to retrieve a buffer */
611 if (!skb)
612 break;
613
614 cleaned_count++;
615
616 /* fetch next buffer in frame if non-eop */
617 if (fm10k_is_non_eop(rx_ring, rx_desc))
618 continue;
619
620 /* verify the packet layout is correct */
621 if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) {
622 skb = NULL;
623 continue;
624 }
625
626 /* populate checksum, timestamp, VLAN, and protocol */
627 total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb);
628
629 fm10k_receive_skb(q_vector, skb);
630
631 /* reset skb pointer */
632 skb = NULL;
633
634 /* update budget accounting */
635 total_packets++;
 636	}
637
638 /* place incomplete frames back on ring for completion */
639 rx_ring->skb = skb;
640
641 u64_stats_update_begin(&rx_ring->syncp);
642 rx_ring->stats.packets += total_packets;
643 rx_ring->stats.bytes += total_bytes;
644 u64_stats_update_end(&rx_ring->syncp);
645 q_vector->rx.total_packets += total_packets;
646 q_vector->rx.total_bytes += total_bytes;
647
648 return total_packets < budget;
649}
650
651#define VXLAN_HLEN (sizeof(struct udphdr) + 8)
652static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
653{
654 struct fm10k_intfc *interface = netdev_priv(skb->dev);
655 struct fm10k_vxlan_port *vxlan_port;
656
657 /* we can only offload a vxlan if we recognize it as such */
658 vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
659 struct fm10k_vxlan_port, list);
660
661 if (!vxlan_port)
662 return NULL;
663 if (vxlan_port->port != udp_hdr(skb)->dest)
664 return NULL;
665
666 /* return offset of udp_hdr plus 8 bytes for VXLAN header */
667 return (struct ethhdr *)(skb_transport_header(skb) + VXLAN_HLEN);
668}
669
670#define FM10K_NVGRE_RESERVED0_FLAGS htons(0x9FFF)
671#define NVGRE_TNI htons(0x2000)
672struct fm10k_nvgre_hdr {
673 __be16 flags;
674 __be16 proto;
675 __be32 tni;
676};
677
678static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
679{
680 struct fm10k_nvgre_hdr *nvgre_hdr;
681 int hlen = ip_hdrlen(skb);
682
683 /* currently only IPv4 is supported due to hlen above */
684 if (vlan_get_protocol(skb) != htons(ETH_P_IP))
685 return NULL;
686
687 /* our transport header should be NVGRE */
688 nvgre_hdr = (struct fm10k_nvgre_hdr *)(skb_network_header(skb) + hlen);
689
690 /* verify all reserved flags are 0 */
691 if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS)
692 return NULL;
693
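	/* the optional 4-byte TNI/key field is only present when the TNI
	 * flag is set, so the inner Ethernet header starts either just past
	 * the full NVGRE header or at the spot where the TNI would sit
	 */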
694 /* report start of ethernet header */
695 if (nvgre_hdr->flags & NVGRE_TNI)
696 return (struct ethhdr *)(nvgre_hdr + 1);
697
698 return (struct ethhdr *)(&nvgre_hdr->tni);
699}
700
 701__be16 fm10k_tx_encap_offload(struct sk_buff *skb)
 702{
 703	u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen;
 704	struct ethhdr *eth_hdr;
 705
706 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
707 skb->inner_protocol != htons(ETH_P_TEB))
708 return 0;
709
710 switch (vlan_get_protocol(skb)) {
711 case htons(ETH_P_IP):
712 l4_hdr = ip_hdr(skb)->protocol;
713 break;
714 case htons(ETH_P_IPV6):
715 l4_hdr = ipv6_hdr(skb)->nexthdr;
716 break;
717 default:
718 return 0;
719 }
720
721 switch (l4_hdr) {
722 case IPPROTO_UDP:
723 eth_hdr = fm10k_port_is_vxlan(skb);
724 break;
725 case IPPROTO_GRE:
726 eth_hdr = fm10k_gre_is_nvgre(skb);
727 break;
728 default:
729 return 0;
730 }
731
732 if (!eth_hdr)
733 return 0;
734
735 switch (eth_hdr->h_proto) {
736 case htons(ETH_P_IP):
737 inner_l4_hdr = inner_ip_hdr(skb)->protocol;
738 break;
 739	case htons(ETH_P_IPV6):
 740		inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr;
741 break;
742 default:
743 return 0;
744 }
745
746 switch (inner_l4_hdr) {
747 case IPPROTO_TCP:
748 inner_l4_hlen = inner_tcp_hdrlen(skb);
749 break;
750 case IPPROTO_UDP:
751 inner_l4_hlen = 8;
752 break;
753 default:
754 return 0;
755 }
756
757 /* The hardware allows tunnel offloads only if the combined inner and
758 * outer header is 184 bytes or less
759 */
760 if (skb_inner_transport_header(skb) + inner_l4_hlen -
761 skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH)
762 return 0;
763
764 return eth_hdr->h_proto;
765}
766
767static int fm10k_tso(struct fm10k_ring *tx_ring,
768 struct fm10k_tx_buffer *first)
769{
770 struct sk_buff *skb = first->skb;
771 struct fm10k_tx_desc *tx_desc;
772 unsigned char *th;
773 u8 hdrlen;
774
775 if (skb->ip_summed != CHECKSUM_PARTIAL)
776 return 0;
777
778 if (!skb_is_gso(skb))
779 return 0;
780
781 /* compute header lengths */
782 if (skb->encapsulation) {
783 if (!fm10k_tx_encap_offload(skb))
784 goto err_vxlan;
785 th = skb_inner_transport_header(skb);
786 } else {
787 th = skb_transport_header(skb);
788 }
789
790 /* compute offset from SOF to transport header and add header len */
791 hdrlen = (th - skb->data) + (((struct tcphdr *)th)->doff << 2);
792
793 first->tx_flags |= FM10K_TX_FLAGS_CSUM;
794
795 /* update gso size and bytecount with header size */
796 first->gso_segs = skb_shinfo(skb)->gso_segs;
797 first->bytecount += (first->gso_segs - 1) * hdrlen;
798
799 /* populate Tx descriptor header size and mss */
800 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
801 tx_desc->hdrlen = hdrlen;
802 tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
803
804 return 1;
805err_vxlan:
806 tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
807 if (!net_ratelimit())
808 netdev_err(tx_ring->netdev,
809 "TSO requested for unsupported tunnel, disabling offload\n");
810 return -1;
811}
812
813static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
814 struct fm10k_tx_buffer *first)
815{
816 struct sk_buff *skb = first->skb;
817 struct fm10k_tx_desc *tx_desc;
818 union {
819 struct iphdr *ipv4;
820 struct ipv6hdr *ipv6;
821 u8 *raw;
822 } network_hdr;
823 __be16 protocol;
824 u8 l4_hdr = 0;
825
826 if (skb->ip_summed != CHECKSUM_PARTIAL)
827 goto no_csum;
828
829 if (skb->encapsulation) {
830 protocol = fm10k_tx_encap_offload(skb);
831 if (!protocol) {
832 if (skb_checksum_help(skb)) {
833 dev_warn(tx_ring->dev,
834 "failed to offload encap csum!\n");
835 tx_ring->tx_stats.csum_err++;
836 }
837 goto no_csum;
838 }
839 network_hdr.raw = skb_inner_network_header(skb);
840 } else {
841 protocol = vlan_get_protocol(skb);
842 network_hdr.raw = skb_network_header(skb);
843 }
844
845 switch (protocol) {
846 case htons(ETH_P_IP):
847 l4_hdr = network_hdr.ipv4->protocol;
848 break;
849 case htons(ETH_P_IPV6):
850 l4_hdr = network_hdr.ipv6->nexthdr;
851 break;
852 default:
853 if (unlikely(net_ratelimit())) {
854 dev_warn(tx_ring->dev,
855 "partial checksum but ip version=%x!\n",
856 protocol);
857 }
858 tx_ring->tx_stats.csum_err++;
859 goto no_csum;
860 }
861
862 switch (l4_hdr) {
863 case IPPROTO_TCP:
864 case IPPROTO_UDP:
865 break;
866 case IPPROTO_GRE:
867 if (skb->encapsulation)
868 break;
869 default:
870 if (unlikely(net_ratelimit())) {
871 dev_warn(tx_ring->dev,
872 "partial checksum but l4 proto=%x!\n",
873 l4_hdr);
874 }
875 tx_ring->tx_stats.csum_err++;
876 goto no_csum;
877 }
878
879 /* update TX checksum flag */
880 first->tx_flags |= FM10K_TX_FLAGS_CSUM;
881
882no_csum:
883 /* populate Tx descriptor header size and mss */
884 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
885 tx_desc->hdrlen = 0;
886 tx_desc->mss = 0;
887}
888
889#define FM10K_SET_FLAG(_input, _flag, _result) \
890 ((_flag <= _result) ? \
891 ((u32)(_input & _flag) * (_result / _flag)) : \
892 ((u32)(_input & _flag) / (_flag / _result)))
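/* FM10K_SET_FLAG() moves a bit from the software tx_flags word into its
 * position in the hardware descriptor flags: when the source bit is at or
 * below the destination it scales up with a multiply, otherwise it scales
 * down with a divide, and both collapse to constant shifts at compile time,
 * e.g. FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM, FM10K_TXD_FLAG_CSUM).
 */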
893
894static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
895{
896 /* set type for advanced descriptor with frame checksum insertion */
897 u32 desc_flags = 0;
898
899 /* set timestamping bits */
900 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
901 likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
902 desc_flags |= FM10K_TXD_FLAG_TIME;
903
904 /* set checksum offload bits */
905 desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
906 FM10K_TXD_FLAG_CSUM);
907
908 return desc_flags;
909}
910
911static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
912 struct fm10k_tx_desc *tx_desc, u16 i,
913 dma_addr_t dma, unsigned int size, u8 desc_flags)
914{
915 /* set RS and INT for last frame in a cache line */
916 if ((++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0)
917 desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT;
918
919 /* record values to descriptor */
920 tx_desc->buffer_addr = cpu_to_le64(dma);
921 tx_desc->flags = desc_flags;
922 tx_desc->buflen = cpu_to_le16(size);
923
924 /* return true if we just wrapped the ring */
925 return i == tx_ring->count;
926}
927
928static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
929{
930 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
931
 932	/* Memory barrier before checking head and tail */
933 smp_mb();
934
 935	/* Check again in case another CPU has just made room available */
936 if (likely(fm10k_desc_unused(tx_ring) < size))
937 return -EBUSY;
938
939 /* A reprieve! - use start_queue because it doesn't call schedule */
940 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
941 ++tx_ring->tx_stats.restart_queue;
942 return 0;
943}
944
945static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
946{
947 if (likely(fm10k_desc_unused(tx_ring) >= size))
948 return 0;
949 return __fm10k_maybe_stop_tx(tx_ring, size);
950}
951
952static void fm10k_tx_map(struct fm10k_ring *tx_ring,
953 struct fm10k_tx_buffer *first)
954{
955 struct sk_buff *skb = first->skb;
956 struct fm10k_tx_buffer *tx_buffer;
957 struct fm10k_tx_desc *tx_desc;
958 struct skb_frag_struct *frag;
959 unsigned char *data;
960 dma_addr_t dma;
961 unsigned int data_len, size;
 962	u32 tx_flags = first->tx_flags;
 963	u16 i = tx_ring->next_to_use;
 964	u8 flags = fm10k_tx_desc_flags(skb, tx_flags);
965
966 tx_desc = FM10K_TX_DESC(tx_ring, i);
967
968 /* add HW VLAN tag */
969 if (skb_vlan_tag_present(skb))
970 tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
971 else
972 tx_desc->vlan = 0;
973
974 size = skb_headlen(skb);
975 data = skb->data;
976
977 dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
978
979 data_len = skb->data_len;
980 tx_buffer = first;
981
982 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
983 if (dma_mapping_error(tx_ring->dev, dma))
984 goto dma_error;
985
986 /* record length, and DMA address */
987 dma_unmap_len_set(tx_buffer, len, size);
988 dma_unmap_addr_set(tx_buffer, dma, dma);
989
990 while (unlikely(size > FM10K_MAX_DATA_PER_TXD)) {
991 if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma,
992 FM10K_MAX_DATA_PER_TXD, flags)) {
993 tx_desc = FM10K_TX_DESC(tx_ring, 0);
994 i = 0;
995 }
996
997 dma += FM10K_MAX_DATA_PER_TXD;
998 size -= FM10K_MAX_DATA_PER_TXD;
999 }
1000
1001 if (likely(!data_len))
1002 break;
1003
1004 if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++,
1005 dma, size, flags)) {
1006 tx_desc = FM10K_TX_DESC(tx_ring, 0);
1007 i = 0;
1008 }
1009
1010 size = skb_frag_size(frag);
1011 data_len -= size;
1012
1013 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1014 DMA_TO_DEVICE);
1015
1016 tx_buffer = &tx_ring->tx_buffer[i];
1017 }
1018
1019 /* write last descriptor with LAST bit set */
1020 flags |= FM10K_TXD_FLAG_LAST;
1021
1022 if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags))
1023 i = 0;
1024
1025 /* record bytecount for BQL */
1026 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1027
1028 /* record SW timestamp if HW timestamp is not available */
1029 skb_tx_timestamp(first->skb);
1030
1031 /* Force memory writes to complete before letting h/w know there
1032 * are new descriptors to fetch. (Only applicable for weak-ordered
1033 * memory model archs, such as IA-64).
1034 *
1035 * We also need this memory barrier to make certain all of the
1036 * status bits have been updated before next_to_watch is written.
1037 */
1038 wmb();
1039
1040 /* set next_to_watch value indicating a packet is present */
1041 first->next_to_watch = tx_desc;
1042
1043 tx_ring->next_to_use = i;
1044
1045 /* Make sure there is space in the ring for the next send. */
1046 fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
1047
1048	/* notify HW of packet */
1049 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
1050 writel(i, tx_ring->tail);
1051
1052 /* we need this if more than one processor can write to our tail
1053	 * at a time; it synchronizes IO on IA64/Altix systems
1054 */
1055 mmiowb();
1056 }
1057
1058 return;
1059dma_error:
1060 dev_err(tx_ring->dev, "TX DMA map failed\n");
1061
1062 /* clear dma mappings for failed tx_buffer map */
1063 for (;;) {
1064 tx_buffer = &tx_ring->tx_buffer[i];
1065 fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
1066 if (tx_buffer == first)
1067 break;
1068 if (i == 0)
1069 i = tx_ring->count;
1070 i--;
1071 }
1072
1073 tx_ring->next_to_use = i;
1074}
1075
1076netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
1077 struct fm10k_ring *tx_ring)
1078{
1079 struct fm10k_tx_buffer *first;
1080	int tso;
1081 u32 tx_flags = 0;
1082#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
1083 unsigned short f;
1084#endif
1085 u16 count = TXD_USE_COUNT(skb_headlen(skb));
1086
1087 /* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
1088 * + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD,
1089 * + 2 desc gap to keep tail from touching head
1090 * otherwise try next time
1091 */
1092#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
1093 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1094 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
1095#else
1096 count += skb_shinfo(skb)->nr_frags;
1097#endif
1098 if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
1099 tx_ring->tx_stats.tx_busy++;
1100 return NETDEV_TX_BUSY;
1101 }
1102
1103 /* record the location of the first descriptor for this packet */
1104 first = &tx_ring->tx_buffer[tx_ring->next_to_use];
1105 first->skb = skb;
1106 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
1107 first->gso_segs = 1;
1108
1109 /* record initial flags and protocol */
1110 first->tx_flags = tx_flags;
1111
1112 tso = fm10k_tso(tx_ring, first);
1113 if (tso < 0)
1114 goto out_drop;
1115 else if (!tso)
1116 fm10k_tx_csum(tx_ring, first);
1117
1118 fm10k_tx_map(tx_ring, first);
1119
1120 return NETDEV_TX_OK;
1121
1122out_drop:
1123 dev_kfree_skb_any(first->skb);
1124 first->skb = NULL;
1125
1126 return NETDEV_TX_OK;
1127}
1128
1129static u64 fm10k_get_tx_completed(struct fm10k_ring *ring)
1130{
1131 return ring->stats.packets;
1132}
1133
1134static u64 fm10k_get_tx_pending(struct fm10k_ring *ring)
1135{
1136 /* use SW head and tail until we have real hardware */
1137 u32 head = ring->next_to_clean;
1138 u32 tail = ring->next_to_use;
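	/* next_to_use may have wrapped around behind next_to_clean; adding
	 * ring->count in that case keeps the pending count in the range
	 * [0, ring->count).
	 */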
1139
1140 return ((head <= tail) ? tail : tail + ring->count) - head;
1141}
1142
1143bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
1144{
1145 u32 tx_done = fm10k_get_tx_completed(tx_ring);
1146 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
1147 u32 tx_pending = fm10k_get_tx_pending(tx_ring);
1148
1149 clear_check_for_tx_hang(tx_ring);
1150
1151 /* Check for a hung queue, but be thorough. This verifies
1152 * that a transmit has been completed since the previous
1153 * check AND there is at least one packet pending. By
1154 * requiring this to fail twice we avoid races with
1155 * clearing the ARMED bit and conditions where we
1156 * run the check_tx_hang logic with a transmit completion
1157 * pending but without time to complete it yet.
1158 */
1159 if (!tx_pending || (tx_done_old != tx_done)) {
1160 /* update completed stats and continue */
1161 tx_ring->tx_stats.tx_done_old = tx_done;
1162 /* reset the countdown */
1163 clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);
1164
1165 return false;
1166 }
1167
1168 /* make sure it is true for two checks in a row */
1169 return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);
1170}
1171
1172/**
1173 * fm10k_tx_timeout_reset - initiate reset due to Tx timeout
1174 * @interface: driver private struct
1175 **/
1176void fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
1177{
1178 /* Do the reset outside of interrupt context */
1179 if (!test_bit(__FM10K_DOWN, &interface->state)) {
1180 interface->tx_timeout_count++;
1181 interface->flags |= FM10K_FLAG_RESET_REQUESTED;
1182 fm10k_service_event_schedule(interface);
1183 }
1184}
1185
1186/**
1187 * fm10k_clean_tx_irq - Reclaim resources after transmit completes
1188 * @q_vector: structure containing interrupt and ring information
1189 * @tx_ring: tx ring to clean
1190 **/
1191static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
1192 struct fm10k_ring *tx_ring)
1193{
1194 struct fm10k_intfc *interface = q_vector->interface;
1195 struct fm10k_tx_buffer *tx_buffer;
1196 struct fm10k_tx_desc *tx_desc;
1197 unsigned int total_bytes = 0, total_packets = 0;
1198 unsigned int budget = q_vector->tx.work_limit;
1199 unsigned int i = tx_ring->next_to_clean;
1200
1201 if (test_bit(__FM10K_DOWN, &interface->state))
1202 return true;
1203
1204 tx_buffer = &tx_ring->tx_buffer[i];
1205 tx_desc = FM10K_TX_DESC(tx_ring, i);
1206 i -= tx_ring->count;
1207
1208 do {
1209 struct fm10k_tx_desc *eop_desc = tx_buffer->next_to_watch;
1210
1211 /* if next_to_watch is not set then there is no work pending */
1212 if (!eop_desc)
1213 break;
1214
1215 /* prevent any other reads prior to eop_desc */
1216 read_barrier_depends();
1217
1218 /* if DD is not set pending work has not been completed */
1219 if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
1220 break;
1221
1222 /* clear next_to_watch to prevent false hangs */
1223 tx_buffer->next_to_watch = NULL;
1224
1225 /* update the statistics for this packet */
1226 total_bytes += tx_buffer->bytecount;
1227 total_packets += tx_buffer->gso_segs;
1228
1229 /* free the skb */
1230 dev_consume_skb_any(tx_buffer->skb);
1231
1232 /* unmap skb header data */
1233 dma_unmap_single(tx_ring->dev,
1234 dma_unmap_addr(tx_buffer, dma),
1235 dma_unmap_len(tx_buffer, len),
1236 DMA_TO_DEVICE);
1237
1238 /* clear tx_buffer data */
1239 tx_buffer->skb = NULL;
1240 dma_unmap_len_set(tx_buffer, len, 0);
1241
1242 /* unmap remaining buffers */
1243 while (tx_desc != eop_desc) {
1244 tx_buffer++;
1245 tx_desc++;
1246 i++;
1247 if (unlikely(!i)) {
1248 i -= tx_ring->count;
1249 tx_buffer = tx_ring->tx_buffer;
1250 tx_desc = FM10K_TX_DESC(tx_ring, 0);
1251 }
1252
1253 /* unmap any remaining paged data */
1254 if (dma_unmap_len(tx_buffer, len)) {
1255 dma_unmap_page(tx_ring->dev,
1256 dma_unmap_addr(tx_buffer, dma),
1257 dma_unmap_len(tx_buffer, len),
1258 DMA_TO_DEVICE);
1259 dma_unmap_len_set(tx_buffer, len, 0);
1260 }
1261 }
1262
1263 /* move us one more past the eop_desc for start of next pkt */
1264 tx_buffer++;
1265 tx_desc++;
1266 i++;
1267 if (unlikely(!i)) {
1268 i -= tx_ring->count;
1269 tx_buffer = tx_ring->tx_buffer;
1270 tx_desc = FM10K_TX_DESC(tx_ring, 0);
1271 }
1272
1273 /* issue prefetch for next Tx descriptor */
1274 prefetch(tx_desc);
1275
1276 /* update budget accounting */
1277 budget--;
1278 } while (likely(budget));
1279
1280 i += tx_ring->count;
1281 tx_ring->next_to_clean = i;
1282 u64_stats_update_begin(&tx_ring->syncp);
1283 tx_ring->stats.bytes += total_bytes;
1284 tx_ring->stats.packets += total_packets;
1285 u64_stats_update_end(&tx_ring->syncp);
1286 q_vector->tx.total_bytes += total_bytes;
1287 q_vector->tx.total_packets += total_packets;
1288
1289 if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) {
1290 /* schedule immediate reset if we believe we hung */
1291 struct fm10k_hw *hw = &interface->hw;
1292
1293 netif_err(interface, drv, tx_ring->netdev,
1294 "Detected Tx Unit Hang\n"
1295 " Tx Queue <%d>\n"
1296 " TDH, TDT <%x>, <%x>\n"
1297 " next_to_use <%x>\n"
1298 " next_to_clean <%x>\n",
1299 tx_ring->queue_index,
1300 fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)),
1301 fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)),
1302 tx_ring->next_to_use, i);
1303
1304 netif_stop_subqueue(tx_ring->netdev,
1305 tx_ring->queue_index);
1306
1307 netif_info(interface, probe, tx_ring->netdev,
1308 "tx hang %d detected on queue %d, resetting interface\n",
1309 interface->tx_timeout_count + 1,
1310 tx_ring->queue_index);
1311
1312 fm10k_tx_timeout_reset(interface);
1313
1314 /* the netdev is about to reset, no point in enabling stuff */
1315 return true;
1316 }
1317
1318 /* notify netdev of completed buffers */
1319 netdev_tx_completed_queue(txring_txq(tx_ring),
1320 total_packets, total_bytes);
1321
1322#define TX_WAKE_THRESHOLD min_t(u16, FM10K_MIN_TXD - 1, DESC_NEEDED * 2)
1323 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1324 (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
1325 /* Make sure that anybody stopping the queue after this
1326 * sees the new next_to_clean.
1327 */
1328 smp_mb();
1329 if (__netif_subqueue_stopped(tx_ring->netdev,
1330 tx_ring->queue_index) &&
1331 !test_bit(__FM10K_DOWN, &interface->state)) {
1332 netif_wake_subqueue(tx_ring->netdev,
1333 tx_ring->queue_index);
1334 ++tx_ring->tx_stats.restart_queue;
1335 }
1336 }
1337
1338 return !!budget;
1339}
1340
1341/**
1342 * fm10k_update_itr - update the dynamic ITR value based on packet size
1343 *
 1343 * Stores a new ITR value based strictly on packet size. The
1345 * divisors and thresholds used by this function were determined based
1346 * on theoretical maximum wire speed and testing data, in order to
1347 * minimize response time while increasing bulk throughput.
1348 *
1349 * @ring_container: Container for rings to have ITR updated
1350 **/
1351static void fm10k_update_itr(struct fm10k_ring_container *ring_container)
1352{
1353 unsigned int avg_wire_size, packets;
1354
1355 /* Only update ITR if we are using adaptive setting */
1356 if (!(ring_container->itr & FM10K_ITR_ADAPTIVE))
1357 goto clear_counts;
1358
1359 packets = ring_container->total_packets;
1360 if (!packets)
1361 goto clear_counts;
1362
1363 avg_wire_size = ring_container->total_bytes / packets;
1364
1365 /* Add 24 bytes to size to account for CRC, preamble, and gap */
1366 avg_wire_size += 24;
1367
1368 /* Don't starve jumbo frames */
1369 if (avg_wire_size > 3000)
1370 avg_wire_size = 3000;
1371
1372 /* Give a little boost to mid-size frames */
1373 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
1374 avg_wire_size /= 3;
1375 else
1376 avg_wire_size /= 2;
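	/* e.g. MTU-sized frames: (1500 + 24) / 2 = 762, while 500-byte
	 * frames fall in the mid-size bucket: (500 + 24) / 3 = 174, so bulk
	 * traffic ends up with a larger ITR value (a longer interval between
	 * interrupts) than small or mid-sized packet workloads.
	 */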
1377
1378 /* write back value and retain adaptive flag */
1379 ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE;
1380
1381clear_counts:
1382 ring_container->total_bytes = 0;
1383 ring_container->total_packets = 0;
1384}
1385
1386static void fm10k_qv_enable(struct fm10k_q_vector *q_vector)
1387{
1388 /* Enable auto-mask and clear the current mask */
1389 u32 itr = FM10K_ITR_ENABLE;
1390
1391 /* Update Tx ITR */
1392 fm10k_update_itr(&q_vector->tx);
1393
1394 /* Update Rx ITR */
1395 fm10k_update_itr(&q_vector->rx);
1396
1397 /* Store Tx itr in timer slot 0 */
1398 itr |= (q_vector->tx.itr & FM10K_ITR_MAX);
1399
1400 /* Shift Rx itr to timer slot 1 */
1401 itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT;
1402
1403 /* Write the final value to the ITR register */
1404 writel(itr, q_vector->itr);
1405}
1406
1407static int fm10k_poll(struct napi_struct *napi, int budget)
1408{
1409 struct fm10k_q_vector *q_vector =
1410 container_of(napi, struct fm10k_q_vector, napi);
1411 struct fm10k_ring *ring;
1412 int per_ring_budget;
1413 bool clean_complete = true;
1414
1415 fm10k_for_each_ring(ring, q_vector->tx)
1416 clean_complete &= fm10k_clean_tx_irq(q_vector, ring);
1417
1418 /* attempt to distribute budget to each queue fairly, but don't
1419 * allow the budget to go below 1 because we'll exit polling
1420 */
1421 if (q_vector->rx.count > 1)
1422 per_ring_budget = max(budget/q_vector->rx.count, 1);
1423 else
1424 per_ring_budget = budget;
1425
1426 fm10k_for_each_ring(ring, q_vector->rx)
1427 clean_complete &= fm10k_clean_rx_irq(q_vector, ring,
1428 per_ring_budget);
1429
1430 /* If all work not completed, return budget and keep polling */
1431 if (!clean_complete)
1432 return budget;
1433
1434 /* all work done, exit the polling mode */
1435 napi_complete(napi);
1436
1437 /* re-enable the q_vector */
1438 fm10k_qv_enable(q_vector);
1439
1440 return 0;
1441}
1442
1443/**
1444 * fm10k_set_qos_queues: Allocate queues for a QOS-enabled device
1445 * @interface: board private structure to initialize
1446 *
1447 * When QoS (Quality of Service) is enabled, allocate queues for
 1448 * each traffic class. If multiqueue isn't available, then abort QoS
1449 * initialization.
1450 *
1451 * This function handles all combinations of Qos and RSS.
1452 *
1453 **/
1454static bool fm10k_set_qos_queues(struct fm10k_intfc *interface)
1455{
1456 struct net_device *dev = interface->netdev;
1457 struct fm10k_ring_feature *f;
1458 int rss_i, i;
1459 int pcs;
1460
1461 /* Map queue offset and counts onto allocated tx queues */
1462 pcs = netdev_get_num_tc(dev);
1463
1464 if (pcs <= 1)
1465 return false;
1466
1467 /* set QoS mask and indices */
1468 f = &interface->ring_feature[RING_F_QOS];
1469 f->indices = pcs;
1470 f->mask = (1 << fls(pcs - 1)) - 1;
1471
1472 /* determine the upper limit for our current DCB mode */
1473 rss_i = interface->hw.mac.max_queues / pcs;
1474 rss_i = 1 << (fls(rss_i) - 1);
1475
1476 /* set RSS mask and indices */
1477 f = &interface->ring_feature[RING_F_RSS];
1478 rss_i = min_t(u16, rss_i, f->limit);
1479 f->indices = rss_i;
1480 f->mask = (1 << fls(rss_i - 1)) - 1;
1481
1482 /* configure pause class to queue mapping */
1483 for (i = 0; i < pcs; i++)
1484 netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
1485
1486 interface->num_rx_queues = rss_i * pcs;
1487 interface->num_tx_queues = rss_i * pcs;
1488
1489 return true;
1490}
1491
1492/**
1493 * fm10k_set_rss_queues: Allocate queues for RSS
1494 * @interface: board private structure to initialize
1495 *
1496 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
1497 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
1498 *
1499 **/
1500static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
1501{
1502 struct fm10k_ring_feature *f;
1503 u16 rss_i;
1504
1505 f = &interface->ring_feature[RING_F_RSS];
1506 rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit);
1507
1508 /* record indices and power of 2 mask for RSS */
1509 f->indices = rss_i;
1510 f->mask = (1 << fls(rss_i - 1)) - 1;
1511
1512 interface->num_rx_queues = rss_i;
1513 interface->num_tx_queues = rss_i;
1514
1515 return true;
1516}
1517
1518/**
1519 * fm10k_set_num_queues: Allocate queues for device, feature dependent
1520 * @interface: board private structure to initialize
1521 *
1522 * This is the top level queue allocation routine. The order here is very
1523 * important, starting with the "most" number of features turned on at once,
1524 * and ending with the smallest set of features. This way large combinations
1525 * can be allocated if they're turned on, and smaller combinations are the
1526 * fallthrough conditions.
1527 *
1528 **/
1529static void fm10k_set_num_queues(struct fm10k_intfc *interface)
1530{
1531 /* Start with base case */
1532 interface->num_rx_queues = 1;
1533 interface->num_tx_queues = 1;
1534
1535 if (fm10k_set_qos_queues(interface))
1536 return;
1537
1538 fm10k_set_rss_queues(interface);
1539}
1540
1541/**
1542 * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector
1543 * @interface: board private structure to initialize
1544 * @v_count: q_vectors allocated on interface, used for ring interleaving
1545 * @v_idx: index of vector in interface struct
1546 * @txr_count: total number of Tx rings to allocate
1547 * @txr_idx: index of first Tx ring to allocate
1548 * @rxr_count: total number of Rx rings to allocate
1549 * @rxr_idx: index of first Rx ring to allocate
1550 *
1551 * We allocate one q_vector. If allocation fails we return -ENOMEM.
1552 **/
1553static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
1554 unsigned int v_count, unsigned int v_idx,
1555 unsigned int txr_count, unsigned int txr_idx,
1556 unsigned int rxr_count, unsigned int rxr_idx)
1557{
1558 struct fm10k_q_vector *q_vector;
1559	struct fm10k_ring *ring;
1560 int ring_count, size;
1561
1562 ring_count = txr_count + rxr_count;
1563 size = sizeof(struct fm10k_q_vector) +
1564 (sizeof(struct fm10k_ring) * ring_count);
1565
1566 /* allocate q_vector and rings */
1567 q_vector = kzalloc(size, GFP_KERNEL);
1568 if (!q_vector)
1569 return -ENOMEM;
1570
1571 /* initialize NAPI */
1572 netif_napi_add(interface->netdev, &q_vector->napi,
1573 fm10k_poll, NAPI_POLL_WEIGHT);
1574
1575 /* tie q_vector and interface together */
1576 interface->q_vector[v_idx] = q_vector;
1577 q_vector->interface = interface;
1578 q_vector->v_idx = v_idx;
1579
1580 /* initialize pointer to rings */
1581 ring = q_vector->ring;
1582
1583	/* save Tx ring container info */
1584 q_vector->tx.ring = ring;
1585 q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK;
1586 q_vector->tx.itr = interface->tx_itr;
1587 q_vector->tx.count = txr_count;
1588
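	/* rings are interleaved across vectors: this vector takes Tx rings
	 * txr_idx, txr_idx + v_count, txr_idx + 2 * v_count, and so on
	 */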
1589 while (txr_count) {
1590 /* assign generic ring traits */
1591 ring->dev = &interface->pdev->dev;
1592 ring->netdev = interface->netdev;
1593
1594 /* configure backlink on ring */
1595 ring->q_vector = q_vector;
1596
1597 /* apply Tx specific ring traits */
1598 ring->count = interface->tx_ring_count;
1599 ring->queue_index = txr_idx;
1600
1601 /* assign ring to interface */
1602 interface->tx_ring[txr_idx] = ring;
1603
1604 /* update count and index */
1605 txr_count--;
1606 txr_idx += v_count;
1607
1608 /* push pointer to next ring */
1609 ring++;
1610 }
1611
1612	/* save Rx ring container info */
1613	q_vector->rx.ring = ring;
1614 q_vector->rx.itr = interface->rx_itr;
1615 q_vector->rx.count = rxr_count;
1616
1617 while (rxr_count) {
1618 /* assign generic ring traits */
1619 ring->dev = &interface->pdev->dev;
1620 ring->netdev = interface->netdev;
1621		rcu_assign_pointer(ring->l2_accel, interface->l2_accel);
1622
1623 /* configure backlink on ring */
1624 ring->q_vector = q_vector;
1625
1626 /* apply Rx specific ring traits */
1627 ring->count = interface->rx_ring_count;
1628 ring->queue_index = rxr_idx;
1629
1630 /* assign ring to interface */
1631 interface->rx_ring[rxr_idx] = ring;
1632
1633 /* update count and index */
1634 rxr_count--;
1635 rxr_idx += v_count;
1636
1637 /* push pointer to next ring */
1638 ring++;
1639 }
1640
1641 fm10k_dbg_q_vector_init(q_vector);
1642
1643 return 0;
1644}
1645
1646/**
1647 * fm10k_free_q_vector - Free memory allocated for specific interrupt vector
1648 * @interface: board private structure to initialize
1649 * @v_idx: Index of vector to be freed
1650 *
1651 * This function frees the memory allocated to the q_vector. In addition if
1652 * NAPI is enabled it will delete any references to the NAPI struct prior
1653 * to freeing the q_vector.
1654 **/
1655static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx)
1656{
1657 struct fm10k_q_vector *q_vector = interface->q_vector[v_idx];
1658 struct fm10k_ring *ring;
1659
1660 fm10k_dbg_q_vector_exit(q_vector);
1661
1662 fm10k_for_each_ring(ring, q_vector->tx)
1663 interface->tx_ring[ring->queue_index] = NULL;
1664
1665 fm10k_for_each_ring(ring, q_vector->rx)
1666 interface->rx_ring[ring->queue_index] = NULL;
1667
1668 interface->q_vector[v_idx] = NULL;
1669 netif_napi_del(&q_vector->napi);
1670 kfree_rcu(q_vector, rcu);
1671}
1672
1673/**
1674 * fm10k_alloc_q_vectors - Allocate memory for interrupt vectors
1675 * @interface: board private structure to initialize
1676 *
1677 * We allocate one q_vector per queue interrupt. If allocation fails we
1678 * return -ENOMEM.
1679 **/
1680static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface)
1681{
1682 unsigned int q_vectors = interface->num_q_vectors;
1683 unsigned int rxr_remaining = interface->num_rx_queues;
1684 unsigned int txr_remaining = interface->num_tx_queues;
1685 unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1686 int err;
1687
1688 if (q_vectors >= (rxr_remaining + txr_remaining)) {
1689 for (; rxr_remaining; v_idx++) {
1690 err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
1691 0, 0, 1, rxr_idx);
1692 if (err)
1693 goto err_out;
1694
1695 /* update counts and index */
1696 rxr_remaining--;
1697 rxr_idx++;
1698 }
1699 }
1700
1701 for (; v_idx < q_vectors; v_idx++) {
1702 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1703 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1704
1705 err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
1706 tqpv, txr_idx,
1707 rqpv, rxr_idx);
1708
1709 if (err)
1710 goto err_out;
1711
1712 /* update counts and index */
1713 rxr_remaining -= rqpv;
1714 txr_remaining -= tqpv;
1715 rxr_idx++;
1716 txr_idx++;
1717 }
1718
1719 return 0;
1720
1721err_out:
1722 interface->num_tx_queues = 0;
1723 interface->num_rx_queues = 0;
1724 interface->num_q_vectors = 0;
1725
1726 while (v_idx--)
1727 fm10k_free_q_vector(interface, v_idx);
1728
1729 return -ENOMEM;
1730}
1731
1732/**
1733 * fm10k_free_q_vectors - Free memory allocated for interrupt vectors
1734 * @interface: board private structure to initialize
1735 *
1736 * This function frees the memory allocated to the q_vectors. In addition if
1737 * NAPI is enabled it will delete any references to the NAPI struct prior
1738 * to freeing the q_vector.
1739 **/
1740static void fm10k_free_q_vectors(struct fm10k_intfc *interface)
1741{
1742 int v_idx = interface->num_q_vectors;
1743
1744 interface->num_tx_queues = 0;
1745 interface->num_rx_queues = 0;
1746 interface->num_q_vectors = 0;
1747
1748 while (v_idx--)
1749 fm10k_free_q_vector(interface, v_idx);
1750}
1751
1752/**
 1753 * fm10k_reset_msix_capability - reset MSI-X capability
1754 * @interface: board private structure to initialize
1755 *
1756 * Reset the MSI-X capability back to its starting state
1757 **/
1758static void fm10k_reset_msix_capability(struct fm10k_intfc *interface)
1759{
1760 pci_disable_msix(interface->pdev);
1761 kfree(interface->msix_entries);
1762 interface->msix_entries = NULL;
1763}
1764
1765/**
 1766 * fm10k_init_msix_capability - configure MSI-X capability
1767 * @interface: board private structure to initialize
1768 *
1769 * Attempt to configure the interrupts using the best available
1770 * capabilities of the hardware and the kernel.
1771 **/
1772static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
1773{
1774 struct fm10k_hw *hw = &interface->hw;
1775 int v_budget, vector;
1776
1777 /* It's easy to be greedy for MSI-X vectors, but it really
1778 * doesn't do us much good if we have a lot more vectors
 1779 * than CPUs. So let's be conservative and only ask for
 1780 * (roughly) the same number of vectors as there are CPUs.
 1781 * The default is to use pairs of vectors.
1782 */
1783 v_budget = max(interface->num_rx_queues, interface->num_tx_queues);
1784 v_budget = min_t(u16, v_budget, num_online_cpus());
1785
1786 /* account for vectors not related to queues */
1787 v_budget += NON_Q_VECTORS(hw);
1788
1789 /* At the same time, hardware can only support a maximum of
1790 * hw.mac->max_msix_vectors vectors. With features
1791 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
1792 * descriptor queues supported by our device. Thus, we cap it off in
1793 * those rare cases where the cpu count also exceeds our vector limit.
1794 */
1795 v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
1796
1797 /* A failure in MSI-X entry allocation is fatal. */
1798 interface->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
1799 GFP_KERNEL);
1800 if (!interface->msix_entries)
1801 return -ENOMEM;
1802
1803 /* populate entry values */
1804 for (vector = 0; vector < v_budget; vector++)
1805 interface->msix_entries[vector].entry = vector;
1806
1807 /* Attempt to enable MSI-X with requested value */
1808 v_budget = pci_enable_msix_range(interface->pdev,
1809 interface->msix_entries,
1810 MIN_MSIX_COUNT(hw),
1811 v_budget);
1812 if (v_budget < 0) {
1813 kfree(interface->msix_entries);
1814 interface->msix_entries = NULL;
1815 return -ENOMEM;
1816 }
1817
1818 /* record the number of queues available for q_vectors */
1819 interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw);
1820
1821 return 0;
1822}
1823
1824/**
1825 * fm10k_cache_ring_qos - Descriptor ring to register mapping for QoS
 1826 * @interface: Interface structure containing rings and devices
1827 *
1828 * Cache the descriptor ring offsets for Qos
1829 **/
1830static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface)
1831{
1832 struct net_device *dev = interface->netdev;
1833 int pc, offset, rss_i, i, q_idx;
1834 u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1;
1835 u8 num_pcs = netdev_get_num_tc(dev);
1836
1837 if (num_pcs <= 1)
1838 return false;
1839
1840 rss_i = interface->ring_feature[RING_F_RSS].indices;
1841
1842 for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) {
1843 q_idx = pc;
1844 for (i = 0; i < rss_i; i++) {
1845 interface->tx_ring[offset + i]->reg_idx = q_idx;
1846 interface->tx_ring[offset + i]->qos_pc = pc;
1847 interface->rx_ring[offset + i]->reg_idx = q_idx;
1848 interface->rx_ring[offset + i]->qos_pc = pc;
1849 q_idx += pc_stride;
1850 }
1851 }
1852
1853 return true;
1854}
1855
1856/**
1857 * fm10k_cache_ring_rss - Descriptor ring to register mapping for RSS
 1858 * @interface: Interface structure containing rings and devices
1859 *
1860 * Cache the descriptor ring offsets for RSS
1861 **/
1862static void fm10k_cache_ring_rss(struct fm10k_intfc *interface)
1863{
1864 int i;
1865
1866 for (i = 0; i < interface->num_rx_queues; i++)
1867 interface->rx_ring[i]->reg_idx = i;
1868
1869 for (i = 0; i < interface->num_tx_queues; i++)
1870 interface->tx_ring[i]->reg_idx = i;
1871}
1872
1873/**
1874 * fm10k_assign_rings - Map rings to network devices
1875 * @interface: Interface structure containing rings and devices
1876 *
 1877 * This function is meant to go through and configure both the network
1878 * devices so that they contain rings, and configure the rings so that
1879 * they function with their network devices.
1880 **/
1881static void fm10k_assign_rings(struct fm10k_intfc *interface)
1882{
1883 if (fm10k_cache_ring_qos(interface))
1884 return;
1885
1886 fm10k_cache_ring_rss(interface);
1887}
1888
1889static void fm10k_init_reta(struct fm10k_intfc *interface)
1890{
1891 u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices;
1892 u32 reta, base;
1893
1894 /* If the netdev is initialized we have to maintain table if possible */
1895 if (interface->netdev->reg_state) {
1896 for (i = FM10K_RETA_SIZE; i--;) {
1897 reta = interface->reta[i];
1898 if ((((reta << 24) >> 24) < rss_i) &&
1899 (((reta << 16) >> 24) < rss_i) &&
1900 (((reta << 8) >> 24) < rss_i) &&
1901 (((reta) >> 24) < rss_i))
1902 continue;
1903 goto repopulate_reta;
1904 }
1905
1906 /* do nothing if all of the elements are in bounds */
1907 return;
1908 }
1909
1910repopulate_reta:
1911 /* Populate the redirection table 4 entries at a time. To do this
1912 * we are generating the results for n and n+2 and then interleaving
1913 * those with the results with n+1 and n+3.
1914 */
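	/* The packed math below works out to RETA byte k of word i being
	 * ((4 * i + k) * rss_i) >> 7, i.e. the hash buckets are spread
	 * evenly across the rss_i queues in contiguous blocks.
	 */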
1915 for (i = FM10K_RETA_SIZE; i--;) {
1916 /* first pass generates n and n+2 */
1917 base = ((i * 0x00040004) + 0x00020000) * rss_i;
1918 reta = (base & 0x3F803F80) >> 7;
1919
1920 /* second pass generates n+1 and n+3 */
1921 base += 0x00010001 * rss_i;
1922 reta |= (base & 0x3F803F80) << 1;
1923
1924 interface->reta[i] = reta;
1925 }
1926}
1927
1928/**
1929 * fm10k_init_queueing_scheme - Determine proper queueing scheme
1930 * @interface: board private structure to initialize
1931 *
1932 * We determine which queueing scheme to use based on...
1933 * - Hardware queue count (num_*_queues)
1934 * - defined by miscellaneous hardware support/features (RSS, etc.)
1935 **/
1936int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
1937{
1938 int err;
1939
1940 /* Number of supported queues */
1941 fm10k_set_num_queues(interface);
1942
1943 /* Configure MSI-X capability */
1944 err = fm10k_init_msix_capability(interface);
1945 if (err) {
1946 dev_err(&interface->pdev->dev,
1947 "Unable to initialize MSI-X capability\n");
1948 return err;
1949 }
1950
1951 /* Allocate memory for queues */
1952 err = fm10k_alloc_q_vectors(interface);
1953 if (err)
1954 return err;
1955
1956 /* Map rings to devices, and map devices to physical queues */
1957 fm10k_assign_rings(interface);
1958
1959 /* Initialize RSS redirection table */
1960 fm10k_init_reta(interface);
1961
1962 return 0;
1963}
1964
1965/**
1966 * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings
1967 * @interface: board private structure to clear queueing scheme on
1968 *
1969 * We go through and clear queueing specific resources and reset the structure
1970 * to pre-load conditions
1971 **/
1972void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface)
1973{
1974 fm10k_free_q_vectors(interface);
1975 fm10k_reset_msix_capability(interface);
1976}