1 | /* Intel Ethernet Switch Host Interface Driver |
2 | * Copyright(c) 2013 - 2014 Intel Corporation. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms and conditions of the GNU General Public License, | |
6 | * version 2, as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope it will be useful, but WITHOUT | |
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
11 | * more details. | |
12 | * | |
13 | * The full GNU General Public License is included in this distribution in | |
14 | * the file called "COPYING". | |
15 | * | |
16 | * Contact Information: | |
17 | * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | |
18 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | |
19 | */ | |
20 | ||
21 | #include <linux/types.h> | |
22 | #include <linux/module.h> | |
23 | #include <net/ipv6.h> | |
24 | #include <net/ip.h> | |
25 | #include <net/tcp.h> | |
26 | #include <linux/if_macvlan.h> | |
b101c962 | 27 | #include <linux/prefetch.h> |
28 | |
29 | #include "fm10k.h" | |
30 | ||
e3b6e95d | 31 | #define DRV_VERSION "0.19.3-k" |
32 | const char fm10k_driver_version[] = DRV_VERSION; |
33 | char fm10k_driver_name[] = "fm10k"; | |
34 | static const char fm10k_driver_string[] = | |
35 | "Intel(R) Ethernet Switch Host Interface Driver"; | |
36 | static const char fm10k_copyright[] = | |
37 | "Copyright (c) 2013 Intel Corporation."; | |
38 | ||
39 | MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); | |
40 | MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver"); | |
41 | MODULE_LICENSE("GPL"); | |
42 | MODULE_VERSION(DRV_VERSION); | |
43 | ||
44 | /* single workqueue for entire fm10k driver */ | |
45 | struct workqueue_struct *fm10k_workqueue; | |
46 ||
47 | /** |
48 | * fm10k_init_module - Driver Registration Routine | |
49 | * |
50 | * fm10k_init_module is the first routine called when the driver is | |
51 | * loaded. All it does is register with the PCI subsystem. | |
52 | **/ | |
53 | static int __init fm10k_init_module(void) | |
54 | { | |
55 | pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version); | |
56 | pr_info("%s\n", fm10k_copyright); | |
57 | ||
58 | /* create driver workqueue */ | |
59 | fm10k_workqueue = create_workqueue("fm10k"); | |
60 ||
61 | fm10k_dbg_init(); |
62 | ||
63 | return fm10k_register_pci_driver(); |
64 | } | |
65 | module_init(fm10k_init_module); | |
66 | ||
67 | /** | |
68 | * fm10k_exit_module - Driver Exit Cleanup Routine | |
69 | * | |
70 | * fm10k_exit_module is called just before the driver is removed | |
71 | * from memory. | |
72 | **/ | |
73 | static void __exit fm10k_exit_module(void) | |
74 | { | |
75 | fm10k_unregister_pci_driver(); | |
76 | |
77 | fm10k_dbg_exit(); | |
78 | |
79 | /* destroy driver workqueue */ | |
80 | flush_workqueue(fm10k_workqueue); | |
81 | destroy_workqueue(fm10k_workqueue); | |
82 | } |
83 | module_exit(fm10k_exit_module); | |
18283cad | 84 | |
85 | static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring, |
86 | struct fm10k_rx_buffer *bi) | |
87 | { | |
88 | struct page *page = bi->page; | |
89 | dma_addr_t dma; | |
90 | ||
91 | /* Only page will be NULL if buffer was consumed */ | |
92 | if (likely(page)) | |
93 | return true; | |
94 | ||
95 | /* alloc new page for storage */ | |
42b17f09 | 96 | page = dev_alloc_page(); |
97 | if (unlikely(!page)) { |
98 | rx_ring->rx_stats.alloc_failed++; | |
99 | return false; | |
100 | } | |
101 | ||
102 | /* map page for use */ | |
103 | dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); | |
104 | ||
105 | /* if mapping failed free memory back to system since | |
106 | * there isn't much point in holding memory we can't use | |
107 | */ | |
108 | if (dma_mapping_error(rx_ring->dev, dma)) { | |
109 | __free_page(page); | |
110 | |
111 | rx_ring->rx_stats.alloc_failed++; | |
112 | return false; | |
113 | } | |
114 | ||
115 | bi->dma = dma; | |
116 | bi->page = page; | |
117 | bi->page_offset = 0; | |
118 | ||
119 | return true; | |
120 | } | |
121 | ||
122 | /** | |
123 | * fm10k_alloc_rx_buffers - Replace used receive buffers | |
124 | * @rx_ring: ring to place buffers on | |
125 | * @cleaned_count: number of buffers to replace | |
126 | **/ | |
127 | void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count) | |
128 | { | |
129 | union fm10k_rx_desc *rx_desc; | |
130 | struct fm10k_rx_buffer *bi; | |
131 | u16 i = rx_ring->next_to_use; | |
132 | ||
133 | /* nothing to do */ | |
134 | if (!cleaned_count) | |
135 | return; | |
136 | ||
137 | rx_desc = FM10K_RX_DESC(rx_ring, i); | |
138 | bi = &rx_ring->rx_buffer[i]; | |
139 | i -= rx_ring->count; | |
140 | ||
141 | do { | |
142 | if (!fm10k_alloc_mapped_page(rx_ring, bi)) | |
143 | break; | |
144 | ||
145 | /* Refresh the desc even if buffer_addrs didn't change | |
146 | * because each write-back erases this info. | |
147 | */ | |
148 | rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); | |
149 | ||
150 | rx_desc++; | |
151 | bi++; | |
152 | i++; | |
153 | if (unlikely(!i)) { | |
154 | rx_desc = FM10K_RX_DESC(rx_ring, 0); | |
155 | bi = rx_ring->rx_buffer; | |
156 | i -= rx_ring->count; | |
157 | } | |
158 | ||
159 | /* clear the status bits for the next_to_use descriptor */ |
160 | rx_desc->d.staterr = 0; | |
161 | |
162 | cleaned_count--; | |
163 | } while (cleaned_count); | |
164 | ||
165 | i += rx_ring->count; | |
166 | ||
167 | if (rx_ring->next_to_use != i) { | |
168 | /* record the next descriptor to use */ | |
169 | rx_ring->next_to_use = i; | |
170 | ||
171 | /* update next to alloc since we have filled the ring */ | |
172 | rx_ring->next_to_alloc = i; | |
173 | ||
174 | /* Force memory writes to complete before letting h/w | |
175 | * know there are new descriptors to fetch. (Only | |
176 | * applicable for weak-ordered memory model archs, | |
177 | * such as IA-64). | |
178 | */ | |
179 | wmb(); | |
180 | ||
181 | /* notify hardware of new descriptors */ | |
182 | writel(i, rx_ring->tail); | |
183 | } | |
184 | } | |
185 | ||
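The refill loop above keeps `i` biased by the ring size so a single `!i` test detects the wrap point instead of comparing against `rx_ring->count` on every pass. A minimal userspace sketch of that index arithmetic, using made-up values rather than driver constants:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint16_t count = 4;	/* hypothetical ring size */
	uint16_t i = 2;			/* pretend next_to_use == 2 */

	i -= count;			/* bias the index, as the driver does */

	for (int n = 0; n < 6; n++) {
		uint16_t idx = i + count;	/* recover the real index */

		printf("using descriptor %u\n", (unsigned int)idx);
		i++;
		if (!i)			/* true exactly at the wrap point */
			i -= count;	/* back to the biased ring start */
	}
	return 0;
}
```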
186 | /** | |
187 | * fm10k_reuse_rx_page - page flip buffer and store it back on the ring | |
188 | * @rx_ring: rx descriptor ring to store buffers on | |
189 | * @old_buff: donor buffer to have page reused | |
190 | * | |
191 | * Synchronizes page for reuse by the interface | |
192 | **/ | |
193 | static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring, | |
194 | struct fm10k_rx_buffer *old_buff) | |
195 | { | |
196 | struct fm10k_rx_buffer *new_buff; | |
197 | u16 nta = rx_ring->next_to_alloc; | |
198 | ||
199 | new_buff = &rx_ring->rx_buffer[nta]; | |
200 | ||
201 | /* update, and store next to alloc */ | |
202 | nta++; | |
203 | rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; | |
204 | ||
205 | /* transfer page from old buffer to new buffer */ | |
ba5b8dcd | 206 | *new_buff = *old_buff; |
207 | |
208 | /* sync the buffer for use by the device */ | |
209 | dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma, | |
210 | old_buff->page_offset, | |
211 | FM10K_RX_BUFSZ, | |
212 | DMA_FROM_DEVICE); | |
213 | } | |
214 | ||
215 | static inline bool fm10k_page_is_reserved(struct page *page) |
216 | { | |
2f064f34 | 217 | return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); |
218 | } |
219 | ||
220 | static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, |
221 | struct page *page, | |
de445199 | 222 | unsigned int __maybe_unused truesize) |
223 | { |
224 | /* avoid re-using remote pages */ | |
ba5b8dcd | 225 | if (unlikely(fm10k_page_is_reserved(page))) |
226 | return false; |
227 | ||
228 | #if (PAGE_SIZE < 8192) | |
229 | /* if we are only owner of page we can reuse it */ | |
230 | if (unlikely(page_count(page) != 1)) | |
231 | return false; | |
232 | ||
233 | /* flip page offset to other buffer */ | |
234 | rx_buffer->page_offset ^= FM10K_RX_BUFSZ; | |
235 | #else |
236 | /* move offset up to the next cache line */ | |
237 | rx_buffer->page_offset += truesize; | |
238 | ||
239 | if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ)) | |
240 | return false; | |
241 | #endif |
242 | ||
243 | /* Even if we own the page, we are not allowed to use atomic_set() |
244 | * This would break get_page_unless_zero() users. | |
245 | */ | |
fe896d18 | 246 | page_ref_inc(page); |
ba5b8dcd | 247 | |
248 | return true; |
249 | } | |
250 | ||
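With pages smaller than 8 KiB, each receive page is treated as two half-page buffers, and the XOR on `page_offset` above simply flips between them on reuse. A toy illustration, assuming a 2 KiB FM10K_RX_BUFSZ (the real value comes from fm10k.h):

```c
#include <stdio.h>

#define RX_BUFSZ 2048u	/* assumed half-page buffer size */

int main(void)
{
	unsigned int page_offset = 0;

	/* XOR with the buffer size alternates between the two page halves */
	for (int n = 0; n < 4; n++) {
		printf("receive buffer at page offset %u\n", page_offset);
		page_offset ^= RX_BUFSZ;
	}
	return 0;
}
```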
251 | /** | |
252 | * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff | |
253 | * @rx_buffer: buffer containing page to add |
254 | * @rx_desc: descriptor containing length of buffer written by hardware | |
255 | * @skb: sk_buff to place the data into | |
256 | * | |
257 | * This function will add the data contained in rx_buffer->page to the skb. | |
258 | * This is done either through a direct copy if the data in the buffer is | |
259 | * less than the skb header size, otherwise it will just attach the page as | |
260 | * a frag to the skb. | |
261 | * | |
262 | * The function will then update the page offset if necessary and return | |
263 | * true if the buffer can be reused by the interface. | |
264 | **/ | |
de445199 | 265 | static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer, |
266 | union fm10k_rx_desc *rx_desc, |
267 | struct sk_buff *skb) | |
268 | { | |
269 | struct page *page = rx_buffer->page; | |
1a8782e5 | 270 | unsigned char *va = page_address(page) + rx_buffer->page_offset; |
271 | unsigned int size = le16_to_cpu(rx_desc->w.length); |
272 | #if (PAGE_SIZE < 8192) | |
273 | unsigned int truesize = FM10K_RX_BUFSZ; | |
274 | #else | |
275 | unsigned int truesize = SKB_DATA_ALIGN(size); | |
276 | #endif | |
277 | unsigned int pull_len; | |
278 ||
279 | if (unlikely(skb_is_nonlinear(skb))) |
280 | goto add_tail_frag; | |
b101c962 | 281 | |
1a8782e5 | 282 | if (likely(size <= FM10K_RX_HDR_LEN)) { |
283 | memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); |
284 | ||
285 | /* page is not reserved, we can reuse buffer as-is */ |
286 | if (likely(!fm10k_page_is_reserved(page))) | |
287 | return true; |
288 | ||
289 | /* this page cannot be reused so discard it */ | |
ba5b8dcd | 290 | __free_page(page); |
291 | return false; |
292 | } | |
293 | ||
294 | /* we need the header to contain the greater of either ETH_HLEN or |
295 | * 60 bytes if the skb->len is less than 60 for skb_pad. | |
296 | */ | |
297 | pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN); | |
298 | ||
299 | /* align pull length to size of long to optimize memcpy performance */ | |
300 | memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); | |
301 | ||
302 | /* update all of the pointers */ | |
303 | va += pull_len; | |
304 | size -= pull_len; | |
305 | ||
306 | add_tail_frag: | |
b101c962 | 307 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, |
1a8782e5 | 308 | (unsigned long)va & ~PAGE_MASK, size, truesize); |
309 | |
310 | return fm10k_can_reuse_rx_page(rx_buffer, page, truesize); | |
311 | } | |
312 | ||
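The header pull above copies `ALIGN(pull_len, sizeof(long))` bytes so the memcpy runs in whole machine words, while the skb length still only advances by `pull_len`. A small sketch of that rounding; the 54-byte header length is just an example:

```c
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long pull_len = 54;	/* hypothetical pulled header bytes */

	printf("skb_put length: %lu, memcpy length: %lu\n",
	       pull_len, (unsigned long)ALIGN_UP(pull_len, sizeof(long)));
	return 0;
}
```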
313 | static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring, | |
314 | union fm10k_rx_desc *rx_desc, | |
315 | struct sk_buff *skb) | |
316 | { | |
317 | struct fm10k_rx_buffer *rx_buffer; | |
318 | struct page *page; | |
319 | ||
320 | rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean]; | |
321 | page = rx_buffer->page; |
322 | prefetchw(page); | |
323 | ||
324 | if (likely(!skb)) { | |
325 | void *page_addr = page_address(page) + | |
326 | rx_buffer->page_offset; | |
327 | ||
328 | /* prefetch first cache line of first page */ | |
329 | prefetch(page_addr); | |
330 | #if L1_CACHE_BYTES < 128 | |
331 | prefetch(page_addr + L1_CACHE_BYTES); | |
332 | #endif | |
333 | ||
334 | /* allocate a skb to store the frags */ | |
335 | skb = napi_alloc_skb(&rx_ring->q_vector->napi, |
336 | FM10K_RX_HDR_LEN); | |
337 | if (unlikely(!skb)) { |
338 | rx_ring->rx_stats.alloc_failed++; | |
339 | return NULL; | |
340 | } | |
341 | ||
342 | /* we will be copying header into skb->data in | |
343 | * pskb_may_pull so it is in our interest to prefetch | |
344 | * it now to avoid a possible cache miss | |
345 | */ | |
346 | prefetchw(skb->data); | |
347 | } | |
348 | ||
349 | /* we are reusing so sync this buffer for CPU use */ | |
350 | dma_sync_single_range_for_cpu(rx_ring->dev, | |
351 | rx_buffer->dma, | |
352 | rx_buffer->page_offset, | |
353 | FM10K_RX_BUFSZ, | |
354 | DMA_FROM_DEVICE); | |
355 | ||
356 | /* pull page into skb */ | |
de445199 | 357 | if (fm10k_add_rx_frag(rx_buffer, rx_desc, skb)) { |
358 | /* hand second half of page back to the ring */ |
359 | fm10k_reuse_rx_page(rx_ring, rx_buffer); | |
360 | } else { | |
361 | /* we are not reusing the buffer so unmap it */ | |
362 | dma_unmap_page(rx_ring->dev, rx_buffer->dma, | |
363 | PAGE_SIZE, DMA_FROM_DEVICE); | |
364 | } | |
365 | ||
366 | /* clear contents of rx_buffer */ | |
367 | rx_buffer->page = NULL; | |
368 | ||
369 | return skb; | |
370 | } | |
371 | ||
372 | static inline void fm10k_rx_checksum(struct fm10k_ring *ring, |
373 | union fm10k_rx_desc *rx_desc, | |
374 | struct sk_buff *skb) | |
375 | { | |
376 | skb_checksum_none_assert(skb); | |
377 | ||
378 | /* Rx checksum disabled via ethtool */ | |
379 | if (!(ring->netdev->features & NETIF_F_RXCSUM)) | |
380 | return; | |
381 | ||
382 | /* TCP/UDP checksum error bit is set */ | |
383 | if (fm10k_test_staterr(rx_desc, | |
384 | FM10K_RXD_STATUS_L4E | | |
385 | FM10K_RXD_STATUS_L4E2 | | |
386 | FM10K_RXD_STATUS_IPE | | |
387 | FM10K_RXD_STATUS_IPE2)) { | |
388 | ring->rx_stats.csum_err++; | |
389 | return; | |
390 | } | |
391 | ||
392 | /* It must be a TCP or UDP packet with a valid checksum */ | |
393 | if (fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS2)) | |
394 | skb->encapsulation = true; | |
395 | else if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS)) | |
396 | return; | |
397 | ||
398 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
399 | |
400 | ring->rx_stats.csum_good++; | |
401 | } |
402 | ||
403 | #define FM10K_RSS_L4_TYPES_MASK \ | |
404 | (BIT(FM10K_RSSTYPE_IPV4_TCP) | \ |
405 | BIT(FM10K_RSSTYPE_IPV4_UDP) | \ | |
406 | BIT(FM10K_RSSTYPE_IPV6_TCP) | \ | |
407 | BIT(FM10K_RSSTYPE_IPV6_UDP)) | |
408 | |
409 | static inline void fm10k_rx_hash(struct fm10k_ring *ring, | |
410 | union fm10k_rx_desc *rx_desc, | |
411 | struct sk_buff *skb) | |
412 | { | |
413 | u16 rss_type; | |
414 | ||
415 | if (!(ring->netdev->features & NETIF_F_RXHASH)) | |
416 | return; | |
417 | ||
418 | rss_type = le16_to_cpu(rx_desc->w.pkt_info) & FM10K_RXD_RSSTYPE_MASK; | |
419 | if (!rss_type) | |
420 | return; | |
421 | ||
422 | skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss), | |
fcdb0a99 | 423 | (BIT(rss_type) & FM10K_RSS_L4_TYPES_MASK) ? |
424 | PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); |
425 | } | |
426 | ||
427 | static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring, |
428 | union fm10k_rx_desc *rx_desc, | |
429 | struct sk_buff *skb) | |
430 | { | |
431 | struct fm10k_intfc *interface = rx_ring->q_vector->interface; | |
432 | ||
433 | FM10K_CB(skb)->tstamp = rx_desc->q.timestamp; | |
434 | ||
435 | if (unlikely(interface->flags & FM10K_FLAG_RX_TS_ENABLED)) | |
436 | fm10k_systime_to_hwtstamp(interface, skb_hwtstamps(skb), | |
437 | le64_to_cpu(rx_desc->q.timestamp)); | |
438 | } | |
439 | ||
5cd5e2e9 | 440 | static void fm10k_type_trans(struct fm10k_ring *rx_ring, |
de445199 | 441 | union fm10k_rx_desc __maybe_unused *rx_desc, |
442 | struct sk_buff *skb) |
443 | { | |
444 | struct net_device *dev = rx_ring->netdev; | |
445 | struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel); | |
446 | ||
447 | /* check to see if DGLORT belongs to a MACVLAN */ | |
448 | if (l2_accel) { | |
449 | u16 idx = le16_to_cpu(FM10K_CB(skb)->fi.w.dglort) - 1; | |
450 | ||
451 | idx -= l2_accel->dglort; | |
452 | if (idx < l2_accel->size && l2_accel->macvlan[idx]) | |
453 | dev = l2_accel->macvlan[idx]; | |
454 | else | |
455 | l2_accel = NULL; | |
456 | } | |
457 | ||
458 | skb->protocol = eth_type_trans(skb, dev); | |
459 | ||
460 | if (!l2_accel) | |
461 | return; | |
462 | ||
463 | /* update MACVLAN statistics */ | |
464 | macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, 1, | |
465 | !!(rx_desc->w.hdr_info & | |
466 | cpu_to_le16(FM10K_RXD_HDR_INFO_XC_MASK))); | |
467 | } | |
468 | ||
469 | /** |
470 | * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor | |
471 | * @rx_ring: rx descriptor ring packet is being transacted on | |
472 | * @rx_desc: pointer to the EOP Rx descriptor | |
473 | * @skb: pointer to current skb being populated | |
474 | * | |
475 | * This function checks the ring, descriptor, and packet information in | |
476 | * order to populate the hash, checksum, VLAN, timestamp, protocol, and | |
477 | * other fields within the skb. | |
478 | **/ | |
479 | static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring, | |
480 | union fm10k_rx_desc *rx_desc, | |
481 | struct sk_buff *skb) | |
482 | { | |
483 | unsigned int len = skb->len; | |
484 | ||
485 | fm10k_rx_hash(rx_ring, rx_desc, skb); |
486 | ||
487 | fm10k_rx_checksum(rx_ring, rx_desc, skb); | |
488 | ||
489 | fm10k_rx_hwtstamp(rx_ring, rx_desc, skb); |
490 | ||
491 | FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan; |
492 | ||
493 | skb_record_rx_queue(skb, rx_ring->queue_index); | |
494 | ||
495 | FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort; | |
496 | ||
497 | if (rx_desc->w.vlan) { | |
498 | u16 vid = le16_to_cpu(rx_desc->w.vlan); | |
499 | ||
e71c9318 | 500 | if ((vid & VLAN_VID_MASK) != rx_ring->vid) |
b101c962 | 501 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); |
502 | else if (vid & VLAN_PRIO_MASK) |
503 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), | |
504 | vid & VLAN_PRIO_MASK); | |
505 | } |
506 | ||
5cd5e2e9 | 507 | fm10k_type_trans(rx_ring, rx_desc, skb); |
508 | |
509 | return len; | |
510 | } | |
511 | ||
512 | /** | |
513 | * fm10k_is_non_eop - process handling of non-EOP buffers | |
514 | * @rx_ring: Rx ring being processed | |
515 | * @rx_desc: Rx descriptor for current buffer | |
516 | * | |
517 | * This function updates next to clean. If the buffer is an EOP buffer | |
518 | * this function exits returning false, otherwise it will place the | |
519 | * sk_buff in the next buffer to be chained and return true indicating | |
520 | * that this is in fact a non-EOP buffer. | |
521 | **/ | |
522 | static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring, | |
523 | union fm10k_rx_desc *rx_desc) | |
524 | { | |
525 | u32 ntc = rx_ring->next_to_clean + 1; | |
526 | ||
527 | /* fetch, update, and store next to clean */ | |
528 | ntc = (ntc < rx_ring->count) ? ntc : 0; | |
529 | rx_ring->next_to_clean = ntc; | |
530 | ||
531 | prefetch(FM10K_RX_DESC(rx_ring, ntc)); | |
532 | ||
533 | if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP))) | |
534 | return false; | |
535 | ||
536 | return true; | |
537 | } | |
538 | ||
539 | /** |
540 | * fm10k_cleanup_headers - Correct corrupted or empty headers | |
541 | * @rx_ring: rx descriptor ring packet is being transacted on | |
542 | * @rx_desc: pointer to the EOP Rx descriptor | |
543 | * @skb: pointer to current skb being fixed | |
544 | * | |
545 | * Address the case where we are pulling data in on pages only | |
546 | * and as such no data is present in the skb header. | |
547 | * | |
548 | * In addition if skb is not at least 60 bytes we need to pad it so that | |
549 | * it is large enough to qualify as a valid Ethernet frame. | |
550 | * | |
551 | * Returns true if an error was encountered and skb was freed. | |
552 | **/ | |
553 | static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring, | |
554 | union fm10k_rx_desc *rx_desc, | |
555 | struct sk_buff *skb) | |
556 | { | |
557 | if (unlikely((fm10k_test_staterr(rx_desc, | |
558 | FM10K_RXD_STATUS_RXE)))) { | |
559 | #define FM10K_TEST_RXD_BIT(rxd, bit) \ |
560 | ((rxd)->w.csum_err & cpu_to_le16(bit)) | |
561 | if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_ERROR)) | |
562 | rx_ring->rx_stats.switch_errors++; | |
563 | if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_NO_DESCRIPTOR)) | |
564 | rx_ring->rx_stats.drops++; | |
565 | if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_PP_ERROR)) | |
566 | rx_ring->rx_stats.pp_errors++; | |
567 | if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_READY)) | |
568 | rx_ring->rx_stats.link_errors++; | |
569 | if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_TOO_BIG)) | |
570 | rx_ring->rx_stats.length_errors++; | |
571 | dev_kfree_skb_any(skb); |
572 | rx_ring->rx_stats.errors++; | |
573 | return true; | |
574 | } | |
575 | ||
576 | /* if eth_skb_pad returns an error the skb was freed */ |
577 | if (eth_skb_pad(skb)) | |
578 | return true; | |
579 | |
580 | return false; | |
581 | } | |
582 | ||
583 | /** | |
584 | * fm10k_receive_skb - helper function to handle rx indications | |
585 | * @q_vector: structure containing interrupt and ring information | |
586 | * @skb: packet to send up | |
587 | **/ | |
588 | static void fm10k_receive_skb(struct fm10k_q_vector *q_vector, | |
589 | struct sk_buff *skb) | |
590 | { | |
591 | napi_gro_receive(&q_vector->napi, skb); | |
592 | } | |
593 | ||
594 | static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector, |
595 | struct fm10k_ring *rx_ring, | |
596 | int budget) | |
597 | { |
598 | struct sk_buff *skb = rx_ring->skb; | |
599 | unsigned int total_bytes = 0, total_packets = 0; | |
600 | u16 cleaned_count = fm10k_desc_unused(rx_ring); | |
601 | ||
59486329 | 602 | while (likely(total_packets < budget)) { |
603 | union fm10k_rx_desc *rx_desc; |
604 | ||
605 | /* return some buffers to hardware, one at a time is too slow */ | |
606 | if (cleaned_count >= FM10K_RX_BUFFER_WRITE) { | |
607 | fm10k_alloc_rx_buffers(rx_ring, cleaned_count); | |
608 | cleaned_count = 0; | |
609 | } | |
610 | ||
611 | rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean); | |
612 | ||
124b74c1 | 613 | if (!rx_desc->d.staterr) |
614 | break; |
615 | ||
616 | /* This memory barrier is needed to keep us from reading | |
617 | * any other fields out of the rx_desc until we know the | |
618 | * descriptor has been written back | |
619 | */ | |
620 | dma_rmb(); | |
621 | |
622 | /* retrieve a buffer from the ring */ | |
623 | skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb); | |
624 | ||
625 | /* exit if we failed to retrieve a buffer */ | |
626 | if (!skb) | |
627 | break; | |
628 | ||
629 | cleaned_count++; | |
630 | ||
631 | /* fetch next buffer in frame if non-eop */ | |
632 | if (fm10k_is_non_eop(rx_ring, rx_desc)) | |
633 | continue; | |
634 | ||
635 | /* verify the packet layout is correct */ | |
636 | if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) { | |
637 | skb = NULL; | |
638 | continue; | |
639 | } | |
640 | ||
641 | /* populate checksum, timestamp, VLAN, and protocol */ | |
642 | total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb); | |
643 | ||
644 | fm10k_receive_skb(q_vector, skb); | |
645 | ||
646 | /* reset skb pointer */ | |
647 | skb = NULL; | |
648 | ||
649 | /* update budget accounting */ | |
650 | total_packets++; | |
59486329 | 651 | } |
652 | |
653 | /* place incomplete frames back on ring for completion */ | |
654 | rx_ring->skb = skb; | |
655 | ||
656 | u64_stats_update_begin(&rx_ring->syncp); | |
657 | rx_ring->stats.packets += total_packets; | |
658 | rx_ring->stats.bytes += total_bytes; | |
659 | u64_stats_update_end(&rx_ring->syncp); | |
660 | q_vector->rx.total_packets += total_packets; | |
661 | q_vector->rx.total_bytes += total_bytes; | |
662 | ||
32b3e08f | 663 | return total_packets; |
664 | } |
665 | ||
666 | #define VXLAN_HLEN (sizeof(struct udphdr) + 8) |
667 | static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb) | |
668 | { | |
669 | struct fm10k_intfc *interface = netdev_priv(skb->dev); | |
670 | struct fm10k_vxlan_port *vxlan_port; | |
671 | ||
672 | /* we can only offload a vxlan if we recognize it as such */ | |
673 | vxlan_port = list_first_entry_or_null(&interface->vxlan_port, | |
674 | struct fm10k_vxlan_port, list); | |
675 | ||
676 | if (!vxlan_port) | |
677 | return NULL; | |
678 | if (vxlan_port->port != udp_hdr(skb)->dest) | |
679 | return NULL; | |
680 | ||
681 | /* return offset of udp_hdr plus 8 bytes for VXLAN header */ | |
682 | return (struct ethhdr *)(skb_transport_header(skb) + VXLAN_HLEN); | |
683 | } | |
684 | ||
685 | #define FM10K_NVGRE_RESERVED0_FLAGS htons(0x9FFF) | |
686 | #define NVGRE_TNI htons(0x2000) | |
687 | struct fm10k_nvgre_hdr { | |
688 | __be16 flags; | |
689 | __be16 proto; | |
690 | __be32 tni; | |
691 | }; | |
692 | ||
693 | static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb) | |
694 | { | |
695 | struct fm10k_nvgre_hdr *nvgre_hdr; | |
696 | int hlen = ip_hdrlen(skb); | |
697 | ||
698 | /* currently only IPv4 is supported due to hlen above */ | |
699 | if (vlan_get_protocol(skb) != htons(ETH_P_IP)) | |
700 | return NULL; | |
701 | ||
702 | /* our transport header should be NVGRE */ | |
703 | nvgre_hdr = (struct fm10k_nvgre_hdr *)(skb_network_header(skb) + hlen); | |
704 | ||
705 | /* verify all reserved flags are 0 */ | |
706 | if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS) | |
707 | return NULL; | |
708 | ||
709 | /* report start of ethernet header */ |
710 | if (nvgre_hdr->flags & NVGRE_TNI) | |
711 | return (struct ethhdr *)(nvgre_hdr + 1); | |
712 | ||
713 | return (struct ethhdr *)(&nvgre_hdr->tni); | |
714 | } | |
715 | ||
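The two return paths in fm10k_gre_is_nvgre() differ only in where the inner Ethernet frame starts: after the full 8-byte header when the TNI (key) field is present, or 4 bytes earlier when it is not. A standalone sketch of those offsets; the struct mirrors the driver's layout but uses plain integer types and is illustrative only:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* same shape as the driver's struct fm10k_nvgre_hdr */
struct nvgre_hdr {
	uint16_t flags;
	uint16_t proto;
	uint32_t tni;
};

int main(void)
{
	printf("inner frame offset with key:    %zu bytes\n",
	       sizeof(struct nvgre_hdr));
	printf("inner frame offset without key: %zu bytes\n",
	       offsetof(struct nvgre_hdr, tni));
	return 0;
}
```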
716 | __be16 fm10k_tx_encap_offload(struct sk_buff *skb) | |
717 | { | |
718 | u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen; | |
719 | struct ethhdr *eth_hdr; | |
720 ||
721 | if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || |
722 | skb->inner_protocol != htons(ETH_P_TEB)) | |
723 | return 0; |
724 | ||
725 | switch (vlan_get_protocol(skb)) { |
726 | case htons(ETH_P_IP): | |
727 | l4_hdr = ip_hdr(skb)->protocol; | |
728 | break; | |
729 | case htons(ETH_P_IPV6): | |
730 | l4_hdr = ipv6_hdr(skb)->nexthdr; | |
731 | break; | |
732 | default: | |
733 | return 0; | |
734 | } | |
735 | ||
736 | switch (l4_hdr) { | |
737 | case IPPROTO_UDP: | |
738 | eth_hdr = fm10k_port_is_vxlan(skb); | |
739 | break; | |
740 | case IPPROTO_GRE: | |
741 | eth_hdr = fm10k_gre_is_nvgre(skb); | |
742 | break; | |
743 | default: | |
744 | return 0; | |
745 | } | |
746 | ||
747 | if (!eth_hdr) | |
748 | return 0; | |
749 | ||
750 | switch (eth_hdr->h_proto) { | |
751 | case htons(ETH_P_IP): | |
752 | inner_l4_hdr = inner_ip_hdr(skb)->protocol; |
753 | break; | |
76a540d4 | 754 | case htons(ETH_P_IPV6): |
8c1a90aa | 755 | inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr; |
756 | break; |
757 | default: | |
758 | return 0; | |
759 | } | |
760 | ||
761 | switch (inner_l4_hdr) { |
762 | case IPPROTO_TCP: | |
763 | inner_l4_hlen = inner_tcp_hdrlen(skb); | |
764 | break; | |
765 | case IPPROTO_UDP: | |
766 | inner_l4_hlen = 8; | |
767 | break; | |
768 | default: | |
769 | return 0; | |
770 | } | |
771 | ||
772 | /* The hardware allows tunnel offloads only if the combined inner and | |
773 | * outer header is 184 bytes or less | |
774 | */ | |
775 | if (skb_inner_transport_header(skb) + inner_l4_hlen - | |
776 | skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH) | |
777 | return 0; | |
778 | ||
779 | return eth_hdr->h_proto; |
780 | } | |
781 | ||
782 | static int fm10k_tso(struct fm10k_ring *tx_ring, | |
783 | struct fm10k_tx_buffer *first) | |
784 | { | |
785 | struct sk_buff *skb = first->skb; | |
786 | struct fm10k_tx_desc *tx_desc; | |
787 | unsigned char *th; | |
788 | u8 hdrlen; | |
789 | ||
790 | if (skb->ip_summed != CHECKSUM_PARTIAL) | |
791 | return 0; | |
792 | ||
793 | if (!skb_is_gso(skb)) | |
794 | return 0; | |
795 | ||
796 | /* compute header lengths */ | |
797 | if (skb->encapsulation) { | |
798 | if (!fm10k_tx_encap_offload(skb)) | |
799 | goto err_vxlan; | |
800 | th = skb_inner_transport_header(skb); | |
801 | } else { | |
802 | th = skb_transport_header(skb); | |
803 | } | |
804 | ||
805 | /* compute offset from SOF to transport header and add header len */ | |
806 | hdrlen = (th - skb->data) + (((struct tcphdr *)th)->doff << 2); | |
807 | ||
808 | first->tx_flags |= FM10K_TX_FLAGS_CSUM; | |
809 | ||
810 | /* update gso size and bytecount with header size */ | |
811 | first->gso_segs = skb_shinfo(skb)->gso_segs; | |
812 | first->bytecount += (first->gso_segs - 1) * hdrlen; | |
813 | ||
814 | /* populate Tx descriptor header size and mss */ | |
815 | tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); | |
816 | tx_desc->hdrlen = hdrlen; | |
817 | tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); | |
818 | ||
819 | return 1; | |
820 | err_vxlan: | |
821 | tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL; | |
822 | if (!net_ratelimit()) | |
823 | netdev_err(tx_ring->netdev, | |
824 | "TSO requested for unsupported tunnel, disabling offload\n"); | |
825 | return -1; | |
826 | } | |
827 | ||
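fm10k_tso() charges the headers replicated for every extra segment to `first->bytecount`, so the byte statistics reflect what actually goes on the wire. A worked example with assumed numbers (66-byte headers, 1448-byte MSS):

```c
#include <stdio.h>

int main(void)
{
	/* hypothetical TSO frame: Ethernet+IPv4+TCP headers plus payload */
	unsigned int hdrlen = 66, mss = 1448, payload = 4344;
	unsigned int gso_segs = (payload + mss - 1) / mss;	/* 3 segments */
	unsigned int bytecount = hdrlen + payload;		/* skb->len   */

	/* count the duplicated headers, mirroring
	 * first->bytecount += (first->gso_segs - 1) * hdrlen
	 */
	bytecount += (gso_segs - 1) * hdrlen;

	printf("segments=%u, wire bytes=%u\n", gso_segs, bytecount);
	return 0;
}
```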
828 | static void fm10k_tx_csum(struct fm10k_ring *tx_ring, | |
829 | struct fm10k_tx_buffer *first) | |
830 | { | |
831 | struct sk_buff *skb = first->skb; | |
832 | struct fm10k_tx_desc *tx_desc; | |
833 | union { | |
834 | struct iphdr *ipv4; | |
835 | struct ipv6hdr *ipv6; | |
836 | u8 *raw; | |
837 | } network_hdr; | |
838 | __be16 protocol; | |
839 | u8 l4_hdr = 0; | |
840 | ||
841 | if (skb->ip_summed != CHECKSUM_PARTIAL) | |
842 | goto no_csum; | |
843 | ||
844 | if (skb->encapsulation) { | |
845 | protocol = fm10k_tx_encap_offload(skb); | |
846 | if (!protocol) { | |
847 | if (skb_checksum_help(skb)) { | |
848 | dev_warn(tx_ring->dev, | |
849 | "failed to offload encap csum!\n"); | |
850 | tx_ring->tx_stats.csum_err++; | |
851 | } | |
852 | goto no_csum; | |
853 | } | |
854 | network_hdr.raw = skb_inner_network_header(skb); | |
855 | } else { | |
856 | protocol = vlan_get_protocol(skb); | |
857 | network_hdr.raw = skb_network_header(skb); | |
858 | } | |
859 | ||
860 | switch (protocol) { | |
861 | case htons(ETH_P_IP): | |
862 | l4_hdr = network_hdr.ipv4->protocol; | |
863 | break; | |
864 | case htons(ETH_P_IPV6): | |
865 | l4_hdr = network_hdr.ipv6->nexthdr; | |
866 | break; | |
867 | default: | |
868 | if (unlikely(net_ratelimit())) { | |
869 | dev_warn(tx_ring->dev, | |
870 | "partial checksum but ip version=%x!\n", | |
871 | protocol); | |
872 | } | |
873 | tx_ring->tx_stats.csum_err++; | |
874 | goto no_csum; | |
875 | } | |
876 | ||
877 | switch (l4_hdr) { | |
878 | case IPPROTO_TCP: | |
879 | case IPPROTO_UDP: | |
880 | break; | |
881 | case IPPROTO_GRE: | |
882 | if (skb->encapsulation) | |
883 | break; | |
884 | default: | |
885 | if (unlikely(net_ratelimit())) { | |
886 | dev_warn(tx_ring->dev, | |
887 | "partial checksum but l4 proto=%x!\n", | |
888 | l4_hdr); | |
889 | } | |
890 | tx_ring->tx_stats.csum_err++; | |
891 | goto no_csum; | |
892 | } | |
893 | ||
894 | /* update TX checksum flag */ | |
895 | first->tx_flags |= FM10K_TX_FLAGS_CSUM; | |
80043f3b | 896 | tx_ring->tx_stats.csum_good++; |
897 | |
898 | no_csum: | |
899 | /* populate Tx descriptor header size and mss */ | |
900 | tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); | |
901 | tx_desc->hdrlen = 0; | |
902 | tx_desc->mss = 0; | |
903 | } | |
904 | ||
905 | #define FM10K_SET_FLAG(_input, _flag, _result) \ | |
906 | ((_flag <= _result) ? \ | |
907 | ((u32)(_input & _flag) * (_result / _flag)) : \ | |
908 | ((u32)(_input & _flag) / (_flag / _result))) | |
909 | ||
910 | static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags) | |
911 | { | |
912 | /* set type for advanced descriptor with frame checksum insertion */ | |
913 | u32 desc_flags = 0; | |
914 | ||
915 | /* set timestamping bits */ |
916 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && | |
917 | likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) | |
a4fcad65 | 918 | desc_flags |= FM10K_TXD_FLAG_TIME; |
a211e013 | 919 | |
920 | /* set checksum offload bits */ |
921 | desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM, | |
922 | FM10K_TXD_FLAG_CSUM); | |
923 | ||
924 | return desc_flags; | |
925 | } | |
926 | ||
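FM10K_SET_FLAG() moves a software flag bit to the bit position the descriptor expects by scaling with the power-of-two ratio of the two masks; with constant arguments the compiler folds it to a single shift. A userspace sketch with hypothetical flag values, not the real fm10k bit definitions:

```c
#include <stdint.h>
#include <stdio.h>

/* same shape as the driver macro */
#define SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((uint32_t)((_input) & (_flag)) * ((_result) / (_flag))) : \
	 ((uint32_t)((_input) & (_flag)) / ((_flag) / (_result))))

#define SW_FLAG_CSUM	0x01u	/* hypothetical software flag, bit 0   */
#define TXD_FLAG_CSUM	0x40u	/* hypothetical descriptor flag, bit 6 */

int main(void)
{
	uint32_t tx_flags = SW_FLAG_CSUM;

	/* bit 0 of tx_flags lands in bit 6 of the descriptor flags */
	printf("descriptor flags = 0x%02x\n",
	       SET_FLAG(tx_flags, SW_FLAG_CSUM, TXD_FLAG_CSUM));
	return 0;
}
```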
927 | static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring, |
928 | struct fm10k_tx_desc *tx_desc, u16 i, | |
929 | dma_addr_t dma, unsigned int size, u8 desc_flags) | |
930 | { | |
931 | /* set RS and INT for last frame in a cache line */ | |
932 | if ((++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0) | |
933 | desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT; | |
934 | ||
935 | /* record values to descriptor */ | |
936 | tx_desc->buffer_addr = cpu_to_le64(dma); | |
937 | tx_desc->flags = desc_flags; | |
938 | tx_desc->buflen = cpu_to_le16(size); | |
939 | ||
940 | /* return true if we just wrapped the ring */ | |
941 | return i == tx_ring->count; | |
942 | } | |
943 | ||
944 | static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) |
945 | { | |
946 | netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); | |
947 | ||
eca32047 | 948 | /* Memory barrier before checking head and tail */ |
949 | smp_mb(); |
950 | ||
951 | /* Check again in case another CPU has just made room available */ | |
952 | if (likely(fm10k_desc_unused(tx_ring) < size)) |
953 | return -EBUSY; | |
954 | ||
955 | /* A reprieve! - use start_queue because it doesn't call schedule */ | |
956 | netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); | |
957 | ++tx_ring->tx_stats.restart_queue; | |
958 | return 0; | |
959 | } | |
960 | ||
961 | static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) | |
962 | { | |
963 | if (likely(fm10k_desc_unused(tx_ring) >= size)) | |
964 | return 0; | |
965 | return __fm10k_maybe_stop_tx(tx_ring, size); | |
966 | } | |
967 | ||
968 | static void fm10k_tx_map(struct fm10k_ring *tx_ring, |
969 | struct fm10k_tx_buffer *first) | |
970 | { | |
971 | struct sk_buff *skb = first->skb; | |
972 | struct fm10k_tx_buffer *tx_buffer; | |
973 | struct fm10k_tx_desc *tx_desc; | |
974 | struct skb_frag_struct *frag; | |
975 | unsigned char *data; | |
976 | dma_addr_t dma; | |
977 | unsigned int data_len, size; | |
978 | u32 tx_flags = first->tx_flags; | |
979 | u16 i = tx_ring->next_to_use; | |
980 | u8 flags = fm10k_tx_desc_flags(skb, tx_flags); | |
981 | |
982 | tx_desc = FM10K_TX_DESC(tx_ring, i); | |
983 | ||
984 | /* add HW VLAN tag */ | |
985 | if (skb_vlan_tag_present(skb)) |
986 | tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb)); | |
987 | else |
988 | tx_desc->vlan = 0; | |
989 | ||
990 | size = skb_headlen(skb); | |
991 | data = skb->data; | |
992 | ||
993 | dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE); | |
994 | ||
995 | data_len = skb->data_len; | |
996 | tx_buffer = first; | |
997 | ||
998 | for (frag = &skb_shinfo(skb)->frags[0];; frag++) { | |
999 | if (dma_mapping_error(tx_ring->dev, dma)) | |
1000 | goto dma_error; | |
1001 | ||
1002 | /* record length, and DMA address */ | |
1003 | dma_unmap_len_set(tx_buffer, len, size); | |
1004 | dma_unmap_addr_set(tx_buffer, dma, dma); | |
1005 | ||
1006 | while (unlikely(size > FM10K_MAX_DATA_PER_TXD)) { | |
1007 | if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma, | |
1008 | FM10K_MAX_DATA_PER_TXD, flags)) { | |
1009 | tx_desc = FM10K_TX_DESC(tx_ring, 0); | |
1010 | i = 0; | |
1011 | } | |
1012 | ||
1013 | dma += FM10K_MAX_DATA_PER_TXD; | |
1014 | size -= FM10K_MAX_DATA_PER_TXD; | |
1015 | } | |
1016 | ||
1017 | if (likely(!data_len)) | |
1018 | break; | |
1019 | ||
1020 | if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, | |
1021 | dma, size, flags)) { | |
1022 | tx_desc = FM10K_TX_DESC(tx_ring, 0); | |
1023 | i = 0; | |
1024 | } | |
1025 | ||
1026 | size = skb_frag_size(frag); | |
1027 | data_len -= size; | |
1028 | ||
1029 | dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, | |
1030 | DMA_TO_DEVICE); | |
1031 | ||
1032 | tx_buffer = &tx_ring->tx_buffer[i]; | |
1033 | } | |
1034 | ||
1035 | /* write last descriptor with LAST bit set */ | |
1036 | flags |= FM10K_TXD_FLAG_LAST; | |
1037 | ||
1038 | if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags)) | |
1039 | i = 0; | |
1040 | ||
1041 | /* record bytecount for BQL */ | |
1042 | netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); | |
1043 | ||
1044 | /* record SW timestamp if HW timestamp is not available */ | |
1045 | skb_tx_timestamp(first->skb); | |
1046 | ||
1047 | /* Force memory writes to complete before letting h/w know there | |
1048 | * are new descriptors to fetch. (Only applicable for weak-ordered | |
1049 | * memory model archs, such as IA-64). | |
1050 | * | |
1051 | * We also need this memory barrier to make certain all of the | |
1052 | * status bits have been updated before next_to_watch is written. | |
1053 | */ | |
1054 | wmb(); | |
1055 | ||
1056 | /* set next_to_watch value indicating a packet is present */ | |
1057 | first->next_to_watch = tx_desc; | |
1058 | ||
1059 | tx_ring->next_to_use = i; | |
1060 | ||
1061 | /* Make sure there is space in the ring for the next send. */ |
1062 | fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED); | |
1063 | ||
b101c962 | 1064 | /* notify HW of packet */ |
1065 | if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { |
1066 | writel(i, tx_ring->tail); | |
b101c962 | 1067 | |
1068 | /* we need this if more than one processor can write to our tail |
1069 | * at a time, it synchronizes IO on IA64/Altix systems | |
1070 | */ | |
1071 | mmiowb(); | |
1072 | } | |
1073 | |
1074 | return; | |
1075 | dma_error: | |
1076 | dev_err(tx_ring->dev, "TX DMA map failed\n"); | |
1077 | ||
1078 | /* clear dma mappings for failed tx_buffer map */ | |
1079 | for (;;) { | |
1080 | tx_buffer = &tx_ring->tx_buffer[i]; | |
1081 | fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer); | |
1082 | if (tx_buffer == first) | |
1083 | break; | |
1084 | if (i == 0) | |
1085 | i = tx_ring->count; | |
1086 | i--; | |
1087 | } | |
1088 | ||
1089 | tx_ring->next_to_use = i; | |
1090 | } | |
1091 | ||
1092 | netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, |
1093 | struct fm10k_ring *tx_ring) | |
1094 | { | |
1095 | u16 count = TXD_USE_COUNT(skb_headlen(skb)); | |
1096 | struct fm10k_tx_buffer *first; | |
1097 | unsigned short f; | |
1098 | u32 tx_flags = 0; |
1099 | int tso; | |
1100 | |
1101 | /* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD, | |
1102 | * + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD, | |
1103 | * + 2 desc gap to keep tail from touching head | |
1104 | * otherwise try next time | |
1105 | */ | |
1106 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) |
1107 | count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); | |
aae072e3 | 1108 | |
1109 | if (fm10k_maybe_stop_tx(tx_ring, count + 3)) { |
1110 | tx_ring->tx_stats.tx_busy++; | |
1111 | return NETDEV_TX_BUSY; | |
1112 | } | |
1113 | ||
1114 | /* record the location of the first descriptor for this packet */ | |
1115 | first = &tx_ring->tx_buffer[tx_ring->next_to_use]; | |
1116 | first->skb = skb; | |
1117 | first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); | |
1118 | first->gso_segs = 1; | |
1119 | ||
1120 | /* record initial flags and protocol */ | |
1121 | first->tx_flags = tx_flags; | |
1122 | ||
1123 | tso = fm10k_tso(tx_ring, first); |
1124 | if (tso < 0) | |
1125 | goto out_drop; | |
1126 | else if (!tso) | |
1127 | fm10k_tx_csum(tx_ring, first); | |
1128 | ||
1129 | fm10k_tx_map(tx_ring, first); |
1130 | ||
1131 | return NETDEV_TX_OK; |
1132 | ||
1133 | out_drop: | |
1134 | dev_kfree_skb_any(first->skb); | |
1135 | first->skb = NULL; | |
1136 | ||
1137 | return NETDEV_TX_OK; |
1138 | } | |
1139 | ||
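fm10k_xmit_frame_ring() sizes its descriptor budget as one descriptor per FM10K_MAX_DATA_PER_TXD chunk of the linear area and of each fragment, plus a small gap so the tail never touches the head. The sketch below redoes that arithmetic for a hypothetical frame; the 16 KiB per-descriptor limit is an assumption to verify against FM10K_MAX_DATA_PER_TXD in fm10k.h:

```c
#include <stdio.h>

#define MAX_DATA_PER_TXD	16384u	/* assumed per-descriptor limit */
#define TXD_USE_COUNT(s)	(((s) + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD)

int main(void)
{
	/* hypothetical frame: 256-byte linear header plus two 32 KiB frags */
	unsigned int headlen = 256;
	unsigned int frags[] = { 32768, 32768 };
	unsigned int count = TXD_USE_COUNT(headlen);

	for (unsigned int f = 0; f < sizeof(frags) / sizeof(frags[0]); f++)
		count += TXD_USE_COUNT(frags[f]);

	/* the queue is stopped unless count + 3 descriptors are free */
	printf("descriptors needed: %u, free slots required: %u\n",
	       count, count + 3);
	return 0;
}
```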
1140 | static u64 fm10k_get_tx_completed(struct fm10k_ring *ring) | |
1141 | { | |
1142 | return ring->stats.packets; | |
1143 | } | |
1144 | ||
1145 | static u64 fm10k_get_tx_pending(struct fm10k_ring *ring) | |
1146 | { | |
1147 | /* use SW head and tail until we have real hardware */ | |
1148 | u32 head = ring->next_to_clean; | |
1149 | u32 tail = ring->next_to_use; | |
1150 | ||
1151 | return ((head <= tail) ? tail : tail + ring->count) - head; | |
1152 | } | |
1153 | ||
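fm10k_get_tx_pending() derives ring occupancy from the software head and tail, adding the ring size when the tail has wrapped behind the head. A runnable restatement with an assumed 8-entry ring:

```c
#include <stdio.h>

#define RING_COUNT 8u	/* hypothetical ring size */

static unsigned int tx_pending(unsigned int head, unsigned int tail)
{
	/* same expression as the driver helper */
	return ((head <= tail) ? tail : tail + RING_COUNT) - head;
}

int main(void)
{
	printf("%u\n", tx_pending(2, 5));	/* no wrap: 3 in flight */
	printf("%u\n", tx_pending(6, 1));	/* wrapped: 3 in flight */
	printf("%u\n", tx_pending(4, 4));	/* empty:   0 in flight */
	return 0;
}
```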
1154 | bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring) | |
1155 | { | |
1156 | u32 tx_done = fm10k_get_tx_completed(tx_ring); | |
1157 | u32 tx_done_old = tx_ring->tx_stats.tx_done_old; | |
1158 | u32 tx_pending = fm10k_get_tx_pending(tx_ring); | |
1159 | ||
1160 | clear_check_for_tx_hang(tx_ring); | |
1161 | ||
1162 | /* Check for a hung queue, but be thorough. This verifies | |
1163 | * that a transmit has been completed since the previous | |
1164 | * check AND there is at least one packet pending. By | |
1165 | * requiring this to fail twice we avoid races with | |
1166 | * clearing the ARMED bit and conditions where we | |
1167 | * run the check_tx_hang logic with a transmit completion | |
1168 | * pending but without time to complete it yet. | |
1169 | */ | |
1170 | if (!tx_pending || (tx_done_old != tx_done)) { | |
1171 | /* update completed stats and continue */ | |
1172 | tx_ring->tx_stats.tx_done_old = tx_done; | |
1173 | /* reset the countdown */ | |
1174 | clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state); | |
1175 | ||
1176 | return false; | |
1177 | } | |
1178 | ||
1179 | /* make sure it is true for two checks in a row */ | |
1180 | return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state); | |
1181 | } | |
1182 | ||
1183 | /** | |
1184 | * fm10k_tx_timeout_reset - initiate reset due to Tx timeout | |
1185 | * @interface: driver private struct | |
1186 | **/ | |
1187 | void fm10k_tx_timeout_reset(struct fm10k_intfc *interface) | |
1188 | { | |
1189 | /* Do the reset outside of interrupt context */ | |
1190 | if (!test_bit(__FM10K_DOWN, &interface->state)) { | |
1191 | interface->tx_timeout_count++; |
1192 | interface->flags |= FM10K_FLAG_RESET_REQUESTED; | |
1193 | fm10k_service_event_schedule(interface); | |
1194 | } | |
1195 | } | |
1196 | ||
1197 | /** | |
1198 | * fm10k_clean_tx_irq - Reclaim resources after transmit completes | |
1199 | * @q_vector: structure containing interrupt and ring information | |
1200 | * @tx_ring: tx ring to clean | |
1201 | **/ | |
1202 | static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, | |
1203 | struct fm10k_ring *tx_ring) | |
1204 | { | |
1205 | struct fm10k_intfc *interface = q_vector->interface; | |
1206 | struct fm10k_tx_buffer *tx_buffer; | |
1207 | struct fm10k_tx_desc *tx_desc; | |
1208 | unsigned int total_bytes = 0, total_packets = 0; | |
1209 | unsigned int budget = q_vector->tx.work_limit; | |
1210 | unsigned int i = tx_ring->next_to_clean; | |
1211 | ||
1212 | if (test_bit(__FM10K_DOWN, &interface->state)) | |
1213 | return true; | |
1214 | ||
1215 | tx_buffer = &tx_ring->tx_buffer[i]; | |
1216 | tx_desc = FM10K_TX_DESC(tx_ring, i); | |
1217 | i -= tx_ring->count; | |
1218 | ||
1219 | do { | |
1220 | struct fm10k_tx_desc *eop_desc = tx_buffer->next_to_watch; | |
1221 | ||
1222 | /* if next_to_watch is not set then there is no work pending */ | |
1223 | if (!eop_desc) | |
1224 | break; | |
1225 | ||
1226 | /* prevent any other reads prior to eop_desc */ | |
1227 | read_barrier_depends(); | |
1228 | ||
1229 | /* if DD is not set pending work has not been completed */ | |
1230 | if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE)) | |
1231 | break; | |
1232 | ||
1233 | /* clear next_to_watch to prevent false hangs */ | |
1234 | tx_buffer->next_to_watch = NULL; | |
1235 | ||
1236 | /* update the statistics for this packet */ | |
1237 | total_bytes += tx_buffer->bytecount; | |
1238 | total_packets += tx_buffer->gso_segs; | |
1239 | ||
1240 | /* free the skb */ | |
1241 | dev_consume_skb_any(tx_buffer->skb); | |
1242 | ||
1243 | /* unmap skb header data */ | |
1244 | dma_unmap_single(tx_ring->dev, | |
1245 | dma_unmap_addr(tx_buffer, dma), | |
1246 | dma_unmap_len(tx_buffer, len), | |
1247 | DMA_TO_DEVICE); | |
1248 | ||
1249 | /* clear tx_buffer data */ | |
1250 | tx_buffer->skb = NULL; | |
1251 | dma_unmap_len_set(tx_buffer, len, 0); | |
1252 | ||
1253 | /* unmap remaining buffers */ | |
1254 | while (tx_desc != eop_desc) { | |
1255 | tx_buffer++; | |
1256 | tx_desc++; | |
1257 | i++; | |
1258 | if (unlikely(!i)) { | |
1259 | i -= tx_ring->count; | |
1260 | tx_buffer = tx_ring->tx_buffer; | |
1261 | tx_desc = FM10K_TX_DESC(tx_ring, 0); | |
1262 | } | |
1263 | ||
1264 | /* unmap any remaining paged data */ | |
1265 | if (dma_unmap_len(tx_buffer, len)) { | |
1266 | dma_unmap_page(tx_ring->dev, | |
1267 | dma_unmap_addr(tx_buffer, dma), | |
1268 | dma_unmap_len(tx_buffer, len), | |
1269 | DMA_TO_DEVICE); | |
1270 | dma_unmap_len_set(tx_buffer, len, 0); | |
1271 | } | |
1272 | } | |
1273 | ||
1274 | /* move us one more past the eop_desc for start of next pkt */ | |
1275 | tx_buffer++; | |
1276 | tx_desc++; | |
1277 | i++; | |
1278 | if (unlikely(!i)) { | |
1279 | i -= tx_ring->count; | |
1280 | tx_buffer = tx_ring->tx_buffer; | |
1281 | tx_desc = FM10K_TX_DESC(tx_ring, 0); | |
1282 | } | |
1283 | ||
1284 | /* issue prefetch for next Tx descriptor */ | |
1285 | prefetch(tx_desc); | |
1286 | ||
1287 | /* update budget accounting */ | |
1288 | budget--; | |
1289 | } while (likely(budget)); | |
1290 | ||
1291 | i += tx_ring->count; | |
1292 | tx_ring->next_to_clean = i; | |
1293 | u64_stats_update_begin(&tx_ring->syncp); | |
1294 | tx_ring->stats.bytes += total_bytes; | |
1295 | tx_ring->stats.packets += total_packets; | |
1296 | u64_stats_update_end(&tx_ring->syncp); | |
1297 | q_vector->tx.total_bytes += total_bytes; | |
1298 | q_vector->tx.total_packets += total_packets; | |
1299 | ||
1300 | if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) { | |
1301 | /* schedule immediate reset if we believe we hung */ | |
1302 | struct fm10k_hw *hw = &interface->hw; | |
1303 | ||
1304 | netif_err(interface, drv, tx_ring->netdev, | |
1305 | "Detected Tx Unit Hang\n" | |
1306 | " Tx Queue <%d>\n" | |
1307 | " TDH, TDT <%x>, <%x>\n" | |
1308 | " next_to_use <%x>\n" | |
1309 | " next_to_clean <%x>\n", | |
1310 | tx_ring->queue_index, | |
1311 | fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)), | |
1312 | fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)), | |
1313 | tx_ring->next_to_use, i); | |
1314 | ||
1315 | netif_stop_subqueue(tx_ring->netdev, | |
1316 | tx_ring->queue_index); | |
1317 | ||
1318 | netif_info(interface, probe, tx_ring->netdev, | |
1319 | "tx hang %d detected on queue %d, resetting interface\n", | |
1320 | interface->tx_timeout_count + 1, | |
1321 | tx_ring->queue_index); | |
1322 | ||
1323 | fm10k_tx_timeout_reset(interface); | |
1324 | ||
1325 | /* the netdev is about to reset, no point in enabling stuff */ | |
1326 | return true; | |
1327 | } | |
1328 | ||
1329 | /* notify netdev of completed buffers */ | |
1330 | netdev_tx_completed_queue(txring_txq(tx_ring), | |
1331 | total_packets, total_bytes); | |
1332 | ||
1333 | #define TX_WAKE_THRESHOLD min_t(u16, FM10K_MIN_TXD - 1, DESC_NEEDED * 2) | |
1334 | if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && | |
1335 | (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { | |
1336 | /* Make sure that anybody stopping the queue after this | |
1337 | * sees the new next_to_clean. | |
1338 | */ | |
1339 | smp_mb(); | |
1340 | if (__netif_subqueue_stopped(tx_ring->netdev, | |
1341 | tx_ring->queue_index) && | |
1342 | !test_bit(__FM10K_DOWN, &interface->state)) { | |
1343 | netif_wake_subqueue(tx_ring->netdev, | |
1344 | tx_ring->queue_index); | |
1345 | ++tx_ring->tx_stats.restart_queue; | |
1346 | } | |
1347 | } | |
1348 | ||
1349 | return !!budget; | |
1350 | } | |
1351 | ||
1352 | /** |
1353 | * fm10k_update_itr - update the dynamic ITR value based on packet size | |
1354 | * | |
1355 | * Stores a new ITR value based strictly on packet size. The | |
1356 | * divisors and thresholds used by this function were determined based | |
1357 | * on theoretical maximum wire speed and testing data, in order to | |
1358 | * minimize response time while increasing bulk throughput. | |
1359 | * | |
1360 | * @ring_container: Container for rings to have ITR updated | |
1361 | **/ | |
1362 | static void fm10k_update_itr(struct fm10k_ring_container *ring_container) | |
1363 | { | |
242722dd | 1364 | unsigned int avg_wire_size, packets, itr_round; |
1365 | |
1366 | /* Only update ITR if we are using adaptive setting */ | |
584373f5 | 1367 | if (!ITR_IS_ADAPTIVE(ring_container->itr)) |
1368 | goto clear_counts; |
1369 | ||
1370 | packets = ring_container->total_packets; | |
1371 | if (!packets) | |
1372 | goto clear_counts; | |
1373 | ||
1374 | avg_wire_size = ring_container->total_bytes / packets; | |
1375 | ||
1376 | /* The following is a crude approximation of: |
1377 | * wmem_default / (size + overhead) = desired_pkts_per_int | |
1378 | * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate | |
1379 | * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value | |
1380 | * | |
1381 | * Assuming wmem_default is 212992 and overhead is 640 bytes per | |
1382 | * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the | |
1383 | * formula down to | |
1384 | * | |
1385 | * (34 * (size + 24)) / (size + 640) = ITR | |
1386 | * | |
1387 | * We first do some math on the packet size and then finally bitshift | |
1388 | * by 8 after rounding up. We also have to account for PCIe link speed | |
1389 | * difference as ITR scales based on this. | |
1390 | */ | |
1391 | if (avg_wire_size <= 360) { | |
1392 | /* Start at 250K ints/sec and gradually drop to 77K ints/sec */ | |
1393 | avg_wire_size *= 8; | |
1394 | avg_wire_size += 376; | |
1395 | } else if (avg_wire_size <= 1152) { | |
1396 | /* 77K ints/sec to 45K ints/sec */ | |
1397 | avg_wire_size *= 3; | |
1398 | avg_wire_size += 2176; | |
1399 | } else if (avg_wire_size <= 1920) { | |
1400 | /* 45K ints/sec to 38K ints/sec */ | |
1401 | avg_wire_size += 4480; | |
1402 | } else { | |
1403 | /* plateau at a limit of 38K ints/sec */ | |
1404 | avg_wire_size = 6656; | |
1405 | } | |
18283cad | 1406 | |
1407 | /* Perform final bitshift for division after rounding up to ensure |
1408 | * that the calculation will never get below a 1. The bit shift | |
1409 | * accounts for changes in the ITR due to PCIe link speed. | |
1410 | */ | |
1411 | itr_round = ACCESS_ONCE(ring_container->itr_scale) + 8; | |
fcdb0a99 | 1412 | avg_wire_size += BIT(itr_round) - 1; |
242722dd | 1413 | avg_wire_size >>= itr_round; |
1414 | |
1415 | /* write back value and retain adaptive flag */ | |
1416 | ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE; | |
1417 | ||
1418 | clear_counts: | |
1419 | ring_container->total_bytes = 0; | |
1420 | ring_container->total_packets = 0; | |
1421 | } | |
1422 | ||
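The comment block above reduces the interrupt-rate target to a piecewise-linear function of average packet size followed by a rounding bitshift. The sketch below re-runs that arithmetic for two sizes; an `itr_scale` of 0 is an assumed PCIe-speed factor, not a measured one:

```c
#include <stdio.h>

static unsigned int itr_from_avg_size(unsigned int avg_wire_size,
				      unsigned int itr_scale)
{
	unsigned int itr_round = itr_scale + 8;

	if (avg_wire_size <= 360)
		avg_wire_size = avg_wire_size * 8 + 376;
	else if (avg_wire_size <= 1152)
		avg_wire_size = avg_wire_size * 3 + 2176;
	else if (avg_wire_size <= 1920)
		avg_wire_size += 4480;
	else
		avg_wire_size = 6656;

	/* round up, then divide by shifting, as the driver does */
	avg_wire_size += (1u << itr_round) - 1;
	return avg_wire_size >> itr_round;
}

int main(void)
{
	printf("64-byte average   -> ITR %u\n", itr_from_avg_size(64, 0));
	printf("1500-byte average -> ITR %u\n", itr_from_avg_size(1500, 0));
	return 0;
}
```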
1423 | static void fm10k_qv_enable(struct fm10k_q_vector *q_vector) | |
1424 | { | |
1425 | /* Enable auto-mask and clear the current mask */ | |
1426 | u32 itr = FM10K_ITR_ENABLE; | |
1427 | ||
1428 | /* Update Tx ITR */ | |
1429 | fm10k_update_itr(&q_vector->tx); | |
1430 | ||
1431 | /* Update Rx ITR */ | |
1432 | fm10k_update_itr(&q_vector->rx); | |
1433 | ||
1434 | /* Store Tx itr in timer slot 0 */ | |
1435 | itr |= (q_vector->tx.itr & FM10K_ITR_MAX); | |
1436 | ||
1437 | /* Shift Rx itr to timer slot 1 */ | |
1438 | itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT; | |
1439 | ||
1440 | /* Write the final value to the ITR register */ | |
1441 | writel(itr, q_vector->itr); | |
1442 | } | |
1443 | ||
1444 | static int fm10k_poll(struct napi_struct *napi, int budget) | |
1445 | { | |
1446 | struct fm10k_q_vector *q_vector = | |
1447 | container_of(napi, struct fm10k_q_vector, napi); | |
b101c962 | 1448 | struct fm10k_ring *ring; |
32b3e08f | 1449 | int per_ring_budget, work_done = 0; |
1450 | bool clean_complete = true; |
1451 | ||
1452 | fm10k_for_each_ring(ring, q_vector->tx) | |
1453 | clean_complete &= fm10k_clean_tx_irq(q_vector, ring); | |
1454 | ||
1455 | /* Handle case where we are called by netpoll with a budget of 0 */ |
1456 | if (budget <= 0) | |
1457 | return budget; | |
1458 | ||
1459 | /* attempt to distribute budget to each queue fairly, but don't |
1460 | * allow the budget to go below 1 because we'll exit polling | |
1461 | */ | |
1462 | if (q_vector->rx.count > 1) | |
a4fcad65 | 1463 | per_ring_budget = max(budget / q_vector->rx.count, 1); |
1464 | else |
1465 | per_ring_budget = budget; | |
1466 | ||
1467 | fm10k_for_each_ring(ring, q_vector->rx) { |
1468 | int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget); | |
1469 | ||
1470 | work_done += work; | |
1471 | clean_complete &= !!(work < per_ring_budget); | |
1472 | } | |
1473 | |
1474 | /* If all work not completed, return budget and keep polling */ | |
1475 | if (!clean_complete) | |
1476 | return budget; | |
1477 | |
1478 | /* all work done, exit the polling mode */ | |
32b3e08f | 1479 | napi_complete_done(napi, work_done); |
1480 | |
1481 | /* re-enable the q_vector */ | |
1482 | fm10k_qv_enable(q_vector); | |
1483 | ||
1484 | return 0; | |
1485 | } | |
1486 | ||
1487 | /** |
1488 | * fm10k_set_qos_queues: Allocate queues for a QOS-enabled device | |
1489 | * @interface: board private structure to initialize | |
1490 | * | |
1491 | * When QoS (Quality of Service) is enabled, allocate queues for | |
1492 | * each traffic class. If multiqueue isn't available, then abort QoS | |
1493 | * initialization. | |
1494 | * | |
1495 | * This function handles all combinations of QoS and RSS. | |
1496 | * | |
1497 | **/ | |
1498 | static bool fm10k_set_qos_queues(struct fm10k_intfc *interface) | |
1499 | { | |
1500 | struct net_device *dev = interface->netdev; | |
1501 | struct fm10k_ring_feature *f; | |
1502 | int rss_i, i; | |
1503 | int pcs; | |
1504 | ||
1505 | /* Map queue offset and counts onto allocated tx queues */ | |
1506 | pcs = netdev_get_num_tc(dev); | |
1507 | ||
1508 | if (pcs <= 1) | |
1509 | return false; | |
1510 | ||
1511 | /* set QoS mask and indices */ | |
1512 | f = &interface->ring_feature[RING_F_QOS]; | |
1513 | f->indices = pcs; | |
fcdb0a99 | 1514 | f->mask = BIT(fls(pcs - 1)) - 1; |
1515 | |
1516 | /* determine the upper limit for our current DCB mode */ | |
1517 | rss_i = interface->hw.mac.max_queues / pcs; | |
fcdb0a99 | 1518 | rss_i = BIT(fls(rss_i) - 1); |
1519 | |
1520 | /* set RSS mask and indices */ | |
1521 | f = &interface->ring_feature[RING_F_RSS]; | |
1522 | rss_i = min_t(u16, rss_i, f->limit); | |
1523 | f->indices = rss_i; | |
fcdb0a99 | 1524 | f->mask = BIT(fls(rss_i - 1)) - 1; |
1525 | |
1526 | /* configure pause class to queue mapping */ | |
1527 | for (i = 0; i < pcs; i++) | |
1528 | netdev_set_tc_queue(dev, i, rss_i, rss_i * i); | |
1529 | ||
1530 | interface->num_rx_queues = rss_i * pcs; | |
1531 | interface->num_tx_queues = rss_i * pcs; | |
1532 | ||
1533 | return true; | |
1534 | } | |
1535 | ||
1536 | /** | |
1537 | * fm10k_set_rss_queues: Allocate queues for RSS | |
1538 | * @interface: board private structure to initialize | |
1539 | * | |
1540 | * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try | |
1541 | * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. | |
1542 | * | |
1543 | **/ | |
1544 | static bool fm10k_set_rss_queues(struct fm10k_intfc *interface) | |
1545 | { | |
1546 | struct fm10k_ring_feature *f; | |
1547 | u16 rss_i; | |
1548 | ||
1549 | f = &interface->ring_feature[RING_F_RSS]; | |
1550 | rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit); | |
1551 | ||
1552 | /* record indices and power of 2 mask for RSS */ | |
1553 | f->indices = rss_i; | |
fcdb0a99 | 1554 | f->mask = BIT(fls(rss_i - 1)) - 1; |
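	/* e.g. (hypothetical value) rss_i = 12 gives a power-of-two mask of
	 * BIT(fls(11)) - 1 = 15
	 */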
aa3ac822 AD |
1555 | |
1556 | interface->num_rx_queues = rss_i; | |
1557 | interface->num_tx_queues = rss_i; | |
1558 | ||
1559 | return true; | |
1560 | } | |
1561 | ||
18283cad AD |
1562 | /** |
1563 | * fm10k_set_num_queues: Allocate queues for device, feature dependent | |
1564 | * @interface: board private structure to initialize | |
1565 | * | |
1566 | * This is the top level queue allocation routine. The order here is very | |
1567 | * important, starting with the largest set of features turned on at once, | |
1568 | * and ending with the smallest set of features. This way large combinations | |
1569 | * can be allocated if they're turned on, and smaller combinations are the | |
1570 | * fallthrough conditions. | |
1571 | * | |
1572 | **/ | |
1573 | static void fm10k_set_num_queues(struct fm10k_intfc *interface) | |
1574 | { | |
1575 | /* Start with base case */ | |
1576 | interface->num_rx_queues = 1; | |
1577 | interface->num_tx_queues = 1; | |
aa3ac822 AD |
1578 | |
1579 | if (fm10k_set_qos_queues(interface)) | |
1580 | return; | |
1581 | ||
1582 | fm10k_set_rss_queues(interface); | |
18283cad AD |
1583 | } |
1584 | ||
1585 | /** | |
1586 | * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector | |
1587 | * @interface: board private structure to initialize | |
1588 | * @v_count: q_vectors allocated on interface, used for ring interleaving | |
1589 | * @v_idx: index of vector in interface struct | |
1590 | * @txr_count: total number of Tx rings to allocate | |
1591 | * @txr_idx: index of first Tx ring to allocate | |
1592 | * @rxr_count: total number of Rx rings to allocate | |
1593 | * @rxr_idx: index of first Rx ring to allocate | |
1594 | * | |
1595 | * We allocate one q_vector. If allocation fails we return -ENOMEM. | |
1596 | **/ | |
1597 | static int fm10k_alloc_q_vector(struct fm10k_intfc *interface, | |
1598 | unsigned int v_count, unsigned int v_idx, | |
1599 | unsigned int txr_count, unsigned int txr_idx, | |
1600 | unsigned int rxr_count, unsigned int rxr_idx) | |
1601 | { | |
1602 | struct fm10k_q_vector *q_vector; | |
e27ef599 | 1603 | struct fm10k_ring *ring; |
18283cad AD |
1604 | int ring_count, size; |
1605 | ||
1606 | ring_count = txr_count + rxr_count; | |
e27ef599 AD |
1607 | size = sizeof(struct fm10k_q_vector) + |
1608 | (sizeof(struct fm10k_ring) * ring_count); | |
18283cad AD |
1609 | |
1610 | /* allocate q_vector and rings */ | |
1611 | q_vector = kzalloc(size, GFP_KERNEL); | |
1612 | if (!q_vector) | |
1613 | return -ENOMEM; | |
1614 | ||
1615 | /* initialize NAPI */ | |
1616 | netif_napi_add(interface->netdev, &q_vector->napi, | |
1617 | fm10k_poll, NAPI_POLL_WEIGHT); | |
1618 | ||
1619 | /* tie q_vector and interface together */ | |
1620 | interface->q_vector[v_idx] = q_vector; | |
1621 | q_vector->interface = interface; | |
1622 | q_vector->v_idx = v_idx; | |
1623 | ||
e27ef599 AD |
1624 | /* initialize pointer to rings */ |
1625 | ring = q_vector->ring; | |
1626 | ||
18283cad | 1627 | /* save Tx ring container info */ |
e27ef599 AD |
1628 | q_vector->tx.ring = ring; |
1629 | q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK; | |
18283cad | 1630 | q_vector->tx.itr = interface->tx_itr; |
242722dd | 1631 | q_vector->tx.itr_scale = interface->hw.mac.itr_scale; |
18283cad AD |
1632 | q_vector->tx.count = txr_count; |
1633 | ||
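	/* Rings are interleaved across q_vectors: this vector takes every
	 * v_count-th ring starting at txr_idx. Worked example (hypothetical
	 * values): with v_count = 4 q_vectors and 8 Tx rings, the vector at
	 * v_idx = 1 starts at txr_idx = 1 and owns interface->tx_ring[1] and
	 * interface->tx_ring[5]; the Rx rings below are spread the same way.
	 */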
e27ef599 AD |
1634 | while (txr_count) { |
1635 | /* assign generic ring traits */ | |
1636 | ring->dev = &interface->pdev->dev; | |
1637 | ring->netdev = interface->netdev; | |
1638 | ||
1639 | /* configure backlink on ring */ | |
1640 | ring->q_vector = q_vector; | |
1641 | ||
1642 | /* apply Tx specific ring traits */ | |
1643 | ring->count = interface->tx_ring_count; | |
1644 | ring->queue_index = txr_idx; | |
1645 | ||
1646 | /* assign ring to interface */ | |
1647 | interface->tx_ring[txr_idx] = ring; | |
1648 | ||
1649 | /* update count and index */ | |
1650 | txr_count--; | |
1651 | txr_idx += v_count; | |
1652 | ||
1653 | /* push pointer to next ring */ | |
1654 | ring++; | |
1655 | } | |
1656 | ||
18283cad | 1657 | /* save Rx ring container info */ |
e27ef599 | 1658 | q_vector->rx.ring = ring; |
18283cad | 1659 | q_vector->rx.itr = interface->rx_itr; |
242722dd | 1660 | q_vector->rx.itr_scale = interface->hw.mac.itr_scale; |
18283cad AD |
1661 | q_vector->rx.count = rxr_count; |
1662 | ||
e27ef599 AD |
1663 | while (rxr_count) { |
1664 | /* assign generic ring traits */ | |
1665 | ring->dev = &interface->pdev->dev; | |
1666 | ring->netdev = interface->netdev; | |
5cd5e2e9 | 1667 | rcu_assign_pointer(ring->l2_accel, interface->l2_accel); |
e27ef599 AD |
1668 | |
1669 | /* configure backlink on ring */ | |
1670 | ring->q_vector = q_vector; | |
1671 | ||
1672 | /* apply Rx specific ring traits */ | |
1673 | ring->count = interface->rx_ring_count; | |
1674 | ring->queue_index = rxr_idx; | |
1675 | ||
1676 | /* assign ring to interface */ | |
1677 | interface->rx_ring[rxr_idx] = ring; | |
1678 | ||
1679 | /* update count and index */ | |
1680 | rxr_count--; | |
1681 | rxr_idx += v_count; | |
1682 | ||
1683 | /* push pointer to next ring */ | |
1684 | ring++; | |
1685 | } | |
1686 | ||
7461fd91 AD |
1687 | fm10k_dbg_q_vector_init(q_vector); |
1688 | ||
18283cad AD |
1689 | return 0; |
1690 | } | |
1691 | ||
1692 | /** | |
1693 | * fm10k_free_q_vector - Free memory allocated for specific interrupt vector | |
1694 | * @interface: board private structure to initialize | |
1695 | * @v_idx: Index of vector to be freed | |
1696 | * | |
1697 | * This function frees the memory allocated to the q_vector. In addition if | |
1698 | * NAPI is enabled it will delete any references to the NAPI struct prior | |
1699 | * to freeing the q_vector. | |
1700 | **/ | |
1701 | static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx) | |
1702 | { | |
1703 | struct fm10k_q_vector *q_vector = interface->q_vector[v_idx]; | |
e27ef599 AD |
1704 | struct fm10k_ring *ring; |
1705 | ||
7461fd91 AD |
1706 | fm10k_dbg_q_vector_exit(q_vector); |
1707 | ||
e27ef599 AD |
1708 | fm10k_for_each_ring(ring, q_vector->tx) |
1709 | interface->tx_ring[ring->queue_index] = NULL; | |
1710 | ||
1711 | fm10k_for_each_ring(ring, q_vector->rx) | |
1712 | interface->rx_ring[ring->queue_index] = NULL; | |
18283cad AD |
1713 | |
1714 | interface->q_vector[v_idx] = NULL; | |
1715 | netif_napi_del(&q_vector->napi); | |
1716 | kfree_rcu(q_vector, rcu); | |
1717 | } | |
1718 | ||
1719 | /** | |
1720 | * fm10k_alloc_q_vectors - Allocate memory for interrupt vectors | |
1721 | * @interface: board private structure to initialize | |
1722 | * | |
1723 | * We allocate one q_vector per queue interrupt. If allocation fails we | |
1724 | * return -ENOMEM. | |
1725 | **/ | |
1726 | static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface) | |
1727 | { | |
1728 | unsigned int q_vectors = interface->num_q_vectors; | |
1729 | unsigned int rxr_remaining = interface->num_rx_queues; | |
1730 | unsigned int txr_remaining = interface->num_tx_queues; | |
1731 | unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0; | |
1732 | int err; | |
1733 | ||
1734 | if (q_vectors >= (rxr_remaining + txr_remaining)) { | |
1735 | for (; rxr_remaining; v_idx++) { | |
1736 | err = fm10k_alloc_q_vector(interface, q_vectors, v_idx, | |
1737 | 0, 0, 1, rxr_idx); | |
1738 | if (err) | |
1739 | goto err_out; | |
1740 | ||
1741 | /* update counts and index */ | |
1742 | rxr_remaining--; | |
1743 | rxr_idx++; | |
1744 | } | |
1745 | } | |
1746 | ||
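	/* Worked example (hypothetical values): with 8 q_vectors and 16 Rx
	 * plus 16 Tx queues, the branch above is skipped (8 < 32) and each
	 * pass of the loop below computes rqpv = tqpv = 2, so every vector
	 * services two Rx rings and two Tx rings.
	 */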
1747 | for (; v_idx < q_vectors; v_idx++) { | |
1748 | int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); | |
1749 | int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); | |
1750 | ||
1751 | err = fm10k_alloc_q_vector(interface, q_vectors, v_idx, | |
1752 | tqpv, txr_idx, | |
1753 | rqpv, rxr_idx); | |
1754 | ||
1755 | if (err) | |
1756 | goto err_out; | |
1757 | ||
1758 | /* update counts and index */ | |
1759 | rxr_remaining -= rqpv; | |
1760 | txr_remaining -= tqpv; | |
1761 | rxr_idx++; | |
1762 | txr_idx++; | |
1763 | } | |
1764 | ||
1765 | return 0; | |
1766 | ||
1767 | err_out: | |
1768 | interface->num_tx_queues = 0; | |
1769 | interface->num_rx_queues = 0; | |
1770 | interface->num_q_vectors = 0; | |
1771 | ||
1772 | while (v_idx--) | |
1773 | fm10k_free_q_vector(interface, v_idx); | |
1774 | ||
1775 | return -ENOMEM; | |
1776 | } | |
1777 | ||
1778 | /** | |
1779 | * fm10k_free_q_vectors - Free memory allocated for interrupt vectors | |
1780 | * @interface: board private structure to initialize | |
1781 | * | |
1782 | * This function frees the memory allocated to the q_vectors. In addition if | |
1783 | * NAPI is enabled it will delete any references to the NAPI struct prior | |
1784 | * to freeing the q_vector. | |
1785 | **/ | |
1786 | static void fm10k_free_q_vectors(struct fm10k_intfc *interface) | |
1787 | { | |
1788 | int v_idx = interface->num_q_vectors; | |
1789 | ||
1790 | interface->num_tx_queues = 0; | |
1791 | interface->num_rx_queues = 0; | |
1792 | interface->num_q_vectors = 0; | |
1793 | ||
1794 | while (v_idx--) | |
1795 | fm10k_free_q_vector(interface, v_idx); | |
1796 | } | |
1797 | ||
1798 | /** | |
1799 | * fm10k_reset_msix_capability - reset MSI-X capability | |
1800 | * @interface: board private structure to initialize | |
1801 | * | |
1802 | * Reset the MSI-X capability back to its starting state | |
1803 | **/ | |
1804 | static void fm10k_reset_msix_capability(struct fm10k_intfc *interface) | |
1805 | { | |
1806 | pci_disable_msix(interface->pdev); | |
1807 | kfree(interface->msix_entries); | |
1808 | interface->msix_entries = NULL; | |
1809 | } | |
1810 | ||
1811 | /** | |
1812 | * fm10k_init_msix_capability - configure MSI-X capability | |
1813 | * @interface: board private structure to initialize | |
1814 | * | |
1815 | * Attempt to configure the interrupts using the best available | |
1816 | * capabilities of the hardware and the kernel. | |
1817 | **/ | |
1818 | static int fm10k_init_msix_capability(struct fm10k_intfc *interface) | |
1819 | { | |
1820 | struct fm10k_hw *hw = &interface->hw; | |
1821 | int v_budget, vector; | |
1822 | ||
1823 | /* It's easy to be greedy for MSI-X vectors, but it really | |
1824 | * doesn't do us much good if we have a lot more vectors | |
1825 | * than CPUs. So let's be conservative and only ask for | |
1826 | * (roughly) the same number of vectors as there are CPUs. | |
1827 | * The default is to use pairs of vectors. | |
1828 | */ | |
1829 | v_budget = max(interface->num_rx_queues, interface->num_tx_queues); | |
1830 | v_budget = min_t(u16, v_budget, num_online_cpus()); | |
1831 | ||
1832 | /* account for vectors not related to queues */ | |
1833 | v_budget += NON_Q_VECTORS(hw); | |
1834 | ||
1835 | /* At the same time, hardware can only support a maximum of | |
1836 | * hw->mac.max_msix_vectors vectors. With features | |
1837 | * such as RSS and VMDq, we can easily surpass the number of Rx and Tx | |
1838 | * descriptor queues supported by our device. Thus, we cap it off in | |
1839 | * those rare cases where the CPU count also exceeds our vector limit. | |
1840 | */ | |
1841 | v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); | |
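	/* Worked example (hypothetical values, assuming NON_Q_VECTORS(hw)
	 * evaluates to 1): on a 4-CPU system with 16 Rx and 16 Tx queues,
	 * v_budget = min(max(16, 16), 4) + 1 = 5, then capped again by
	 * hw->mac.max_msix_vectors if the hardware exposes fewer vectors.
	 */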
1842 | ||
1843 | /* A failure in MSI-X entry allocation is fatal. */ | |
1844 | interface->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), | |
1845 | GFP_KERNEL); | |
1846 | if (!interface->msix_entries) | |
1847 | return -ENOMEM; | |
1848 | ||
1849 | /* populate entry values */ | |
1850 | for (vector = 0; vector < v_budget; vector++) | |
1851 | interface->msix_entries[vector].entry = vector; | |
1852 | ||
1853 | /* Attempt to enable MSI-X with requested value */ | |
1854 | v_budget = pci_enable_msix_range(interface->pdev, | |
1855 | interface->msix_entries, | |
1856 | MIN_MSIX_COUNT(hw), | |
1857 | v_budget); | |
1858 | if (v_budget < 0) { | |
1859 | kfree(interface->msix_entries); | |
1860 | interface->msix_entries = NULL; | |
1861 | return -ENOMEM; | |
1862 | } | |
1863 | ||
1864 | /* record the number of queues available for q_vectors */ | |
1865 | interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw); | |
1866 | ||
1867 | return 0; | |
1868 | } | |
1869 | ||
aa3ac822 AD |
1870 | /** |
1871 | * fm10k_cache_ring_qos - Descriptor ring to register mapping for QoS | |
1872 | * @interface: Interface structure containing rings and devices | |
1873 | * | |
1874 | * Cache the descriptor ring offsets for QoS | |
1875 | **/ | |
1876 | static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface) | |
1877 | { | |
1878 | struct net_device *dev = interface->netdev; | |
1879 | int pc, offset, rss_i, i, q_idx; | |
1880 | u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1; | |
1881 | u8 num_pcs = netdev_get_num_tc(dev); | |
1882 | ||
1883 | if (num_pcs <= 1) | |
1884 | return false; | |
1885 | ||
1886 | rss_i = interface->ring_feature[RING_F_RSS].indices; | |
1887 | ||
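	/* Worked example (hypothetical values): with num_pcs = 2, rss_i = 4
	 * and pc_stride = 2, pause class 0 maps its rings to register
	 * indices 0, 2, 4, 6 and pause class 1 to 1, 3, 5, 7, interleaving
	 * the classes across the hardware queues.
	 */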
1888 | for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) { | |
1889 | q_idx = pc; | |
1890 | for (i = 0; i < rss_i; i++) { | |
1891 | interface->tx_ring[offset + i]->reg_idx = q_idx; | |
1892 | interface->tx_ring[offset + i]->qos_pc = pc; | |
1893 | interface->rx_ring[offset + i]->reg_idx = q_idx; | |
1894 | interface->rx_ring[offset + i]->qos_pc = pc; | |
1895 | q_idx += pc_stride; | |
1896 | } | |
1897 | } | |
1898 | ||
1899 | return true; | |
1900 | } | |
1901 | ||
1902 | /** | |
1903 | * fm10k_cache_ring_rss - Descriptor ring to register mapping for RSS | |
1904 | * @interface: Interface structure containing rings and devices | |
1905 | * | |
1906 | * Cache the descriptor ring offsets for RSS | |
1907 | **/ | |
1908 | static void fm10k_cache_ring_rss(struct fm10k_intfc *interface) | |
1909 | { | |
1910 | int i; | |
1911 | ||
1912 | for (i = 0; i < interface->num_rx_queues; i++) | |
1913 | interface->rx_ring[i]->reg_idx = i; | |
1914 | ||
1915 | for (i = 0; i < interface->num_tx_queues; i++) | |
1916 | interface->tx_ring[i]->reg_idx = i; | |
1917 | } | |
1918 | ||
1919 | /** | |
1920 | * fm10k_assign_rings - Map rings to network devices | |
1921 | * @interface: Interface structure containing rings and devices | |
1922 | * | |
1923 | * This function is meant to go through and configure both the network | |
1924 | * devices so that they contain rings, and configure the rings so that | |
1925 | * they function with their network devices. | |
1926 | **/ | |
1927 | static void fm10k_assign_rings(struct fm10k_intfc *interface) | |
1928 | { | |
1929 | if (fm10k_cache_ring_qos(interface)) | |
1930 | return; | |
1931 | ||
1932 | fm10k_cache_ring_rss(interface); | |
1933 | } | |
1934 | ||
18283cad AD |
1935 | static void fm10k_init_reta(struct fm10k_intfc *interface) |
1936 | { | |
1937 | u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices; | |
1938 | u32 reta, base; | |
1939 | ||
1012014e KJ |
1940 | /* If the Rx flow indirection table has been configured manually, we |
1941 | * need to maintain it when possible. | |
1942 | */ | |
1943 | if (netif_is_rxfh_configured(interface->netdev)) { | |
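		/* each 32-bit table entry packs four byte-sized queue
		 * indices; the shifts below extract each byte so it can be
		 * checked against rss_i
		 */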
18283cad AD |
1944 | for (i = FM10K_RETA_SIZE; i--;) { |
1945 | reta = interface->reta[i]; | |
1946 | if ((((reta << 24) >> 24) < rss_i) && | |
1947 | (((reta << 16) >> 24) < rss_i) && | |
1948 | (((reta << 8) >> 24) < rss_i) && | |
1949 | (((reta) >> 24) < rss_i)) | |
1950 | continue; | |
1012014e KJ |
1951 | |
1952 | /* this should never happen */ | |
1953 | dev_err(&interface->pdev->dev, | |
1954 | "RSS indirection table assigned flows out of queue bounds. Reconfiguring.\n"); | |
18283cad AD |
1955 | goto repopulate_reta; |
1956 | } | |
1957 | ||
1958 | /* do nothing if all of the elements are in bounds */ | |
1959 | return; | |
1960 | } | |
1961 | ||
1962 | repopulate_reta: | |
1963 | /* Populate the redirection table 4 entries at a time. To do this | |
1964 | * we are generating the results for n and n+2 and then interleaving | |
1965 | * those with the results with n+1 and n+3. | |
1966 | */ | |
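	/* Put differently, table slot n is assigned queue (n * rss_i) >> 7.
	 * Worked example (hypothetical value): with rss_i = 16 each queue
	 * owns a contiguous run of 128 / 16 = 8 slots, so entry i = 0 holds
	 * queue 0 in all four bytes and entry i = 2 (slots 8-11) holds
	 * queue 1.
	 */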
1967 | for (i = FM10K_RETA_SIZE; i--;) { | |
1968 | /* first pass generates n and n+2 */ | |
1969 | base = ((i * 0x00040004) + 0x00020000) * rss_i; | |
1970 | reta = (base & 0x3F803F80) >> 7; | |
1971 | ||
1972 | /* second pass generates n+1 and n+3 */ | |
1973 | base += 0x00010001 * rss_i; | |
1974 | reta |= (base & 0x3F803F80) << 1; | |
1975 | ||
1976 | interface->reta[i] = reta; | |
1977 | } | |
1978 | } | |
1979 | ||
1980 | /** | |
1981 | * fm10k_init_queueing_scheme - Determine proper queueing scheme | |
1982 | * @interface: board private structure to initialize | |
1983 | * | |
1984 | * We determine which queueing scheme to use based on... | |
1985 | * - Hardware queue count (num_*_queues) | |
1986 | * - defined by miscellaneous hardware support/features (RSS, etc.) | |
1987 | **/ | |
1988 | int fm10k_init_queueing_scheme(struct fm10k_intfc *interface) | |
1989 | { | |
1990 | int err; | |
1991 | ||
1992 | /* Number of supported queues */ | |
1993 | fm10k_set_num_queues(interface); | |
1994 | ||
1995 | /* Configure MSI-X capability */ | |
1996 | err = fm10k_init_msix_capability(interface); | |
1997 | if (err) { | |
1998 | dev_err(&interface->pdev->dev, | |
1999 | "Unable to initialize MSI-X capability\n"); | |
2000 | return err; | |
2001 | } | |
2002 | ||
2003 | /* Allocate memory for queues */ | |
2004 | err = fm10k_alloc_q_vectors(interface); | |
587731e6 AD |
2005 | if (err) { |
2006 | fm10k_reset_msix_capability(interface); | |
18283cad | 2007 | return err; |
587731e6 | 2008 | } |
18283cad | 2009 | |
aa3ac822 AD |
2010 | /* Map rings to devices, and map devices to physical queues */ |
2011 | fm10k_assign_rings(interface); | |
2012 | ||
18283cad AD |
2013 | /* Initialize RSS redirection table */ |
2014 | fm10k_init_reta(interface); | |
2015 | ||
2016 | return 0; | |
2017 | } | |
2018 | ||
2019 | /** | |
2020 | * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings | |
2021 | * @interface: board private structure to clear queueing scheme on | |
2022 | * | |
2023 | * We go through and clear queueing specific resources and reset the structure | |
2024 | * to pre-load conditions | |
2025 | **/ | |
2026 | void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface) | |
2027 | { | |
2028 | fm10k_free_q_vectors(interface); | |
2029 | fm10k_reset_msix_capability(interface); | |
2030 | } |