Commit | Line | Data |
---|---|---|
0e7b3644 | 1 | /* Intel Ethernet Switch Host Interface Driver |
f4e25f6e | 2 | * Copyright(c) 2013 - 2015 Intel Corporation. |
0e7b3644 AD |
3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms and conditions of the GNU General Public License, | |
6 | * version 2, as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope it will be useful, but WITHOUT | |
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
11 | * more details. | |
12 | * | |
13 | * The full GNU General Public License is included in this distribution in | |
14 | * the file called "COPYING". | |
15 | * | |
16 | * Contact Information: | |
17 | * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | |
18 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | |
19 | */ | |
20 | ||
21 | #include "fm10k.h" | |
3abaae42 | 22 | #include <linux/vmalloc.h> |
0d722ec8 | 23 | #ifdef CONFIG_FM10K_VXLAN |
76a540d4 | 24 | #include <net/vxlan.h> |
f6b03c10 | 25 | #endif /* CONFIG_FM10K_VXLAN */ |
3abaae42 AD |
26 | |
27 | /** | |
28 | * fm10k_setup_tx_resources - allocate Tx resources (Descriptors) | |
29 | * @tx_ring: tx descriptor ring (for a specific queue) to setup | |
30 | * | |
31 | * Return 0 on success, negative on failure | |
32 | **/ | |
33 | int fm10k_setup_tx_resources(struct fm10k_ring *tx_ring) | |
34 | { | |
35 | struct device *dev = tx_ring->dev; | |
36 | int size; | |
37 | ||
38 | size = sizeof(struct fm10k_tx_buffer) * tx_ring->count; | |
39 | ||
40 | tx_ring->tx_buffer = vzalloc(size); | |
41 | if (!tx_ring->tx_buffer) | |
42 | goto err; | |
43 | ||
44 | u64_stats_init(&tx_ring->syncp); | |
45 | ||
46 | /* round up to nearest 4K */ | |
47 | tx_ring->size = tx_ring->count * sizeof(struct fm10k_tx_desc); | |
48 | tx_ring->size = ALIGN(tx_ring->size, 4096); | |
49 | ||
50 | tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, | |
51 | &tx_ring->dma, GFP_KERNEL); | |
52 | if (!tx_ring->desc) | |
53 | goto err; | |
54 | ||
55 | return 0; | |
56 | ||
57 | err: | |
58 | vfree(tx_ring->tx_buffer); | |
59 | tx_ring->tx_buffer = NULL; | |
60 | return -ENOMEM; | |
61 | } | |
62 | ||
63 | /** | |
64 | * fm10k_setup_all_tx_resources - allocate all queues Tx resources | |
65 | * @interface: board private structure | |
66 | * | |
67 | * If this function returns with an error, then it's possible one or | |
68 | * more of the rings is populated (while the rest are not). It is the | |
69 | * caller's duty to clean those orphaned rings. | |
70 | * | |
71 | * Return 0 on success, negative on failure | |
72 | **/ | |
73 | static int fm10k_setup_all_tx_resources(struct fm10k_intfc *interface) | |
74 | { | |
75 | int i, err = 0; | |
76 | ||
77 | for (i = 0; i < interface->num_tx_queues; i++) { | |
78 | err = fm10k_setup_tx_resources(interface->tx_ring[i]); | |
79 | if (!err) | |
80 | continue; | |
81 | ||
82 | netif_err(interface, probe, interface->netdev, | |
83 | "Allocation for Tx Queue %u failed\n", i); | |
84 | goto err_setup_tx; | |
85 | } | |
86 | ||
87 | return 0; | |
88 | err_setup_tx: | |
89 | /* rewind the index freeing the rings as we go */ | |
90 | while (i--) | |
91 | fm10k_free_tx_resources(interface->tx_ring[i]); | |
92 | return err; | |
93 | } | |
94 | ||
95 | /** | |
96 | * fm10k_setup_rx_resources - allocate Rx resources (Descriptors) | |
97 | * @rx_ring: rx descriptor ring (for a specific queue) to setup | |
98 | * | |
99 | * Returns 0 on success, negative on failure | |
100 | **/ | |
101 | int fm10k_setup_rx_resources(struct fm10k_ring *rx_ring) | |
102 | { | |
103 | struct device *dev = rx_ring->dev; | |
104 | int size; | |
105 | ||
106 | size = sizeof(struct fm10k_rx_buffer) * rx_ring->count; | |
107 | ||
108 | rx_ring->rx_buffer = vzalloc(size); | |
109 | if (!rx_ring->rx_buffer) | |
110 | goto err; | |
111 | ||
112 | u64_stats_init(&rx_ring->syncp); | |
113 | ||
114 | /* Round up to nearest 4K */ | |
115 | rx_ring->size = rx_ring->count * sizeof(union fm10k_rx_desc); | |
116 | rx_ring->size = ALIGN(rx_ring->size, 4096); | |
117 | ||
118 | rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, | |
119 | &rx_ring->dma, GFP_KERNEL); | |
120 | if (!rx_ring->desc) | |
121 | goto err; | |
122 | ||
123 | return 0; | |
124 | err: | |
125 | vfree(rx_ring->rx_buffer); | |
126 | rx_ring->rx_buffer = NULL; | |
127 | return -ENOMEM; | |
128 | } | |
129 | ||
130 | /** | |
131 | * fm10k_setup_all_rx_resources - allocate all queues Rx resources | |
132 | * @interface: board private structure | |
133 | * | |
134 | * If this function returns with an error, then it's possible one or | |
135 | * more of the rings is populated (while the rest are not). It is the | |
136 | * caller's duty to clean those orphaned rings. | |
137 | * | |
138 | * Return 0 on success, negative on failure | |
139 | **/ | |
140 | static int fm10k_setup_all_rx_resources(struct fm10k_intfc *interface) | |
141 | { | |
142 | int i, err = 0; | |
143 | ||
144 | for (i = 0; i < interface->num_rx_queues; i++) { | |
145 | err = fm10k_setup_rx_resources(interface->rx_ring[i]); | |
146 | if (!err) | |
147 | continue; | |
148 | ||
149 | netif_err(interface, probe, interface->netdev, | |
150 | "Allocation for Rx Queue %u failed\n", i); | |
151 | goto err_setup_rx; | |
152 | } | |
153 | ||
154 | return 0; | |
155 | err_setup_rx: | |
156 | /* rewind the index freeing the rings as we go */ | |
157 | while (i--) | |
158 | fm10k_free_rx_resources(interface->rx_ring[i]); | |
159 | return err; | |
160 | } | |
161 | ||
162 | void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *ring, | |
163 | struct fm10k_tx_buffer *tx_buffer) | |
164 | { | |
165 | if (tx_buffer->skb) { | |
166 | dev_kfree_skb_any(tx_buffer->skb); | |
167 | if (dma_unmap_len(tx_buffer, len)) | |
168 | dma_unmap_single(ring->dev, | |
169 | dma_unmap_addr(tx_buffer, dma), | |
170 | dma_unmap_len(tx_buffer, len), | |
171 | DMA_TO_DEVICE); | |
172 | } else if (dma_unmap_len(tx_buffer, len)) { | |
173 | dma_unmap_page(ring->dev, | |
174 | dma_unmap_addr(tx_buffer, dma), | |
175 | dma_unmap_len(tx_buffer, len), | |
176 | DMA_TO_DEVICE); | |
177 | } | |
178 | tx_buffer->next_to_watch = NULL; | |
179 | tx_buffer->skb = NULL; | |
180 | dma_unmap_len_set(tx_buffer, len, 0); | |
181 | /* tx_buffer must be completely set up in the transmit path */ | |
182 | } | |
183 | ||
184 | /** | |
185 | * fm10k_clean_tx_ring - Free Tx Buffers | |
186 | * @tx_ring: ring to be cleaned | |
187 | **/ | |
188 | static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring) | |
189 | { | |
190 | struct fm10k_tx_buffer *tx_buffer; | |
191 | unsigned long size; | |
192 | u16 i; | |
193 | ||
194 | /* ring already cleared, nothing to do */ | |
195 | if (!tx_ring->tx_buffer) | |
196 | return; | |
197 | ||
198 | /* Free all the Tx ring sk_buffs */ | |
199 | for (i = 0; i < tx_ring->count; i++) { | |
200 | tx_buffer = &tx_ring->tx_buffer[i]; | |
201 | fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer); | |
202 | } | |
203 | ||
204 | /* reset BQL values */ | |
205 | netdev_tx_reset_queue(txring_txq(tx_ring)); | |
206 | ||
207 | size = sizeof(struct fm10k_tx_buffer) * tx_ring->count; | |
208 | memset(tx_ring->tx_buffer, 0, size); | |
209 | ||
210 | /* Zero out the descriptor ring */ | |
211 | memset(tx_ring->desc, 0, tx_ring->size); | |
212 | } | |
213 | ||
214 | /** | |
215 | * fm10k_free_tx_resources - Free Tx Resources per Queue | |
216 | * @tx_ring: Tx descriptor ring for a specific queue | |
217 | * | |
218 | * Free all transmit software resources | |
219 | **/ | |
220 | void fm10k_free_tx_resources(struct fm10k_ring *tx_ring) | |
221 | { | |
222 | fm10k_clean_tx_ring(tx_ring); | |
223 | ||
224 | vfree(tx_ring->tx_buffer); | |
225 | tx_ring->tx_buffer = NULL; | |
226 | ||
227 | /* if not set, then don't free */ | |
228 | if (!tx_ring->desc) | |
229 | return; | |
230 | ||
231 | dma_free_coherent(tx_ring->dev, tx_ring->size, | |
232 | tx_ring->desc, tx_ring->dma); | |
233 | tx_ring->desc = NULL; | |
234 | } | |
235 | ||
236 | /** | |
237 | * fm10k_clean_all_tx_rings - Free Tx Buffers for all queues | |
238 | * @interface: board private structure | |
239 | **/ | |
240 | void fm10k_clean_all_tx_rings(struct fm10k_intfc *interface) | |
241 | { | |
242 | int i; | |
243 | ||
244 | for (i = 0; i < interface->num_tx_queues; i++) | |
245 | fm10k_clean_tx_ring(interface->tx_ring[i]); | |
a211e013 AD |
246 | |
247 | /* remove any stale timestamp buffers and free them */ | |
248 | skb_queue_purge(&interface->ts_tx_skb_queue); | |
3abaae42 AD |
249 | } |
250 | ||
251 | /** | |
252 | * fm10k_free_all_tx_resources - Free Tx Resources for All Queues | |
253 | * @interface: board private structure | |
254 | * | |
255 | * Free all transmit software resources | |
256 | **/ | |
257 | static void fm10k_free_all_tx_resources(struct fm10k_intfc *interface) | |
258 | { | |
259 | int i = interface->num_tx_queues; | |
260 | ||
261 | while (i--) | |
262 | fm10k_free_tx_resources(interface->tx_ring[i]); | |
263 | } | |
264 | ||
265 | /** | |
266 | * fm10k_clean_rx_ring - Free Rx Buffers per Queue | |
267 | * @rx_ring: ring to free buffers from | |
268 | **/ | |
269 | static void fm10k_clean_rx_ring(struct fm10k_ring *rx_ring) | |
270 | { | |
271 | unsigned long size; | |
272 | u16 i; | |
273 | ||
274 | if (!rx_ring->rx_buffer) | |
275 | return; | |
276 | ||
277 | if (rx_ring->skb) | |
278 | dev_kfree_skb(rx_ring->skb); | |
279 | rx_ring->skb = NULL; | |
280 | ||
281 | /* Free all the Rx ring sk_buffs */ | |
282 | for (i = 0; i < rx_ring->count; i++) { | |
283 | struct fm10k_rx_buffer *buffer = &rx_ring->rx_buffer[i]; | |
284 | /* clean-up will only set page pointer to NULL */ | |
285 | if (!buffer->page) | |
286 | continue; | |
287 | ||
288 | dma_unmap_page(rx_ring->dev, buffer->dma, | |
289 | PAGE_SIZE, DMA_FROM_DEVICE); | |
290 | __free_page(buffer->page); | |
291 | ||
292 | buffer->page = NULL; | |
293 | } | |
294 | ||
295 | size = sizeof(struct fm10k_rx_buffer) * rx_ring->count; | |
296 | memset(rx_ring->rx_buffer, 0, size); | |
297 | ||
298 | /* Zero out the descriptor ring */ | |
299 | memset(rx_ring->desc, 0, rx_ring->size); | |
300 | ||
301 | rx_ring->next_to_alloc = 0; | |
302 | rx_ring->next_to_clean = 0; | |
303 | rx_ring->next_to_use = 0; | |
304 | } | |
305 | ||
306 | /** | |
307 | * fm10k_free_rx_resources - Free Rx Resources | |
308 | * @rx_ring: ring to clean the resources from | |
309 | * | |
310 | * Free all receive software resources | |
311 | **/ | |
312 | void fm10k_free_rx_resources(struct fm10k_ring *rx_ring) | |
313 | { | |
314 | fm10k_clean_rx_ring(rx_ring); | |
315 | ||
316 | vfree(rx_ring->rx_buffer); | |
317 | rx_ring->rx_buffer = NULL; | |
318 | ||
319 | /* if not set, then don't free */ | |
320 | if (!rx_ring->desc) | |
321 | return; | |
322 | ||
323 | dma_free_coherent(rx_ring->dev, rx_ring->size, | |
324 | rx_ring->desc, rx_ring->dma); | |
325 | ||
326 | rx_ring->desc = NULL; | |
327 | } | |
328 | ||
329 | /** | |
330 | * fm10k_clean_all_rx_rings - Free Rx Buffers for all queues | |
331 | * @interface: board private structure | |
332 | **/ | |
333 | void fm10k_clean_all_rx_rings(struct fm10k_intfc *interface) | |
334 | { | |
335 | int i; | |
336 | ||
337 | for (i = 0; i < interface->num_rx_queues; i++) | |
338 | fm10k_clean_rx_ring(interface->rx_ring[i]); | |
339 | } | |
340 | ||
341 | /** | |
342 | * fm10k_free_all_rx_resources - Free Rx Resources for All Queues | |
343 | * @interface: board private structure | |
344 | * | |
345 | * Free all receive software resources | |
346 | **/ | |
347 | static void fm10k_free_all_rx_resources(struct fm10k_intfc *interface) | |
348 | { | |
349 | int i = interface->num_rx_queues; | |
350 | ||
351 | while (i--) | |
352 | fm10k_free_rx_resources(interface->rx_ring[i]); | |
353 | } | |
0e7b3644 | 354 | |
504c5eac AD |
355 | /** |
356 | * fm10k_request_glort_range - Request GLORTs for use in configuring rules | |
357 | * @interface: board private structure | |
358 | * | |
eca32047 | 359 | * This function allocates a range of glorts for this interface to use. |
504c5eac AD |
360 | **/ |
361 | static void fm10k_request_glort_range(struct fm10k_intfc *interface) | |
362 | { | |
363 | struct fm10k_hw *hw = &interface->hw; | |
364 | u16 mask = (~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT; | |
365 | ||
366 | /* establish GLORT base */ | |
367 | interface->glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE; | |
368 | interface->glort_count = 0; | |
369 | ||
370 | /* nothing we can do until mask is allocated */ | |
371 | if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE) | |
372 | return; | |
373 | ||
883a9ccb AD |
374 | /* we support 3 possible GLORT configurations. |
375 | * 1: VFs consume all but the last 1 | |
376 | * 2: VFs and PF split glorts with possible gap between | |
377 | * 3: VFs allocated first 64, all others belong to PF | |
378 | */ | |
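/* in every case mask + 1 GLORTs exist in the mapped range: the PF keeps only the last one when the VFs could consume the rest, takes the upper half of a small range, or takes everything past the first 64 entries reserved for VFs */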
379 | if (mask <= hw->iov.total_vfs) { | |
380 | interface->glort_count = 1; | |
381 | interface->glort += mask; | |
382 | } else if (mask < 64) { | |
383 | interface->glort_count = (mask + 1) / 2; | |
384 | interface->glort += interface->glort_count; | |
385 | } else { | |
386 | interface->glort_count = mask - 63; | |
387 | interface->glort += 64; | |
388 | } | |
504c5eac AD |
389 | } |
390 | ||
76a540d4 AD |
391 | /** |
392 | * fm10k_del_vxlan_port_all | |
393 | * @interface: board private structure | |
394 | * | |
395 | * This function frees the entire vxlan_port list | |
396 | **/ | |
397 | static void fm10k_del_vxlan_port_all(struct fm10k_intfc *interface) | |
398 | { | |
399 | struct fm10k_vxlan_port *vxlan_port; | |
400 | ||
401 | /* flush all entries from list */ | |
402 | vxlan_port = list_first_entry_or_null(&interface->vxlan_port, | |
403 | struct fm10k_vxlan_port, list); | |
404 | while (vxlan_port) { | |
405 | list_del(&vxlan_port->list); | |
406 | kfree(vxlan_port); | |
407 | vxlan_port = list_first_entry_or_null(&interface->vxlan_port, | |
408 | struct fm10k_vxlan_port, | |
409 | list); | |
410 | } | |
411 | } | |
412 | ||
413 | /** | |
414 | * fm10k_restore_vxlan_port | |
415 | * @interface: board private structure | |
416 | * | |
417 | * This function restores the value in the tunnel_cfg register after reset | |
418 | **/ | |
419 | static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface) | |
420 | { | |
421 | struct fm10k_hw *hw = &interface->hw; | |
422 | struct fm10k_vxlan_port *vxlan_port; | |
423 | ||
424 | /* only the PF supports configuring tunnels */ | |
425 | if (hw->mac.type != fm10k_mac_pf) | |
426 | return; | |
427 | ||
428 | vxlan_port = list_first_entry_or_null(&interface->vxlan_port, | |
429 | struct fm10k_vxlan_port, list); | |
430 | ||
431 | /* restore tunnel configuration register */ | |
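/* only a single VXLAN destination port can be offloaded at a time, so the head of the list (the oldest added port) is programmed; ETH_P_TEB in the upper half of the register is the inner EtherType used to match NVGRE traffic */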
432 | fm10k_write_reg(hw, FM10K_TUNNEL_CFG, | |
433 | (vxlan_port ? ntohs(vxlan_port->port) : 0) | | |
434 | (ETH_P_TEB << FM10K_TUNNEL_CFG_NVGRE_SHIFT)); | |
435 | } | |
436 | ||
437 | /** | |
438 | * fm10k_add_vxlan_port | |
439 | * @dev: network interface device structure | |
440 | * @sa_family: Address family of new port | |
441 | * @port: port number used for VXLAN | |
442 | * | |
443 | * This function is called when a new VXLAN interface has added a new port | |
444 | * number to the range that is currently in use for VXLAN. The new port | |
445 | * number is always added to the tail so that the port number list | |
446 | * matches the order in which the ports were allocated. The head of the list | |
447 | * is always used as the VXLAN port number for offloads. | |
448 | **/ | |
449 | static void fm10k_add_vxlan_port(struct net_device *dev, | |
450 | sa_family_t sa_family, __be16 port) { | |
451 | struct fm10k_intfc *interface = netdev_priv(dev); | |
452 | struct fm10k_vxlan_port *vxlan_port; | |
453 | ||
454 | /* only the PF supports configuring tunnels */ | |
455 | if (interface->hw.mac.type != fm10k_mac_pf) | |
456 | return; | |
457 | ||
458 | /* existing ports are pulled out so our new entry is always last */ | |
459 | fm10k_vxlan_port_for_each(vxlan_port, interface) { | |
460 | if ((vxlan_port->port == port) && | |
461 | (vxlan_port->sa_family == sa_family)) { | |
462 | list_del(&vxlan_port->list); | |
463 | goto insert_tail; | |
464 | } | |
465 | } | |
466 | ||
467 | /* allocate memory to track ports */ | |
468 | vxlan_port = kmalloc(sizeof(*vxlan_port), GFP_ATOMIC); | |
469 | if (!vxlan_port) | |
470 | return; | |
471 | vxlan_port->port = port; | |
472 | vxlan_port->sa_family = sa_family; | |
473 | ||
474 | insert_tail: | |
475 | /* add new port value to list */ | |
476 | list_add_tail(&vxlan_port->list, &interface->vxlan_port); | |
477 | ||
478 | fm10k_restore_vxlan_port(interface); | |
479 | } | |
480 | ||
481 | /** | |
482 | * fm10k_del_vxlan_port | |
483 | * @dev: network interface device structure | |
484 | * @sa_family: Address family of freed port | |
485 | * @port: port number used for VXLAN | |
486 | * | |
487 | * This function is called when a VXLAN interface has freed a port | |
488 | * number from the range that is currently in use for VXLAN. The freed | |
489 | * port is removed from the list and the new head is used to determine | |
490 | * the port number for offloads. | |
491 | **/ | |
492 | static void fm10k_del_vxlan_port(struct net_device *dev, | |
493 | sa_family_t sa_family, __be16 port) { | |
494 | struct fm10k_intfc *interface = netdev_priv(dev); | |
495 | struct fm10k_vxlan_port *vxlan_port; | |
496 | ||
497 | if (interface->hw.mac.type != fm10k_mac_pf) | |
498 | return; | |
499 | ||
500 | /* find the port in the list and free it */ | |
501 | fm10k_vxlan_port_for_each(vxlan_port, interface) { | |
502 | if ((vxlan_port->port == port) && | |
503 | (vxlan_port->sa_family == sa_family)) { | |
504 | list_del(&vxlan_port->list); | |
505 | kfree(vxlan_port); | |
506 | break; | |
507 | } | |
508 | } | |
509 | ||
510 | fm10k_restore_vxlan_port(interface); | |
511 | } | |
512 | ||
504c5eac AD |
513 | /** |
514 | * fm10k_open - Called when a network interface is made active | |
515 | * @netdev: network interface device structure | |
516 | * | |
517 | * Returns 0 on success, negative value on failure | |
518 | * | |
519 | * The open entry point is called when a network interface is made | |
520 | * active by the system (IFF_UP). At this point all resources needed | |
521 | * for transmit and receive operations are allocated, the interrupt | |
522 | * handler is registered with the OS, the watchdog timer is started, | |
523 | * and the stack is notified that the interface is ready. | |
524 | **/ | |
525 | int fm10k_open(struct net_device *netdev) | |
526 | { | |
527 | struct fm10k_intfc *interface = netdev_priv(netdev); | |
18283cad AD |
528 | int err; |
529 | ||
3abaae42 AD |
530 | /* allocate transmit descriptors */ |
531 | err = fm10k_setup_all_tx_resources(interface); | |
532 | if (err) | |
533 | goto err_setup_tx; | |
534 | ||
535 | /* allocate receive descriptors */ | |
536 | err = fm10k_setup_all_rx_resources(interface); | |
537 | if (err) | |
538 | goto err_setup_rx; | |
539 | ||
18283cad AD |
540 | /* allocate interrupt resources */ |
541 | err = fm10k_qv_request_irq(interface); | |
542 | if (err) | |
543 | goto err_req_irq; | |
504c5eac AD |
544 | |
545 | /* setup GLORT assignment for this port */ | |
546 | fm10k_request_glort_range(interface); | |
547 | ||
e27ef599 | 548 | /* Notify the stack of the actual queue counts */ |
c9d49940 AD |
549 | err = netif_set_real_num_tx_queues(netdev, |
550 | interface->num_tx_queues); | |
551 | if (err) | |
552 | goto err_set_queues; | |
e27ef599 AD |
553 | |
554 | err = netif_set_real_num_rx_queues(netdev, | |
555 | interface->num_rx_queues); | |
556 | if (err) | |
557 | goto err_set_queues; | |
558 | ||
0d722ec8 | 559 | #ifdef CONFIG_FM10K_VXLAN |
76a540d4 AD |
560 | /* update VXLAN port configuration */ |
561 | vxlan_get_rx_port(netdev); | |
76a540d4 | 562 | #endif |
0d722ec8 | 563 | |
504c5eac AD |
564 | fm10k_up(interface); |
565 | ||
566 | return 0; | |
18283cad | 567 | |
e27ef599 AD |
568 | err_set_queues: |
569 | fm10k_qv_free_irq(interface); | |
18283cad | 570 | err_req_irq: |
3abaae42 AD |
571 | fm10k_free_all_rx_resources(interface); |
572 | err_setup_rx: | |
573 | fm10k_free_all_tx_resources(interface); | |
574 | err_setup_tx: | |
18283cad | 575 | return err; |
504c5eac AD |
576 | } |
577 | ||
578 | /** | |
579 | * fm10k_close - Disables a network interface | |
580 | * @netdev: network interface device structure | |
581 | * | |
582 | * Returns 0, this is not allowed to fail | |
583 | * | |
584 | * The close entry point is called when an interface is de-activated | |
585 | * by the OS. The hardware is still under the drivers control, but | |
586 | * needs to be disabled. A global MAC reset is issued to stop the | |
587 | * hardware, and all transmit and receive resources are freed. | |
588 | **/ | |
589 | int fm10k_close(struct net_device *netdev) | |
590 | { | |
591 | struct fm10k_intfc *interface = netdev_priv(netdev); | |
592 | ||
593 | fm10k_down(interface); | |
594 | ||
18283cad AD |
595 | fm10k_qv_free_irq(interface); |
596 | ||
76a540d4 AD |
597 | fm10k_del_vxlan_port_all(interface); |
598 | ||
3abaae42 AD |
599 | fm10k_free_all_tx_resources(interface); |
600 | fm10k_free_all_rx_resources(interface); | |
601 | ||
504c5eac AD |
602 | return 0; |
603 | } | |
604 | ||
0e7b3644 AD |
605 | static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) |
606 | { | |
b101c962 | 607 | struct fm10k_intfc *interface = netdev_priv(dev); |
c9d49940 | 608 | unsigned int r_idx = skb->queue_mapping; |
b101c962 AD |
609 | int err; |
610 | ||
a4fcad65 | 611 | if ((skb->protocol == htons(ETH_P_8021Q)) && |
df8a39de | 612 | !skb_vlan_tag_present(skb)) { |
b101c962 AD |
613 | /* FM10K only supports hardware tagging; any tags already in the frame | |
614 | * are considered 2nd level or "outer" tags | |
615 | */ | |
616 | struct vlan_hdr *vhdr; | |
617 | __be16 proto; | |
618 | ||
619 | /* make sure skb is not shared */ | |
620 | skb = skb_share_check(skb, GFP_ATOMIC); | |
621 | if (!skb) | |
622 | return NETDEV_TX_OK; | |
623 | ||
624 | /* make sure there is enough room to move the ethernet header */ | |
625 | if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN))) | |
626 | return NETDEV_TX_OK; | |
627 | ||
628 | /* verify the skb head is not shared */ | |
629 | err = skb_cow_head(skb, 0); | |
6f97532e | 630 | if (err) { |
631 | dev_kfree_skb(skb); | |
b101c962 | 632 | return NETDEV_TX_OK; |
6f97532e | 633 | } |
b101c962 | 634 | |
aa502b4a | 635 | /* locate VLAN header */ |
b101c962 AD |
636 | vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); |
637 | ||
638 | /* pull the 2 key pieces of data out of it */ | |
639 | __vlan_hwaccel_put_tag(skb, | |
640 | htons(ETH_P_8021Q), | |
641 | ntohs(vhdr->h_vlan_TCI)); | |
642 | proto = vhdr->h_vlan_encapsulated_proto; | |
643 | skb->protocol = (ntohs(proto) >= 1536) ? proto : | |
644 | htons(ETH_P_802_2); | |
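/* 1536 (0x600) is the boundary between 802.3 length fields and EtherTypes, so anything smaller is treated as a raw 802.2 frame */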
645 | ||
646 | /* squash it by moving the ethernet addresses up 4 bytes */ | |
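/* the 12 bytes of destination + source MAC are copied over the start of the tag, then VLAN_HLEN is pulled from the head, leaving an untagged frame; the tag itself now travels in skb metadata via __vlan_hwaccel_put_tag() above */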
647 | memmove(skb->data + VLAN_HLEN, skb->data, 12); | |
648 | __skb_pull(skb, VLAN_HLEN); | |
649 | skb_reset_mac_header(skb); | |
650 | } | |
651 | ||
652 | /* The minimum packet size for a single buffer is 17B so pad the skb | |
653 | * in order to meet this minimum size requirement. | |
654 | */ | |
655 | if (unlikely(skb->len < 17)) { | |
656 | int pad_len = 17 - skb->len; | |
657 | ||
658 | if (skb_pad(skb, pad_len)) | |
659 | return NETDEV_TX_OK; | |
660 | __skb_put(skb, pad_len); | |
661 | } | |
662 | ||
a211e013 AD |
663 | /* prepare packet for hardware time stamping */ |
664 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) | |
665 | fm10k_ts_tx_enqueue(interface, skb); | |
666 | ||
b101c962 AD |
667 | if (r_idx >= interface->num_tx_queues) |
668 | r_idx %= interface->num_tx_queues; | |
669 | ||
670 | err = fm10k_xmit_frame_ring(skb, interface->tx_ring[r_idx]); | |
671 | ||
672 | return err; | |
0e7b3644 AD |
673 | } |
674 | ||
675 | static int fm10k_change_mtu(struct net_device *dev, int new_mtu) | |
676 | { | |
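/* reject anything below the 68-byte IPv4 minimum MTU or above the hardware jumbo frame limit */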
677 | if (new_mtu < 68 || new_mtu > FM10K_MAX_JUMBO_FRAME_SIZE) | |
678 | return -EINVAL; | |
679 | ||
680 | dev->mtu = new_mtu; | |
681 | ||
682 | return 0; | |
683 | } | |
684 | ||
b101c962 AD |
685 | /** |
686 | * fm10k_tx_timeout - Respond to a Tx Hang | |
687 | * @netdev: network interface device structure | |
688 | **/ | |
689 | static void fm10k_tx_timeout(struct net_device *netdev) | |
690 | { | |
691 | struct fm10k_intfc *interface = netdev_priv(netdev); | |
692 | bool real_tx_hang = false; | |
693 | int i; | |
694 | ||
695 | #define TX_TIMEO_LIMIT 16000 | |
696 | for (i = 0; i < interface->num_tx_queues; i++) { | |
697 | struct fm10k_ring *tx_ring = interface->tx_ring[i]; | |
698 | ||
699 | if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) | |
700 | real_tx_hang = true; | |
701 | } | |
702 | ||
703 | if (real_tx_hang) { | |
704 | fm10k_tx_timeout_reset(interface); | |
705 | } else { | |
706 | netif_info(interface, drv, netdev, | |
707 | "Fake Tx hang detected with timeout of %d seconds\n", | |
a4fcad65 | 708 | netdev->watchdog_timeo / HZ); |
b101c962 AD |
709 | |
710 | /* fake Tx hang - increase the kernel timeout */ | |
711 | if (netdev->watchdog_timeo < TX_TIMEO_LIMIT) | |
712 | netdev->watchdog_timeo *= 2; | |
713 | } | |
714 | } | |
715 | ||
8f5e20d4 AD |
716 | static int fm10k_uc_vlan_unsync(struct net_device *netdev, |
717 | const unsigned char *uc_addr) | |
718 | { | |
719 | struct fm10k_intfc *interface = netdev_priv(netdev); | |
720 | struct fm10k_hw *hw = &interface->hw; | |
721 | u16 glort = interface->glort; | |
722 | u16 vid = interface->vid; | |
723 | bool set = !!(vid / VLAN_N_VID); | |
724 | int err; | |
725 | ||
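/* fm10k_update_vid() stores vid + VLAN_N_VID in interface->vid when a VLAN is being added, so the division above recovers whether this unsync adds or removes filters, and the masking below restores the real VLAN ID */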
726 | /* drop any leading bits on the VLAN ID */ | |
727 | vid &= VLAN_N_VID - 1; | |
728 | ||
729 | err = hw->mac.ops.update_uc_addr(hw, glort, uc_addr, vid, set, 0); | |
730 | if (err) | |
731 | return err; | |
732 | ||
733 | /* return non-zero value as we are only doing a partial sync/unsync */ | |
734 | return 1; | |
735 | } | |
736 | ||
737 | static int fm10k_mc_vlan_unsync(struct net_device *netdev, | |
738 | const unsigned char *mc_addr) | |
739 | { | |
740 | struct fm10k_intfc *interface = netdev_priv(netdev); | |
741 | struct fm10k_hw *hw = &interface->hw; | |
742 | u16 glort = interface->glort; | |
743 | u16 vid = interface->vid; | |
744 | bool set = !!(vid / VLAN_N_VID); | |
745 | int err; | |
746 | ||
747 | /* drop any leading bits on the VLAN ID */ | |
748 | vid &= VLAN_N_VID - 1; | |
749 | ||
750 | err = hw->mac.ops.update_mc_addr(hw, glort, mc_addr, vid, set); | |
751 | if (err) | |
752 | return err; | |
753 | ||
754 | /* return non-zero value as we are only doing a partial sync/unsync */ | |
755 | return 1; | |
756 | } | |
757 | ||
758 | static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) | |
759 | { | |
760 | struct fm10k_intfc *interface = netdev_priv(netdev); | |
761 | struct fm10k_hw *hw = &interface->hw; | |
762 | s32 err; | |
e71c9318 | 763 | int i; |
8f5e20d4 AD |
764 | |
765 | /* updates do not apply to VLAN 0 */ | |
766 | if (!vid) | |
767 | return 0; | |
768 | ||
769 | if (vid >= VLAN_N_VID) | |
770 | return -EINVAL; | |
771 | ||
772 | /* Verify we have permission to add VLANs */ | |
773 | if (hw->mac.vlan_override) | |
774 | return -EACCES; | |
775 | ||
8f5e20d4 AD |
776 | /* update active_vlans bitmask */ |
777 | set_bit(vid, interface->active_vlans); | |
778 | if (!set) | |
779 | clear_bit(vid, interface->active_vlans); | |
780 | ||
aa502b4a | 781 | /* disable the default VLAN ID on ring if we have an active VLAN */ |
e71c9318 JK |
782 | for (i = 0; i < interface->num_rx_queues; i++) { |
783 | struct fm10k_ring *rx_ring = interface->rx_ring[i]; | |
784 | u16 rx_vid = rx_ring->vid & (VLAN_N_VID - 1); | |
785 | ||
786 | if (test_bit(rx_vid, interface->active_vlans)) | |
787 | rx_ring->vid |= FM10K_VLAN_CLEAR; | |
788 | else | |
789 | rx_ring->vid &= ~FM10K_VLAN_CLEAR; | |
790 | } | |
791 | ||
3d02b3df BA |
792 | /* Do not remove default VLAN ID related entries from VLAN and MAC |
793 | * tables | |
794 | */ | |
56f0569e | 795 | if (!set && vid == hw->mac.default_vid) |
661b2067 JK |
796 | return 0; |
797 | ||
3f0bdb2e JK |
798 | /* Do not throw an error if the interface is down. We will sync once |
799 | * we come up | |
800 | */ | |
801 | if (test_bit(__FM10K_DOWN, &interface->state)) | |
802 | return 0; | |
803 | ||
8f5e20d4 AD |
804 | fm10k_mbx_lock(interface); |
805 | ||
eca32047 | 806 | /* only need to update the VLAN if not in promiscuous mode */ |
8f5e20d4 AD |
807 | if (!(netdev->flags & IFF_PROMISC)) { |
808 | err = hw->mac.ops.update_vlan(hw, vid, 0, set); | |
809 | if (err) | |
13cb2dad | 810 | goto err_out; |
8f5e20d4 AD |
811 | } |
812 | ||
813 | /* update our base MAC address */ | |
814 | err = hw->mac.ops.update_uc_addr(hw, interface->glort, hw->mac.addr, | |
815 | vid, set, 0); | |
816 | if (err) | |
13cb2dad | 817 | goto err_out; |
8f5e20d4 | 818 | |
aa502b4a | 819 | /* set VLAN ID prior to syncing/unsyncing the VLAN */ |
8f5e20d4 AD |
820 | interface->vid = vid + (set ? VLAN_N_VID : 0); |
821 | ||
822 | /* Update the unicast and multicast address list to add/drop VLAN */ | |
823 | __dev_uc_unsync(netdev, fm10k_uc_vlan_unsync); | |
824 | __dev_mc_unsync(netdev, fm10k_mc_vlan_unsync); | |
825 | ||
13cb2dad | 826 | err_out: |
8f5e20d4 AD |
827 | fm10k_mbx_unlock(interface); |
828 | ||
13cb2dad | 829 | return err; |
8f5e20d4 AD |
830 | } |
831 | ||
832 | static int fm10k_vlan_rx_add_vid(struct net_device *netdev, | |
833 | __always_unused __be16 proto, u16 vid) | |
834 | { | |
835 | /* update VLAN and address table based on changes */ | |
836 | return fm10k_update_vid(netdev, vid, true); | |
837 | } | |
838 | ||
839 | static int fm10k_vlan_rx_kill_vid(struct net_device *netdev, | |
840 | __always_unused __be16 proto, u16 vid) | |
841 | { | |
842 | /* update VLAN and address table based on changes */ | |
843 | return fm10k_update_vid(netdev, vid, false); | |
844 | } | |
845 | ||
846 | static u16 fm10k_find_next_vlan(struct fm10k_intfc *interface, u16 vid) | |
847 | { | |
848 | struct fm10k_hw *hw = &interface->hw; | |
849 | u16 default_vid = hw->mac.default_vid; | |
850 | u16 vid_limit = vid < default_vid ? default_vid : VLAN_N_VID; | |
851 | ||
852 | vid = find_next_bit(interface->active_vlans, vid_limit, ++vid); | |
853 | ||
854 | return vid; | |
855 | } | |
856 | ||
857 | static void fm10k_clear_unused_vlans(struct fm10k_intfc *interface) | |
858 | { | |
859 | struct fm10k_hw *hw = &interface->hw; | |
860 | u32 vid, prev_vid; | |
861 | ||
862 | /* loop through and find any gaps in the table */ | |
863 | for (vid = 0, prev_vid = 0; | |
864 | prev_vid < VLAN_N_VID; | |
865 | prev_vid = vid + 1, vid = fm10k_find_next_vlan(interface, vid)) { | |
866 | if (prev_vid == vid) | |
867 | continue; | |
868 | ||
869 | /* send request to clear multiple bits at a time */ | |
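/* the width of the gap between two active VLANs is packed into the upper bits of the vid argument via FM10K_VLAN_LENGTH_SHIFT, so a single update_vlan() request covers the whole unused run */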
870 | prev_vid += (vid - prev_vid - 1) << FM10K_VLAN_LENGTH_SHIFT; | |
871 | hw->mac.ops.update_vlan(hw, prev_vid, 0, false); | |
872 | } | |
873 | } | |
874 | ||
875 | static int __fm10k_uc_sync(struct net_device *dev, | |
876 | const unsigned char *addr, bool sync) | |
877 | { | |
878 | struct fm10k_intfc *interface = netdev_priv(dev); | |
879 | struct fm10k_hw *hw = &interface->hw; | |
880 | u16 vid, glort = interface->glort; | |
881 | s32 err; | |
882 | ||
883 | if (!is_valid_ether_addr(addr)) | |
884 | return -EADDRNOTAVAIL; | |
885 | ||
886 | /* update table with current entries */ | |
887 | for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0; | |
888 | vid < VLAN_N_VID; | |
889 | vid = fm10k_find_next_vlan(interface, vid)) { | |
890 | err = hw->mac.ops.update_uc_addr(hw, glort, addr, | |
891 | vid, sync, 0); | |
892 | if (err) | |
893 | return err; | |
894 | } | |
895 | ||
896 | return 0; | |
897 | } | |
898 | ||
899 | static int fm10k_uc_sync(struct net_device *dev, | |
900 | const unsigned char *addr) | |
901 | { | |
902 | return __fm10k_uc_sync(dev, addr, true); | |
903 | } | |
904 | ||
905 | static int fm10k_uc_unsync(struct net_device *dev, | |
906 | const unsigned char *addr) | |
907 | { | |
908 | return __fm10k_uc_sync(dev, addr, false); | |
909 | } | |
910 | ||
0e7b3644 AD |
911 | static int fm10k_set_mac(struct net_device *dev, void *p) |
912 | { | |
8f5e20d4 AD |
913 | struct fm10k_intfc *interface = netdev_priv(dev); |
914 | struct fm10k_hw *hw = &interface->hw; | |
0e7b3644 AD |
915 | struct sockaddr *addr = p; |
916 | s32 err = 0; | |
917 | ||
918 | if (!is_valid_ether_addr(addr->sa_data)) | |
919 | return -EADDRNOTAVAIL; | |
920 | ||
8f5e20d4 AD |
921 | if (dev->flags & IFF_UP) { |
922 | /* setting MAC address requires mailbox */ | |
923 | fm10k_mbx_lock(interface); | |
924 | ||
925 | err = fm10k_uc_sync(dev, addr->sa_data); | |
926 | if (!err) | |
927 | fm10k_uc_unsync(dev, hw->mac.addr); | |
928 | ||
929 | fm10k_mbx_unlock(interface); | |
930 | } | |
931 | ||
0e7b3644 AD |
932 | if (!err) { |
933 | ether_addr_copy(dev->dev_addr, addr->sa_data); | |
8f5e20d4 | 934 | ether_addr_copy(hw->mac.addr, addr->sa_data); |
0e7b3644 AD |
935 | dev->addr_assign_type &= ~NET_ADDR_RANDOM; |
936 | } | |
937 | ||
8f5e20d4 AD |
938 | /* if we had a mailbox error suggest trying again */ |
939 | return err ? -EAGAIN : 0; | |
940 | } | |
941 | ||
942 | static int __fm10k_mc_sync(struct net_device *dev, | |
943 | const unsigned char *addr, bool sync) | |
944 | { | |
945 | struct fm10k_intfc *interface = netdev_priv(dev); | |
946 | struct fm10k_hw *hw = &interface->hw; | |
947 | u16 vid, glort = interface->glort; | |
8f5e20d4 AD |
948 | |
949 | /* update table with current entries */ | |
950 | for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0; | |
951 | vid < VLAN_N_VID; | |
952 | vid = fm10k_find_next_vlan(interface, vid)) { | |
745136a8 | 953 | hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync); |
8f5e20d4 AD |
954 | } |
955 | ||
956 | return 0; | |
957 | } | |
958 | ||
959 | static int fm10k_mc_sync(struct net_device *dev, | |
960 | const unsigned char *addr) | |
961 | { | |
962 | return __fm10k_mc_sync(dev, addr, true); | |
963 | } | |
964 | ||
965 | static int fm10k_mc_unsync(struct net_device *dev, | |
966 | const unsigned char *addr) | |
967 | { | |
968 | return __fm10k_mc_sync(dev, addr, false); | |
0e7b3644 AD |
969 | } |
970 | ||
971 | static void fm10k_set_rx_mode(struct net_device *dev) | |
972 | { | |
8f5e20d4 AD |
973 | struct fm10k_intfc *interface = netdev_priv(dev); |
974 | struct fm10k_hw *hw = &interface->hw; | |
975 | int xcast_mode; | |
976 | ||
977 | /* no need to update the hardware if we are not running */ | |
978 | if (!(dev->flags & IFF_UP)) | |
979 | return; | |
980 | ||
981 | /* determine new mode based on flags */ | |
982 | xcast_mode = (dev->flags & IFF_PROMISC) ? FM10K_XCAST_MODE_PROMISC : | |
983 | (dev->flags & IFF_ALLMULTI) ? FM10K_XCAST_MODE_ALLMULTI : | |
984 | (dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) ? | |
985 | FM10K_XCAST_MODE_MULTI : FM10K_XCAST_MODE_NONE; | |
986 | ||
987 | fm10k_mbx_lock(interface); | |
988 | ||
a7731cc8 | 989 | /* update xcast mode first, but only if it changed */ |
8f5e20d4 AD |
990 | if (interface->xcast_mode != xcast_mode) { |
991 | /* update VLAN table */ | |
992 | if (xcast_mode == FM10K_XCAST_MODE_PROMISC) | |
993 | hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, 0, true); | |
994 | if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC) | |
995 | fm10k_clear_unused_vlans(interface); | |
996 | ||
997 | /* update xcast mode */ | |
998 | hw->mac.ops.update_xcast_mode(hw, interface->glort, xcast_mode); | |
999 | ||
1000 | /* record updated xcast mode state */ | |
1001 | interface->xcast_mode = xcast_mode; | |
1002 | } | |
1003 | ||
a7731cc8 JK |
1004 | /* synchronize all of the addresses */ |
1005 | if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { | |
1006 | __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync); | |
1007 | if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) | |
1008 | __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync); | |
1009 | } | |
1010 | ||
8f5e20d4 AD |
1011 | fm10k_mbx_unlock(interface); |
1012 | } | |
1013 | ||
1014 | void fm10k_restore_rx_state(struct fm10k_intfc *interface) | |
1015 | { | |
1016 | struct net_device *netdev = interface->netdev; | |
1017 | struct fm10k_hw *hw = &interface->hw; | |
1018 | int xcast_mode; | |
1019 | u16 vid, glort; | |
1020 | ||
1021 | /* record glort for this interface */ | |
1022 | glort = interface->glort; | |
1023 | ||
1024 | /* convert interface flags to xcast mode */ | |
1025 | if (netdev->flags & IFF_PROMISC) | |
1026 | xcast_mode = FM10K_XCAST_MODE_PROMISC; | |
1027 | else if (netdev->flags & IFF_ALLMULTI) | |
1028 | xcast_mode = FM10K_XCAST_MODE_ALLMULTI; | |
1029 | else if (netdev->flags & (IFF_BROADCAST | IFF_MULTICAST)) | |
1030 | xcast_mode = FM10K_XCAST_MODE_MULTI; | |
1031 | else | |
1032 | xcast_mode = FM10K_XCAST_MODE_NONE; | |
1033 | ||
1034 | fm10k_mbx_lock(interface); | |
1035 | ||
1036 | /* Enable logical port */ | |
1037 | hw->mac.ops.update_lport_state(hw, glort, interface->glort_count, true); | |
1038 | ||
1039 | /* update VLAN table */ | |
1040 | hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, 0, | |
1041 | xcast_mode == FM10K_XCAST_MODE_PROMISC); | |
1042 | ||
1043 | /* Add filter for VLAN 0 */ | |
1044 | hw->mac.ops.update_vlan(hw, 0, 0, true); | |
1045 | ||
1046 | /* update table with current entries */ | |
1047 | for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0; | |
1048 | vid < VLAN_N_VID; | |
1049 | vid = fm10k_find_next_vlan(interface, vid)) { | |
1050 | hw->mac.ops.update_vlan(hw, vid, 0, true); | |
1051 | hw->mac.ops.update_uc_addr(hw, glort, hw->mac.addr, | |
1052 | vid, true, 0); | |
1053 | } | |
1054 | ||
5c2d642f | 1055 | /* update xcast mode before synchronizing addresses */ |
a7731cc8 JK |
1056 | hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode); |
1057 | ||
eca32047 | 1058 | /* synchronize all of the addresses */ |
8f5e20d4 AD |
1059 | if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { |
1060 | __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync); | |
1061 | if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) | |
1062 | __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync); | |
1063 | } | |
1064 | ||
8f5e20d4 AD |
1065 | fm10k_mbx_unlock(interface); |
1066 | ||
1067 | /* record updated xcast mode state */ | |
1068 | interface->xcast_mode = xcast_mode; | |
76a540d4 AD |
1069 | |
1070 | /* Restore tunnel configuration */ | |
1071 | fm10k_restore_vxlan_port(interface); | |
8f5e20d4 AD |
1072 | } |
1073 | ||
1074 | void fm10k_reset_rx_state(struct fm10k_intfc *interface) | |
1075 | { | |
1076 | struct net_device *netdev = interface->netdev; | |
1077 | struct fm10k_hw *hw = &interface->hw; | |
1078 | ||
1079 | fm10k_mbx_lock(interface); | |
1080 | ||
1081 | /* clear the logical port state on lower device */ | |
1082 | hw->mac.ops.update_lport_state(hw, interface->glort, | |
1083 | interface->glort_count, false); | |
1084 | ||
1085 | fm10k_mbx_unlock(interface); | |
1086 | ||
1087 | /* reset flags to default state */ | |
1088 | interface->xcast_mode = FM10K_XCAST_MODE_NONE; | |
1089 | ||
1090 | /* clear the sync flag since the lport has been dropped */ | |
1091 | __dev_uc_unsync(netdev, NULL); | |
1092 | __dev_mc_unsync(netdev, NULL); | |
0e7b3644 AD |
1093 | } |
1094 | ||
e27ef599 AD |
1095 | /** |
1096 | * fm10k_get_stats64 - Get System Network Statistics | |
1097 | * @netdev: network interface device structure | |
1098 | * @stats: storage space for 64bit statistics | |
1099 | * | |
1100 | * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This | |
1101 | * function replaces fm10k_get_stats for kernels which support it. | |
1102 | */ | |
1103 | static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev, | |
1104 | struct rtnl_link_stats64 *stats) | |
1105 | { | |
1106 | struct fm10k_intfc *interface = netdev_priv(netdev); | |
1107 | struct fm10k_ring *ring; | |
1108 | unsigned int start, i; | |
1109 | u64 bytes, packets; | |
1110 | ||
1111 | rcu_read_lock(); | |
1112 | ||
1113 | for (i = 0; i < interface->num_rx_queues; i++) { | |
1114 | ring = ACCESS_ONCE(interface->rx_ring[i]); | |
1115 | ||
1116 | if (!ring) | |
1117 | continue; | |
1118 | ||
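/* loop on the ring's u64_stats seqcount so the packets/bytes pair is read as a consistent snapshot, even on 32-bit systems where 64-bit counters cannot be read atomically */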
1119 | do { | |
1120 | start = u64_stats_fetch_begin_irq(&ring->syncp); | |
1121 | packets = ring->stats.packets; | |
1122 | bytes = ring->stats.bytes; | |
1123 | } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); | |
1124 | ||
1125 | stats->rx_packets += packets; | |
1126 | stats->rx_bytes += bytes; | |
1127 | } | |
1128 | ||
1129 | for (i = 0; i < interface->num_tx_queues; i++) { | |
f4e25f6e | 1130 | ring = ACCESS_ONCE(interface->tx_ring[i]); |
e27ef599 AD |
1131 | |
1132 | if (!ring) | |
1133 | continue; | |
1134 | ||
1135 | do { | |
1136 | start = u64_stats_fetch_begin_irq(&ring->syncp); | |
1137 | packets = ring->stats.packets; | |
1138 | bytes = ring->stats.bytes; | |
1139 | } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); | |
1140 | ||
1141 | stats->tx_packets += packets; | |
1142 | stats->tx_bytes += bytes; | |
1143 | } | |
1144 | ||
1145 | rcu_read_unlock(); | |
1146 | ||
1147 | /* following stats updated by fm10k_service_task() */ | |
1148 | stats->rx_missed_errors = netdev->stats.rx_missed_errors; | |
1149 | ||
1150 | return stats; | |
1151 | } | |
1152 | ||
aa3ac822 AD |
1153 | int fm10k_setup_tc(struct net_device *dev, u8 tc) |
1154 | { | |
1155 | struct fm10k_intfc *interface = netdev_priv(dev); | |
09f8a82b | 1156 | int err; |
aa3ac822 AD |
1157 | |
1158 | /* Currently only the PF supports priority classes */ | |
1159 | if (tc && (interface->hw.mac.type != fm10k_mac_pf)) | |
1160 | return -EINVAL; | |
1161 | ||
1162 | /* Hardware supports up to 8 traffic classes */ | |
1163 | if (tc > 8) | |
1164 | return -EINVAL; | |
1165 | ||
1166 | /* Hardware has to reinitialize queues to match packet | |
1167 | * buffer alignment. Unfortunately, the hardware is not | |
1168 | * flexible enough to do this dynamically. | |
1169 | */ | |
1170 | if (netif_running(dev)) | |
1171 | fm10k_close(dev); | |
1172 | ||
1173 | fm10k_mbx_free_irq(interface); | |
1174 | ||
1175 | fm10k_clear_queueing_scheme(interface); | |
1176 | ||
1177 | /* we expect the prio_tc map to be repopulated later */ | |
1178 | netdev_reset_tc(dev); | |
1179 | netdev_set_num_tc(dev, tc); | |
1180 | ||
09f8a82b AD |
1181 | err = fm10k_init_queueing_scheme(interface); |
1182 | if (err) | |
1183 | goto err_queueing_scheme; | |
aa3ac822 | 1184 | |
09f8a82b AD |
1185 | err = fm10k_mbx_request_irq(interface); |
1186 | if (err) | |
1187 | goto err_mbx_irq; | |
aa3ac822 | 1188 | |
09f8a82b AD |
1189 | err = netif_running(dev) ? fm10k_open(dev) : 0; |
1190 | if (err) | |
1191 | goto err_open; | |
aa3ac822 AD |
1192 | |
1193 | /* flag to indicate SWPRI has yet to be updated */ | |
1194 | interface->flags |= FM10K_FLAG_SWPRI_CONFIG; | |
1195 | ||
1196 | return 0; | |
09f8a82b AD |
1197 | err_open: |
1198 | fm10k_mbx_free_irq(interface); | |
1199 | err_mbx_irq: | |
1200 | fm10k_clear_queueing_scheme(interface); | |
1201 | err_queueing_scheme: | |
1202 | netif_device_detach(dev); | |
1203 | ||
1204 | return err; | |
aa3ac822 AD |
1205 | } |
1206 | ||
16e5cc64 JF |
1207 | static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto, |
1208 | struct tc_to_netdev *tc) | |
e4c6734e | 1209 | { |
5eb4dce3 | 1210 | if (tc->type != TC_SETUP_MQPRIO) |
e4c6734e JF |
1211 | return -EINVAL; |
1212 | ||
16e5cc64 | 1213 | return fm10k_setup_tc(dev, tc->tc); |
e4c6734e JF |
1214 | } |
1215 | ||
a211e013 AD |
1216 | static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) |
1217 | { | |
1218 | switch (cmd) { | |
1219 | case SIOCGHWTSTAMP: | |
1220 | return fm10k_get_ts_config(netdev, ifr); | |
1221 | case SIOCSHWTSTAMP: | |
1222 | return fm10k_set_ts_config(netdev, ifr); | |
1223 | default: | |
1224 | return -EOPNOTSUPP; | |
1225 | } | |
1226 | } | |
1227 | ||
5cd5e2e9 AD |
1228 | static void fm10k_assign_l2_accel(struct fm10k_intfc *interface, |
1229 | struct fm10k_l2_accel *l2_accel) | |
1230 | { | |
1231 | struct fm10k_ring *ring; | |
1232 | int i; | |
1233 | ||
1234 | for (i = 0; i < interface->num_rx_queues; i++) { | |
1235 | ring = interface->rx_ring[i]; | |
1236 | rcu_assign_pointer(ring->l2_accel, l2_accel); | |
1237 | } | |
1238 | ||
1239 | interface->l2_accel = l2_accel; | |
1240 | } | |
1241 | ||
1242 | static void *fm10k_dfwd_add_station(struct net_device *dev, | |
1243 | struct net_device *sdev) | |
1244 | { | |
1245 | struct fm10k_intfc *interface = netdev_priv(dev); | |
1246 | struct fm10k_l2_accel *l2_accel = interface->l2_accel; | |
1247 | struct fm10k_l2_accel *old_l2_accel = NULL; | |
1248 | struct fm10k_dglort_cfg dglort = { 0 }; | |
1249 | struct fm10k_hw *hw = &interface->hw; | |
1250 | int size = 0, i; | |
1251 | u16 glort; | |
1252 | ||
1253 | /* allocate l2 accel structure if it is not available */ | |
1254 | if (!l2_accel) { | |
1255 | /* verify there are enough free GLORTs to support l2_accel */ | |
1256 | if (interface->glort_count < 7) | |
1257 | return ERR_PTR(-EBUSY); | |
1258 | ||
1259 | size = offsetof(struct fm10k_l2_accel, macvlan[7]); | |
1260 | l2_accel = kzalloc(size, GFP_KERNEL); | |
1261 | if (!l2_accel) | |
1262 | return ERR_PTR(-ENOMEM); | |
1263 | ||
1264 | l2_accel->size = 7; | |
1265 | l2_accel->dglort = interface->glort; | |
1266 | ||
1267 | /* update pointers */ | |
1268 | fm10k_assign_l2_accel(interface, l2_accel); | |
1269 | /* do not expand if we are at our limit */ | |
1270 | } else if ((l2_accel->count == FM10K_MAX_STATIONS) || | |
1271 | (l2_accel->count == (interface->glort_count - 1))) { | |
1272 | return ERR_PTR(-EBUSY); | |
1273 | /* expand if we have hit the size limit */ | |
1274 | } else if (l2_accel->count == l2_accel->size) { | |
1275 | old_l2_accel = l2_accel; | |
1276 | size = offsetof(struct fm10k_l2_accel, | |
1277 | macvlan[(l2_accel->size * 2) + 1]); | |
1278 | l2_accel = kzalloc(size, GFP_KERNEL); | |
1279 | if (!l2_accel) | |
1280 | return ERR_PTR(-ENOMEM); | |
1281 | ||
1282 | memcpy(l2_accel, old_l2_accel, | |
1283 | offsetof(struct fm10k_l2_accel, | |
1284 | macvlan[old_l2_accel->size])); | |
1285 | ||
1286 | l2_accel->size = (old_l2_accel->size * 2) + 1; | |
1287 | ||
1288 | /* update pointers */ | |
1289 | fm10k_assign_l2_accel(interface, l2_accel); | |
1290 | kfree_rcu(old_l2_accel, rcu); | |
1291 | } | |
1292 | ||
1293 | /* add macvlan to accel table, and record GLORT for position */ | |
1294 | for (i = 0; i < l2_accel->size; i++) { | |
1295 | if (!l2_accel->macvlan[i]) | |
1296 | break; | |
1297 | } | |
1298 | ||
1299 | /* record station */ | |
1300 | l2_accel->macvlan[i] = sdev; | |
1301 | l2_accel->count++; | |
1302 | ||
1303 | /* configure default DGLORT mapping for RSS/DCB */ | |
1304 | dglort.idx = fm10k_dglort_pf_rss; | |
1305 | dglort.inner_rss = 1; | |
1306 | dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask); | |
1307 | dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask); | |
1308 | dglort.glort = interface->glort; | |
1309 | dglort.shared_l = fls(l2_accel->size); | |
1310 | hw->mac.ops.configure_dglort_map(hw, &dglort); | |
1311 | ||
1312 | /* Add rules for this specific dglort to the switch */ | |
1313 | fm10k_mbx_lock(interface); | |
1314 | ||
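/* the PF keeps the base GLORT for itself, so macvlan slot i is addressed as dglort + 1 + i; this is also why spare GLORTs are required before the first station can be added */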
1315 | glort = l2_accel->dglort + 1 + i; | |
1316 | hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_MULTI); | |
1317 | hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, 0, true, 0); | |
1318 | ||
1319 | fm10k_mbx_unlock(interface); | |
1320 | ||
1321 | return sdev; | |
1322 | } | |
1323 | ||
1324 | static void fm10k_dfwd_del_station(struct net_device *dev, void *priv) | |
1325 | { | |
1326 | struct fm10k_intfc *interface = netdev_priv(dev); | |
1327 | struct fm10k_l2_accel *l2_accel = ACCESS_ONCE(interface->l2_accel); | |
1328 | struct fm10k_dglort_cfg dglort = { 0 }; | |
1329 | struct fm10k_hw *hw = &interface->hw; | |
1330 | struct net_device *sdev = priv; | |
1331 | int i; | |
1332 | u16 glort; | |
1333 | ||
1334 | if (!l2_accel) | |
1335 | return; | |
1336 | ||
1337 | /* search table for matching interface */ | |
1338 | for (i = 0; i < l2_accel->size; i++) { | |
1339 | if (l2_accel->macvlan[i] == sdev) | |
1340 | break; | |
1341 | } | |
1342 | ||
1343 | /* exit if macvlan not found */ | |
1344 | if (i == l2_accel->size) | |
1345 | return; | |
1346 | ||
1347 | /* Remove any rules specific to this dglort */ | |
1348 | fm10k_mbx_lock(interface); | |
1349 | ||
1350 | glort = l2_accel->dglort + 1 + i; | |
1351 | hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_NONE); | |
1352 | hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, 0, false, 0); | |
1353 | ||
1354 | fm10k_mbx_unlock(interface); | |
1355 | ||
1356 | /* record removal */ | |
1357 | l2_accel->macvlan[i] = NULL; | |
1358 | l2_accel->count--; | |
1359 | ||
1360 | /* configure default DGLORT mapping for RSS/DCB */ | |
1361 | dglort.idx = fm10k_dglort_pf_rss; | |
1362 | dglort.inner_rss = 1; | |
1363 | dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask); | |
1364 | dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask); | |
1365 | dglort.glort = interface->glort; | |
f1f3322e | 1366 | dglort.shared_l = fls(l2_accel->size); |
5cd5e2e9 AD |
1367 | hw->mac.ops.configure_dglort_map(hw, &dglort); |
1368 | ||
1369 | /* If table is empty remove it */ | |
1370 | if (l2_accel->count == 0) { | |
1371 | fm10k_assign_l2_accel(interface, NULL); | |
1372 | kfree_rcu(l2_accel, rcu); | |
1373 | } | |
1374 | } | |
1375 | ||
5bf33dc6 MV |
1376 | static netdev_features_t fm10k_features_check(struct sk_buff *skb, |
1377 | struct net_device *dev, | |
1378 | netdev_features_t features) | |
1379 | { | |
1380 | if (!skb->encapsulation || fm10k_tx_encap_offload(skb)) | |
1381 | return features; | |
1382 | ||
a188222b | 1383 | return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); |
5bf33dc6 MV |
1384 | } |
1385 | ||
0e7b3644 | 1386 | static const struct net_device_ops fm10k_netdev_ops = { |
504c5eac AD |
1387 | .ndo_open = fm10k_open, |
1388 | .ndo_stop = fm10k_close, | |
0e7b3644 AD |
1389 | .ndo_validate_addr = eth_validate_addr, |
1390 | .ndo_start_xmit = fm10k_xmit_frame, | |
1391 | .ndo_set_mac_address = fm10k_set_mac, | |
1392 | .ndo_change_mtu = fm10k_change_mtu, | |
b101c962 | 1393 | .ndo_tx_timeout = fm10k_tx_timeout, |
8f5e20d4 AD |
1394 | .ndo_vlan_rx_add_vid = fm10k_vlan_rx_add_vid, |
1395 | .ndo_vlan_rx_kill_vid = fm10k_vlan_rx_kill_vid, | |
0e7b3644 | 1396 | .ndo_set_rx_mode = fm10k_set_rx_mode, |
e27ef599 | 1397 | .ndo_get_stats64 = fm10k_get_stats64, |
e4c6734e | 1398 | .ndo_setup_tc = __fm10k_setup_tc, |
883a9ccb AD |
1399 | .ndo_set_vf_mac = fm10k_ndo_set_vf_mac, |
1400 | .ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan, | |
1401 | .ndo_set_vf_rate = fm10k_ndo_set_vf_bw, | |
1402 | .ndo_get_vf_config = fm10k_ndo_get_vf_config, | |
76a540d4 AD |
1403 | .ndo_add_vxlan_port = fm10k_add_vxlan_port, |
1404 | .ndo_del_vxlan_port = fm10k_del_vxlan_port, | |
a211e013 | 1405 | .ndo_do_ioctl = fm10k_ioctl, |
5cd5e2e9 AD |
1406 | .ndo_dfwd_add_station = fm10k_dfwd_add_station, |
1407 | .ndo_dfwd_del_station = fm10k_dfwd_del_station, | |
8b4a98c7 JK |
1408 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1409 | .ndo_poll_controller = fm10k_netpoll, | |
1410 | #endif | |
5bf33dc6 | 1411 | .ndo_features_check = fm10k_features_check, |
0e7b3644 AD |
1412 | }; |
1413 | ||
1414 | #define DEFAULT_DEBUG_LEVEL_SHIFT 3 | |
1415 | ||
e0244903 | 1416 | struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info) |
0e7b3644 | 1417 | { |
e0244903 | 1418 | netdev_features_t hw_features; |
0e7b3644 AD |
1419 | struct fm10k_intfc *interface; |
1420 | struct net_device *dev; | |
1421 | ||
e27ef599 | 1422 | dev = alloc_etherdev_mq(sizeof(struct fm10k_intfc), MAX_QUEUES); |
0e7b3644 AD |
1423 | if (!dev) |
1424 | return NULL; | |
1425 | ||
1426 | /* set net device and ethtool ops */ | |
1427 | dev->netdev_ops = &fm10k_netdev_ops; | |
82dd0f7e | 1428 | fm10k_set_ethtool_ops(dev); |
0e7b3644 AD |
1429 | |
1430 | /* configure default debug level */ | |
1431 | interface = netdev_priv(dev); | |
1432 | interface->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; | |
1433 | ||
1434 | /* configure default features */ | |
76a540d4 AD |
1435 | dev->features |= NETIF_F_IP_CSUM | |
1436 | NETIF_F_IPV6_CSUM | | |
1437 | NETIF_F_SG | | |
1438 | NETIF_F_TSO | | |
1439 | NETIF_F_TSO6 | | |
1440 | NETIF_F_TSO_ECN | | |
76a540d4 AD |
1441 | NETIF_F_RXHASH | |
1442 | NETIF_F_RXCSUM; | |
0e7b3644 | 1443 | |
e0244903 JK |
1444 | /* Only the PF can support VXLAN and NVGRE tunnel offloads */ |
1445 | if (info->mac == fm10k_mac_pf) { | |
1446 | dev->hw_enc_features = NETIF_F_IP_CSUM | | |
1447 | NETIF_F_TSO | | |
1448 | NETIF_F_TSO6 | | |
1449 | NETIF_F_TSO_ECN | | |
1450 | NETIF_F_GSO_UDP_TUNNEL | | |
1451 | NETIF_F_IPV6_CSUM | | |
1452 | NETIF_F_SG; | |
1453 | ||
1454 | dev->features |= NETIF_F_GSO_UDP_TUNNEL; | |
1455 | } | |
1456 | ||
0e7b3644 | 1457 | /* all features defined to this point should be changeable */ |
e0244903 | 1458 | hw_features = dev->features; |
0e7b3644 | 1459 | |
5cd5e2e9 | 1460 | /* allow user to enable L2 forwarding acceleration */ |
e0244903 | 1461 | hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; |
5cd5e2e9 | 1462 | |
0e7b3644 AD |
1463 | /* configure VLAN features */ |
1464 | dev->vlan_features |= dev->features; | |
1465 | ||
8f5e20d4 AD |
1466 | /* we want to leave these both on as we cannot disable VLAN tag |
1467 | * insertion or stripping on the hardware since it is contained | |
1468 | * in the FTAG and not in the frame itself. | |
1469 | */ | |
1470 | dev->features |= NETIF_F_HW_VLAN_CTAG_TX | | |
1471 | NETIF_F_HW_VLAN_CTAG_RX | | |
1472 | NETIF_F_HW_VLAN_CTAG_FILTER; | |
1473 | ||
1474 | dev->priv_flags |= IFF_UNICAST_FLT; | |
1475 | ||
e0244903 JK |
1476 | dev->hw_features |= hw_features; |
1477 | ||
0e7b3644 AD |
1478 | return dev; |
1479 | } |