Commit | Line | Data |
---|---|---|
86641094 | 1 | /* Intel(R) Ethernet Switch Host Interface Driver |
9de6a1a6 | 2 | * Copyright(c) 2013 - 2016 Intel Corporation. |
0e7b3644 AD |
3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms and conditions of the GNU General Public License, | |
6 | * version 2, as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope it will be useful, but WITHOUT | |
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
11 | * more details. | |
12 | * | |
13 | * The full GNU General Public License is included in this distribution in | |
14 | * the file called "COPYING". | |
15 | * | |
16 | * Contact Information: | |
17 | * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | |
18 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | |
19 | */ | |
20 | ||
21 | #include "fm10k.h" | |
3abaae42 | 22 | #include <linux/vmalloc.h> |
f174cdbe | 23 | #include <net/udp_tunnel.h> |
3abaae42 AD |
24 | |
25 | /** | |
26 | * fm10k_setup_tx_resources - allocate Tx resources (Descriptors) | |
27 | * @tx_ring: tx descriptor ring (for a specific queue) to setup | |
28 | * | |
29 | * Return 0 on success, negative on failure | |
30 | **/ | |
31 | int fm10k_setup_tx_resources(struct fm10k_ring *tx_ring) | |
32 | { | |
33 | struct device *dev = tx_ring->dev; | |
34 | int size; | |
35 | ||
36 | size = sizeof(struct fm10k_tx_buffer) * tx_ring->count; | |
37 | ||
38 | tx_ring->tx_buffer = vzalloc(size); | |
39 | if (!tx_ring->tx_buffer) | |
40 | goto err; | |
41 | ||
42 | u64_stats_init(&tx_ring->syncp); | |
43 | ||
44 | /* round up to nearest 4K */ | |
45 | tx_ring->size = tx_ring->count * sizeof(struct fm10k_tx_desc); | |
46 | tx_ring->size = ALIGN(tx_ring->size, 4096); | |
47 | ||
48 | tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, | |
49 | &tx_ring->dma, GFP_KERNEL); | |
50 | if (!tx_ring->desc) | |
51 | goto err; | |
52 | ||
53 | return 0; | |
54 | ||
55 | err: | |
56 | vfree(tx_ring->tx_buffer); | |
57 | tx_ring->tx_buffer = NULL; | |
58 | return -ENOMEM; | |
59 | } | |
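/* Sizing sketch for the allocation above (assuming a 16-byte Tx
 * descriptor, which is not spelled out here): a ring with
 * tx_ring->count = 384 needs 384 * 16 = 6144 bytes of descriptor
 * memory, which ALIGN(6144, 4096) rounds up to 8192 bytes for the
 * dma_alloc_coherent() call, while the software tx_buffer array is
 * vzalloc'd separately.
 */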
60 | ||
61 | /** | |
62 | * fm10k_setup_all_tx_resources - allocate all queues Tx resources | |
63 | * @interface: board private structure | |
64 | * | |
65 | * If this function returns with an error, then it's possible one or | |
66 | more of the rings are populated (while the rest are not). It is the | |
67 | caller's duty to clean those orphaned rings. | |
68 | * | |
69 | * Return 0 on success, negative on failure | |
70 | **/ | |
71 | static int fm10k_setup_all_tx_resources(struct fm10k_intfc *interface) | |
72 | { | |
73 | int i, err = 0; | |
74 | ||
75 | for (i = 0; i < interface->num_tx_queues; i++) { | |
76 | err = fm10k_setup_tx_resources(interface->tx_ring[i]); | |
77 | if (!err) | |
78 | continue; | |
79 | ||
80 | netif_err(interface, probe, interface->netdev, | |
81 | "Allocation for Tx Queue %u failed\n", i); | |
82 | goto err_setup_tx; | |
83 | } | |
84 | ||
85 | return 0; | |
86 | err_setup_tx: | |
87 | /* rewind the index freeing the rings as we go */ | |
88 | while (i--) | |
89 | fm10k_free_tx_resources(interface->tx_ring[i]); | |
90 | return err; | |
91 | } | |
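/* Failure-unwind sketch: with eight Tx queues configured, if the
 * allocation for queue 3 fails then i == 3 at err_setup_tx, so
 * "while (i--)" frees queues 2, 1 and 0 and leaves queues 4-7 alone
 * (they were never set up). The Rx variant below unwinds the same way.
 */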
92 | ||
93 | /** | |
94 | * fm10k_setup_rx_resources - allocate Rx resources (Descriptors) | |
95 | * @rx_ring: rx descriptor ring (for a specific queue) to setup | |
96 | * | |
97 | * Returns 0 on success, negative on failure | |
98 | **/ | |
99 | int fm10k_setup_rx_resources(struct fm10k_ring *rx_ring) | |
100 | { | |
101 | struct device *dev = rx_ring->dev; | |
102 | int size; | |
103 | ||
104 | size = sizeof(struct fm10k_rx_buffer) * rx_ring->count; | |
105 | ||
106 | rx_ring->rx_buffer = vzalloc(size); | |
107 | if (!rx_ring->rx_buffer) | |
108 | goto err; | |
109 | ||
110 | u64_stats_init(&rx_ring->syncp); | |
111 | ||
112 | /* Round up to nearest 4K */ | |
113 | rx_ring->size = rx_ring->count * sizeof(union fm10k_rx_desc); | |
114 | rx_ring->size = ALIGN(rx_ring->size, 4096); | |
115 | ||
116 | rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, | |
117 | &rx_ring->dma, GFP_KERNEL); | |
118 | if (!rx_ring->desc) | |
119 | goto err; | |
120 | ||
121 | return 0; | |
122 | err: | |
123 | vfree(rx_ring->rx_buffer); | |
124 | rx_ring->rx_buffer = NULL; | |
125 | return -ENOMEM; | |
126 | } | |
127 | ||
128 | /** | |
129 | * fm10k_setup_all_rx_resources - allocate all queues Rx resources | |
130 | * @interface: board private structure | |
131 | * | |
132 | * If this function returns with an error, then it's possible one or | |
133 | more of the rings are populated (while the rest are not). It is the | |
134 | caller's duty to clean those orphaned rings. | |
135 | * | |
136 | * Return 0 on success, negative on failure | |
137 | **/ | |
138 | static int fm10k_setup_all_rx_resources(struct fm10k_intfc *interface) | |
139 | { | |
140 | int i, err = 0; | |
141 | ||
142 | for (i = 0; i < interface->num_rx_queues; i++) { | |
143 | err = fm10k_setup_rx_resources(interface->rx_ring[i]); | |
144 | if (!err) | |
145 | continue; | |
146 | ||
147 | netif_err(interface, probe, interface->netdev, | |
148 | "Allocation for Rx Queue %u failed\n", i); | |
149 | goto err_setup_rx; | |
150 | } | |
151 | ||
152 | return 0; | |
153 | err_setup_rx: | |
154 | /* rewind the index freeing the rings as we go */ | |
155 | while (i--) | |
156 | fm10k_free_rx_resources(interface->rx_ring[i]); | |
157 | return err; | |
158 | } | |
159 | ||
160 | void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *ring, | |
161 | struct fm10k_tx_buffer *tx_buffer) | |
162 | { | |
163 | if (tx_buffer->skb) { | |
164 | dev_kfree_skb_any(tx_buffer->skb); | |
165 | if (dma_unmap_len(tx_buffer, len)) | |
166 | dma_unmap_single(ring->dev, | |
167 | dma_unmap_addr(tx_buffer, dma), | |
168 | dma_unmap_len(tx_buffer, len), | |
169 | DMA_TO_DEVICE); | |
170 | } else if (dma_unmap_len(tx_buffer, len)) { | |
171 | dma_unmap_page(ring->dev, | |
172 | dma_unmap_addr(tx_buffer, dma), | |
173 | dma_unmap_len(tx_buffer, len), | |
174 | DMA_TO_DEVICE); | |
175 | } | |
176 | tx_buffer->next_to_watch = NULL; | |
177 | tx_buffer->skb = NULL; | |
178 | dma_unmap_len_set(tx_buffer, len, 0); | |
179 | /* tx_buffer must be completely set up in the transmit path */ | |
180 | } | |
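/* Mapping-type sketch: in the transmit path the skb head is expected
 * to be mapped with dma_map_single() and fragments with page mappings,
 * so the teardown above mirrors that split: a buffer that still owns
 * an skb is unmapped with dma_unmap_single(), while any other buffer
 * with a nonzero unmap length is a fragment and uses dma_unmap_page().
 */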
181 | ||
182 | /** | |
183 | * fm10k_clean_tx_ring - Free Tx Buffers | |
184 | * @tx_ring: ring to be cleaned | |
185 | **/ | |
186 | static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring) | |
187 | { | |
188 | struct fm10k_tx_buffer *tx_buffer; | |
189 | unsigned long size; | |
190 | u16 i; | |
191 | ||
192 | /* ring already cleared, nothing to do */ | |
193 | if (!tx_ring->tx_buffer) | |
194 | return; | |
195 | ||
196 | /* Free all the Tx ring sk_buffs */ | |
197 | for (i = 0; i < tx_ring->count; i++) { | |
198 | tx_buffer = &tx_ring->tx_buffer[i]; | |
199 | fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer); | |
200 | } | |
201 | ||
202 | /* reset BQL values */ | |
203 | netdev_tx_reset_queue(txring_txq(tx_ring)); | |
204 | ||
205 | size = sizeof(struct fm10k_tx_buffer) * tx_ring->count; | |
206 | memset(tx_ring->tx_buffer, 0, size); | |
207 | ||
208 | /* Zero out the descriptor ring */ | |
209 | memset(tx_ring->desc, 0, tx_ring->size); | |
210 | } | |
211 | ||
212 | /** | |
213 | * fm10k_free_tx_resources - Free Tx Resources per Queue | |
214 | * @tx_ring: Tx descriptor ring for a specific queue | |
215 | * | |
216 | * Free all transmit software resources | |
217 | **/ | |
218 | void fm10k_free_tx_resources(struct fm10k_ring *tx_ring) | |
219 | { | |
220 | fm10k_clean_tx_ring(tx_ring); | |
221 | ||
222 | vfree(tx_ring->tx_buffer); | |
223 | tx_ring->tx_buffer = NULL; | |
224 | ||
225 | /* if not set, then don't free */ | |
226 | if (!tx_ring->desc) | |
227 | return; | |
228 | ||
229 | dma_free_coherent(tx_ring->dev, tx_ring->size, | |
230 | tx_ring->desc, tx_ring->dma); | |
231 | tx_ring->desc = NULL; | |
232 | } | |
233 | ||
234 | /** | |
235 | * fm10k_clean_all_tx_rings - Free Tx Buffers for all queues | |
236 | * @interface: board private structure | |
237 | **/ | |
238 | void fm10k_clean_all_tx_rings(struct fm10k_intfc *interface) | |
239 | { | |
240 | int i; | |
241 | ||
242 | for (i = 0; i < interface->num_tx_queues; i++) | |
243 | fm10k_clean_tx_ring(interface->tx_ring[i]); | |
244 | } | |
245 | ||
246 | /** | |
247 | * fm10k_free_all_tx_resources - Free Tx Resources for All Queues | |
248 | * @interface: board private structure | |
249 | * | |
250 | * Free all transmit software resources | |
251 | **/ | |
252 | static void fm10k_free_all_tx_resources(struct fm10k_intfc *interface) | |
253 | { | |
254 | int i = interface->num_tx_queues; | |
255 | ||
256 | while (i--) | |
257 | fm10k_free_tx_resources(interface->tx_ring[i]); | |
258 | } | |
259 | ||
260 | /** | |
261 | * fm10k_clean_rx_ring - Free Rx Buffers per Queue | |
262 | * @rx_ring: ring to free buffers from | |
263 | **/ | |
264 | static void fm10k_clean_rx_ring(struct fm10k_ring *rx_ring) | |
265 | { | |
266 | unsigned long size; | |
267 | u16 i; | |
268 | ||
269 | if (!rx_ring->rx_buffer) | |
270 | return; | |
271 | ||
272 | if (rx_ring->skb) | |
273 | dev_kfree_skb(rx_ring->skb); | |
274 | rx_ring->skb = NULL; | |
275 | ||
276 | /* Free all the Rx ring sk_buffs */ | |
277 | for (i = 0; i < rx_ring->count; i++) { | |
278 | struct fm10k_rx_buffer *buffer = &rx_ring->rx_buffer[i]; | |
279 | /* clean-up will only set page pointer to NULL */ | |
280 | if (!buffer->page) | |
281 | continue; | |
282 | ||
283 | dma_unmap_page(rx_ring->dev, buffer->dma, | |
284 | PAGE_SIZE, DMA_FROM_DEVICE); | |
285 | __free_page(buffer->page); | |
286 | ||
287 | buffer->page = NULL; | |
288 | } | |
289 | ||
290 | size = sizeof(struct fm10k_rx_buffer) * rx_ring->count; | |
291 | memset(rx_ring->rx_buffer, 0, size); | |
292 | ||
293 | /* Zero out the descriptor ring */ | |
294 | memset(rx_ring->desc, 0, rx_ring->size); | |
295 | ||
296 | rx_ring->next_to_alloc = 0; | |
297 | rx_ring->next_to_clean = 0; | |
298 | rx_ring->next_to_use = 0; | |
299 | } | |
300 | ||
301 | /** | |
302 | * fm10k_free_rx_resources - Free Rx Resources | |
303 | * @rx_ring: ring to clean the resources from | |
304 | * | |
305 | * Free all receive software resources | |
306 | **/ | |
307 | void fm10k_free_rx_resources(struct fm10k_ring *rx_ring) | |
308 | { | |
309 | fm10k_clean_rx_ring(rx_ring); | |
310 | ||
311 | vfree(rx_ring->rx_buffer); | |
312 | rx_ring->rx_buffer = NULL; | |
313 | ||
314 | /* if not set, then don't free */ | |
315 | if (!rx_ring->desc) | |
316 | return; | |
317 | ||
318 | dma_free_coherent(rx_ring->dev, rx_ring->size, | |
319 | rx_ring->desc, rx_ring->dma); | |
320 | ||
321 | rx_ring->desc = NULL; | |
322 | } | |
323 | ||
324 | /** | |
325 | * fm10k_clean_all_rx_rings - Free Rx Buffers for all queues | |
326 | * @interface: board private structure | |
327 | **/ | |
328 | void fm10k_clean_all_rx_rings(struct fm10k_intfc *interface) | |
329 | { | |
330 | int i; | |
331 | ||
332 | for (i = 0; i < interface->num_rx_queues; i++) | |
333 | fm10k_clean_rx_ring(interface->rx_ring[i]); | |
334 | } | |
335 | ||
336 | /** | |
337 | * fm10k_free_all_rx_resources - Free Rx Resources for All Queues | |
338 | * @interface: board private structure | |
339 | * | |
340 | * Free all receive software resources | |
341 | **/ | |
342 | static void fm10k_free_all_rx_resources(struct fm10k_intfc *interface) | |
343 | { | |
344 | int i = interface->num_rx_queues; | |
345 | ||
346 | while (i--) | |
347 | fm10k_free_rx_resources(interface->rx_ring[i]); | |
348 | } | |
0e7b3644 | 349 | |
504c5eac AD |
350 | /** |
351 | * fm10k_request_glort_range - Request GLORTs for use in configuring rules | |
352 | * @interface: board private structure | |
353 | * | |
eca32047 | 354 | * This function allocates a range of GLORTs for this interface to use. |
504c5eac AD |
355 | **/ |
356 | static void fm10k_request_glort_range(struct fm10k_intfc *interface) | |
357 | { | |
358 | struct fm10k_hw *hw = &interface->hw; | |
359 | u16 mask = (~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT; | |
360 | ||
361 | /* establish GLORT base */ | |
362 | interface->glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE; | |
363 | interface->glort_count = 0; | |
364 | ||
365 | /* nothing we can do until mask is allocated */ | |
366 | if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE) | |
367 | return; | |
368 | ||
883a9ccb AD |
369 | /* we support 3 possible GLORT configurations. |
370 | * 1: VFs consume all but the last 1 | |
371 | * 2: VFs and PF split glorts with possible gap between | |
372 | * 3: VFs allocated first 64, all others belong to PF | |
373 | */ | |
374 | if (mask <= hw->iov.total_vfs) { | |
375 | interface->glort_count = 1; | |
376 | interface->glort += mask; | |
377 | } else if (mask < 64) { | |
378 | interface->glort_count = (mask + 1) / 2; | |
379 | interface->glort += interface->glort_count; | |
380 | } else { | |
381 | interface->glort_count = mask - 63; | |
382 | interface->glort += 64; | |
383 | } | |
504c5eac AD |
384 | } |
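/* Worked example of the three cases above (mask taken as already
 * extracted from dglort_map):
 *  - mask = 15, total_vfs = 16: mask <= total_vfs, so the PF keeps a
 *    single GLORT at base + 15 (case 1).
 *  - mask = 31, total_vfs = 8: 31 < 64, so the space is split and the
 *    PF gets (31 + 1) / 2 = 16 GLORTs starting at base + 16 (case 2).
 *  - mask = 255, total_vfs = 64: the first 64 GLORTs go to the VFs and
 *    the PF gets 255 - 63 = 192 GLORTs starting at base + 64 (case 3).
 */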
385 | ||
76a540d4 | 386 | /** |
f92e0e48 | 387 | * fm10k_free_udp_port_info |
76a540d4 AD |
388 | * @interface: board private structure |
389 | * | |
1ad78292 | 390 | * This function frees both geneve_port and vxlan_port structures |
76a540d4 | 391 | **/ |
f92e0e48 | 392 | static void fm10k_free_udp_port_info(struct fm10k_intfc *interface) |
76a540d4 | 393 | { |
f92e0e48 JK |
394 | struct fm10k_udp_port *port; |
395 | ||
396 | /* flush all entries from vxlan list */ | |
397 | port = list_first_entry_or_null(&interface->vxlan_port, | |
398 | struct fm10k_udp_port, list); | |
399 | while (port) { | |
400 | list_del(&port->list); | |
401 | kfree(port); | |
402 | port = list_first_entry_or_null(&interface->vxlan_port, | |
403 | struct fm10k_udp_port, | |
404 | list); | |
76a540d4 | 405 | } |
1ad78292 JK |
406 | |
407 | /* flush all entries from geneve list */ | |
408 | port = list_first_entry_or_null(&interface->geneve_port, | |
409 | struct fm10k_udp_port, list); | |
410 | while (port) { | |
411 | list_del(&port->list); | |
412 | kfree(port); | |
413 | port = list_first_entry_or_null(&interface->geneve_port, | |
414 | struct fm10k_udp_port, | |
415 | list); | |
416 | } | |
76a540d4 AD |
417 | } |
418 | ||
419 | /** | |
f92e0e48 | 420 | * fm10k_restore_udp_port_info |
76a540d4 AD |
421 | * @interface: board private structure |
422 | * | |
f92e0e48 | 423 | * This function restores the value in the tunnel_cfg register(s) after reset |
76a540d4 | 424 | **/ |
f92e0e48 | 425 | static void fm10k_restore_udp_port_info(struct fm10k_intfc *interface) |
76a540d4 AD |
426 | { |
427 | struct fm10k_hw *hw = &interface->hw; | |
f92e0e48 | 428 | struct fm10k_udp_port *port; |
76a540d4 AD |
429 | |
430 | /* only the PF supports configuring tunnels */ | |
431 | if (hw->mac.type != fm10k_mac_pf) | |
432 | return; | |
433 | ||
f92e0e48 JK |
434 | port = list_first_entry_or_null(&interface->vxlan_port, |
435 | struct fm10k_udp_port, list); | |
76a540d4 AD |
436 | |
437 | /* restore tunnel configuration register */ | |
438 | fm10k_write_reg(hw, FM10K_TUNNEL_CFG, | |
f92e0e48 | 439 | (port ? ntohs(port->port) : 0) | |
76a540d4 | 440 | (ETH_P_TEB << FM10K_TUNNEL_CFG_NVGRE_SHIFT)); |
1ad78292 JK |
441 | |
442 | port = list_first_entry_or_null(&interface->geneve_port, | |
443 | struct fm10k_udp_port, list); | |
444 | ||
445 | /* restore Geneve tunnel configuration register */ | |
446 | fm10k_write_reg(hw, FM10K_TUNNEL_CFG_GENEVE, | |
447 | (port ? ntohs(port->port) : 0)); | |
76a540d4 AD |
448 | } |
449 | ||
f92e0e48 JK |
450 | static struct fm10k_udp_port * |
451 | fm10k_remove_tunnel_port(struct list_head *ports, | |
452 | struct udp_tunnel_info *ti) | |
453 | { | |
454 | struct fm10k_udp_port *port; | |
455 | ||
456 | list_for_each_entry(port, ports, list) { | |
457 | if ((port->port == ti->port) && | |
458 | (port->sa_family == ti->sa_family)) { | |
459 | list_del(&port->list); | |
460 | return port; | |
461 | } | |
462 | } | |
463 | ||
464 | return NULL; | |
465 | } | |
466 | ||
467 | static void fm10k_insert_tunnel_port(struct list_head *ports, | |
468 | struct udp_tunnel_info *ti) | |
469 | { | |
470 | struct fm10k_udp_port *port; | |
471 | ||
472 | /* remove existing port entry from the list so that the newest items | |
473 | * are always at the tail of the list. | |
474 | */ | |
475 | port = fm10k_remove_tunnel_port(ports, ti); | |
476 | if (!port) { | |
477 | port = kmalloc(sizeof(*port), GFP_ATOMIC); | |
478 | if (!port) | |
479 | return; | |
480 | port->port = ti->port; | |
481 | port->sa_family = ti->sa_family; | |
482 | } | |
483 | ||
484 | list_add_tail(&port->list, ports); | |
485 | } | |
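/* List-discipline sketch: adding ports A, B and then A again leaves
 * the list ordered B -> A, because the existing entry for A is
 * unlinked by fm10k_remove_tunnel_port() and re-added at the tail.
 * The head is therefore always the oldest live port, which is exactly
 * the entry fm10k_restore_udp_port_info() programs into the hardware.
 */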
486 | ||
76a540d4 | 487 | /** |
f92e0e48 | 488 | * fm10k_udp_tunnel_add |
76a540d4 | 489 | * @netdev: network interface device structure |
e5de25dc | 490 | * @ti: Tunnel endpoint information |
76a540d4 | 491 | * |
f92e0e48 | 492 | * This function is called when a new UDP tunnel port has been added. |
1ad78292 JK |
493 | * Due to hardware restrictions, only one port per type can be offloaded at |
494 | * once. | |
76a540d4 | 495 | **/ |
f92e0e48 | 496 | static void fm10k_udp_tunnel_add(struct net_device *dev, |
f174cdbe AD |
497 | struct udp_tunnel_info *ti) |
498 | { | |
76a540d4 | 499 | struct fm10k_intfc *interface = netdev_priv(dev); |
76a540d4 AD |
500 | |
501 | /* only the PF supports configuring tunnels */ | |
502 | if (interface->hw.mac.type != fm10k_mac_pf) | |
503 | return; | |
504 | ||
f92e0e48 JK |
505 | switch (ti->type) { |
506 | case UDP_TUNNEL_TYPE_VXLAN: | |
507 | fm10k_insert_tunnel_port(&interface->vxlan_port, ti); | |
508 | break; | |
1ad78292 JK |
509 | case UDP_TUNNEL_TYPE_GENEVE: |
510 | fm10k_insert_tunnel_port(&interface->geneve_port, ti); | |
511 | break; | |
f92e0e48 | 512 | default: |
76a540d4 | 513 | return; |
f92e0e48 | 514 | } |
76a540d4 | 515 | |
f92e0e48 | 516 | fm10k_restore_udp_port_info(interface); |
76a540d4 AD |
517 | } |
518 | ||
519 | /** | |
f92e0e48 | 520 | * fm10k_udp_tunnel_del |
76a540d4 | 521 | * @netdev: network interface device structure |
e5de25dc | 522 | * @ti: Tunnel endpoint information |
76a540d4 | 523 | * |
f92e0e48 JK |
524 | * This function is called when a UDP tunnel port is deleted. The freed |
525 | * port will be removed from the list, then we reprogram the offloaded port | |
526 | * based on the head of the list. | |
76a540d4 | 527 | **/ |
f92e0e48 | 528 | static void fm10k_udp_tunnel_del(struct net_device *dev, |
f174cdbe AD |
529 | struct udp_tunnel_info *ti) |
530 | { | |
76a540d4 | 531 | struct fm10k_intfc *interface = netdev_priv(dev); |
f92e0e48 | 532 | struct fm10k_udp_port *port = NULL; |
76a540d4 AD |
533 | |
534 | if (interface->hw.mac.type != fm10k_mac_pf) | |
535 | return; | |
536 | ||
f92e0e48 JK |
537 | switch (ti->type) { |
538 | case UDP_TUNNEL_TYPE_VXLAN: | |
539 | port = fm10k_remove_tunnel_port(&interface->vxlan_port, ti); | |
540 | break; | |
1ad78292 JK |
541 | case UDP_TUNNEL_TYPE_GENEVE: |
542 | port = fm10k_remove_tunnel_port(&interface->geneve_port, ti); | |
543 | break; | |
f92e0e48 JK |
544 | default: |
545 | return; | |
76a540d4 AD |
546 | } |
547 | ||
f92e0e48 JK |
548 | /* if we did remove a port we need to free its memory */ |
549 | kfree(port); | |
550 | ||
551 | fm10k_restore_udp_port_info(interface); | |
76a540d4 AD |
552 | } |
553 | ||
504c5eac AD |
554 | /** |
555 | * fm10k_open - Called when a network interface is made active | |
556 | * @netdev: network interface device structure | |
557 | * | |
558 | * Returns 0 on success, negative value on failure | |
559 | * | |
560 | * The open entry point is called when a network interface is made | |
561 | * active by the system (IFF_UP). At this point all resources needed | |
562 | * for transmit and receive operations are allocated, the interrupt | |
563 | * handler is registered with the OS, the watchdog timer is started, | |
564 | * and the stack is notified that the interface is ready. | |
565 | **/ | |
566 | int fm10k_open(struct net_device *netdev) | |
567 | { | |
568 | struct fm10k_intfc *interface = netdev_priv(netdev); | |
18283cad AD |
569 | int err; |
570 | ||
3abaae42 AD |
571 | /* allocate transmit descriptors */ |
572 | err = fm10k_setup_all_tx_resources(interface); | |
573 | if (err) | |
574 | goto err_setup_tx; | |
575 | ||
576 | /* allocate receive descriptors */ | |
577 | err = fm10k_setup_all_rx_resources(interface); | |
578 | if (err) | |
579 | goto err_setup_rx; | |
580 | ||
18283cad AD |
581 | /* allocate interrupt resources */ |
582 | err = fm10k_qv_request_irq(interface); | |
583 | if (err) | |
584 | goto err_req_irq; | |
504c5eac AD |
585 | |
586 | /* setup GLORT assignment for this port */ | |
587 | fm10k_request_glort_range(interface); | |
588 | ||
e27ef599 | 589 | /* Notify the stack of the actual queue counts */ |
c9d49940 AD |
590 | err = netif_set_real_num_tx_queues(netdev, |
591 | interface->num_tx_queues); | |
592 | if (err) | |
593 | goto err_set_queues; | |
e27ef599 AD |
594 | |
595 | err = netif_set_real_num_rx_queues(netdev, | |
596 | interface->num_rx_queues); | |
597 | if (err) | |
598 | goto err_set_queues; | |
599 | ||
f174cdbe | 600 | udp_tunnel_get_rx_info(netdev); |
0d722ec8 | 601 | |
504c5eac AD |
602 | fm10k_up(interface); |
603 | ||
604 | return 0; | |
18283cad | 605 | |
e27ef599 AD |
606 | err_set_queues: |
607 | fm10k_qv_free_irq(interface); | |
18283cad | 608 | err_req_irq: |
3abaae42 AD |
609 | fm10k_free_all_rx_resources(interface); |
610 | err_setup_rx: | |
611 | fm10k_free_all_tx_resources(interface); | |
612 | err_setup_tx: | |
18283cad | 613 | return err; |
504c5eac AD |
614 | } |
615 | ||
616 | /** | |
617 | * fm10k_close - Disables a network interface | |
618 | * @netdev: network interface device structure | |
619 | * | |
620 | * Returns 0, this is not allowed to fail | |
621 | * | |
622 | * The close entry point is called when an interface is de-activated | |
623 | * by the OS. The hardware is still under the drivers control, but | |
624 | * needs to be disabled. A global MAC reset is issued to stop the | |
625 | * hardware, and all transmit and receive resources are freed. | |
626 | **/ | |
627 | int fm10k_close(struct net_device *netdev) | |
628 | { | |
629 | struct fm10k_intfc *interface = netdev_priv(netdev); | |
630 | ||
631 | fm10k_down(interface); | |
632 | ||
18283cad AD |
633 | fm10k_qv_free_irq(interface); |
634 | ||
f92e0e48 | 635 | fm10k_free_udp_port_info(interface); |
76a540d4 | 636 | |
3abaae42 AD |
637 | fm10k_free_all_tx_resources(interface); |
638 | fm10k_free_all_rx_resources(interface); | |
639 | ||
504c5eac AD |
640 | return 0; |
641 | } | |
642 | ||
0e7b3644 AD |
643 | static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) |
644 | { | |
b101c962 | 645 | struct fm10k_intfc *interface = netdev_priv(dev); |
c9d49940 | 646 | unsigned int r_idx = skb->queue_mapping; |
b101c962 AD |
647 | int err; |
648 | ||
a4fcad65 | 649 | if ((skb->protocol == htons(ETH_P_8021Q)) && |
df8a39de | 650 | !skb_vlan_tag_present(skb)) { |
b101c962 AD |
651 | /* FM10K only supports hardware tagging, any tags in frame |
652 | * are considered 2nd level or "outer" tags | |
653 | */ | |
654 | struct vlan_hdr *vhdr; | |
655 | __be16 proto; | |
656 | ||
657 | /* make sure skb is not shared */ | |
658 | skb = skb_share_check(skb, GFP_ATOMIC); | |
659 | if (!skb) | |
660 | return NETDEV_TX_OK; | |
661 | ||
662 | /* make sure there is enough room to move the ethernet header */ | |
663 | if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN))) | |
664 | return NETDEV_TX_OK; | |
665 | ||
666 | /* verify the skb head is not shared */ | |
667 | err = skb_cow_head(skb, 0); | |
6f97532e | 668 | if (err) { |
669 | dev_kfree_skb(skb); | |
b101c962 | 670 | return NETDEV_TX_OK; |
6f97532e | 671 | } |
b101c962 | 672 | |
aa502b4a | 673 | /* locate VLAN header */ |
b101c962 AD |
674 | vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); |
675 | ||
676 | /* pull the 2 key pieces of data out of it */ | |
677 | __vlan_hwaccel_put_tag(skb, | |
678 | htons(ETH_P_8021Q), | |
679 | ntohs(vhdr->h_vlan_TCI)); | |
680 | proto = vhdr->h_vlan_encapsulated_proto; | |
681 | skb->protocol = (ntohs(proto) >= 1536) ? proto : | |
682 | htons(ETH_P_802_2); | |
683 | ||
684 | /* squash it by moving the ethernet addresses up 4 bytes */ | |
685 | memmove(skb->data + VLAN_HLEN, skb->data, 12); | |
686 | __skb_pull(skb, VLAN_HLEN); | |
687 | skb_reset_mac_header(skb); | |
688 | } | |
689 | ||
690 | /* The minimum packet size for a single buffer is 17B so pad the skb | |
691 | * in order to meet this minimum size requirement. | |
692 | */ | |
693 | if (unlikely(skb->len < 17)) { | |
694 | int pad_len = 17 - skb->len; | |
695 | ||
696 | if (skb_pad(skb, pad_len)) | |
697 | return NETDEV_TX_OK; | |
698 | __skb_put(skb, pad_len); | |
699 | } | |
700 | ||
701 | if (r_idx >= interface->num_tx_queues) | |
702 | r_idx %= interface->num_tx_queues; | |
703 | ||
704 | err = fm10k_xmit_frame_ring(skb, interface->tx_ring[r_idx]); | |
705 | ||
706 | return err; | |
0e7b3644 AD |
707 | } |
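/* Untagging sketch for the VLAN squash in fm10k_xmit_frame(). On entry
 * a tagged frame looks like:
 *
 *	[dst MAC (6)][src MAC (6)][0x8100 (2)][TCI (2)][inner proto (2)]...
 *
 * memmove(skb->data + VLAN_HLEN, skb->data, 12) slides the two MAC
 * addresses four bytes toward the payload and __skb_pull(skb, VLAN_HLEN)
 * drops the duplicate front, leaving:
 *
 *	[dst MAC (6)][src MAC (6)][inner proto (2)]...
 *
 * with the TCI carried out of band via __vlan_hwaccel_put_tag().
 */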
708 | ||
709 | static int fm10k_change_mtu(struct net_device *dev, int new_mtu) | |
710 | { | |
711 | if (new_mtu < 68 || new_mtu > FM10K_MAX_JUMBO_FRAME_SIZE) | |
712 | return -EINVAL; | |
713 | ||
714 | dev->mtu = new_mtu; | |
715 | ||
716 | return 0; | |
717 | } | |
718 | ||
b101c962 AD |
719 | /** |
720 | * fm10k_tx_timeout - Respond to a Tx Hang | |
721 | * @netdev: network interface device structure | |
722 | **/ | |
723 | static void fm10k_tx_timeout(struct net_device *netdev) | |
724 | { | |
725 | struct fm10k_intfc *interface = netdev_priv(netdev); | |
726 | bool real_tx_hang = false; | |
727 | int i; | |
728 | ||
729 | #define TX_TIMEO_LIMIT 16000 | |
730 | for (i = 0; i < interface->num_tx_queues; i++) { | |
731 | struct fm10k_ring *tx_ring = interface->tx_ring[i]; | |
732 | ||
733 | if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) | |
734 | real_tx_hang = true; | |
735 | } | |
736 | ||
737 | if (real_tx_hang) { | |
738 | fm10k_tx_timeout_reset(interface); | |
739 | } else { | |
740 | netif_info(interface, drv, netdev, | |
741 | "Fake Tx hang detected with timeout of %d seconds\n", | |
a4fcad65 | 742 | netdev->watchdog_timeo / HZ); |
b101c962 AD |
743 | |
744 | /* fake Tx hang - increase the kernel timeout */ | |
745 | if (netdev->watchdog_timeo < TX_TIMEO_LIMIT) | |
746 | netdev->watchdog_timeo *= 2; | |
747 | } | |
748 | } | |
749 | ||
8f5e20d4 AD |
750 | static int fm10k_uc_vlan_unsync(struct net_device *netdev, |
751 | const unsigned char *uc_addr) | |
752 | { | |
753 | struct fm10k_intfc *interface = netdev_priv(netdev); | |
754 | struct fm10k_hw *hw = &interface->hw; | |
755 | u16 glort = interface->glort; | |
756 | u16 vid = interface->vid; | |
757 | bool set = !!(vid / VLAN_N_VID); | |
758 | int err; | |
759 | ||
760 | /* drop any leading bits on the VLAN ID */ | |
761 | vid &= VLAN_N_VID - 1; | |
762 | ||
763 | err = hw->mac.ops.update_uc_addr(hw, glort, uc_addr, vid, set, 0); | |
764 | if (err) | |
765 | return err; | |
766 | ||
767 | /* return non-zero value as we are only doing a partial sync/unsync */ | |
768 | return 1; | |
769 | } | |
770 | ||
771 | static int fm10k_mc_vlan_unsync(struct net_device *netdev, | |
772 | const unsigned char *mc_addr) | |
773 | { | |
774 | struct fm10k_intfc *interface = netdev_priv(netdev); | |
775 | struct fm10k_hw *hw = &interface->hw; | |
776 | u16 glort = interface->glort; | |
777 | u16 vid = interface->vid; | |
778 | bool set = !!(vid / VLAN_N_VID); | |
779 | int err; | |
780 | ||
781 | /* drop any leading bits on the VLAN ID */ | |
782 | vid &= VLAN_N_VID - 1; | |
783 | ||
784 | err = hw->mac.ops.update_mc_addr(hw, glort, mc_addr, vid, set); | |
785 | if (err) | |
786 | return err; | |
787 | ||
788 | /* return non-zero value as we are only doing a partial sync/unsync */ | |
789 | return 1; | |
790 | } | |
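/* Encoding sketch for the two unsync helpers above: fm10k_update_vid()
 * (below) stashes the VID in interface->vid with VLAN_N_VID added when
 * the address should be set rather than cleared. For an add of VID 100,
 * interface->vid = 100 + 4096 = 4196, so the helpers recover
 * set = !!(4196 / 4096) = true and vid = 4196 & 4095 = 100.
 */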
791 | ||
792 | static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) | |
793 | { | |
794 | struct fm10k_intfc *interface = netdev_priv(netdev); | |
795 | struct fm10k_hw *hw = &interface->hw; | |
796 | s32 err; | |
e71c9318 | 797 | int i; |
8f5e20d4 AD |
798 | |
799 | /* updates do not apply to VLAN 0 */ | |
800 | if (!vid) | |
801 | return 0; | |
802 | ||
803 | if (vid >= VLAN_N_VID) | |
804 | return -EINVAL; | |
805 | ||
806 | /* Verify we have permission to add VLANs */ | |
807 | if (hw->mac.vlan_override) | |
808 | return -EACCES; | |
809 | ||
8f5e20d4 AD |
810 | /* update active_vlans bitmask */ |
811 | set_bit(vid, interface->active_vlans); | |
812 | if (!set) | |
813 | clear_bit(vid, interface->active_vlans); | |
814 | ||
aa502b4a | 815 | /* disable the default VLAN ID on ring if we have an active VLAN */ |
e71c9318 JK |
816 | for (i = 0; i < interface->num_rx_queues; i++) { |
817 | struct fm10k_ring *rx_ring = interface->rx_ring[i]; | |
818 | u16 rx_vid = rx_ring->vid & (VLAN_N_VID - 1); | |
819 | ||
820 | if (test_bit(rx_vid, interface->active_vlans)) | |
821 | rx_ring->vid |= FM10K_VLAN_CLEAR; | |
822 | else | |
823 | rx_ring->vid &= ~FM10K_VLAN_CLEAR; | |
824 | } | |
825 | ||
3d02b3df BA |
826 | /* Do not remove default VLAN ID related entries from VLAN and MAC |
827 | * tables | |
828 | */ | |
56f0569e | 829 | if (!set && vid == hw->mac.default_vid) |
661b2067 JK |
830 | return 0; |
831 | ||
3f0bdb2e JK |
832 | /* Do not throw an error if the interface is down. We will sync once |
833 | * we come up | |
834 | */ | |
835 | if (test_bit(__FM10K_DOWN, &interface->state)) | |
836 | return 0; | |
837 | ||
8f5e20d4 AD |
838 | fm10k_mbx_lock(interface); |
839 | ||
eca32047 | 840 | /* only need to update the VLAN if not in promiscuous mode */ |
8f5e20d4 AD |
841 | if (!(netdev->flags & IFF_PROMISC)) { |
842 | err = hw->mac.ops.update_vlan(hw, vid, 0, set); | |
843 | if (err) | |
13cb2dad | 844 | goto err_out; |
8f5e20d4 AD |
845 | } |
846 | ||
847 | /* update our base MAC address */ | |
848 | err = hw->mac.ops.update_uc_addr(hw, interface->glort, hw->mac.addr, | |
849 | vid, set, 0); | |
850 | if (err) | |
13cb2dad | 851 | goto err_out; |
8f5e20d4 | 852 | |
aa502b4a | 853 | /* set VLAN ID prior to syncing/unsyncing the VLAN */ |
8f5e20d4 AD |
854 | interface->vid = vid + (set ? VLAN_N_VID : 0); |
855 | ||
856 | /* Update the unicast and multicast address list to add/drop VLAN */ | |
857 | __dev_uc_unsync(netdev, fm10k_uc_vlan_unsync); | |
858 | __dev_mc_unsync(netdev, fm10k_mc_vlan_unsync); | |
859 | ||
13cb2dad | 860 | err_out: |
8f5e20d4 AD |
861 | fm10k_mbx_unlock(interface); |
862 | ||
13cb2dad | 863 | return err; |
8f5e20d4 AD |
864 | } |
865 | ||
866 | static int fm10k_vlan_rx_add_vid(struct net_device *netdev, | |
867 | __always_unused __be16 proto, u16 vid) | |
868 | { | |
869 | /* update VLAN and address table based on changes */ | |
870 | return fm10k_update_vid(netdev, vid, true); | |
871 | } | |
872 | ||
873 | static int fm10k_vlan_rx_kill_vid(struct net_device *netdev, | |
874 | __always_unused __be16 proto, u16 vid) | |
875 | { | |
876 | /* update VLAN and address table based on changes */ | |
877 | return fm10k_update_vid(netdev, vid, false); | |
878 | } | |
879 | ||
880 | static u16 fm10k_find_next_vlan(struct fm10k_intfc *interface, u16 vid) | |
881 | { | |
882 | struct fm10k_hw *hw = &interface->hw; | |
883 | u16 default_vid = hw->mac.default_vid; | |
884 | u16 vid_limit = vid < default_vid ? default_vid : VLAN_N_VID; | |
885 | ||
886 | vid = find_next_bit(interface->active_vlans, vid_limit, ++vid); | |
887 | ||
888 | return vid; | |
889 | } | |
890 | ||
891 | static void fm10k_clear_unused_vlans(struct fm10k_intfc *interface) | |
892 | { | |
893 | struct fm10k_hw *hw = &interface->hw; | |
894 | u32 vid, prev_vid; | |
895 | ||
896 | /* loop through and find any gaps in the table */ | |
897 | for (vid = 0, prev_vid = 0; | |
898 | prev_vid < VLAN_N_VID; | |
899 | prev_vid = vid + 1, vid = fm10k_find_next_vlan(interface, vid)) { | |
900 | if (prev_vid == vid) | |
901 | continue; | |
902 | ||
903 | /* send request to clear multiple bits at a time */ | |
904 | prev_vid += (vid - prev_vid - 1) << FM10K_VLAN_LENGTH_SHIFT; | |
905 | hw->mac.ops.update_vlan(hw, prev_vid, 0, false); | |
906 | } | |
907 | } | |
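/* Range-clear sketch: if VID 10 is the only active VLAN, the loop
 * reaches prev_vid = 1, vid = 10 and encodes the gap as start 1 with a
 * length of (10 - 1 - 1) = 8 packed above FM10K_VLAN_LENGTH_SHIFT, so
 * a single update_vlan() request clears VIDs 1 through 9 instead of
 * nine separate mailbox messages.
 */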
908 | ||
909 | static int __fm10k_uc_sync(struct net_device *dev, | |
910 | const unsigned char *addr, bool sync) | |
911 | { | |
912 | struct fm10k_intfc *interface = netdev_priv(dev); | |
913 | struct fm10k_hw *hw = &interface->hw; | |
914 | u16 vid, glort = interface->glort; | |
915 | s32 err; | |
916 | ||
917 | if (!is_valid_ether_addr(addr)) | |
918 | return -EADDRNOTAVAIL; | |
919 | ||
920 | /* update table with current entries */ | |
8998763a | 921 | for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1; |
8f5e20d4 AD |
922 | vid < VLAN_N_VID; |
923 | vid = fm10k_find_next_vlan(interface, vid)) { | |
924 | err = hw->mac.ops.update_uc_addr(hw, glort, addr, | |
925 | vid, sync, 0); | |
926 | if (err) | |
927 | return err; | |
928 | } | |
929 | ||
930 | return 0; | |
931 | } | |
932 | ||
933 | static int fm10k_uc_sync(struct net_device *dev, | |
934 | const unsigned char *addr) | |
935 | { | |
936 | return __fm10k_uc_sync(dev, addr, true); | |
937 | } | |
938 | ||
939 | static int fm10k_uc_unsync(struct net_device *dev, | |
940 | const unsigned char *addr) | |
941 | { | |
942 | return __fm10k_uc_sync(dev, addr, false); | |
943 | } | |
944 | ||
0e7b3644 AD |
945 | static int fm10k_set_mac(struct net_device *dev, void *p) |
946 | { | |
8f5e20d4 AD |
947 | struct fm10k_intfc *interface = netdev_priv(dev); |
948 | struct fm10k_hw *hw = &interface->hw; | |
0e7b3644 AD |
949 | struct sockaddr *addr = p; |
950 | s32 err = 0; | |
951 | ||
952 | if (!is_valid_ether_addr(addr->sa_data)) | |
953 | return -EADDRNOTAVAIL; | |
954 | ||
8f5e20d4 AD |
955 | if (dev->flags & IFF_UP) { |
956 | /* setting MAC address requires mailbox */ | |
957 | fm10k_mbx_lock(interface); | |
958 | ||
959 | err = fm10k_uc_sync(dev, addr->sa_data); | |
960 | if (!err) | |
961 | fm10k_uc_unsync(dev, hw->mac.addr); | |
962 | ||
963 | fm10k_mbx_unlock(interface); | |
964 | } | |
965 | ||
0e7b3644 AD |
966 | if (!err) { |
967 | ether_addr_copy(dev->dev_addr, addr->sa_data); | |
8f5e20d4 | 968 | ether_addr_copy(hw->mac.addr, addr->sa_data); |
0e7b3644 AD |
969 | dev->addr_assign_type &= ~NET_ADDR_RANDOM; |
970 | } | |
971 | ||
8f5e20d4 AD |
972 | /* if we had a mailbox error suggest trying again */ |
973 | return err ? -EAGAIN : 0; | |
974 | } | |
975 | ||
976 | static int __fm10k_mc_sync(struct net_device *dev, | |
977 | const unsigned char *addr, bool sync) | |
978 | { | |
979 | struct fm10k_intfc *interface = netdev_priv(dev); | |
980 | struct fm10k_hw *hw = &interface->hw; | |
981 | u16 vid, glort = interface->glort; | |
8f5e20d4 AD |
982 | |
983 | /* update table with current entries */ | |
8998763a | 984 | for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1; |
8f5e20d4 AD |
985 | vid < VLAN_N_VID; |
986 | vid = fm10k_find_next_vlan(interface, vid)) { | |
745136a8 | 987 | hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync); |
8f5e20d4 AD |
988 | } |
989 | ||
990 | return 0; | |
991 | } | |
992 | ||
993 | static int fm10k_mc_sync(struct net_device *dev, | |
994 | const unsigned char *addr) | |
995 | { | |
996 | return __fm10k_mc_sync(dev, addr, true); | |
997 | } | |
998 | ||
999 | static int fm10k_mc_unsync(struct net_device *dev, | |
1000 | const unsigned char *addr) | |
1001 | { | |
1002 | return __fm10k_mc_sync(dev, addr, false); | |
0e7b3644 AD |
1003 | } |
1004 | ||
1005 | static void fm10k_set_rx_mode(struct net_device *dev) | |
1006 | { | |
8f5e20d4 AD |
1007 | struct fm10k_intfc *interface = netdev_priv(dev); |
1008 | struct fm10k_hw *hw = &interface->hw; | |
1009 | int xcast_mode; | |
1010 | ||
1011 | /* no need to update the hardware if we are not running */ | |
1012 | if (!(dev->flags & IFF_UP)) | |
1013 | return; | |
1014 | ||
1015 | /* determine new mode based on flags */ | |
1016 | xcast_mode = (dev->flags & IFF_PROMISC) ? FM10K_XCAST_MODE_PROMISC : | |
1017 | (dev->flags & IFF_ALLMULTI) ? FM10K_XCAST_MODE_ALLMULTI : | |
1018 | (dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) ? | |
1019 | FM10K_XCAST_MODE_MULTI : FM10K_XCAST_MODE_NONE; | |
1020 | ||
1021 | fm10k_mbx_lock(interface); | |
1022 | ||
a7731cc8 | 1023 | /* update xcast mode first, but only if it changed */ |
8f5e20d4 AD |
1024 | if (interface->xcast_mode != xcast_mode) { |
1025 | /* update VLAN table */ | |
1026 | if (xcast_mode == FM10K_XCAST_MODE_PROMISC) | |
1027 | hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, 0, true); | |
1028 | if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC) | |
1029 | fm10k_clear_unused_vlans(interface); | |
1030 | ||
1031 | /* update xcast mode */ | |
1032 | hw->mac.ops.update_xcast_mode(hw, interface->glort, xcast_mode); | |
1033 | ||
1034 | /* record updated xcast mode state */ | |
1035 | interface->xcast_mode = xcast_mode; | |
1036 | } | |
1037 | ||
a7731cc8 | 1038 | /* synchronize all of the addresses */ |
8998763a NMK |
1039 | __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync); |
1040 | __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync); | |
a7731cc8 | 1041 | |
8f5e20d4 AD |
1042 | fm10k_mbx_unlock(interface); |
1043 | } | |
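/* Mode-selection sketch: the ternary chain above evaluates flags in
 * priority order, so IFF_PROMISC | IFF_ALLMULTI still resolves to
 * FM10K_XCAST_MODE_PROMISC, while a typical interface with only
 * IFF_BROADCAST | IFF_MULTICAST set lands on FM10K_XCAST_MODE_MULTI.
 */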
1044 | ||
1045 | void fm10k_restore_rx_state(struct fm10k_intfc *interface) | |
1046 | { | |
1047 | struct net_device *netdev = interface->netdev; | |
1048 | struct fm10k_hw *hw = &interface->hw; | |
1049 | int xcast_mode; | |
1050 | u16 vid, glort; | |
1051 | ||
1052 | /* record glort for this interface */ | |
1053 | glort = interface->glort; | |
1054 | ||
1055 | /* convert interface flags to xcast mode */ | |
1056 | if (netdev->flags & IFF_PROMISC) | |
1057 | xcast_mode = FM10K_XCAST_MODE_PROMISC; | |
1058 | else if (netdev->flags & IFF_ALLMULTI) | |
1059 | xcast_mode = FM10K_XCAST_MODE_ALLMULTI; | |
1060 | else if (netdev->flags & (IFF_BROADCAST | IFF_MULTICAST)) | |
1061 | xcast_mode = FM10K_XCAST_MODE_MULTI; | |
1062 | else | |
1063 | xcast_mode = FM10K_XCAST_MODE_NONE; | |
1064 | ||
1065 | fm10k_mbx_lock(interface); | |
1066 | ||
1067 | /* Enable logical port */ | |
1068 | hw->mac.ops.update_lport_state(hw, glort, interface->glort_count, true); | |
1069 | ||
1070 | /* update VLAN table */ | |
1071 | hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, 0, | |
1072 | xcast_mode == FM10K_XCAST_MODE_PROMISC); | |
1073 | ||
1074 | /* Add filter for VLAN 0 */ | |
1075 | hw->mac.ops.update_vlan(hw, 0, 0, true); | |
1076 | ||
1077 | /* update table with current entries */ | |
8998763a | 1078 | for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1; |
8f5e20d4 AD |
1079 | vid < VLAN_N_VID; |
1080 | vid = fm10k_find_next_vlan(interface, vid)) { | |
1081 | hw->mac.ops.update_vlan(hw, vid, 0, true); | |
1082 | hw->mac.ops.update_uc_addr(hw, glort, hw->mac.addr, | |
1083 | vid, true, 0); | |
1084 | } | |
1085 | ||
5c2d642f | 1086 | /* update xcast mode before synchronizing addresses */ |
a7731cc8 JK |
1087 | hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode); |
1088 | ||
eca32047 | 1089 | /* synchronize all of the addresses */ |
8998763a NMK |
1090 | __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync); |
1091 | __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync); | |
8f5e20d4 | 1092 | |
8f5e20d4 AD |
1093 | fm10k_mbx_unlock(interface); |
1094 | ||
1095 | /* record updated xcast mode state */ | |
1096 | interface->xcast_mode = xcast_mode; | |
76a540d4 AD |
1097 | |
1098 | /* Restore tunnel configuration */ | |
f92e0e48 | 1099 | fm10k_restore_udp_port_info(interface); |
8f5e20d4 AD |
1100 | } |
1101 | ||
1102 | void fm10k_reset_rx_state(struct fm10k_intfc *interface) | |
1103 | { | |
1104 | struct net_device *netdev = interface->netdev; | |
1105 | struct fm10k_hw *hw = &interface->hw; | |
1106 | ||
1107 | fm10k_mbx_lock(interface); | |
1108 | ||
1109 | /* clear the logical port state on lower device */ | |
1110 | hw->mac.ops.update_lport_state(hw, interface->glort, | |
1111 | interface->glort_count, false); | |
1112 | ||
1113 | fm10k_mbx_unlock(interface); | |
1114 | ||
1115 | /* reset flags to default state */ | |
1116 | interface->xcast_mode = FM10K_XCAST_MODE_NONE; | |
1117 | ||
1118 | /* clear the sync flag since the lport has been dropped */ | |
1119 | __dev_uc_unsync(netdev, NULL); | |
1120 | __dev_mc_unsync(netdev, NULL); | |
0e7b3644 AD |
1121 | } |
1122 | ||
e27ef599 AD |
1123 | /** |
1124 | * fm10k_get_stats64 - Get System Network Statistics | |
1125 | * @netdev: network interface device structure | |
1126 | * @stats: storage space for 64bit statistics | |
1127 | * | |
1128 | * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This | |
1129 | * function replaces fm10k_get_stats for kernels which support it. | |
1130 | */ | |
1131 | static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev, | |
1132 | struct rtnl_link_stats64 *stats) | |
1133 | { | |
1134 | struct fm10k_intfc *interface = netdev_priv(netdev); | |
1135 | struct fm10k_ring *ring; | |
1136 | unsigned int start, i; | |
1137 | u64 bytes, packets; | |
1138 | ||
1139 | rcu_read_lock(); | |
1140 | ||
1141 | for (i = 0; i < interface->num_rx_queues; i++) { | |
ce4dad2c | 1142 | ring = READ_ONCE(interface->rx_ring[i]); |
e27ef599 AD |
1143 | |
1144 | if (!ring) | |
1145 | continue; | |
1146 | ||
1147 | do { | |
1148 | start = u64_stats_fetch_begin_irq(&ring->syncp); | |
1149 | packets = ring->stats.packets; | |
1150 | bytes = ring->stats.bytes; | |
1151 | } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); | |
1152 | ||
1153 | stats->rx_packets += packets; | |
1154 | stats->rx_bytes += bytes; | |
1155 | } | |
1156 | ||
1157 | for (i = 0; i < interface->num_tx_queues; i++) { | |
ce4dad2c | 1158 | ring = READ_ONCE(interface->tx_ring[i]); |
e27ef599 AD |
1159 | |
1160 | if (!ring) | |
1161 | continue; | |
1162 | ||
1163 | do { | |
1164 | start = u64_stats_fetch_begin_irq(&ring->syncp); | |
1165 | packets = ring->stats.packets; | |
1166 | bytes = ring->stats.bytes; | |
1167 | } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); | |
1168 | ||
1169 | stats->tx_packets += packets; | |
1170 | stats->tx_bytes += bytes; | |
1171 | } | |
1172 | ||
1173 | rcu_read_unlock(); | |
1174 | ||
1175 | /* following stats updated by fm10k_service_task() */ | |
1176 | stats->rx_missed_errors = netdev->stats.rx_missed_errors; | |
1177 | ||
1178 | return stats; | |
1179 | } | |
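/* Consistency sketch: on 32-bit kernels the 64-bit ring counters cannot
 * be read atomically, so each ring is sampled inside a
 * u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair; if the
 * writer bumped the sequence count mid-read, the do/while loop simply
 * re-samples that ring. On 64-bit kernels the sequence counter is
 * compiled out and the loop runs exactly once.
 */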
1180 | ||
aa3ac822 AD |
1181 | int fm10k_setup_tc(struct net_device *dev, u8 tc) |
1182 | { | |
1183 | struct fm10k_intfc *interface = netdev_priv(dev); | |
09f8a82b | 1184 | int err; |
aa3ac822 AD |
1185 | |
1186 | /* Currently only the PF supports priority classes */ | |
1187 | if (tc && (interface->hw.mac.type != fm10k_mac_pf)) | |
1188 | return -EINVAL; | |
1189 | ||
1190 | /* Hardware supports up to 8 traffic classes */ | |
1191 | if (tc > 8) | |
1192 | return -EINVAL; | |
1193 | ||
1194 | /* Hardware has to reinitialize queues to match packet | |
1195 | * buffer alignment. Unfortunately, the hardware is not | |
1196 | * flexible enough to do this dynamically. | |
1197 | */ | |
1198 | if (netif_running(dev)) | |
1199 | fm10k_close(dev); | |
1200 | ||
1201 | fm10k_mbx_free_irq(interface); | |
1202 | ||
1203 | fm10k_clear_queueing_scheme(interface); | |
1204 | ||
1205 | /* we expect the prio_tc map to be repopulated later */ | |
1206 | netdev_reset_tc(dev); | |
1207 | netdev_set_num_tc(dev, tc); | |
1208 | ||
09f8a82b AD |
1209 | err = fm10k_init_queueing_scheme(interface); |
1210 | if (err) | |
1211 | goto err_queueing_scheme; | |
aa3ac822 | 1212 | |
09f8a82b AD |
1213 | err = fm10k_mbx_request_irq(interface); |
1214 | if (err) | |
1215 | goto err_mbx_irq; | |
aa3ac822 | 1216 | |
09f8a82b AD |
1217 | err = netif_running(dev) ? fm10k_open(dev) : 0; |
1218 | if (err) | |
1219 | goto err_open; | |
aa3ac822 AD |
1220 | |
1221 | /* flag to indicate SWPRI has yet to be updated */ | |
1222 | interface->flags |= FM10K_FLAG_SWPRI_CONFIG; | |
1223 | ||
1224 | return 0; | |
09f8a82b AD |
1225 | err_open: |
1226 | fm10k_mbx_free_irq(interface); | |
1227 | err_mbx_irq: | |
1228 | fm10k_clear_queueing_scheme(interface); | |
1229 | err_queueing_scheme: | |
1230 | netif_device_detach(dev); | |
1231 | ||
1232 | return err; | |
aa3ac822 AD |
1233 | } |
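/* Usage sketch (device name hypothetical, mqprio options abbreviated):
 * a userspace request such as
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 4 map ... queues ... hw 1
 *
 * reaches this function through __fm10k_setup_tc() below. Note the
 * full teardown/rebuild: the queueing scheme and mailbox IRQ are
 * released before netdev_set_num_tc() and re-acquired afterwards,
 * because the hardware cannot repartition its packet buffers on the
 * fly.
 */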
1234 | ||
16e5cc64 JF |
1235 | static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto, |
1236 | struct tc_to_netdev *tc) | |
e4c6734e | 1237 | { |
5eb4dce3 | 1238 | if (tc->type != TC_SETUP_MQPRIO) |
e4c6734e JF |
1239 | return -EINVAL; |
1240 | ||
16e5cc64 | 1241 | return fm10k_setup_tc(dev, tc->tc); |
e4c6734e JF |
1242 | } |
1243 | ||
5cd5e2e9 AD |
1244 | static void fm10k_assign_l2_accel(struct fm10k_intfc *interface, |
1245 | struct fm10k_l2_accel *l2_accel) | |
1246 | { | |
1247 | struct fm10k_ring *ring; | |
1248 | int i; | |
1249 | ||
1250 | for (i = 0; i < interface->num_rx_queues; i++) { | |
1251 | ring = interface->rx_ring[i]; | |
1252 | rcu_assign_pointer(ring->l2_accel, l2_accel); | |
1253 | } | |
1254 | ||
1255 | interface->l2_accel = l2_accel; | |
1256 | } | |
1257 | ||
1258 | static void *fm10k_dfwd_add_station(struct net_device *dev, | |
1259 | struct net_device *sdev) | |
1260 | { | |
1261 | struct fm10k_intfc *interface = netdev_priv(dev); | |
1262 | struct fm10k_l2_accel *l2_accel = interface->l2_accel; | |
1263 | struct fm10k_l2_accel *old_l2_accel = NULL; | |
1264 | struct fm10k_dglort_cfg dglort = { 0 }; | |
1265 | struct fm10k_hw *hw = &interface->hw; | |
1266 | int size = 0, i; | |
1267 | u16 glort; | |
1268 | ||
1269 | /* allocate l2 accel structure if it is not available */ | |
1270 | if (!l2_accel) { | |
1271 | /* verify there is enough free GLORTs to support l2_accel */ | |
1272 | if (interface->glort_count < 7) | |
1273 | return ERR_PTR(-EBUSY); | |
1274 | ||
1275 | size = offsetof(struct fm10k_l2_accel, macvlan[7]); | |
1276 | l2_accel = kzalloc(size, GFP_KERNEL); | |
1277 | if (!l2_accel) | |
1278 | return ERR_PTR(-ENOMEM); | |
1279 | ||
1280 | l2_accel->size = 7; | |
1281 | l2_accel->dglort = interface->glort; | |
1282 | ||
1283 | /* update pointers */ | |
1284 | fm10k_assign_l2_accel(interface, l2_accel); | |
1285 | /* do not expand if we are at our limit */ | |
1286 | } else if ((l2_accel->count == FM10K_MAX_STATIONS) || | |
1287 | (l2_accel->count == (interface->glort_count - 1))) { | |
1288 | return ERR_PTR(-EBUSY); | |
1289 | /* expand if we have hit the size limit */ | |
1290 | } else if (l2_accel->count == l2_accel->size) { | |
1291 | old_l2_accel = l2_accel; | |
1292 | size = offsetof(struct fm10k_l2_accel, | |
1293 | macvlan[(l2_accel->size * 2) + 1]); | |
1294 | l2_accel = kzalloc(size, GFP_KERNEL); | |
1295 | if (!l2_accel) | |
1296 | return ERR_PTR(-ENOMEM); | |
1297 | ||
1298 | memcpy(l2_accel, old_l2_accel, | |
1299 | offsetof(struct fm10k_l2_accel, | |
1300 | macvlan[old_l2_accel->size])); | |
1301 | ||
1302 | l2_accel->size = (old_l2_accel->size * 2) + 1; | |
1303 | ||
1304 | /* update pointers */ | |
1305 | fm10k_assign_l2_accel(interface, l2_accel); | |
1306 | kfree_rcu(old_l2_accel, rcu); | |
1307 | } | |
1308 | ||
1309 | /* add macvlan to accel table, and record GLORT for position */ | |
1310 | for (i = 0; i < l2_accel->size; i++) { | |
1311 | if (!l2_accel->macvlan[i]) | |
1312 | break; | |
1313 | } | |
1314 | ||
1315 | /* record station */ | |
1316 | l2_accel->macvlan[i] = sdev; | |
1317 | l2_accel->count++; | |
1318 | ||
1319 | /* configure default DGLORT mapping for RSS/DCB */ | |
1320 | dglort.idx = fm10k_dglort_pf_rss; | |
1321 | dglort.inner_rss = 1; | |
1322 | dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask); | |
1323 | dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask); | |
1324 | dglort.glort = interface->glort; | |
1325 | dglort.shared_l = fls(l2_accel->size); | |
1326 | hw->mac.ops.configure_dglort_map(hw, &dglort); | |
1327 | ||
1328 | /* Add rules for this specific dglort to the switch */ | |
1329 | fm10k_mbx_lock(interface); | |
1330 | ||
1331 | glort = l2_accel->dglort + 1 + i; | |
1332 | hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_MULTI); | |
1333 | hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, 0, true, 0); | |
1334 | ||
1335 | fm10k_mbx_unlock(interface); | |
1336 | ||
1337 | return sdev; | |
1338 | } | |
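/* Growth sketch: the station table starts with 7 slots (stations map
 * to GLORTs dglort + 1 + i, so the base GLORT stays with the PF) and,
 * once full, is reallocated to (size * 2) + 1 entries, i.e.
 * 7 -> 15 -> 31, with the old table copied over and released via
 * kfree_rcu() so RCU readers never observe a torn table.
 */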
1339 | ||
1340 | static void fm10k_dfwd_del_station(struct net_device *dev, void *priv) | |
1341 | { | |
1342 | struct fm10k_intfc *interface = netdev_priv(dev); | |
ce4dad2c | 1343 | struct fm10k_l2_accel *l2_accel = READ_ONCE(interface->l2_accel); |
5cd5e2e9 AD |
1344 | struct fm10k_dglort_cfg dglort = { 0 }; |
1345 | struct fm10k_hw *hw = &interface->hw; | |
1346 | struct net_device *sdev = priv; | |
1347 | int i; | |
1348 | u16 glort; | |
1349 | ||
1350 | if (!l2_accel) | |
1351 | return; | |
1352 | ||
1353 | /* search table for matching interface */ | |
1354 | for (i = 0; i < l2_accel->size; i++) { | |
1355 | if (l2_accel->macvlan[i] == sdev) | |
1356 | break; | |
1357 | } | |
1358 | ||
1359 | /* exit if macvlan not found */ | |
1360 | if (i == l2_accel->size) | |
1361 | return; | |
1362 | ||
1363 | /* Remove any rules specific to this dglort */ | |
1364 | fm10k_mbx_lock(interface); | |
1365 | ||
1366 | glort = l2_accel->dglort + 1 + i; | |
1367 | hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_NONE); | |
1368 | hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, 0, false, 0); | |
1369 | ||
1370 | fm10k_mbx_unlock(interface); | |
1371 | ||
1372 | /* record removal */ | |
1373 | l2_accel->macvlan[i] = NULL; | |
1374 | l2_accel->count--; | |
1375 | ||
1376 | /* configure default DGLORT mapping for RSS/DCB */ | |
1377 | dglort.idx = fm10k_dglort_pf_rss; | |
1378 | dglort.inner_rss = 1; | |
1379 | dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask); | |
1380 | dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask); | |
1381 | dglort.glort = interface->glort; | |
f1f3322e | 1382 | dglort.shared_l = fls(l2_accel->size); |
5cd5e2e9 AD |
1383 | hw->mac.ops.configure_dglort_map(hw, &dglort); |
1384 | ||
1385 | /* If table is empty remove it */ | |
1386 | if (l2_accel->count == 0) { | |
1387 | fm10k_assign_l2_accel(interface, NULL); | |
1388 | kfree_rcu(l2_accel, rcu); | |
1389 | } | |
1390 | } | |
1391 | ||
5bf33dc6 MV |
1392 | static netdev_features_t fm10k_features_check(struct sk_buff *skb, |
1393 | struct net_device *dev, | |
1394 | netdev_features_t features) | |
1395 | { | |
1396 | if (!skb->encapsulation || fm10k_tx_encap_offload(skb)) | |
1397 | return features; | |
1398 | ||
a188222b | 1399 | return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); |
5bf33dc6 MV |
1400 | } |
1401 | ||
0e7b3644 | 1402 | static const struct net_device_ops fm10k_netdev_ops = { |
504c5eac AD |
1403 | .ndo_open = fm10k_open, |
1404 | .ndo_stop = fm10k_close, | |
0e7b3644 AD |
1405 | .ndo_validate_addr = eth_validate_addr, |
1406 | .ndo_start_xmit = fm10k_xmit_frame, | |
1407 | .ndo_set_mac_address = fm10k_set_mac, | |
1408 | .ndo_change_mtu = fm10k_change_mtu, | |
b101c962 | 1409 | .ndo_tx_timeout = fm10k_tx_timeout, |
8f5e20d4 AD |
1410 | .ndo_vlan_rx_add_vid = fm10k_vlan_rx_add_vid, |
1411 | .ndo_vlan_rx_kill_vid = fm10k_vlan_rx_kill_vid, | |
0e7b3644 | 1412 | .ndo_set_rx_mode = fm10k_set_rx_mode, |
e27ef599 | 1413 | .ndo_get_stats64 = fm10k_get_stats64, |
e4c6734e | 1414 | .ndo_setup_tc = __fm10k_setup_tc, |
883a9ccb AD |
1415 | .ndo_set_vf_mac = fm10k_ndo_set_vf_mac, |
1416 | .ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan, | |
1417 | .ndo_set_vf_rate = fm10k_ndo_set_vf_bw, | |
1418 | .ndo_get_vf_config = fm10k_ndo_get_vf_config, | |
f92e0e48 JK |
1419 | .ndo_udp_tunnel_add = fm10k_udp_tunnel_add, |
1420 | .ndo_udp_tunnel_del = fm10k_udp_tunnel_del, | |
5cd5e2e9 AD |
1421 | .ndo_dfwd_add_station = fm10k_dfwd_add_station, |
1422 | .ndo_dfwd_del_station = fm10k_dfwd_del_station, | |
8b4a98c7 JK |
1423 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1424 | .ndo_poll_controller = fm10k_netpoll, | |
1425 | #endif | |
5bf33dc6 | 1426 | .ndo_features_check = fm10k_features_check, |
0e7b3644 AD |
1427 | }; |
1428 | ||
1429 | #define DEFAULT_DEBUG_LEVEL_SHIFT 3 | |
1430 | ||
e0244903 | 1431 | struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info) |
0e7b3644 | 1432 | { |
e0244903 | 1433 | netdev_features_t hw_features; |
0e7b3644 AD |
1434 | struct fm10k_intfc *interface; |
1435 | struct net_device *dev; | |
1436 | ||
e27ef599 | 1437 | dev = alloc_etherdev_mq(sizeof(struct fm10k_intfc), MAX_QUEUES); |
0e7b3644 AD |
1438 | if (!dev) |
1439 | return NULL; | |
1440 | ||
1441 | /* set net device and ethtool ops */ | |
1442 | dev->netdev_ops = &fm10k_netdev_ops; | |
82dd0f7e | 1443 | fm10k_set_ethtool_ops(dev); |
0e7b3644 AD |
1444 | |
1445 | /* configure default debug level */ | |
1446 | interface = netdev_priv(dev); | |
fcdb0a99 | 1447 | interface->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; |
0e7b3644 AD |
1448 | |
1449 | /* configure default features */ | |
76a540d4 AD |
1450 | dev->features |= NETIF_F_IP_CSUM | |
1451 | NETIF_F_IPV6_CSUM | | |
1452 | NETIF_F_SG | | |
1453 | NETIF_F_TSO | | |
1454 | NETIF_F_TSO6 | | |
1455 | NETIF_F_TSO_ECN | | |
76a540d4 AD |
1456 | NETIF_F_RXHASH | |
1457 | NETIF_F_RXCSUM; | |
0e7b3644 | 1458 | |
e0244903 JK |
1459 | /* Only the PF can support VXLAN and NVGRE tunnel offloads */ |
1460 | if (info->mac == fm10k_mac_pf) { | |
1461 | dev->hw_enc_features = NETIF_F_IP_CSUM | | |
1462 | NETIF_F_TSO | | |
1463 | NETIF_F_TSO6 | | |
1464 | NETIF_F_TSO_ECN | | |
1465 | NETIF_F_GSO_UDP_TUNNEL | | |
1466 | NETIF_F_IPV6_CSUM | | |
1467 | NETIF_F_SG; | |
1468 | ||
1469 | dev->features |= NETIF_F_GSO_UDP_TUNNEL; | |
1470 | } | |
1471 | ||
0e7b3644 | 1472 | /* all features defined to this point should be changeable */ |
e0244903 | 1473 | hw_features = dev->features; |
0e7b3644 | 1474 | |
5cd5e2e9 | 1475 | /* allow user to enable L2 forwarding acceleration */ |
e0244903 | 1476 | hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; |
5cd5e2e9 | 1477 | |
0e7b3644 AD |
1478 | /* configure VLAN features */ |
1479 | dev->vlan_features |= dev->features; | |
1480 | ||
8f5e20d4 AD |
1481 | /* we want to leave these both on as we cannot disable VLAN tag |
1482 | * insertion or stripping on the hardware since it is contained | |
1483 | * in the FTAG and not in the frame itself. | |
1484 | */ | |
1485 | dev->features |= NETIF_F_HW_VLAN_CTAG_TX | | |
1486 | NETIF_F_HW_VLAN_CTAG_RX | | |
1487 | NETIF_F_HW_VLAN_CTAG_FILTER; | |
1488 | ||
1489 | dev->priv_flags |= IFF_UNICAST_FLT; | |
1490 | ||
e0244903 JK |
1491 | dev->hw_features |= hw_features; |
1492 | ||
0e7b3644 AD |
1493 | return dev; |
1494 | } |