/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#ifdef CONFIG_I40E_VXLAN
#include <net/vxlan.h>
#endif

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
	"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 3
#define DRV_VERSION_BUILD 6
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	__stringify(DRV_VERSION_MINOR) "." \
	__stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
/* forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
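
/*
 * Usage note (illustrative, not part of the driver): the "debug" module
 * parameter above can be given at load time, e.g. "modprobe i40e debug=16";
 * the default of -1 leaves the driver's built-in message level in place.
 */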

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}
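
/*
 * Minimal usage sketch for the DMA helpers above (illustrative only and
 * compiled out; the function name is hypothetical, and the shared code
 * normally reaches these through its own wrapper macros).
 */
#if 0
static int i40e_dma_mem_example(struct i40e_hw *hw)
{
	struct i40e_dma_mem ring_mem;
	int err;

	/* request a zeroed 4 KB buffer, aligned to 4 KB */
	err = i40e_allocate_dma_mem_d(hw, &ring_mem, 4096, 4096);
	if (err)
		return err;	/* -ENOMEM */

	/* ring_mem.va is the kernel virtual address,
	 * ring_mem.pa the matching bus address for the hardware
	 */
	return i40e_free_dma_mem_d(hw, &ring_mem);
}
#endif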

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		} else {
			/* not enough, so skip over it and continue looking */
			i += j;
		}
	}

	return ret;
}

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}
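
/*
 * Worked example (illustrative): with a fresh 8-entry pile and
 * search_hint = 0, i40e_get_lump(pf, pile, 3, 0x1) returns base index 0,
 * marks entries 0..2 as (0x1 | I40E_PILE_VALID_BIT) and moves search_hint
 * to 3.  A later i40e_put_lump(pile, 0, 0x1) clears those three entries,
 * returns 3, and pulls search_hint back to 0 so the freed range is
 * considered again by the next search.
 */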

/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
static void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		schedule_work(&pf->service_task);
}
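
/*
 * Illustrative note: the test_and_set_bit() above is what makes this
 * helper safe to call from multiple contexts.  Only the caller that flips
 * __I40E_SERVICE_SCHED from 0 to 1 queues the work; concurrent callers see
 * the bit already set and return without double-scheduling.  The service
 * task is expected to clear the bit when it runs.
 */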

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	pf->tx_timeout_count++;

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
		pf->tx_timeout_recovery_level = 1;
	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d\n",
		    pf->tx_timeout_recovery_level);

	switch (pf->tx_timeout_recovery_level) {
	case 0:
		/* disable and re-enable queues for the VSI */
		if (in_interrupt()) {
			set_bit(__I40E_REINIT_REQUESTED, &pf->state);
			set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
		} else {
			i40e_vsi_reinit_locked(vsi);
		}
		break;
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		set_bit(__I40E_DOWN_REQUESTED, &pf->state);
		set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
		break;
	}
	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}
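
/*
 * Illustrative notes on the loop above (not driver API):
 *
 * - "rx_ring = &tx_ring[1]" relies on this driver allocating each Tx ring
 *   and its paired Rx ring as one contiguous two-element block, so the Rx
 *   ring sits immediately after the Tx ring in memory.
 *
 * - The begin/retry pair is the standard lockless-reader pattern for
 *   64-bit stats that may be torn on 32-bit kernels:
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&ring->syncp);
 *		packets = ring->stats.packets;	// snapshot the counters
 *		bytes = ring->stats.bytes;
 *	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 */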

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
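
/*
 * Worked rollover example for the 48-bit update above (illustrative):
 * if the saved offset is 0xFFFFFFFFF000 and the counter has wrapped to
 * 0x100, then new_data < *offset, so
 *
 *	*stat = (0x100ULL + BIT_ULL(48)) - 0xFFFFFFFFF000ULL = 0x1100,
 *
 * i.e. 0x1100 units counted since the offset was taken, and the final
 * mask keeps the result within 48 bits.
 */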

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;	/* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;	/* device's eth stats */
	int idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	veb->stat_offsets_loaded = true;
}

#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;	/* device's FCoE stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
/**
 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
 **/
static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u64 xoff = 0;
	u16 i, v;

	if ((hw->fc.current_mode != I40E_FC_FULL) &&
	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
		return;

	xoff = nsd->link_xoff_rx;
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);

	/* No new LFC xoff rx */
	if (!(nsd->link_xoff_rx - xoff))
		return;

	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi || !vsi->tx_rings[0])
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];

			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
		}
	}
}

/**
 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
 **/
static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_hw *hw = &pf->hw;
	u16 i, v;
	u8 tc;

	dcb_cfg = &hw->local_dcbx_config;

	/* Collect Link XOFF stats when PFC is disabled */
	if (!dcb_cfg->pfc.pfcenable) {
		i40e_update_link_xoff_rx(pf);
		return;
	}

	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		u64 prio_xoff = nsd->priority_xoff_rx[i];

		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);

		/* No new PFC xoff rx */
		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
			continue;
		/* Get the TC for given priority */
		tc = dcb_cfg->etscfg.prioritytable[i];
		xoff[tc] = true;
	}

	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi || !vsi->tx_rings[0])
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];

			tc = ring->dcb_tc;
			if (xoff[tc])
				clear_bit(__I40E_HANG_CHECK_ARMED,
					  &ring->state);
		}
	}
}
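
/*
 * Illustrative example of the mapping above: if DCB assigns user priority 3
 * to traffic class 1 (dcb_cfg->etscfg.prioritytable[3] == 1) and new PFC
 * XOFF frames arrived for priority 3, then xoff[1] is set and every Tx ring
 * with ring->dcb_tc == 1 gets its __I40E_HANG_CHECK_ARMED bit cleared, so a
 * paused traffic class is not misreported as a Tx hang.
 */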

/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;	/* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;	/* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}

/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_update_prio_xoff_rx(pf);	/* handles I40E_GLPRT_LXOFFRXC */
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_sb_match, &nsd->fd_sb_match);
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
#ifdef I40E_FCOE
	i40e_update_fcoe_stats(vsi);
#endif
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						u8 *macaddr, s16 vlan,
						bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
				      bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	/* Only -1 (I40E_VLAN_ANY) on every filter denotes "not in VLAN
	 * mode", so we have to walk the whole list to be sure
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (f->vlan >= 0)
			return true;
	}

	return false;
}
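
/*
 * Illustrative example: filters added with vlan == I40E_VLAN_ANY (-1)
 * match any VLAN, so a list containing only such entries means the VSI
 * is not in VLAN mode.  As soon as one filter carries vlan >= 0, e.g.
 *
 *	i40e_add_filter(vsi, mac, 100, false, true);
 *
 * the check above starts returning true.
 */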

/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a VF
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (vsi->info.pvid)
			f->vlan = le16_to_cpu(vsi->info.pvid);
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Some older firmware configurations set up a default promiscuous VLAN
 * filter that needs to be removed.
 **/
static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;
	i40e_status ret;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	if (ret)
		return -ENOENT;

	return 0;
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->changed = true;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &vsi->mac_filter_list);
	}

	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		f->counter++;
	}

	/* changed tells sync_filters_subtask to
	 * push the filter down to the firmware
	 */
	if (f->changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}

/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by VF or netdev */
		int min_f = 0;

		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		f->changed = true;
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}
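
/*
 * Minimal lifecycle sketch for the filter helpers above (illustrative
 * only and compiled out; the function name and "mac" address are
 * hypothetical).
 */
#if 0
static void i40e_filter_example(struct i40e_vsi *vsi, u8 *mac)
{
	struct i40e_mac_filter *f;

	/* first add for the netdev: counter goes to 1 and changed is set,
	 * so the next i40e_sync_vsi_filters() pushes the add to firmware
	 */
	f = i40e_add_filter(vsi, mac, I40E_VLAN_ANY, false, true);

	/* matching delete: counter drops back to 0 and changed is set
	 * again, so the next sync removes it from the firmware list
	 */
	if (f)
		i40e_del_filter(vsi, mac, I40E_VLAN_ANY, false, true);
}
#endif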

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;
	struct i40e_mac_filter *f;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret) {
			netdev_info(netdev,
				    "Addr change for Main VSI failed: %d\n",
				    ret);
			return -EADDRNOTAVAIL;
		}
	}

	if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
		struct i40e_aqc_remove_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, netdev->dev_addr);
		element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				false, false);
	}

	if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
		struct i40e_aqc_add_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, hw->mac.addr);
		element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
		i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
				    false, false);
		if (f)
			f->is_laa = true;
	}

	i40e_sync_vsi_filters(vsi);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	return 0;
}

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
			      struct i40e_vsi_context *ctxt,
			      u8 enabled_tc,
			      bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
#endif
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT_ULL(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in the non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	/* In the MFP case we can have fewer MSI-X vectors available,
	 * so we need to lower the queue count that gets used.
	 */
	qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT_ULL(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->rss_size, num_tc_qps);
				break;
#ifdef I40E_FCOE
			case I40E_VSI_FCOE:
				qcount = num_tc_qps;
				break;
#endif
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
				cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
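
/*
 * Worked example for the qmap encoding above (illustrative): a TC with
 * offset 8 and qcount 3 needs the next power of two >= 3, so the loop
 * leaves pow = 2 (4 queues in the map) and
 *
 *	qmap = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *	       (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 *
 * i.e. the hardware sees "4 queues starting at queue 8" for that TC.
 */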

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
#ifdef I40E_FCOE
void i40e_set_rx_mode(struct net_device *netdev)
#else
static void i40e_set_rx_mode(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		bool found = false;

		if (!f->is_netdev)
			continue;

		if (is_multicast_ether_addr(f->macaddr)) {
			netdev_for_each_mc_addr(mca, netdev) {
				if (ether_addr_equal(mca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		} else {
			netdev_for_each_uc_addr(uca, netdev) {
				if (ether_addr_equal(uca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}

			for_each_dev_addr(netdev, ha) {
				if (ether_addr_equal(ha->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		}
		if (!found)
			i40e_del_filter(
			   vsi, f->macaddr, I40E_VLAN_ANY, false, true);
	}

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	bool promisc_forced_on = false;
	bool add_happened = false;
	int filter_list_len = 0;
	u32 changed_flags = 0;
	i40e_status ret = 0;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	int aq_err = 0;
	u16 cmd_flags;

	/* element-array pointers; the arrays themselves are kcalloc'd below */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		filter_list_len = pf->hw.aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kcalloc(filter_list_len,
			    sizeof(struct i40e_aqc_remove_macvlan_element_data),
			    GFP_KERNEL);
		if (!del_list)
			return -ENOMEM;

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter != 0)
				continue;
			f->changed = false;
			cmd_flags = 0;

			/* add to delete list */
			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
			del_list[num_del].vlan_tag =
				cpu_to_le16((u16)(f->vlan ==
					    I40E_VLAN_ANY ? 0 : f->vlan));

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* unlink from filter list */
			list_del(&f->list);
			kfree(f);

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				ret = i40e_aq_remove_macvlan(&pf->hw,
					    vsi->seid, del_list, num_del,
					    NULL);
				aq_err = pf->hw.aq.asq_last_status;
				num_del = 0;
				memset(del_list, 0, sizeof(*del_list));

				if (ret && aq_err != I40E_AQ_RC_ENOENT)
					dev_info(&pf->pdev->dev,
						 "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
						 i40e_stat_str(&pf->hw, ret),
						 i40e_aq_str(&pf->hw, aq_err));
			}
		}
		if (num_del) {
			ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
						     del_list, num_del, NULL);
			aq_err = pf->hw.aq.asq_last_status;
			num_del = 0;

			if (ret && aq_err != I40E_AQ_RC_ENOENT)
				dev_info(&pf->pdev->dev,
					 "ignoring delete macvlan error, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw, aq_err));
		}

		kfree(del_list);
		del_list = NULL;

		/* do all the adds now */
1824 filter_list_len = pf->hw.aq.asq_buf_size /
1825 			   sizeof(struct i40e_aqc_add_macvlan_element_data);
1826 add_list = kcalloc(filter_list_len,
1827 sizeof(struct i40e_aqc_add_macvlan_element_data),
1828 GFP_KERNEL);
		if (!add_list) {
			clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
			return -ENOMEM;
		}
1831
1832 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1833 if (!f->changed)
1834 continue;
1835
1836 if (f->counter == 0)
1837 continue;
1838 f->changed = false;
1839 add_happened = true;
1840 cmd_flags = 0;
1841
1842 /* add to add array */
1843 ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
1844 add_list[num_add].vlan_tag =
1845 cpu_to_le16(
1846 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
1847 add_list[num_add].queue_number = 0;
1848
1849 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1850 add_list[num_add].flags = cpu_to_le16(cmd_flags);
1851 num_add++;
1852
1853 /* flush a full buffer */
1854 if (num_add == filter_list_len) {
1855 ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1856 add_list, num_add,
1857 NULL);
1858 aq_err = pf->hw.aq.asq_last_status;
1859 num_add = 0;
1860
1861 if (ret)
1862 break;
1863 memset(add_list, 0, sizeof(*add_list));
1864 }
1865 }
1866 if (num_add) {
1867 ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1868 add_list, num_add, NULL);
1869 aq_err = pf->hw.aq.asq_last_status;
1870 num_add = 0;
1871 }
1872 kfree(add_list);
1873 add_list = NULL;
1874
1875 if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) {
1876 dev_info(&pf->pdev->dev,
1877 "add filter failed, err %s aq_err %s\n",
1878 i40e_stat_str(&pf->hw, ret),
1879 i40e_aq_str(&pf->hw, aq_err));
1880 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1881 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1882 &vsi->state)) {
1883 promisc_forced_on = true;
1884 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1885 &vsi->state);
1886 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
1887 }
1888 }
1889 }
1890
1891 /* check for changes in promiscuous modes */
1892 if (changed_flags & IFF_ALLMULTI) {
1893 bool cur_multipromisc;
1894 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1895 ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1896 vsi->seid,
1897 cur_multipromisc,
1898 NULL);
1899 if (ret)
1900 dev_info(&pf->pdev->dev,
1901 "set multi promisc failed, err %s aq_err %s\n",
1902 i40e_stat_str(&pf->hw, ret),
1903 i40e_aq_str(&pf->hw,
1904 pf->hw.aq.asq_last_status));
1905 }
1906 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1907 bool cur_promisc;
1908 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1909 test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1910 &vsi->state));
1911 ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
1912 vsi->seid,
1913 cur_promisc, NULL);
1914 if (ret)
1915 dev_info(&pf->pdev->dev,
1916 "set uni promisc failed, err %s, aq_err %s\n",
1917 i40e_stat_str(&pf->hw, ret),
1918 i40e_aq_str(&pf->hw,
1919 pf->hw.aq.asq_last_status));
1920 ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
1921 vsi->seid,
1922 cur_promisc, NULL);
1923 if (ret)
1924 dev_info(&pf->pdev->dev,
1925 "set brdcast promisc failed, err %s, aq_err %s\n",
1926 i40e_stat_str(&pf->hw, ret),
1927 i40e_aq_str(&pf->hw,
1928 pf->hw.aq.asq_last_status));
1929 }
1930
1931 clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
1932 return 0;
1933 }
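
/* Illustrative call pattern (a sketch, not code from this file): filter
 * edits only mark entries on vsi->mac_filter_list as changed, and
 * nothing reaches the AdminQ until a sync is pushed, e.g.:
 *
 *	u8 mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *	if (i40e_add_filter(vsi, mac, I40E_VLAN_ANY, false, true))
 *		err = i40e_sync_vsi_filters(vsi);
 *
 * The locally administered address above is purely hypothetical.
 */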
1934
1935 /**
1936 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
1937 * @pf: board private structure
1938 **/
1939 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
1940 {
1941 int v;
1942
1943 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
1944 return;
1945 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
1946
1947 for (v = 0; v < pf->num_alloc_vsi; v++) {
1948 if (pf->vsi[v] &&
1949 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
1950 i40e_sync_vsi_filters(pf->vsi[v]);
1951 }
1952 }
1953
1954 /**
1955 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
1956 * @netdev: network interface device structure
1957 * @new_mtu: new value for maximum frame size
1958 *
1959 * Returns 0 on success, negative on failure
1960 **/
1961 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
1962 {
1963 struct i40e_netdev_priv *np = netdev_priv(netdev);
1964 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1965 struct i40e_vsi *vsi = np->vsi;
1966
1967 /* MTU < 68 is an error and causes problems on some kernels */
1968 if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
1969 return -EINVAL;
1970
1971 netdev_info(netdev, "changing MTU from %d to %d\n",
1972 netdev->mtu, new_mtu);
1973 netdev->mtu = new_mtu;
1974 if (netif_running(netdev))
1975 i40e_vsi_reinit_locked(vsi);
1976
1977 return 0;
1978 }
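
/* Worked numbers for the max_frame check above: ETH_HLEN (14) +
 * ETH_FCS_LEN (4) + VLAN_HLEN (4) add 22 bytes of overhead, so an MTU
 * of 1500 gives a max_frame of 1522. Assuming I40E_MAX_RXBUFFER is
 * 9728, the largest MTU this callback accepts works out to 9706.
 */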
1979
1980 /**
1981 * i40e_ioctl - Access the hwtstamp interface
1982 * @netdev: network interface device structure
1983 * @ifr: interface request data
1984 * @cmd: ioctl command
1985 **/
1986 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1987 {
1988 struct i40e_netdev_priv *np = netdev_priv(netdev);
1989 struct i40e_pf *pf = np->vsi->back;
1990
1991 switch (cmd) {
1992 case SIOCGHWTSTAMP:
1993 return i40e_ptp_get_ts_config(pf, ifr);
1994 case SIOCSHWTSTAMP:
1995 return i40e_ptp_set_ts_config(pf, ifr);
1996 default:
1997 return -EOPNOTSUPP;
1998 }
1999 }
2000
2001 /**
2002 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2003 * @vsi: the vsi being adjusted
2004 **/
2005 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2006 {
2007 struct i40e_vsi_context ctxt;
2008 i40e_status ret;
2009
2010 if ((vsi->info.valid_sections &
2011 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2012 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2013 return; /* already enabled */
2014
2015 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2016 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2017 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2018
2019 ctxt.seid = vsi->seid;
2020 ctxt.info = vsi->info;
2021 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2022 if (ret) {
2023 dev_info(&vsi->back->pdev->dev,
2024 "update vlan stripping failed, err %s aq_err %s\n",
2025 i40e_stat_str(&vsi->back->hw, ret),
2026 i40e_aq_str(&vsi->back->hw,
2027 vsi->back->hw.aq.asq_last_status));
2028 }
2029 }
2030
2031 /**
2032 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2033 * @vsi: the vsi being adjusted
2034 **/
2035 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2036 {
2037 struct i40e_vsi_context ctxt;
2038 i40e_status ret;
2039
2040 if ((vsi->info.valid_sections &
2041 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2042 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2043 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2044 return; /* already disabled */
2045
2046 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2047 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2048 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2049
2050 ctxt.seid = vsi->seid;
2051 ctxt.info = vsi->info;
2052 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2053 if (ret) {
2054 dev_info(&vsi->back->pdev->dev,
2055 "update vlan stripping failed, err %s aq_err %s\n",
2056 i40e_stat_str(&vsi->back->hw, ret),
2057 i40e_aq_str(&vsi->back->hw,
2058 vsi->back->hw.aq.asq_last_status));
2059 }
2060 }
2061
2062 /**
2063 * i40e_vlan_rx_register - Setup or shutdown vlan offload
2064 * @netdev: network interface to be adjusted
2065 * @features: netdev features to test if VLAN offload is enabled or not
2066 **/
2067 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2068 {
2069 struct i40e_netdev_priv *np = netdev_priv(netdev);
2070 struct i40e_vsi *vsi = np->vsi;
2071
2072 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2073 i40e_vlan_stripping_enable(vsi);
2074 else
2075 i40e_vlan_stripping_disable(vsi);
2076 }
2077
2078 /**
2079 * i40e_vsi_add_vlan - Add vsi membership for given vlan
2080 * @vsi: the vsi being configured
2081  * @vid: vlan id to be added (0 = untagged only, -1 = any)
2082 **/
2083 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
2084 {
2085 struct i40e_mac_filter *f, *add_f;
2086 bool is_netdev, is_vf;
2087
2088 is_vf = (vsi->type == I40E_VSI_SRIOV);
2089 is_netdev = !!(vsi->netdev);
2090
2091 if (is_netdev) {
2092 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
2093 is_vf, is_netdev);
2094 if (!add_f) {
2095 dev_info(&vsi->back->pdev->dev,
2096 "Could not add vlan filter %d for %pM\n",
2097 vid, vsi->netdev->dev_addr);
2098 return -ENOMEM;
2099 }
2100 }
2101
2102 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2103 add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2104 if (!add_f) {
2105 dev_info(&vsi->back->pdev->dev,
2106 "Could not add vlan filter %d for %pM\n",
2107 vid, f->macaddr);
2108 return -ENOMEM;
2109 }
2110 }
2111
2112 	/* When we add a vlan tag, check whether the catch-all "tag" -1 is
2113 	 * currently installed and, if so, replace it with 0 so that we
2114 	 * accept untagged plus the specified tagged traffic (and not any
2115 	 * tagged and untagged)
2116 	 */
2117 if (vid > 0) {
2118 if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
2119 I40E_VLAN_ANY,
2120 is_vf, is_netdev)) {
2121 i40e_del_filter(vsi, vsi->netdev->dev_addr,
2122 I40E_VLAN_ANY, is_vf, is_netdev);
2123 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
2124 is_vf, is_netdev);
2125 if (!add_f) {
2126 dev_info(&vsi->back->pdev->dev,
2127 "Could not add filter 0 for %pM\n",
2128 vsi->netdev->dev_addr);
2129 return -ENOMEM;
2130 }
2131 }
2132 }
2133
2134 /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
2135 if (vid > 0 && !vsi->info.pvid) {
2136 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2137 if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2138 is_vf, is_netdev)) {
2139 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2140 is_vf, is_netdev);
2141 add_f = i40e_add_filter(vsi, f->macaddr,
2142 0, is_vf, is_netdev);
2143 if (!add_f) {
2144 dev_info(&vsi->back->pdev->dev,
2145 "Could not add filter 0 for %pM\n",
2146 f->macaddr);
2147 return -ENOMEM;
2148 }
2149 }
2150 }
2151 }
2152
2153 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
2154 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
2155 return 0;
2156
2157 return i40e_sync_vsi_filters(vsi);
2158 }
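
/* Example of the catch-all swap above (a sketch): for a MAC installed
 * only with vid == I40E_VLAN_ANY (-1), adding vid 100 leaves that MAC
 * filtered on vid 0 (untagged) and vid 100, so frames carrying any
 * other tag stop matching.
 */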
2159
2160 /**
2161 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
2162 * @vsi: the vsi being configured
2163  * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2164 *
2165 * Return: 0 on success or negative otherwise
2166 **/
2167 int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
2168 {
2169 struct net_device *netdev = vsi->netdev;
2170 struct i40e_mac_filter *f, *add_f;
2171 bool is_vf, is_netdev;
2172 int filter_count = 0;
2173
2174 is_vf = (vsi->type == I40E_VSI_SRIOV);
2175 is_netdev = !!(netdev);
2176
2177 if (is_netdev)
2178 i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
2179
2180 list_for_each_entry(f, &vsi->mac_filter_list, list)
2181 i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2182
2183 /* go through all the filters for this VSI and if there is only
2184 * vid == 0 it means there are no other filters, so vid 0 must
2185 * be replaced with -1. This signifies that we should from now
2186 * on accept any traffic (with any tag present, or untagged)
2187 */
2188 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2189 if (is_netdev) {
2190 if (f->vlan &&
2191 ether_addr_equal(netdev->dev_addr, f->macaddr))
2192 filter_count++;
2193 }
2194
2195 if (f->vlan)
2196 filter_count++;
2197 }
2198
2199 if (!filter_count && is_netdev) {
2200 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
2201 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
2202 is_vf, is_netdev);
2203 if (!f) {
2204 dev_info(&vsi->back->pdev->dev,
2205 "Could not add filter %d for %pM\n",
2206 I40E_VLAN_ANY, netdev->dev_addr);
2207 return -ENOMEM;
2208 }
2209 }
2210
2211 if (!filter_count) {
2212 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2213 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
2214 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2215 is_vf, is_netdev);
2216 if (!add_f) {
2217 dev_info(&vsi->back->pdev->dev,
2218 "Could not add filter %d for %pM\n",
2219 I40E_VLAN_ANY, f->macaddr);
2220 return -ENOMEM;
2221 }
2222 }
2223 }
2224
2225 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
2226 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
2227 return 0;
2228
2229 return i40e_sync_vsi_filters(vsi);
2230 }
2231
2232 /**
2233 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2234 * @netdev: network interface to be adjusted
2235 * @vid: vlan id to be added
2236 *
2237 * net_device_ops implementation for adding vlan ids
2238 **/
2239 #ifdef I40E_FCOE
2240 int i40e_vlan_rx_add_vid(struct net_device *netdev,
2241 __always_unused __be16 proto, u16 vid)
2242 #else
2243 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2244 __always_unused __be16 proto, u16 vid)
2245 #endif
2246 {
2247 struct i40e_netdev_priv *np = netdev_priv(netdev);
2248 struct i40e_vsi *vsi = np->vsi;
2249 int ret = 0;
2250
2251 if (vid > 4095)
2252 return -EINVAL;
2253
2254 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
2255
2256 /* If the network stack called us with vid = 0 then
2257 * it is asking to receive priority tagged packets with
2258 * vlan id 0. Our HW receives them by default when configured
2259 * to receive untagged packets so there is no need to add an
2260 * extra filter for vlan 0 tagged packets.
2261 */
2262 if (vid)
2263 ret = i40e_vsi_add_vlan(vsi, vid);
2264
2265 if (!ret && (vid < VLAN_N_VID))
2266 set_bit(vid, vsi->active_vlans);
2267
2268 return ret;
2269 }
2270
2271 /**
2272 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2273 * @netdev: network interface to be adjusted
2274 * @vid: vlan id to be removed
2275 *
2276 * net_device_ops implementation for removing vlan ids
2277 **/
2278 #ifdef I40E_FCOE
2279 int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2280 __always_unused __be16 proto, u16 vid)
2281 #else
2282 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2283 __always_unused __be16 proto, u16 vid)
2284 #endif
2285 {
2286 struct i40e_netdev_priv *np = netdev_priv(netdev);
2287 struct i40e_vsi *vsi = np->vsi;
2288
2289 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
2290
2291 /* return code is ignored as there is nothing a user
2292 * can do about failure to remove and a log message was
2293 * already printed from the other function
2294 */
2295 i40e_vsi_kill_vlan(vsi, vid);
2296
2297 clear_bit(vid, vsi->active_vlans);
2298
2299 return 0;
2300 }
2301
2302 /**
2303 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2304 * @vsi: the vsi being brought back up
2305 **/
2306 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2307 {
2308 u16 vid;
2309
2310 if (!vsi->netdev)
2311 return;
2312
2313 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2314
2315 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2316 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2317 vid);
2318 }
2319
2320 /**
2321 * i40e_vsi_add_pvid - Add pvid for the VSI
2322 * @vsi: the vsi being adjusted
2323 * @vid: the vlan id to set as a PVID
2324 **/
2325 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2326 {
2327 struct i40e_vsi_context ctxt;
2328 i40e_status ret;
2329
2330 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2331 vsi->info.pvid = cpu_to_le16(vid);
2332 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2333 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2334 I40E_AQ_VSI_PVLAN_EMOD_STR;
2335
2336 ctxt.seid = vsi->seid;
2337 ctxt.info = vsi->info;
2338 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2339 if (ret) {
2340 dev_info(&vsi->back->pdev->dev,
2341 "add pvid failed, err %s aq_err %s\n",
2342 i40e_stat_str(&vsi->back->hw, ret),
2343 i40e_aq_str(&vsi->back->hw,
2344 vsi->back->hw.aq.asq_last_status));
2345 return -ENOENT;
2346 }
2347
2348 return 0;
2349 }
2350
2351 /**
2352 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2353 * @vsi: the vsi being adjusted
2354 *
2355  * Just use i40e_vlan_stripping_disable() to put it back to normal
2356 **/
2357 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2358 {
2359 i40e_vlan_stripping_disable(vsi);
2360
2361 vsi->info.pvid = 0;
2362 }
2363
2364 /**
2365 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2366 * @vsi: ptr to the VSI
2367 *
2368 * If this function returns with an error, then it's possible one or
2369 * more of the rings is populated (while the rest are not). It is the
2370 * callers duty to clean those orphaned rings.
2371 *
2372 * Return 0 on success, negative on failure
2373 **/
2374 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2375 {
2376 int i, err = 0;
2377
2378 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2379 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2380
2381 return err;
2382 }
2383
2384 /**
2385 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2386 * @vsi: ptr to the VSI
2387 *
2388 * Free VSI's transmit software resources
2389 **/
2390 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2391 {
2392 int i;
2393
2394 if (!vsi->tx_rings)
2395 return;
2396
2397 for (i = 0; i < vsi->num_queue_pairs; i++)
2398 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2399 i40e_free_tx_resources(vsi->tx_rings[i]);
2400 }
2401
2402 /**
2403 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2404 * @vsi: ptr to the VSI
2405 *
2406 * If this function returns with an error, then it's possible one or
2407 * more of the rings is populated (while the rest are not). It is the
2408 * callers duty to clean those orphaned rings.
2409 *
2410 * Return 0 on success, negative on failure
2411 **/
2412 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2413 {
2414 int i, err = 0;
2415
2416 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2417 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2418 #ifdef I40E_FCOE
2419 i40e_fcoe_setup_ddp_resources(vsi);
2420 #endif
2421 return err;
2422 }
2423
2424 /**
2425 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2426 * @vsi: ptr to the VSI
2427 *
2428 * Free all receive software resources
2429 **/
2430 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2431 {
2432 int i;
2433
2434 if (!vsi->rx_rings)
2435 return;
2436
2437 for (i = 0; i < vsi->num_queue_pairs; i++)
2438 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2439 i40e_free_rx_resources(vsi->rx_rings[i]);
2440 #ifdef I40E_FCOE
2441 i40e_fcoe_free_ddp_resources(vsi);
2442 #endif
2443 }
2444
2445 /**
2446 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2447 * @ring: The Tx ring to configure
2448 *
2449 * This enables/disables XPS for a given Tx descriptor ring
2450 * based on the TCs enabled for the VSI that ring belongs to.
2451 **/
2452 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2453 {
2454 struct i40e_vsi *vsi = ring->vsi;
2455 cpumask_var_t mask;
2456
2457 if (!ring->q_vector || !ring->netdev)
2458 return;
2459
2460 	/* In single-TC mode, enable XPS */
2461 if (vsi->tc_config.numtc <= 1) {
2462 if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2463 netif_set_xps_queue(ring->netdev,
2464 &ring->q_vector->affinity_mask,
2465 ring->queue_index);
2466 } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
2467 /* Disable XPS to allow selection based on TC */
2468 bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
2469 netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
2470 free_cpumask_var(mask);
2471 }
2472 }
2473
2474 /**
2475  * i40e_configure_tx_ring - Configure a transmit ring context and related settings
2476 * @ring: The Tx ring to configure
2477 *
2478 * Configure the Tx descriptor ring in the HMC context.
2479 **/
2480 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2481 {
2482 struct i40e_vsi *vsi = ring->vsi;
2483 u16 pf_q = vsi->base_queue + ring->queue_index;
2484 struct i40e_hw *hw = &vsi->back->hw;
2485 struct i40e_hmc_obj_txq tx_ctx;
2486 i40e_status err = 0;
2487 u32 qtx_ctl = 0;
2488
2489 /* some ATR related tx ring init */
2490 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2491 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2492 ring->atr_count = 0;
2493 } else {
2494 ring->atr_sample_rate = 0;
2495 }
2496
2497 /* configure XPS */
2498 i40e_config_xps_tx_ring(ring);
2499
2500 /* clear the context structure first */
2501 memset(&tx_ctx, 0, sizeof(tx_ctx));
2502
2503 tx_ctx.new_context = 1;
2504 tx_ctx.base = (ring->dma / 128);
2505 tx_ctx.qlen = ring->count;
2506 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2507 I40E_FLAG_FD_ATR_ENABLED));
2508 #ifdef I40E_FCOE
2509 tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2510 #endif
2511 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2512 /* FDIR VSI tx ring can still use RS bit and writebacks */
2513 if (vsi->type != I40E_VSI_FDIR)
2514 tx_ctx.head_wb_ena = 1;
2515 tx_ctx.head_wb_addr = ring->dma +
2516 (ring->count * sizeof(struct i40e_tx_desc));
2517
2518 /* As part of VSI creation/update, FW allocates certain
2519 * Tx arbitration queue sets for each TC enabled for
2520 * the VSI. The FW returns the handles to these queue
2521 * sets as part of the response buffer to Add VSI,
2522 * Update VSI, etc. AQ commands. It is expected that
2523 * these queue set handles be associated with the Tx
2524 * queues by the driver as part of the TX queue context
2525 * initialization. This has to be done regardless of
2526 * DCB as by default everything is mapped to TC0.
2527 */
2528 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2529 tx_ctx.rdylist_act = 0;
2530
2531 /* clear the context in the HMC */
2532 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2533 if (err) {
2534 dev_info(&vsi->back->pdev->dev,
2535 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2536 ring->queue_index, pf_q, err);
2537 return -ENOMEM;
2538 }
2539
2540 /* set the context in the HMC */
2541 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2542 if (err) {
2543 dev_info(&vsi->back->pdev->dev,
2544 			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2545 ring->queue_index, pf_q, err);
2546 return -ENOMEM;
2547 }
2548
2549 /* Now associate this queue with this PCI function */
2550 if (vsi->type == I40E_VSI_VMDQ2) {
2551 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2552 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2553 I40E_QTX_CTL_VFVM_INDX_MASK;
2554 } else {
2555 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2556 }
2557
2558 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2559 I40E_QTX_CTL_PF_INDX_MASK);
2560 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2561 i40e_flush(hw);
2562
2563 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
2564
2565 /* cache tail off for easier writes later */
2566 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2567
2568 return 0;
2569 }
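
/* Worked numbers for the context setup above: ring bases are kept in
 * 128-byte units, so a (hypothetical) ring->dma of 0x1f000 yields a
 * tx_ctx.base of 0x3e0; with ring->count == 512 and the 16-byte
 * struct i40e_tx_desc, head_wb_addr lands 512 * 16 = 8192 bytes past
 * the ring base.
 */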
2570
2571 /**
2572 * i40e_configure_rx_ring - Configure a receive ring context
2573 * @ring: The Rx ring to configure
2574 *
2575 * Configure the Rx descriptor ring in the HMC context.
2576 **/
2577 static int i40e_configure_rx_ring(struct i40e_ring *ring)
2578 {
2579 struct i40e_vsi *vsi = ring->vsi;
2580 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2581 u16 pf_q = vsi->base_queue + ring->queue_index;
2582 struct i40e_hw *hw = &vsi->back->hw;
2583 struct i40e_hmc_obj_rxq rx_ctx;
2584 i40e_status err = 0;
2585
2586 ring->state = 0;
2587
2588 /* clear the context structure first */
2589 memset(&rx_ctx, 0, sizeof(rx_ctx));
2590
2591 ring->rx_buf_len = vsi->rx_buf_len;
2592 ring->rx_hdr_len = vsi->rx_hdr_len;
2593
2594 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2595 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
2596
2597 rx_ctx.base = (ring->dma / 128);
2598 rx_ctx.qlen = ring->count;
2599
2600 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
2601 set_ring_16byte_desc_enabled(ring);
2602 rx_ctx.dsize = 0;
2603 } else {
2604 rx_ctx.dsize = 1;
2605 }
2606
2607 rx_ctx.dtype = vsi->dtype;
2608 if (vsi->dtype) {
2609 set_ring_ps_enabled(ring);
2610 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
2611 I40E_RX_SPLIT_IP |
2612 I40E_RX_SPLIT_TCP_UDP |
2613 I40E_RX_SPLIT_SCTP;
2614 } else {
2615 rx_ctx.hsplit_0 = 0;
2616 }
2617
2618 rx_ctx.rxmax = min_t(u16, vsi->max_frame,
2619 (chain_len * ring->rx_buf_len));
2620 if (hw->revision_id == 0)
2621 rx_ctx.lrxqthresh = 0;
2622 else
2623 rx_ctx.lrxqthresh = 2;
2624 rx_ctx.crcstrip = 1;
2625 rx_ctx.l2tsel = 1;
2626 rx_ctx.showiv = 1;
2627 #ifdef I40E_FCOE
2628 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2629 #endif
2630 /* set the prefena field to 1 because the manual says to */
2631 rx_ctx.prefena = 1;
2632
2633 /* clear the context in the HMC */
2634 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2635 if (err) {
2636 dev_info(&vsi->back->pdev->dev,
2637 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2638 ring->queue_index, pf_q, err);
2639 return -ENOMEM;
2640 }
2641
2642 /* set the context in the HMC */
2643 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2644 if (err) {
2645 dev_info(&vsi->back->pdev->dev,
2646 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2647 ring->queue_index, pf_q, err);
2648 return -ENOMEM;
2649 }
2650
2651 /* cache tail for quicker writes, and clear the reg before use */
2652 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2653 writel(0, ring->tail);
2654
2655 if (ring_is_ps_enabled(ring)) {
2656 i40e_alloc_rx_headers(ring);
2657 i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
2658 } else {
2659 i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
2660 }
2661
2662 return 0;
2663 }
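
/* Example of the rxmax clamp above (hypothetical numbers): with a
 * 2048-byte rx_buf_len and a reported rx_buf_chain_len of 5, the
 * buffer chain can hold 10240 bytes, so rxmax reduces to
 * vsi->max_frame for any ordinary MTU.
 */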
2664
2665 /**
2666 * i40e_vsi_configure_tx - Configure the VSI for Tx
2667 * @vsi: VSI structure describing this set of rings and resources
2668 *
2669 * Configure the Tx VSI for operation.
2670 **/
2671 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2672 {
2673 int err = 0;
2674 u16 i;
2675
2676 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2677 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2678
2679 return err;
2680 }
2681
2682 /**
2683 * i40e_vsi_configure_rx - Configure the VSI for Rx
2684 * @vsi: the VSI being configured
2685 *
2686 * Configure the Rx VSI for operation.
2687 **/
2688 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2689 {
2690 int err = 0;
2691 u16 i;
2692
2693 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2694 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
2695 + ETH_FCS_LEN + VLAN_HLEN;
2696 else
2697 vsi->max_frame = I40E_RXBUFFER_2048;
2698
2699 /* figure out correct receive buffer length */
2700 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
2701 I40E_FLAG_RX_PS_ENABLED)) {
2702 case I40E_FLAG_RX_1BUF_ENABLED:
2703 vsi->rx_hdr_len = 0;
2704 vsi->rx_buf_len = vsi->max_frame;
2705 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2706 break;
2707 case I40E_FLAG_RX_PS_ENABLED:
2708 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2709 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2710 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
2711 break;
2712 default:
2713 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2714 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2715 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
2716 break;
2717 }
2718
2719 #ifdef I40E_FCOE
2720 /* setup rx buffer for FCoE */
2721 if ((vsi->type == I40E_VSI_FCOE) &&
2722 (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
2723 vsi->rx_hdr_len = 0;
2724 vsi->rx_buf_len = I40E_RXBUFFER_3072;
2725 vsi->max_frame = I40E_RXBUFFER_3072;
2726 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2727 }
2728
2729 #endif /* I40E_FCOE */
2730 /* round up for the chip's needs */
2731 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
2732 BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
2733 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
2734 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
2735
2736 /* set up individual rings */
2737 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2738 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
2739
2740 return err;
2741 }
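
/* Rounding example for the ALIGN calls above: the Rx context counts
 * buffer sizes in units of BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT) and
 * BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT) bytes; assuming the usual shifts
 * of 6 and 7, a 256-byte rx_hdr_len and a 2048-byte rx_buf_len are
 * already multiples of 64 and 128 and pass through unchanged.
 */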
2742
2743 /**
2744 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2745 * @vsi: ptr to the VSI
2746 **/
2747 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2748 {
2749 struct i40e_ring *tx_ring, *rx_ring;
2750 u16 qoffset, qcount;
2751 int i, n;
2752
2753 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
2754 /* Reset the TC information */
2755 for (i = 0; i < vsi->num_queue_pairs; i++) {
2756 rx_ring = vsi->rx_rings[i];
2757 tx_ring = vsi->tx_rings[i];
2758 rx_ring->dcb_tc = 0;
2759 tx_ring->dcb_tc = 0;
2760 }
2761 }
2762
2763 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2764 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
2765 continue;
2766
2767 qoffset = vsi->tc_config.tc_info[n].qoffset;
2768 qcount = vsi->tc_config.tc_info[n].qcount;
2769 for (i = qoffset; i < (qoffset + qcount); i++) {
2770 rx_ring = vsi->rx_rings[i];
2771 tx_ring = vsi->tx_rings[i];
2772 rx_ring->dcb_tc = n;
2773 tx_ring->dcb_tc = n;
2774 }
2775 }
2776 }
2777
2778 /**
2779 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2780 * @vsi: ptr to the VSI
2781 **/
2782 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2783 {
2784 if (vsi->netdev)
2785 i40e_set_rx_mode(vsi->netdev);
2786 }
2787
2788 /**
2789 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
2790 * @vsi: Pointer to the targeted VSI
2791 *
2792 * This function replays the hlist on the hw where all the SB Flow Director
2793 * filters were saved.
2794 **/
2795 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
2796 {
2797 struct i40e_fdir_filter *filter;
2798 struct i40e_pf *pf = vsi->back;
2799 struct hlist_node *node;
2800
2801 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2802 return;
2803
2804 hlist_for_each_entry_safe(filter, node,
2805 &pf->fdir_filter_list, fdir_node) {
2806 i40e_add_del_fdir(vsi, filter, true);
2807 }
2808 }
2809
2810 /**
2811 * i40e_vsi_configure - Set up the VSI for action
2812 * @vsi: the VSI being configured
2813 **/
2814 static int i40e_vsi_configure(struct i40e_vsi *vsi)
2815 {
2816 int err;
2817
2818 i40e_set_vsi_rx_mode(vsi);
2819 i40e_restore_vlan(vsi);
2820 i40e_vsi_config_dcb_rings(vsi);
2821 err = i40e_vsi_configure_tx(vsi);
2822 if (!err)
2823 err = i40e_vsi_configure_rx(vsi);
2824
2825 return err;
2826 }
2827
2828 /**
2829 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
2830 * @vsi: the VSI being configured
2831 **/
2832 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2833 {
2834 struct i40e_pf *pf = vsi->back;
2835 struct i40e_q_vector *q_vector;
2836 struct i40e_hw *hw = &pf->hw;
2837 u16 vector;
2838 int i, q;
2839 u32 val;
2840 u32 qp;
2841
2842 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
2843 * and PFINT_LNKLSTn registers, e.g.:
2844 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
2845 */
2846 qp = vsi->base_queue;
2847 vector = vsi->base_vector;
2848 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
2849 q_vector = vsi->q_vectors[i];
2850 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2851 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2852 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
2853 q_vector->rx.itr);
2854 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2855 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2856 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
2857 q_vector->tx.itr);
2858
2859 /* Linked list for the queuepairs assigned to this vector */
2860 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
2861 for (q = 0; q < q_vector->num_ringpairs; q++) {
2862 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2863 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2864 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2865 			      (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2866 (I40E_QUEUE_TYPE_TX
2867 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2868
2869 wr32(hw, I40E_QINT_RQCTL(qp), val);
2870
2871 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2872 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2873 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2874 			      ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2875 (I40E_QUEUE_TYPE_RX
2876 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2877
2878 /* Terminate the linked list */
2879 if (q == (q_vector->num_ringpairs - 1))
2880 val |= (I40E_QUEUE_END_OF_LIST
2881 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2882
2883 wr32(hw, I40E_QINT_TQCTL(qp), val);
2884 qp++;
2885 }
2886 }
2887
2888 i40e_flush(hw);
2889 }
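
/* Chain example for the mapping above (hypothetical queue numbers): a
 * vector that owns queue pairs 0 and 1 ends up programmed as
 * LNKLSTN -> rx0 -> tx0 -> rx1 -> tx1 -> EOL, because each RQCTL points
 * at its partner Tx queue and each TQCTL points at the next Rx queue.
 */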
2890
2891 /**
2892 * i40e_enable_misc_int_causes - enable the non-queue interrupts
2893  * @pf: board private structure
2894 **/
2895 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
2896 {
2897 struct i40e_hw *hw = &pf->hw;
2898 u32 val;
2899
2900 /* clear things first */
2901 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
2902 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
2903
2904 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2905 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2906 I40E_PFINT_ICR0_ENA_GRST_MASK |
2907 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2908 I40E_PFINT_ICR0_ENA_GPIO_MASK |
2909 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
2910 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2911 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2912
2913 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
2914 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
2915
2916 if (pf->flags & I40E_FLAG_PTP)
2917 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
2918
2919 wr32(hw, I40E_PFINT_ICR0_ENA, val);
2920
2921 /* SW_ITR_IDX = 0, but don't change INTENA */
2922 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2923 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2924
2925 /* OTHER_ITR_IDX = 0 */
2926 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2927 }
2928
2929 /**
2930 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
2931 * @vsi: the VSI being configured
2932 **/
2933 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2934 {
2935 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
2936 struct i40e_pf *pf = vsi->back;
2937 struct i40e_hw *hw = &pf->hw;
2938 u32 val;
2939
2940 /* set the ITR configuration */
2941 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2942 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2943 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
2944 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2945 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2946 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
2947
2948 i40e_enable_misc_int_causes(pf);
2949
2950 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2951 wr32(hw, I40E_PFINT_LNKLST0, 0);
2952
2953 /* Associate the queue pair to the vector and enable the queue int */
2954 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2955 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2956 	      (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2957
2958 wr32(hw, I40E_QINT_RQCTL(0), val);
2959
2960 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2961 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2962 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2963
2964 wr32(hw, I40E_QINT_TQCTL(0), val);
2965 i40e_flush(hw);
2966 }
2967
2968 /**
2969 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
2970 * @pf: board private structure
2971 **/
2972 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
2973 {
2974 struct i40e_hw *hw = &pf->hw;
2975
2976 wr32(hw, I40E_PFINT_DYN_CTL0,
2977 	     I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2978 i40e_flush(hw);
2979 }
2980
2981 /**
2982 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2983 * @pf: board private structure
2984 **/
2985 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
2986 {
2987 struct i40e_hw *hw = &pf->hw;
2988 u32 val;
2989
2990 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2991 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2992 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2993
2994 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2995 i40e_flush(hw);
2996 }
2997
2998 /**
2999 * i40e_irq_dynamic_enable - Enable default interrupt generation settings
3000 * @vsi: pointer to a vsi
3001  * @vector: the HW interrupt vector to enable
3002 **/
3003 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
3004 {
3005 struct i40e_pf *pf = vsi->back;
3006 struct i40e_hw *hw = &pf->hw;
3007 u32 val;
3008
3009 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3010 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3011 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3012 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
3013 /* skip the flush */
3014 }
3015
3016 /**
3017 * i40e_irq_dynamic_disable - Disable default interrupt generation settings
3018 * @vsi: pointer to a vsi
3019  * @vector: the HW interrupt vector to disable
3020 **/
3021 void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
3022 {
3023 struct i40e_pf *pf = vsi->back;
3024 struct i40e_hw *hw = &pf->hw;
3025 u32 val;
3026
3027 val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3028 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
3029 i40e_flush(hw);
3030 }
3031
3032 /**
3033 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3034 * @irq: interrupt number
3035 * @data: pointer to a q_vector
3036 **/
3037 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3038 {
3039 struct i40e_q_vector *q_vector = data;
3040
3041 if (!q_vector->tx.ring && !q_vector->rx.ring)
3042 return IRQ_HANDLED;
3043
3044 napi_schedule(&q_vector->napi);
3045
3046 return IRQ_HANDLED;
3047 }
3048
3049 /**
3050 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3051 * @vsi: the VSI being configured
3052 * @basename: name for the vector
3053 *
3054 * Allocates MSI-X vectors and requests interrupts from the kernel.
3055 **/
3056 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3057 {
3058 int q_vectors = vsi->num_q_vectors;
3059 struct i40e_pf *pf = vsi->back;
3060 int base = vsi->base_vector;
3061 int rx_int_idx = 0;
3062 int tx_int_idx = 0;
3063 int vector, err;
3064
3065 for (vector = 0; vector < q_vectors; vector++) {
3066 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3067
3068 if (q_vector->tx.ring && q_vector->rx.ring) {
3069 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3070 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3071 tx_int_idx++;
3072 } else if (q_vector->rx.ring) {
3073 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3074 "%s-%s-%d", basename, "rx", rx_int_idx++);
3075 } else if (q_vector->tx.ring) {
3076 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3077 "%s-%s-%d", basename, "tx", tx_int_idx++);
3078 } else {
3079 /* skip this unused q_vector */
3080 continue;
3081 }
3082 err = request_irq(pf->msix_entries[base + vector].vector,
3083 vsi->irq_handler,
3084 0,
3085 q_vector->name,
3086 q_vector);
3087 if (err) {
3088 dev_info(&pf->pdev->dev,
3089 "%s: request_irq failed, error: %d\n",
3090 __func__, err);
3091 goto free_queue_irqs;
3092 }
3093 /* assign the mask for this irq */
3094 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3095 &q_vector->affinity_mask);
3096 }
3097
3098 vsi->irqs_ready = true;
3099 return 0;
3100
3101 free_queue_irqs:
3102 while (vector) {
3103 vector--;
3104 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3105 NULL);
3106 free_irq(pf->msix_entries[base + vector].vector,
3107 &(vsi->q_vectors[vector]));
3108 }
3109 return err;
3110 }
3111
3112 /**
3113 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3114 * @vsi: the VSI being un-configured
3115 **/
3116 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3117 {
3118 struct i40e_pf *pf = vsi->back;
3119 struct i40e_hw *hw = &pf->hw;
3120 int base = vsi->base_vector;
3121 int i;
3122
3123 for (i = 0; i < vsi->num_queue_pairs; i++) {
3124 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
3125 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
3126 }
3127
3128 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3129 for (i = vsi->base_vector;
3130 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3131 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3132
3133 i40e_flush(hw);
3134 for (i = 0; i < vsi->num_q_vectors; i++)
3135 synchronize_irq(pf->msix_entries[i + base].vector);
3136 } else {
3137 /* Legacy and MSI mode - this stops all interrupt handling */
3138 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3139 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3140 i40e_flush(hw);
3141 synchronize_irq(pf->pdev->irq);
3142 }
3143 }
3144
3145 /**
3146 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3147 * @vsi: the VSI being configured
3148 **/
3149 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3150 {
3151 struct i40e_pf *pf = vsi->back;
3152 int i;
3153
3154 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3155 for (i = vsi->base_vector;
3156 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3157 i40e_irq_dynamic_enable(vsi, i);
3158 } else {
3159 i40e_irq_dynamic_enable_icr0(pf);
3160 }
3161
3162 i40e_flush(&pf->hw);
3163 return 0;
3164 }
3165
3166 /**
3167 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3168 * @pf: board private structure
3169 **/
3170 static void i40e_stop_misc_vector(struct i40e_pf *pf)
3171 {
3172 /* Disable ICR 0 */
3173 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3174 i40e_flush(&pf->hw);
3175 }
3176
3177 /**
3178 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3179 * @irq: interrupt number
3180 * @data: pointer to a q_vector
3181 *
3182 * This is the handler used for all MSI/Legacy interrupts, and deals
3183 * with both queue and non-queue interrupts. This is also used in
3184 * MSIX mode to handle the non-queue interrupts.
3185 **/
3186 static irqreturn_t i40e_intr(int irq, void *data)
3187 {
3188 struct i40e_pf *pf = (struct i40e_pf *)data;
3189 struct i40e_hw *hw = &pf->hw;
3190 irqreturn_t ret = IRQ_NONE;
3191 u32 icr0, icr0_remaining;
3192 u32 val, ena_mask;
3193
3194 icr0 = rd32(hw, I40E_PFINT_ICR0);
3195 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3196
3197 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3198 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3199 goto enable_intr;
3200
3201 /* if interrupt but no bits showing, must be SWINT */
3202 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3203 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3204 pf->sw_int_count++;
3205
3206 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3207 (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3208 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3209 icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3210 dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
3211 }
3212
3213 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3214 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3215
3216 /* temporarily disable queue cause for NAPI processing */
3217 u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
3218 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3219 wr32(hw, I40E_QINT_RQCTL(0), qval);
3220
3221 qval = rd32(hw, I40E_QINT_TQCTL(0));
3222 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3223 wr32(hw, I40E_QINT_TQCTL(0), qval);
3224
3225 if (!test_bit(__I40E_DOWN, &pf->state))
3226 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
3227 }
3228
3229 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3230 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3231 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
3232 }
3233
3234 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3235 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3236 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
3237 }
3238
3239 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3240 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3241 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
3242 }
3243
3244 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3245 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
3246 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
3247 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3248 val = rd32(hw, I40E_GLGEN_RSTAT);
3249 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3250 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3251 if (val == I40E_RESET_CORER) {
3252 pf->corer_count++;
3253 } else if (val == I40E_RESET_GLOBR) {
3254 pf->globr_count++;
3255 } else if (val == I40E_RESET_EMPR) {
3256 pf->empr_count++;
3257 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
3258 }
3259 }
3260
3261 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3262 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3263 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3264 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3265 rd32(hw, I40E_PFHMC_ERRORINFO),
3266 rd32(hw, I40E_PFHMC_ERRORDATA));
3267 }
3268
3269 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3270 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3271
3272 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3273 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3274 i40e_ptp_tx_hwtstamp(pf);
3275 }
3276 }
3277
3278 /* If a critical error is pending we have no choice but to reset the
3279 * device.
3280 * Report and mask out any remaining unexpected interrupts.
3281 */
3282 icr0_remaining = icr0 & ena_mask;
3283 if (icr0_remaining) {
3284 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3285 icr0_remaining);
3286 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3287 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3288 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3289 dev_info(&pf->pdev->dev, "device will be reset\n");
3290 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
3291 i40e_service_event_schedule(pf);
3292 }
3293 ena_mask &= ~icr0_remaining;
3294 }
3295 ret = IRQ_HANDLED;
3296
3297 enable_intr:
3298 /* re-enable interrupt causes */
3299 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3300 if (!test_bit(__I40E_DOWN, &pf->state)) {
3301 i40e_service_event_schedule(pf);
3302 i40e_irq_dynamic_enable_icr0(pf);
3303 }
3304
3305 return ret;
3306 }
3307
3308 /**
3309 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3310 * @tx_ring: tx ring to clean
3311 * @budget: how many cleans we're allowed
3312 *
3313  * Returns true if there's any budget left (i.e. the clean is finished)
3314 **/
3315 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3316 {
3317 struct i40e_vsi *vsi = tx_ring->vsi;
3318 u16 i = tx_ring->next_to_clean;
3319 struct i40e_tx_buffer *tx_buf;
3320 struct i40e_tx_desc *tx_desc;
3321
3322 tx_buf = &tx_ring->tx_bi[i];
3323 tx_desc = I40E_TX_DESC(tx_ring, i);
3324 i -= tx_ring->count;
3325
3326 do {
3327 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3328
3329 /* if next_to_watch is not set then there is no work pending */
3330 if (!eop_desc)
3331 break;
3332
3333 /* prevent any other reads prior to eop_desc */
3334 read_barrier_depends();
3335
3336 /* if the descriptor isn't done, no work yet to do */
3337 if (!(eop_desc->cmd_type_offset_bsz &
3338 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
3339 break;
3340
3341 /* clear next_to_watch to prevent false hangs */
3342 tx_buf->next_to_watch = NULL;
3343
3344 tx_desc->buffer_addr = 0;
3345 tx_desc->cmd_type_offset_bsz = 0;
3346 /* move past filter desc */
3347 tx_buf++;
3348 tx_desc++;
3349 i++;
3350 if (unlikely(!i)) {
3351 i -= tx_ring->count;
3352 tx_buf = tx_ring->tx_bi;
3353 tx_desc = I40E_TX_DESC(tx_ring, 0);
3354 }
3355 /* unmap skb header data */
3356 dma_unmap_single(tx_ring->dev,
3357 dma_unmap_addr(tx_buf, dma),
3358 dma_unmap_len(tx_buf, len),
3359 DMA_TO_DEVICE);
3360 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
3361 kfree(tx_buf->raw_buf);
3362
3363 tx_buf->raw_buf = NULL;
3364 tx_buf->tx_flags = 0;
3365 tx_buf->next_to_watch = NULL;
3366 dma_unmap_len_set(tx_buf, len, 0);
3367 tx_desc->buffer_addr = 0;
3368 tx_desc->cmd_type_offset_bsz = 0;
3369
3370 /* move us past the eop_desc for start of next FD desc */
3371 tx_buf++;
3372 tx_desc++;
3373 i++;
3374 if (unlikely(!i)) {
3375 i -= tx_ring->count;
3376 tx_buf = tx_ring->tx_bi;
3377 tx_desc = I40E_TX_DESC(tx_ring, 0);
3378 }
3379
3380 /* update budget accounting */
3381 budget--;
3382 } while (likely(budget));
3383
3384 i += tx_ring->count;
3385 tx_ring->next_to_clean = i;
3386
3387 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
3388 i40e_irq_dynamic_enable(vsi,
3389 tx_ring->q_vector->v_idx + vsi->base_vector);
3390 }
3391 return budget > 0;
3392 }
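
/* Index note for the clean loop above: 'i' is biased by
 * -tx_ring->count so the ring-wrap test is a simple !i check. With a
 * (hypothetical) 512-entry ring and next_to_clean == 510, i starts at
 * -2, reaches 0 after one filter's two descriptors, and is rebased to
 * -512 (slot 0); adding tx_ring->count back at the end restores the
 * real index.
 */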
3393
3394 /**
3395 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3396 * @irq: interrupt number
3397 * @data: pointer to a q_vector
3398 **/
3399 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3400 {
3401 struct i40e_q_vector *q_vector = data;
3402 struct i40e_vsi *vsi;
3403
3404 if (!q_vector->tx.ring)
3405 return IRQ_HANDLED;
3406
3407 vsi = q_vector->tx.ring->vsi;
3408 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3409
3410 return IRQ_HANDLED;
3411 }
3412
3413 /**
3414  * map_vector_to_qp - Assigns the queue pair to the vector
3415 * @vsi: the VSI being configured
3416 * @v_idx: vector index
3417 * @qp_idx: queue pair index
3418 **/
3419 static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3420 {
3421 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3422 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3423 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3424
3425 tx_ring->q_vector = q_vector;
3426 tx_ring->next = q_vector->tx.ring;
3427 q_vector->tx.ring = tx_ring;
3428 q_vector->tx.count++;
3429
3430 rx_ring->q_vector = q_vector;
3431 rx_ring->next = q_vector->rx.ring;
3432 q_vector->rx.ring = rx_ring;
3433 q_vector->rx.count++;
3434 }
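
/* Note on the list handling above: rings are pushed onto the head of
 * each q_vector list, so after mapping qp 0 and then qp 1 the walk of
 * q_vector->tx.ring visits tx1 before tx0. The qp numbers are just an
 * example.
 */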
3435
3436 /**
3437 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3438 * @vsi: the VSI being configured
3439 *
3440 * This function maps descriptor rings to the queue-specific vectors
3441 * we were allotted through the MSI-X enabling code. Ideally, we'd have
3442 * one vector per queue pair, but on a constrained vector budget, we
3443 * group the queue pairs as "efficiently" as possible.
3444 **/
3445 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3446 {
3447 int qp_remaining = vsi->num_queue_pairs;
3448 int q_vectors = vsi->num_q_vectors;
3449 int num_ringpairs;
3450 int v_start = 0;
3451 int qp_idx = 0;
3452
3453 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3454 * group them so there are multiple queues per vector.
3455 * It is also important to go through all the vectors available to be
3456 	 * sure that if we don't use all the vectors, the remaining vectors
3457 * are cleared. This is especially important when decreasing the
3458 * number of queues in use.
3459 */
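	/* Worked example: with 10 queue pairs and 4 vectors, the
	 * DIV_ROUND_UP() below hands out 3, 3, 2 and 2 ringpairs to the
	 * vectors in turn.
	 */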
3460 for (; v_start < q_vectors; v_start++) {
3461 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3462
3463 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3464
3465 q_vector->num_ringpairs = num_ringpairs;
3466
3467 q_vector->rx.count = 0;
3468 q_vector->tx.count = 0;
3469 q_vector->rx.ring = NULL;
3470 q_vector->tx.ring = NULL;
3471
3472 while (num_ringpairs--) {
3473 map_vector_to_qp(vsi, v_start, qp_idx);
3474 qp_idx++;
3475 qp_remaining--;
3476 }
3477 }
3478 }
3479
3480 /**
3481 * i40e_vsi_request_irq - Request IRQ from the OS
3482 * @vsi: the VSI being configured
3483 * @basename: name for the vector
3484 **/
3485 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3486 {
3487 struct i40e_pf *pf = vsi->back;
3488 int err;
3489
3490 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3491 err = i40e_vsi_request_irq_msix(vsi, basename);
3492 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3493 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3494 pf->int_name, pf);
3495 else
3496 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3497 pf->int_name, pf);
3498
3499 if (err)
3500 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3501
3502 return err;
3503 }
3504
3505 #ifdef CONFIG_NET_POLL_CONTROLLER
3506 /**
3507  * i40e_netpoll - A Polling 'interrupt' handler
3508 * @netdev: network interface device structure
3509 *
3510 * This is used by netconsole to send skbs without having to re-enable
3511 * interrupts. It's not called while the normal interrupt routine is executing.
3512 **/
3513 #ifdef I40E_FCOE
3514 void i40e_netpoll(struct net_device *netdev)
3515 #else
3516 static void i40e_netpoll(struct net_device *netdev)
3517 #endif
3518 {
3519 struct i40e_netdev_priv *np = netdev_priv(netdev);
3520 struct i40e_vsi *vsi = np->vsi;
3521 struct i40e_pf *pf = vsi->back;
3522 int i;
3523
3524 /* if interface is down do nothing */
3525 if (test_bit(__I40E_DOWN, &vsi->state))
3526 return;
3527
3528 pf->flags |= I40E_FLAG_IN_NETPOLL;
3529 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3530 for (i = 0; i < vsi->num_q_vectors; i++)
3531 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
3532 } else {
3533 		i40e_intr(pf->pdev->irq, pf);
3534 }
3535 pf->flags &= ~I40E_FLAG_IN_NETPOLL;
3536 }
3537 #endif
3538
3539 /**
3540 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3541 * @pf: the PF being configured
3542 * @pf_q: the PF queue
3543 * @enable: enable or disable state of the queue
3544 *
3545 * This routine will wait for the given Tx queue of the PF to reach the
3546 * enabled or disabled state.
3547 * Returns -ETIMEDOUT in case of failing to reach the requested state after
3548 * multiple retries; else will return 0 in case of success.
3549 **/
3550 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3551 {
3552 int i;
3553 u32 tx_reg;
3554
3555 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3556 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3557 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3558 break;
3559
3560 usleep_range(10, 20);
3561 }
3562 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3563 return -ETIMEDOUT;
3564
3565 return 0;
3566 }
3567
3568 /**
3569 * i40e_vsi_control_tx - Start or stop a VSI's rings
3570 * @vsi: the VSI being configured
3571 * @enable: start or stop the rings
3572 **/
3573 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3574 {
3575 struct i40e_pf *pf = vsi->back;
3576 struct i40e_hw *hw = &pf->hw;
3577 int i, j, pf_q, ret = 0;
3578 u32 tx_reg;
3579
3580 pf_q = vsi->base_queue;
3581 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3582
3583 /* warn the TX unit of coming changes */
3584 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
3585 if (!enable)
3586 usleep_range(10, 20);
3587
3588 for (j = 0; j < 50; j++) {
3589 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3590 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
3591 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
3592 break;
3593 usleep_range(1000, 2000);
3594 }
3595 /* Skip if the queue is already in the requested state */
3596 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3597 continue;
3598
3599 /* turn on/off the queue */
3600 if (enable) {
3601 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
3602 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3603 } else {
3604 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3605 }
3606
3607 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
3608 /* No waiting for the Tx queue to disable */
3609 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
3610 continue;
3611
3612 /* wait for the change to finish */
3613 ret = i40e_pf_txq_wait(pf, pf_q, enable);
3614 if (ret) {
3615 dev_info(&pf->pdev->dev,
3616 "%s: VSI seid %d Tx ring %d %sable timeout\n",
3617 __func__, vsi->seid, pf_q,
3618 (enable ? "en" : "dis"));
3619 break;
3620 }
3621 }
3622
3623 if (hw->revision_id == 0)
3624 mdelay(50);
3625 return ret;
3626 }
3627
3628 /**
3629 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
3630 * @pf: the PF being configured
3631 * @pf_q: the PF queue
3632 * @enable: enable or disable state of the queue
3633 *
3634 * This routine will wait for the given Rx queue of the PF to reach the
3635 * enabled or disabled state.
3636 * Returns -ETIMEDOUT in case of failing to reach the requested state after
3637 * multiple retries; else will return 0 in case of success.
3638 **/
3639 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3640 {
3641 int i;
3642 u32 rx_reg;
3643
3644 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3645 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3646 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3647 break;
3648
3649 usleep_range(10, 20);
3650 }
3651 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3652 return -ETIMEDOUT;
3653
3654 return 0;
3655 }
3656
3657 /**
3658 * i40e_vsi_control_rx - Start or stop a VSI's rings
3659 * @vsi: the VSI being configured
3660 * @enable: start or stop the rings
3661 **/
3662 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3663 {
3664 struct i40e_pf *pf = vsi->back;
3665 struct i40e_hw *hw = &pf->hw;
3666 int i, j, pf_q, ret = 0;
3667 u32 rx_reg;
3668
3669 pf_q = vsi->base_queue;
3670 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3671 for (j = 0; j < 50; j++) {
3672 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3673 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3674 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3675 break;
3676 usleep_range(1000, 2000);
3677 }
3678
3679 /* Skip if the queue is already in the requested state */
3680 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3681 continue;
3682
3683 /* turn on/off the queue */
3684 if (enable)
3685 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3686 else
3687 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3688 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3689
3690 /* wait for the change to finish */
3691 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
3692 if (ret) {
3693 dev_info(&pf->pdev->dev,
3694 "%s: VSI seid %d Rx ring %d %sable timeout\n",
3695 __func__, vsi->seid, pf_q,
3696 (enable ? "en" : "dis"));
3697 break;
3698 }
3699 }
3700
3701 return ret;
3702 }
3703
3704 /**
3705 * i40e_vsi_control_rings - Start or stop a VSI's rings
3706 * @vsi: the VSI being configured
3707 * @request: start or stop the rings
3708 **/
3709 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3710 {
3711 int ret = 0;
3712
3713 /* do rx first for enable and last for disable */
3714 if (request) {
3715 ret = i40e_vsi_control_rx(vsi, request);
3716 if (ret)
3717 return ret;
3718 ret = i40e_vsi_control_tx(vsi, request);
3719 } else {
3720 /* Ignore return value, we need to shutdown whatever we can */
3721 i40e_vsi_control_tx(vsi, request);
3722 i40e_vsi_control_rx(vsi, request);
3723 }
3724
3725 return ret;
3726 }
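
/* Usage sketch, illustrative only: i40e_up_complete() and i40e_down()
 * later in this file are the typical callers:
 *
 *	err = i40e_vsi_control_rings(vsi, true);	enables Rx, then Tx
 *	...
 *	i40e_vsi_control_rings(vsi, false);		disables Tx, then Rx
 */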
3727
3728 /**
3729 * i40e_vsi_free_irq - Free the irq association with the OS
3730 * @vsi: the VSI being configured
3731 **/
3732 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3733 {
3734 struct i40e_pf *pf = vsi->back;
3735 struct i40e_hw *hw = &pf->hw;
3736 int base = vsi->base_vector;
3737 u32 val, qp;
3738 int i;
3739
3740 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3741 if (!vsi->q_vectors)
3742 return;
3743
3744 if (!vsi->irqs_ready)
3745 return;
3746
3747 vsi->irqs_ready = false;
3748 for (i = 0; i < vsi->num_q_vectors; i++) {
3749 u16 vector = i + base;
3750
3751 /* free only the irqs that were actually requested */
3752 if (!vsi->q_vectors[i] ||
3753 !vsi->q_vectors[i]->num_ringpairs)
3754 continue;
3755
3756 /* clear the affinity_mask in the IRQ descriptor */
3757 irq_set_affinity_hint(pf->msix_entries[vector].vector,
3758 NULL);
3759 free_irq(pf->msix_entries[vector].vector,
3760 vsi->q_vectors[i]);
3761
3762 /* Tear down the interrupt queue link list
3763 *
3764 * We know that they come in pairs and always
3765 * the Rx first, then the Tx. To clear the
3766 * link list, stick the EOL value into the
3767 * next_q field of the registers.
3768 */
3769 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
3770 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3771 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3772 val |= I40E_QUEUE_END_OF_LIST
3773 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3774 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
3775
3776 while (qp != I40E_QUEUE_END_OF_LIST) {
3777 u32 next;
3778
3779 val = rd32(hw, I40E_QINT_RQCTL(qp));
3780
3781 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3782 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3783 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3784 I40E_QINT_RQCTL_INTEVENT_MASK);
3785
3786 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3787 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3788
3789 wr32(hw, I40E_QINT_RQCTL(qp), val);
3790
3791 val = rd32(hw, I40E_QINT_TQCTL(qp));
3792
3793 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
3794 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
3795
3796 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3797 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3798 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3799 I40E_QINT_TQCTL_INTEVENT_MASK);
3800
3801 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3802 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3803
3804 wr32(hw, I40E_QINT_TQCTL(qp), val);
3805 qp = next;
3806 }
3807 }
3808 } else {
3809 free_irq(pf->pdev->irq, pf);
3810
3811 val = rd32(hw, I40E_PFINT_LNKLST0);
3812 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3813 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3814 val |= I40E_QUEUE_END_OF_LIST
3815 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
3816 wr32(hw, I40E_PFINT_LNKLST0, val);
3817
3818 val = rd32(hw, I40E_QINT_RQCTL(qp));
3819 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3820 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3821 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3822 I40E_QINT_RQCTL_INTEVENT_MASK);
3823
3824 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3825 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3826
3827 wr32(hw, I40E_QINT_RQCTL(qp), val);
3828
3829 val = rd32(hw, I40E_QINT_TQCTL(qp));
3830
3831 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3832 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3833 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3834 I40E_QINT_TQCTL_INTEVENT_MASK);
3835
3836 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3837 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3838
3839 wr32(hw, I40E_QINT_TQCTL(qp), val);
3840 }
3841 }
3842
3843 /**
3844 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
3845 * @vsi: the VSI being configured
3846 * @v_idx: Index of vector to be freed
3847 *
3848 * This function frees the memory allocated to the q_vector. In addition if
3849 * NAPI is enabled it will delete any references to the NAPI struct prior
3850 * to freeing the q_vector.
3851 **/
3852 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
3853 {
3854 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3855 struct i40e_ring *ring;
3856
3857 if (!q_vector)
3858 return;
3859
3860 /* disassociate q_vector from rings */
3861 i40e_for_each_ring(ring, q_vector->tx)
3862 ring->q_vector = NULL;
3863
3864 i40e_for_each_ring(ring, q_vector->rx)
3865 ring->q_vector = NULL;
3866
3867 /* only VSI w/ an associated netdev is set up w/ NAPI */
3868 if (vsi->netdev)
3869 netif_napi_del(&q_vector->napi);
3870
3871 vsi->q_vectors[v_idx] = NULL;
3872
3873 kfree_rcu(q_vector, rcu);
3874 }
3875
3876 /**
3877 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3878 * @vsi: the VSI being un-configured
3879 *
3880 * This frees the memory allocated to the q_vectors and
3881 * deletes references to the NAPI struct.
3882 **/
3883 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3884 {
3885 int v_idx;
3886
3887 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
3888 i40e_free_q_vector(vsi, v_idx);
3889 }
3890
3891 /**
3892 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
3893 * @pf: board private structure
3894 **/
3895 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
3896 {
3897 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
3898 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3899 pci_disable_msix(pf->pdev);
3900 kfree(pf->msix_entries);
3901 pf->msix_entries = NULL;
3902 kfree(pf->irq_pile);
3903 pf->irq_pile = NULL;
3904 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
3905 pci_disable_msi(pf->pdev);
3906 }
3907 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
3908 }
3909
3910 /**
3911 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
3912 * @pf: board private structure
3913 *
3914 * We go through and clear interrupt specific resources and reset the structure
3915 * to pre-load conditions
3916 **/
3917 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3918 {
3919 int i;
3920
3921 i40e_stop_misc_vector(pf);
3922 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3923 synchronize_irq(pf->msix_entries[0].vector);
3924 free_irq(pf->msix_entries[0].vector, pf);
3925 }
3926
3927 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3928 for (i = 0; i < pf->num_alloc_vsi; i++)
3929 if (pf->vsi[i])
3930 i40e_vsi_free_q_vectors(pf->vsi[i]);
3931 i40e_reset_interrupt_capability(pf);
3932 }
3933
3934 /**
3935 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3936 * @vsi: the VSI being configured
3937 **/
3938 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3939 {
3940 int q_idx;
3941
3942 if (!vsi->netdev)
3943 return;
3944
3945 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3946 napi_enable(&vsi->q_vectors[q_idx]->napi);
3947 }
3948
3949 /**
3950 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3951 * @vsi: the VSI being configured
3952 **/
3953 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3954 {
3955 int q_idx;
3956
3957 if (!vsi->netdev)
3958 return;
3959
3960 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3961 napi_disable(&vsi->q_vectors[q_idx]->napi);
3962 }
3963
3964 /**
3965 * i40e_vsi_close - Shut down a VSI
3966 * @vsi: the vsi to be quelled
3967 **/
3968 static void i40e_vsi_close(struct i40e_vsi *vsi)
3969 {
3970 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
3971 i40e_down(vsi);
3972 i40e_vsi_free_irq(vsi);
3973 i40e_vsi_free_tx_resources(vsi);
3974 i40e_vsi_free_rx_resources(vsi);
3975 }
3976
3977 /**
3978 * i40e_quiesce_vsi - Pause a given VSI
3979 * @vsi: the VSI being paused
3980 **/
3981 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
3982 {
3983 if (test_bit(__I40E_DOWN, &vsi->state))
3984 return;
3985
3986 /* No need to disable FCoE VSI when Tx suspended */
3987 if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
3988 vsi->type == I40E_VSI_FCOE) {
3989 dev_dbg(&vsi->back->pdev->dev,
3990 "%s: VSI seid %d skipping FCoE VSI disable\n",
3991 __func__, vsi->seid);
3992 return;
3993 }
3994
3995 set_bit(__I40E_NEEDS_RESTART, &vsi->state);
3996 if (vsi->netdev && netif_running(vsi->netdev)) {
3997 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3998 } else {
3999 i40e_vsi_close(vsi);
4000 }
4001 }
4002
4003 /**
4004 * i40e_unquiesce_vsi - Resume a given VSI
4005 * @vsi: the VSI being resumed
4006 **/
4007 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4008 {
4009 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
4010 return;
4011
4012 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4013 if (vsi->netdev && netif_running(vsi->netdev))
4014 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4015 else
4016 i40e_vsi_open(vsi); /* this clears the DOWN bit */
4017 }
4018
4019 /**
4020 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4021 * @pf: the PF
4022 **/
4023 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4024 {
4025 int v;
4026
4027 for (v = 0; v < pf->num_alloc_vsi; v++) {
4028 if (pf->vsi[v])
4029 i40e_quiesce_vsi(pf->vsi[v]);
4030 }
4031 }
4032
4033 /**
4034 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4035 * @pf: the PF
4036 **/
4037 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4038 {
4039 int v;
4040
4041 for (v = 0; v < pf->num_alloc_vsi; v++) {
4042 if (pf->vsi[v])
4043 i40e_unquiesce_vsi(pf->vsi[v]);
4044 }
4045 }
4046
4047 #ifdef CONFIG_I40E_DCB
4048 /**
4049 * i40e_vsi_wait_txq_disabled - Wait for VSI's Tx queues to be disabled
4050 * @vsi: the VSI being configured
4051 *
4052 * This function waits for the given VSI's Tx queues to be disabled.
4053 **/
4054 static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
4055 {
4056 struct i40e_pf *pf = vsi->back;
4057 int i, pf_q, ret;
4058
4059 pf_q = vsi->base_queue;
4060 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4061 /* Check and wait for the disable status of the queue */
4062 ret = i40e_pf_txq_wait(pf, pf_q, false);
4063 if (ret) {
4064 dev_info(&pf->pdev->dev,
4065 "%s: VSI seid %d Tx ring %d disable timeout\n",
4066 __func__, vsi->seid, pf_q);
4067 return ret;
4068 }
4069 }
4070
4071 return 0;
4072 }
4073
4074 /**
4075 * i40e_pf_wait_txq_disabled - Wait for all Tx queues of PF VSIs to be disabled
4076 * @pf: the PF
4077 *
4078 * This function waits for the Tx queues to be in disabled state for all the
4079 * VSIs that are managed by this PF.
4080 **/
4081 static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
4082 {
4083 int v, ret = 0;
4084
4085 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4086 /* No need to wait for FCoE VSI queues */
4087 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
4088 ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]);
4089 if (ret)
4090 break;
4091 }
4092 }
4093
4094 return ret;
4095 }
4096
4097 #endif
4098 /**
4099 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4100 * @pf: pointer to PF
4101 *
4102 * Get TC map for iSCSI PF type that will include iSCSI TC
4103 * and LAN TC.
4104 **/
4105 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4106 {
4107 struct i40e_dcb_app_priority_table app;
4108 struct i40e_hw *hw = &pf->hw;
4109 u8 enabled_tc = 1; /* TC0 is always enabled */
4110 u8 tc, i;
4111 /* Get the iSCSI APP TLV */
4112 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4113
4114 for (i = 0; i < dcbcfg->numapps; i++) {
4115 app = dcbcfg->app[i];
4116 if (app.selector == I40E_APP_SEL_TCPIP &&
4117 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4118 tc = dcbcfg->etscfg.prioritytable[app.priority];
4119 enabled_tc |= BIT_ULL(tc);
4120 break;
4121 }
4122 }
4123
4124 return enabled_tc;
4125 }
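
/* Worked example, illustrative values only: if the iSCSI APP TLV is found
 * at user priority 4 and etscfg.prioritytable[4] == 2, the returned map is
 * 0x1 | BIT_ULL(2) == 0x5, i.e. TC0 (LAN) plus TC2 (iSCSI).
 */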
4126
4127 /**
4128 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4129 * @dcbcfg: the corresponding DCBx configuration structure
4130 *
4131 * Return the number of TCs from given DCBx configuration
4132 **/
4133 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4134 {
4135 u8 num_tc = 0;
4136 int i;
4137
4138 /* Scan the ETS Config Priority Table to find
4139 * traffic class enabled for a given priority
4140 * and use the traffic class index to get the
4141 * number of traffic classes enabled
4142 */
4143 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4144 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
4145 num_tc = dcbcfg->etscfg.prioritytable[i];
4146 }
4147
4148 /* Traffic class index starts from zero so
4149 * increment to return the actual count
4150 */
4151 return num_tc + 1;
4152 }
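
/* Worked example, illustrative values only: with prioritytable[] =
 * { 0, 0, 1, 1, 2, 0, 0, 0 } the highest TC index referenced is 2, so the
 * function returns 2 + 1 == 3 traffic classes.
 */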
4153
4154 /**
4155 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4156 * @dcbcfg: the corresponding DCBx configuration structure
4157 *
4158 * Query the current DCB configuration and return the number of
4159 * traffic classes enabled from the given DCBX config
4160 **/
4161 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4162 {
4163 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4164 u8 enabled_tc = 1;
4165 u8 i;
4166
4167 for (i = 0; i < num_tc; i++)
4168 enabled_tc |= BIT(i);
4169
4170 return enabled_tc;
4171 }
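
/* Worked example, illustrative only: for num_tc == 3 the loop sets bits
 * 0..2 and returns enabled_tc == 0x7; the map is assumed to be contiguous
 * starting from TC0.
 */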
4172
4173 /**
4174 * i40e_pf_get_num_tc - Get the number of enabled traffic classes for PF
4175 * @pf: PF being queried
4176 *
4177 * Return number of traffic classes enabled for the given PF
4178 **/
4179 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4180 {
4181 struct i40e_hw *hw = &pf->hw;
4182 u8 i, enabled_tc;
4183 u8 num_tc = 0;
4184 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4185
4186 /* If DCB is not enabled then always in single TC */
4187 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4188 return 1;
4189
4190 /* SFP mode will be enabled for all TCs on port */
4191 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4192 return i40e_dcb_get_num_tc(dcbcfg);
4193
4194 /* In MFP mode, return the count of enabled TCs for this PF */
4195 if (pf->hw.func_caps.iscsi)
4196 enabled_tc = i40e_get_iscsi_tc_map(pf);
4197 else
4198 return 1; /* Only TC0 */
4199
4200 /* At least have TC0 */
4201 enabled_tc = (enabled_tc ? enabled_tc : 0x1);
4202 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4203 if (enabled_tc & BIT_ULL(i))
4204 num_tc++;
4205 }
4206 return num_tc;
4207 }
4208
4209 /**
4210 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
4211 * @pf: PF being queried
4212 *
4213 * Return a bitmap for first enabled traffic class for this PF.
4214 **/
4215 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
4216 {
4217 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4218 u8 i = 0;
4219
4220 if (!enabled_tc)
4221 return 0x1; /* TC0 */
4222
4223 /* Find the first enabled TC */
4224 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4225 if (enabled_tc & BIT_ULL(i))
4226 break;
4227 }
4228
4229 return BIT(i);
4230 }
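
/* Worked example, illustrative value only: for enabled_tcmap == 0x0C the
 * first set bit is TC2, so this returns BIT(2) == 0x4; an empty map falls
 * back to 0x1 (TC0).
 */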
4231
4232 /**
4233 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
4234 * @pf: PF being queried
4235 *
4236 * Return a bitmap for enabled traffic classes for this PF.
4237 **/
4238 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4239 {
4240 /* If DCB is not enabled for this PF then just return default TC */
4241 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4242 return i40e_pf_get_default_tc(pf);
4243
4244 /* SFP mode we want PF to be enabled for all TCs */
4245 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4246 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4247
4248 /* MFP enabled and iSCSI PF type */
4249 if (pf->hw.func_caps.iscsi)
4250 return i40e_get_iscsi_tc_map(pf);
4251 else
4252 return i40e_pf_get_default_tc(pf);
4253 }
4254
4255 /**
4256 * i40e_vsi_get_bw_info - Query VSI BW Information
4257 * @vsi: the VSI being queried
4258 *
4259 * Returns 0 on success, negative value on failure
4260 **/
4261 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
4262 {
4263 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
4264 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
4265 struct i40e_pf *pf = vsi->back;
4266 struct i40e_hw *hw = &pf->hw;
4267 i40e_status ret;
4268 u32 tc_bw_max;
4269 int i;
4270
4271 /* Get the VSI level BW configuration */
4272 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4273 if (ret) {
4274 dev_info(&pf->pdev->dev,
4275 "couldn't get PF vsi bw config, err %s aq_err %s\n",
4276 i40e_stat_str(&pf->hw, ret),
4277 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4278 return -EINVAL;
4279 }
4280
4281 /* Get the VSI level BW configuration per TC */
4282 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
4283 NULL);
4284 if (ret) {
4285 dev_info(&pf->pdev->dev,
4286 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
4287 i40e_stat_str(&pf->hw, ret),
4288 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4289 return -EINVAL;
4290 }
4291
4292 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
4293 dev_info(&pf->pdev->dev,
4294 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
4295 bw_config.tc_valid_bits,
4296 bw_ets_config.tc_valid_bits);
4297 /* Still continuing */
4298 }
4299
4300 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
4301 vsi->bw_max_quanta = bw_config.max_bw;
4302 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
4303 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
4304 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4305 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
4306 vsi->bw_ets_limit_credits[i] =
4307 le16_to_cpu(bw_ets_config.credits[i]);
4308 /* 3 bits out of 4 for each TC */
4309 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
4310 }
4311
4312 return 0;
4313 }
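
/* Note on the max-quanta decode above, with an illustrative value: the two
 * LE16 words form one u32 in which each TC owns a 4-bit nibble whose low
 * 3 bits are valid, so for tc_bw_max == 0x00000321 TC0, TC1 and TC2
 * decode to 1, 2 and 3 via (tc_bw_max >> (i * 4)) & 0x7.
 */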
4314
4315 /**
4316 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4317 * @vsi: the VSI being configured
4318 * @enabled_tc: TC bitmap
4319 * @bw_share: BW shared credits per TC
4320 *
4321 * Returns 0 on success, negative value on failure
4322 **/
4323 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
4324 u8 *bw_share)
4325 {
4326 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
4327 i40e_status ret;
4328 int i;
4329
4330 bw_data.tc_valid_bits = enabled_tc;
4331 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4332 bw_data.tc_bw_credits[i] = bw_share[i];
4333
4334 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4335 NULL);
4336 if (ret) {
4337 dev_info(&vsi->back->pdev->dev,
4338 "AQ command Config VSI BW allocation per TC failed = %d\n",
4339 vsi->back->hw.aq.asq_last_status);
4340 return -EINVAL;
4341 }
4342
4343 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4344 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4345
4346 return 0;
4347 }
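
/* Usage sketch, illustrative only: callers typically hand in equal
 * relative credits for each enabled TC, as i40e_vsi_config_tc() below
 * does:
 *
 *	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
 *
 *	bw_share[0] = bw_share[1] = 1;		TC0 and TC1 share equally
 *	i40e_vsi_configure_bw_alloc(vsi, 0x3, bw_share);
 */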
4348
4349 /**
4350 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
4351 * @vsi: the VSI being configured
4352 * @enabled_tc: TC map to be enabled
4353 *
4354 **/
4355 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4356 {
4357 struct net_device *netdev = vsi->netdev;
4358 struct i40e_pf *pf = vsi->back;
4359 struct i40e_hw *hw = &pf->hw;
4360 u8 netdev_tc = 0;
4361 int i;
4362 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4363
4364 if (!netdev)
4365 return;
4366
4367 if (!enabled_tc) {
4368 netdev_reset_tc(netdev);
4369 return;
4370 }
4371
4372 /* Set up actual enabled TCs on the VSI */
4373 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4374 return;
4375
4376 /* set per TC queues for the VSI */
4377 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4378 /* Only set TC queues for enabled tcs
4379 *
4380 * e.g. For a VSI that has TC0 and TC3 enabled the
4381 * enabled_tc bitmap would be 0x00001001; the driver
4382 * will set the numtc for netdev as 2 that will be
4383 * referenced by the netdev layer as TC 0 and 1.
4384 */
4385 if (vsi->tc_config.enabled_tc & BIT_ULL(i))
4386 netdev_set_tc_queue(netdev,
4387 vsi->tc_config.tc_info[i].netdev_tc,
4388 vsi->tc_config.tc_info[i].qcount,
4389 vsi->tc_config.tc_info[i].qoffset);
4390 }
4391
4392 /* Assign UP2TC map for the VSI */
4393 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4394 /* Get the actual TC# for the UP */
4395 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4396 /* Get the mapped netdev TC# for the UP */
4397 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
4398 netdev_set_prio_tc_map(netdev, i, netdev_tc);
4399 }
4400 }
4401
4402 /**
4403 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
4404 * @vsi: the VSI being configured
4405 * @ctxt: the ctxt buffer returned from AQ VSI update param command
4406 **/
4407 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4408 struct i40e_vsi_context *ctxt)
4409 {
4410 /* copy just the sections touched, not the entire info,
4411 * since not all sections are valid as returned by
4412 * update vsi params
4413 */
4414 vsi->info.mapping_flags = ctxt->info.mapping_flags;
4415 memcpy(&vsi->info.queue_mapping,
4416 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4417 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4418 sizeof(vsi->info.tc_mapping));
4419 }
4420
4421 /**
4422 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
4423 * @vsi: VSI to be configured
4424 * @enabled_tc: TC bitmap
4425 *
4426 * This configures a particular VSI for TCs that are mapped to the
4427 * given TC bitmap. It uses default bandwidth share for TCs across
4428 * VSIs to configure TC for a particular VSI.
4429 *
4430 * NOTE:
4431 * It is expected that the VSI queues have been quiesced before calling
4432 * this function.
4433 **/
4434 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4435 {
4436 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
4437 struct i40e_vsi_context ctxt;
4438 int ret = 0;
4439 int i;
4440
4441 /* Check if enabled_tc is same as existing or new TCs */
4442 if (vsi->tc_config.enabled_tc == enabled_tc)
4443 return ret;
4444
4445 /* Enable ETS TCs with equal BW Share for now across all VSIs */
4446 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4447 if (enabled_tc & BIT_ULL(i))
4448 bw_share[i] = 1;
4449 }
4450
4451 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
4452 if (ret) {
4453 dev_info(&vsi->back->pdev->dev,
4454 "Failed configuring TC map %d for VSI %d\n",
4455 enabled_tc, vsi->seid);
4456 goto out;
4457 }
4458
4459 /* Update Queue Pairs Mapping for currently enabled UPs */
4460 ctxt.seid = vsi->seid;
4461 ctxt.pf_num = vsi->back->hw.pf_id;
4462 ctxt.vf_num = 0;
4463 ctxt.uplink_seid = vsi->uplink_seid;
4464 ctxt.info = vsi->info;
4465 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
4466
4467 /* Update the VSI after updating the VSI queue-mapping information */
4468 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
4469 if (ret) {
4470 dev_info(&vsi->back->pdev->dev,
4471 "Update vsi tc config failed, err %s aq_err %s\n",
4472 i40e_stat_str(&vsi->back->hw, ret),
4473 i40e_aq_str(&vsi->back->hw,
4474 vsi->back->hw.aq.asq_last_status));
4475 goto out;
4476 }
4477 /* update the local VSI info with updated queue map */
4478 i40e_vsi_update_queue_map(vsi, &ctxt);
4479 vsi->info.valid_sections = 0;
4480
4481 /* Update current VSI BW information */
4482 ret = i40e_vsi_get_bw_info(vsi);
4483 if (ret) {
4484 dev_info(&vsi->back->pdev->dev,
4485 "Failed updating vsi bw info, err %s aq_err %s\n",
4486 i40e_stat_str(&vsi->back->hw, ret),
4487 i40e_aq_str(&vsi->back->hw,
4488 vsi->back->hw.aq.asq_last_status));
4489 goto out;
4490 }
4491
4492 /* Update the netdev TC setup */
4493 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
4494 out:
4495 return ret;
4496 }
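
/* Usage sketch, illustrative only: the VSI queues must be quiesced around
 * the call, as i40e_setup_tc() later in this file does:
 *
 *	i40e_quiesce_vsi(vsi);
 *	ret = i40e_vsi_config_tc(vsi, enabled_tc);
 *	i40e_unquiesce_vsi(vsi);
 */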
4497
4498 /**
4499 * i40e_veb_config_tc - Configure TCs for given VEB
4500 * @veb: given VEB
4501 * @enabled_tc: TC bitmap
4502 *
4503 * Configures given TC bitmap for VEB (switching) element
4504 **/
4505 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4506 {
4507 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4508 struct i40e_pf *pf = veb->pf;
4509 int ret = 0;
4510 int i;
4511
4512 /* No TCs or already enabled TCs just return */
4513 if (!enabled_tc || veb->enabled_tc == enabled_tc)
4514 return ret;
4515
4516 bw_data.tc_valid_bits = enabled_tc;
4517 /* bw_data.absolute_credits is not set (relative) */
4518
4519 /* Enable ETS TCs with equal BW Share for now */
4520 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4521 if (enabled_tc & BIT_ULL(i))
4522 bw_data.tc_bw_share_credits[i] = 1;
4523 }
4524
4525 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4526 &bw_data, NULL);
4527 if (ret) {
4528 dev_info(&pf->pdev->dev,
4529 "VEB bw config failed, err %s aq_err %s\n",
4530 i40e_stat_str(&pf->hw, ret),
4531 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4532 goto out;
4533 }
4534
4535 /* Update the BW information */
4536 ret = i40e_veb_get_bw_info(veb);
4537 if (ret) {
4538 dev_info(&pf->pdev->dev,
4539 "Failed getting veb bw config, err %s aq_err %s\n",
4540 i40e_stat_str(&pf->hw, ret),
4541 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4542 }
4543
4544 out:
4545 return ret;
4546 }
4547
4548 #ifdef CONFIG_I40E_DCB
4549 /**
4550 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
4551 * @pf: PF struct
4552 *
4553 * Reconfigure VEB/VSIs on a given PF; it is assumed that
4554 * the caller would have quiesced all the VSIs before calling
4555 * this function
4556 **/
4557 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4558 {
4559 u8 tc_map = 0;
4560 int ret;
4561 u8 v;
4562
4563 /* Enable the TCs available on PF to all VEBs */
4564 tc_map = i40e_pf_get_tc_map(pf);
4565 for (v = 0; v < I40E_MAX_VEB; v++) {
4566 if (!pf->veb[v])
4567 continue;
4568 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
4569 if (ret) {
4570 dev_info(&pf->pdev->dev,
4571 "Failed configuring TC for VEB seid=%d\n",
4572 pf->veb[v]->seid);
4573 /* Will try to configure as many components as possible */
4574 }
4575 }
4576
4577 /* Update each VSI */
4578 for (v = 0; v < pf->num_alloc_vsi; v++) {
4579 if (!pf->vsi[v])
4580 continue;
4581
4582 /* - Enable all TCs for the LAN VSI
4583 #ifdef I40E_FCOE
4584 * - For FCoE VSI only enable the TC configured
4585 * as per the APP TLV
4586 #endif
4587 * - For all others keep them at TC0 for now
4588 */
4589 if (v == pf->lan_vsi)
4590 tc_map = i40e_pf_get_tc_map(pf);
4591 else
4592 tc_map = i40e_pf_get_default_tc(pf);
4593 #ifdef I40E_FCOE
4594 if (pf->vsi[v]->type == I40E_VSI_FCOE)
4595 tc_map = i40e_get_fcoe_tc_map(pf);
4596 #endif /* #ifdef I40E_FCOE */
4597
4598 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
4599 if (ret) {
4600 dev_info(&pf->pdev->dev,
4601 "Failed configuring TC for VSI seid=%d\n",
4602 pf->vsi[v]->seid);
4603 /* Will try to configure as many components as possible */
4604 } else {
4605 /* Re-configure VSI vectors based on updated TC map */
4606 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
4607 if (pf->vsi[v]->netdev)
4608 i40e_dcbnl_set_all(pf->vsi[v]);
4609 }
4610 }
4611 }
4612
4613 /**
4614 * i40e_resume_port_tx - Resume port Tx
4615 * @pf: PF struct
4616 *
4617 * Resume a port's Tx and issue a PF reset in case of failure to
4618 * resume.
4619 **/
4620 static int i40e_resume_port_tx(struct i40e_pf *pf)
4621 {
4622 struct i40e_hw *hw = &pf->hw;
4623 int ret;
4624
4625 ret = i40e_aq_resume_port_tx(hw, NULL);
4626 if (ret) {
4627 dev_info(&pf->pdev->dev,
4628 "Resume Port Tx failed, err %s aq_err %s\n",
4629 i40e_stat_str(&pf->hw, ret),
4630 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4631 /* Schedule PF reset to recover */
4632 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4633 i40e_service_event_schedule(pf);
4634 }
4635
4636 return ret;
4637 }
4638
4639 /**
4640 * i40e_init_pf_dcb - Initialize DCB configuration
4641 * @pf: PF being configured
4642 *
4643 * Query the current DCB configuration and cache it
4644 * in the hardware structure
4645 **/
4646 static int i40e_init_pf_dcb(struct i40e_pf *pf)
4647 {
4648 struct i40e_hw *hw = &pf->hw;
4649 int err = 0;
4650
4651 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
4652 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
4653 (pf->hw.aq.fw_maj_ver < 4))
4654 goto out;
4655
4656 /* Get the initial DCB configuration */
4657 err = i40e_init_dcb(hw);
4658 if (!err) {
4659 /* Device/Function is not DCBX capable */
4660 if ((!hw->func_caps.dcb) ||
4661 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
4662 dev_info(&pf->pdev->dev,
4663 "DCBX offload is not supported or is disabled for this PF.\n");
4664
4665 if (pf->flags & I40E_FLAG_MFP_ENABLED)
4666 goto out;
4667
4668 } else {
4669 /* When status is not DISABLED, DCBX is managed in FW */
4670 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
4671 DCB_CAP_DCBX_VER_IEEE;
4672
4673 pf->flags |= I40E_FLAG_DCB_CAPABLE;
4674 /* Enable DCB tagging only when more than one TC */
4675 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
4676 pf->flags |= I40E_FLAG_DCB_ENABLED;
4677 dev_dbg(&pf->pdev->dev,
4678 "DCBX offload is supported for this PF.\n");
4679 }
4680 } else {
4681 dev_info(&pf->pdev->dev,
4682 "Query for DCB configuration failed, err %s aq_err %s\n",
4683 i40e_stat_str(&pf->hw, err),
4684 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4685 }
4686
4687 out:
4688 return err;
4689 }
4690 #endif /* CONFIG_I40E_DCB */
4691 #define SPEED_SIZE 14
4692 #define FC_SIZE 8
4693 /**
4694 * i40e_print_link_message - print link up or down
4695 * @vsi: the VSI for which link needs a message
 * @isup: true for link up, false for link down
4696 **/
4697 static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
4698 {
4699 char speed[SPEED_SIZE] = "Unknown";
4700 char fc[FC_SIZE] = "RX/TX";
4701
4702 if (!isup) {
4703 netdev_info(vsi->netdev, "NIC Link is Down\n");
4704 return;
4705 }
4706
4707 /* Warn user if link speed on NPAR enabled partition is not at
4708 * least 10Gbps
4709 */
4710 if (vsi->back->hw.func_caps.npar_enable &&
4711 (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
4712 vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
4713 netdev_warn(vsi->netdev,
4714 "The partition detected link speed that is less than 10Gbps\n");
4715
4716 switch (vsi->back->hw.phy.link_info.link_speed) {
4717 case I40E_LINK_SPEED_40GB:
4718 strlcpy(speed, "40 Gbps", SPEED_SIZE);
4719 break;
4720 case I40E_LINK_SPEED_20GB:
4721 strlcpy(speed, "20 Gbps", SPEED_SIZE);
4722 break;
4723 case I40E_LINK_SPEED_10GB:
4724 strlcpy(speed, "10 Gbps", SPEED_SIZE);
4725 break;
4726 case I40E_LINK_SPEED_1GB:
4727 strlcpy(speed, "1000 Mbps", SPEED_SIZE);
4728 break;
4729 case I40E_LINK_SPEED_100MB:
4730 strlcpy(speed, "100 Mbps", SPEED_SIZE);
4731 break;
4732 default:
4733 break;
4734 }
4735
4736 switch (vsi->back->hw.fc.current_mode) {
4737 case I40E_FC_FULL:
4738 strlcpy(fc, "RX/TX", FC_SIZE);
4739 break;
4740 case I40E_FC_TX_PAUSE:
4741 strlcpy(fc, "TX", FC_SIZE);
4742 break;
4743 case I40E_FC_RX_PAUSE:
4744 strlcpy(fc, "RX", FC_SIZE);
4745 break;
4746 default:
4747 strlcpy(fc, "None", FC_SIZE);
4748 break;
4749 }
4750
4751 netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
4752 speed, fc);
4753 }
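
/* Example log output, illustrative only: for a 40G link with full flow
 * control the netdev_info() above prints:
 *
 *	NIC Link is Up 40 Gbps Full Duplex, Flow Control: RX/TX
 */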
4754
4755 /**
4756 * i40e_up_complete - Finish the last steps of bringing up a connection
4757 * @vsi: the VSI being configured
4758 **/
4759 static int i40e_up_complete(struct i40e_vsi *vsi)
4760 {
4761 struct i40e_pf *pf = vsi->back;
4762 int err;
4763
4764 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4765 i40e_vsi_configure_msix(vsi);
4766 else
4767 i40e_configure_msi_and_legacy(vsi);
4768
4769 /* start rings */
4770 err = i40e_vsi_control_rings(vsi, true);
4771 if (err)
4772 return err;
4773
4774 clear_bit(__I40E_DOWN, &vsi->state);
4775 i40e_napi_enable_all(vsi);
4776 i40e_vsi_enable_irq(vsi);
4777
4778 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
4779 (vsi->netdev)) {
4780 i40e_print_link_message(vsi, true);
4781 netif_tx_start_all_queues(vsi->netdev);
4782 netif_carrier_on(vsi->netdev);
4783 } else if (vsi->netdev) {
4784 i40e_print_link_message(vsi, false);
4785 /* need to check for qualified module here */
4786 if ((pf->hw.phy.link_info.link_info &
4787 I40E_AQ_MEDIA_AVAILABLE) &&
4788 (!(pf->hw.phy.link_info.an_info &
4789 I40E_AQ_QUALIFIED_MODULE)))
4790 netdev_err(vsi->netdev,
4791 "the driver failed to link because an unqualified module was detected.");
4792 }
4793
4794 /* replay FDIR SB filters */
4795 if (vsi->type == I40E_VSI_FDIR) {
4796 /* reset fd counters */
4797 pf->fd_add_err = pf->fd_atr_cnt = 0;
4798 if (pf->fd_tcp_rule > 0) {
4799 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
4800 if (I40E_DEBUG_FD & pf->hw.debug_mask)
4801 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
4802 pf->fd_tcp_rule = 0;
4803 }
4804 i40e_fdir_filter_restore(vsi);
4805 }
4806 i40e_service_event_schedule(pf);
4807
4808 return 0;
4809 }
4810
4811 /**
4812 * i40e_vsi_reinit_locked - Reset the VSI
4813 * @vsi: the VSI being configured
4814 *
4815 * Rebuild the ring structs after some configuration
4816 * has changed, e.g. MTU size.
4817 **/
4818 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
4819 {
4820 struct i40e_pf *pf = vsi->back;
4821
4822 WARN_ON(in_interrupt());
4823 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
4824 usleep_range(1000, 2000);
4825 i40e_down(vsi);
4826
4827 /* Give a VF some time to respond to the reset. The
4828 * two second wait is based upon the watchdog cycle in
4829 * the VF driver.
4830 */
4831 if (vsi->type == I40E_VSI_SRIOV)
4832 msleep(2000);
4833 i40e_up(vsi);
4834 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
4835 }
4836
4837 /**
4838 * i40e_up - Bring the connection back up after being down
4839 * @vsi: the VSI being configured
4840 **/
4841 int i40e_up(struct i40e_vsi *vsi)
4842 {
4843 int err;
4844
4845 err = i40e_vsi_configure(vsi);
4846 if (!err)
4847 err = i40e_up_complete(vsi);
4848
4849 return err;
4850 }
4851
4852 /**
4853 * i40e_down - Shutdown the connection processing
4854 * @vsi: the VSI being stopped
4855 **/
4856 void i40e_down(struct i40e_vsi *vsi)
4857 {
4858 int i;
4859
4860 /* It is assumed that the caller of this function
4861 * sets the vsi->state __I40E_DOWN bit.
4862 */
4863 if (vsi->netdev) {
4864 netif_carrier_off(vsi->netdev);
4865 netif_tx_disable(vsi->netdev);
4866 }
4867 i40e_vsi_disable_irq(vsi);
4868 i40e_vsi_control_rings(vsi, false);
4869 i40e_napi_disable_all(vsi);
4870
4871 for (i = 0; i < vsi->num_queue_pairs; i++) {
4872 i40e_clean_tx_ring(vsi->tx_rings[i]);
4873 i40e_clean_rx_ring(vsi->rx_rings[i]);
4874 }
4875 }
4876
4877 /**
4878 * i40e_setup_tc - configure multiple traffic classes
4879 * @netdev: net device to configure
4880 * @tc: number of traffic classes to enable
4881 **/
4882 #ifdef I40E_FCOE
4883 int i40e_setup_tc(struct net_device *netdev, u8 tc)
4884 #else
4885 static int i40e_setup_tc(struct net_device *netdev, u8 tc)
4886 #endif
4887 {
4888 struct i40e_netdev_priv *np = netdev_priv(netdev);
4889 struct i40e_vsi *vsi = np->vsi;
4890 struct i40e_pf *pf = vsi->back;
4891 u8 enabled_tc = 0;
4892 int ret = -EINVAL;
4893 int i;
4894
4895 /* Check if DCB enabled to continue */
4896 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
4897 netdev_info(netdev, "DCB is not enabled for adapter\n");
4898 goto exit;
4899 }
4900
4901 /* Check if MFP enabled */
4902 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4903 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
4904 goto exit;
4905 }
4906
4907 /* Check whether tc count is within enabled limit */
4908 if (tc > i40e_pf_get_num_tc(pf)) {
4909 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
4910 goto exit;
4911 }
4912
4913 /* Generate TC map for number of tc requested */
4914 for (i = 0; i < tc; i++)
4915 enabled_tc |= BIT_ULL(i);
4916
4917 /* Requesting same TC configuration as already enabled */
4918 if (enabled_tc == vsi->tc_config.enabled_tc)
4919 return 0;
4920
4921 /* Quiesce VSI queues */
4922 i40e_quiesce_vsi(vsi);
4923
4924 /* Configure VSI for enabled TCs */
4925 ret = i40e_vsi_config_tc(vsi, enabled_tc);
4926 if (ret) {
4927 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
4928 vsi->seid);
4929 goto exit;
4930 }
4931
4932 /* Unquiesce VSI */
4933 i40e_unquiesce_vsi(vsi);
4934
4935 exit:
4936 return ret;
4937 }
4938
4939 /**
4940 * i40e_open - Called when a network interface is made active
4941 * @netdev: network interface device structure
4942 *
4943 * The open entry point is called when a network interface is made
4944 * active by the system (IFF_UP). At this point all resources needed
4945 * for transmit and receive operations are allocated, the interrupt
4946 * handler is registered with the OS, the netdev watchdog subtask is
4947 * enabled, and the stack is notified that the interface is ready.
4948 *
4949 * Returns 0 on success, negative value on failure
4950 **/
4951 int i40e_open(struct net_device *netdev)
4952 {
4953 struct i40e_netdev_priv *np = netdev_priv(netdev);
4954 struct i40e_vsi *vsi = np->vsi;
4955 struct i40e_pf *pf = vsi->back;
4956 int err;
4957
4958 /* disallow open during test or if eeprom is broken */
4959 if (test_bit(__I40E_TESTING, &pf->state) ||
4960 test_bit(__I40E_BAD_EEPROM, &pf->state))
4961 return -EBUSY;
4962
4963 netif_carrier_off(netdev);
4964
4965 err = i40e_vsi_open(vsi);
4966 if (err)
4967 return err;
4968
4969 /* configure global TSO hardware offload settings */
4970 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
4971 TCP_FLAG_FIN) >> 16);
4972 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
4973 TCP_FLAG_FIN |
4974 TCP_FLAG_CWR) >> 16);
4975 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
4976
4977 #ifdef CONFIG_I40E_VXLAN
4978 vxlan_get_rx_port(netdev);
4979 #endif
4980
4981 return 0;
4982 }
4983
4984 /**
4985 * i40e_vsi_open - Open a VSI
4986 * @vsi: the VSI to open
4987 *
4988 * Finish initialization of the VSI.
4989 *
4990 * Returns 0 on success, negative value on failure
4991 **/
4992 int i40e_vsi_open(struct i40e_vsi *vsi)
4993 {
4994 struct i40e_pf *pf = vsi->back;
4995 char int_name[I40E_INT_NAME_STR_LEN];
4996 int err;
4997
4998 /* allocate descriptors */
4999 err = i40e_vsi_setup_tx_resources(vsi);
5000 if (err)
5001 goto err_setup_tx;
5002 err = i40e_vsi_setup_rx_resources(vsi);
5003 if (err)
5004 goto err_setup_rx;
5005
5006 err = i40e_vsi_configure(vsi);
5007 if (err)
5008 goto err_setup_rx;
5009
5010 if (vsi->netdev) {
5011 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
5012 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
5013 err = i40e_vsi_request_irq(vsi, int_name);
5014 if (err)
5015 goto err_setup_rx;
5016
5017 /* Notify the stack of the actual queue counts. */
5018 err = netif_set_real_num_tx_queues(vsi->netdev,
5019 vsi->num_queue_pairs);
5020 if (err)
5021 goto err_set_queues;
5022
5023 err = netif_set_real_num_rx_queues(vsi->netdev,
5024 vsi->num_queue_pairs);
5025 if (err)
5026 goto err_set_queues;
5027
5028 } else if (vsi->type == I40E_VSI_FDIR) {
5029 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
5030 dev_driver_string(&pf->pdev->dev),
5031 dev_name(&pf->pdev->dev));
5032 err = i40e_vsi_request_irq(vsi, int_name);
5033
5034 } else {
5035 err = -EINVAL;
5036 goto err_setup_rx;
5037 }
5038
5039 err = i40e_up_complete(vsi);
5040 if (err)
5041 goto err_up_complete;
5042
5043 return 0;
5044
5045 err_up_complete:
5046 i40e_down(vsi);
5047 err_set_queues:
5048 i40e_vsi_free_irq(vsi);
5049 err_setup_rx:
5050 i40e_vsi_free_rx_resources(vsi);
5051 err_setup_tx:
5052 i40e_vsi_free_tx_resources(vsi);
5053 if (vsi == pf->vsi[pf->lan_vsi])
5054 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
5055
5056 return err;
5057 }
5058
5059 /**
5060 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
5061 * @pf: Pointer to PF
5062 *
5063 * This function destroys the hlist where all the Flow Director
5064 * filters were saved.
5065 **/
5066 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5067 {
5068 struct i40e_fdir_filter *filter;
5069 struct hlist_node *node2;
5070
5071 hlist_for_each_entry_safe(filter, node2,
5072 &pf->fdir_filter_list, fdir_node) {
5073 hlist_del(&filter->fdir_node);
5074 kfree(filter);
5075 }
5076 pf->fdir_pf_active_filters = 0;
5077 }
5078
5079 /**
5080 * i40e_close - Disables a network interface
5081 * @netdev: network interface device structure
5082 *
5083 * The close entry point is called when an interface is de-activated
5084 * by the OS. The hardware is still under the driver's control, but
5085 * this netdev interface is disabled.
5086 *
5087 * Returns 0, this is not allowed to fail
5088 **/
5089 #ifdef I40E_FCOE
5090 int i40e_close(struct net_device *netdev)
5091 #else
5092 static int i40e_close(struct net_device *netdev)
5093 #endif
5094 {
5095 struct i40e_netdev_priv *np = netdev_priv(netdev);
5096 struct i40e_vsi *vsi = np->vsi;
5097
5098 i40e_vsi_close(vsi);
5099
5100 return 0;
5101 }
5102
5103 /**
5104 * i40e_do_reset - Start a PF or Core Reset sequence
5105 * @pf: board private structure
5106 * @reset_flags: which reset is requested
5107 *
5108 * The essential difference in resets is that the PF Reset
5109 * doesn't clear the packet buffers, doesn't reset the PE
5110 * firmware, and doesn't bother the other PFs on the chip.
5111 **/
5112 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
5113 {
5114 u32 val;
5115
5116 WARN_ON(in_interrupt());
5117
5118 if (i40e_check_asq_alive(&pf->hw))
5119 i40e_vc_notify_reset(pf);
5120
5121 /* do the biggest reset indicated */
5122 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
5123
5124 /* Request a Global Reset
5125 *
5126 * This will start the chip's countdown to the actual full
5127 * chip reset event, and a warning interrupt to be sent
5128 * to all PFs, including the requestor. Our handler
5129 * for the warning interrupt will deal with the shutdown
5130 * and recovery of the switch setup.
5131 */
5132 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
5133 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5134 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
5135 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5136
5137 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
5138
5139 /* Request a Core Reset
5140 *
5141 * Same as Global Reset, except does *not* include the MAC/PHY
5142 */
5143 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
5144 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5145 val |= I40E_GLGEN_RTRIG_CORER_MASK;
5146 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5147 i40e_flush(&pf->hw);
5148
5149 } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
5150
5151 /* Request a PF Reset
5152 *
5153 * Resets only the PF-specific registers
5154 *
5155 * This goes directly to the tear-down and rebuild of
5156 * the switch, since we need to do all the recovery as
5157 * for the Core Reset.
5158 */
5159 dev_dbg(&pf->pdev->dev, "PFR requested\n");
5160 i40e_handle_reset_warning(pf);
5161
5162 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
5163 int v;
5164
5165 /* Find the VSI(s) that requested a re-init */
5166 dev_info(&pf->pdev->dev,
5167 "VSI reinit requested\n");
5168 for (v = 0; v < pf->num_alloc_vsi; v++) {
5169 struct i40e_vsi *vsi = pf->vsi[v];
5170 if (vsi != NULL &&
5171 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
5172 i40e_vsi_reinit_locked(pf->vsi[v]);
5173 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
5174 }
5175 }
5176
5177 /* no further action needed, so return now */
5178 return;
5179 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
5180 int v;
5181
5182 /* Find the VSI(s) that needs to be brought down */
5183 dev_info(&pf->pdev->dev, "VSI down requested\n");
5184 for (v = 0; v < pf->num_alloc_vsi; v++) {
5185 struct i40e_vsi *vsi = pf->vsi[v];
5186 if (vsi != NULL &&
5187 test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
5188 set_bit(__I40E_DOWN, &vsi->state);
5189 i40e_down(vsi);
5190 clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
5191 }
5192 }
5193
5194 /* no further action needed, so return now */
5195 return;
5196 } else {
5197 dev_info(&pf->pdev->dev,
5198 "bad reset request 0x%08x\n", reset_flags);
5199 return;
5200 }
5201 }
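
/* Usage sketch, illustrative only: reset_flags is a bitmask of the
 * __I40E_*_REQUESTED state bits; userland-triggered paths go through the
 * RTNL-protected wrapper defined below, e.g.:
 *
 *	i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
 */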
5202
5203 #ifdef CONFIG_I40E_DCB
5204 /**
5205 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5206 * @pf: board private structure
5207 * @old_cfg: current DCB config
5208 * @new_cfg: new DCB config
5209 **/
5210 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5211 struct i40e_dcbx_config *old_cfg,
5212 struct i40e_dcbx_config *new_cfg)
5213 {
5214 bool need_reconfig = false;
5215
5216 /* Check if ETS configuration has changed */
5217 if (memcmp(&new_cfg->etscfg,
5218 &old_cfg->etscfg,
5219 sizeof(new_cfg->etscfg))) {
5220 /* If Priority Table has changed reconfig is needed */
5221 if (memcmp(&new_cfg->etscfg.prioritytable,
5222 &old_cfg->etscfg.prioritytable,
5223 sizeof(new_cfg->etscfg.prioritytable))) {
5224 need_reconfig = true;
5225 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5226 }
5227
5228 if (memcmp(&new_cfg->etscfg.tcbwtable,
5229 &old_cfg->etscfg.tcbwtable,
5230 sizeof(new_cfg->etscfg.tcbwtable)))
5231 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5232
5233 if (memcmp(&new_cfg->etscfg.tsatable,
5234 &old_cfg->etscfg.tsatable,
5235 sizeof(new_cfg->etscfg.tsatable)))
5236 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5237 }
5238
5239 /* Check if PFC configuration has changed */
5240 if (memcmp(&new_cfg->pfc,
5241 &old_cfg->pfc,
5242 sizeof(new_cfg->pfc))) {
5243 need_reconfig = true;
5244 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
5245 }
5246
5247 /* Check if APP Table has changed */
5248 if (memcmp(&new_cfg->app,
5249 &old_cfg->app,
5250 sizeof(new_cfg->app))) {
5251 need_reconfig = true;
5252 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
5253 }
5254
5255 dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__,
5256 need_reconfig);
5257 return need_reconfig;
5258 }
5259
5260 /**
5261 * i40e_handle_lldp_event - Handle LLDP Change MIB event
5262 * @pf: board private structure
5263 * @e: event info posted on ARQ
5264 **/
5265 static int i40e_handle_lldp_event(struct i40e_pf *pf,
5266 struct i40e_arq_event_info *e)
5267 {
5268 struct i40e_aqc_lldp_get_mib *mib =
5269 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
5270 struct i40e_hw *hw = &pf->hw;
5271 struct i40e_dcbx_config tmp_dcbx_cfg;
5272 bool need_reconfig = false;
5273 int ret = 0;
5274 u8 type;
5275
5276 /* Not DCB capable or capability disabled */
5277 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
5278 return ret;
5279
5280 /* Ignore if event is not for Nearest Bridge */
5281 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
5282 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
5283 dev_dbg(&pf->pdev->dev,
5284 "%s: LLDP event mib bridge type 0x%x\n", __func__, type);
5285 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
5286 return ret;
5287
5288 /* Check MIB Type and return if event for Remote MIB update */
5289 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
5290 dev_dbg(&pf->pdev->dev,
5291 "%s: LLDP event mib type %s\n", __func__,
5292 type ? "remote" : "local");
5293 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
5294 /* Update the remote cached instance and return */
5295 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
5296 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
5297 &hw->remote_dcbx_config);
5298 goto exit;
5299 }
5300
5301 /* Store the old configuration */
5302 tmp_dcbx_cfg = hw->local_dcbx_config;
5303
5304 /* Reset the old DCBx configuration data */
5305 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
5306 /* Get updated DCBX data from firmware */
5307 ret = i40e_get_dcb_config(&pf->hw);
5308 if (ret) {
5309 dev_info(&pf->pdev->dev,
5310 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
5311 i40e_stat_str(&pf->hw, ret),
5312 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5313 goto exit;
5314 }
5315
5316 /* No change detected in DCBX configs */
5317 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
5318 sizeof(tmp_dcbx_cfg))) {
5319 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
5320 goto exit;
5321 }
5322
5323 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
5324 &hw->local_dcbx_config);
5325
5326 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
5327
5328 if (!need_reconfig)
5329 goto exit;
5330
5331 /* Enable DCB tagging only when more than one TC */
5332 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5333 pf->flags |= I40E_FLAG_DCB_ENABLED;
5334 else
5335 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5336
5337 set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5338 /* Reconfiguration is needed; quiesce all VSIs */
5339 i40e_pf_quiesce_all_vsi(pf);
5340
5341 /* Apply the configuration changes to the VEBs/VSIs */
5342 i40e_dcb_reconfigure(pf);
5343
5344 ret = i40e_resume_port_tx(pf);
5345
5346 clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5347 /* In case of error no point in resuming VSIs */
5348 if (ret)
5349 goto exit;
5350
5351 /* Wait for the PF's Tx queues to be disabled */
5352 ret = i40e_pf_wait_txq_disabled(pf);
5353 if (ret) {
5354 /* Schedule PF reset to recover */
5355 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5356 i40e_service_event_schedule(pf);
5357 } else {
5358 i40e_pf_unquiesce_all_vsi(pf);
5359 }
5360
5361 exit:
5362 return ret;
5363 }
5364 #endif /* CONFIG_I40E_DCB */
5365
5366 /**
5367 * i40e_do_reset_safe - Protected reset path for userland calls.
5368 * @pf: board private structure
5369 * @reset_flags: which reset is requested
5370 *
5371 **/
5372 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
5373 {
5374 rtnl_lock();
5375 i40e_do_reset(pf, reset_flags);
5376 rtnl_unlock();
5377 }
5378
5379 /**
5380 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
5381 * @pf: board private structure
5382 * @e: event info posted on ARQ
5383 *
5384 * Handler for LAN Queue Overflow Event generated by the firmware for PF
5385 * and VF queues
5386 **/
5387 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
5388 struct i40e_arq_event_info *e)
5389 {
5390 struct i40e_aqc_lan_overflow *data =
5391 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
5392 u32 queue = le32_to_cpu(data->prtdcb_rupto);
5393 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
5394 struct i40e_hw *hw = &pf->hw;
5395 struct i40e_vf *vf;
5396 u16 vf_id;
5397
5398 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
5399 queue, qtx_ctl);
5400
5401 /* Queue belongs to VF, find the VF and issue VF reset */
5402 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
5403 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
5404 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
5405 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
5406 vf_id -= hw->func_caps.vf_base_id;
5407 vf = &pf->vf[vf_id];
5408 i40e_vc_notify_vf_reset(vf);
5409 /* Allow VF to process pending reset notification */
5410 msleep(20);
5411 i40e_reset_vf(vf, false);
5412 }
5413 }
5414
5415 /**
5416 * i40e_service_event_complete - Finish up the service event
5417 * @pf: board private structure
5418 **/
5419 static void i40e_service_event_complete(struct i40e_pf *pf)
5420 {
5421 BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
5422
5423 /* flush memory to make sure state is correct before next watchdog */
5424 smp_mb__before_atomic();
5425 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
5426 }
5427
5428 /**
5429 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
5430 * @pf: board private structure
5431 **/
5432 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
5433 {
5434 u32 val, fcnt_prog;
5435
5436 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5437 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
5438 return fcnt_prog;
5439 }
5440
5441 /**
5442 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
5443 * @pf: board private structure
5444 **/
5445 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
5446 {
5447 u32 val, fcnt_prog;
5448
5449 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5450 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
5451 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
5452 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5453 return fcnt_prog;
5454 }
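
/* Note on the decode above, with an illustrative raw value: if the
 * guaranteed-count field (low bits) of I40E_PFQF_FDSTAT holds 5 and the
 * best-effort field holds 3, the function returns 5 + 3 == 8 filters.
 */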
5455
5456 /**
5457 * i40e_get_global_fd_count - Get total FD filters programmed on device
5458 * @pf: board private structure
5459 **/
5460 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
5461 {
5462 u32 val, fcnt_prog;
5463
5464 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
5465 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
5466 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
5467 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
5468 return fcnt_prog;
5469 }
5470
5471 /**
5472 * i40e_fdir_check_and_reenable - Re-enable FD ATR or SB if disabled
5473 * @pf: board private structure
5474 **/
5475 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
5476 {
5477 u32 fcnt_prog, fcnt_avail;
5478
5479 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5480 return;
5481
5482 /* Check if FD SB or ATR was auto disabled and if there is enough room
5483 * to re-enable
5484 */
5485 fcnt_prog = i40e_get_global_fd_count(pf);
5486 fcnt_avail = pf->fdir_pf_filter_count;
5487 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
5488 (pf->fd_add_err == 0) ||
5489 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
5490 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
5491 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
5492 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
5493 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5494 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
5495 }
5496 }
5497 /* Wait for some more space to be available to turn on ATR */
5498 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
5499 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
5500 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
5501 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5502 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5503 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
5504 }
5505 }
5506 }
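
/* Worked example, illustrative values only (assuming a headroom constant
 * of 32): with fdir_pf_filter_count == 512, sideband becomes eligible for
 * re-enable once fewer than 512 - 32 == 480 filters are programmed (the
 * fd_add_err and ATR-count checks can also trigger it), while ATR needs
 * twice the headroom, i.e. fewer than 512 - 64 == 448.
 */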
5507
5508 #define I40E_MIN_FD_FLUSH_INTERVAL 10
5509 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
5510 /**
5511 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
5512 * @pf: board private structure
5513 **/
5514 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
5515 {
5516 unsigned long min_flush_time;
5517 int flush_wait_retry = 50;
5518 bool disable_atr = false;
5519 int fd_room;
5520 int reg;
5521
5522 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
5523 return;
5524
5525 if (time_after(jiffies, pf->fd_flush_timestamp +
5526 (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
5527 /* If the flush is happening too quickly and we have mostly
5528 * SB rules, we should not re-enable ATR for some time.
5529 */
5530 min_flush_time = pf->fd_flush_timestamp
5531 + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
5532 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
5533
5534 if (!(time_after(jiffies, min_flush_time)) &&
5535 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
5536 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5537 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
5538 disable_atr = true;
5539 }
5540
5541 pf->fd_flush_timestamp = jiffies;
5542 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5543 /* flush all filters */
5544 wr32(&pf->hw, I40E_PFQF_CTL_1,
5545 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
5546 i40e_flush(&pf->hw);
5547 pf->fd_flush_cnt++;
5548 pf->fd_add_err = 0;
5549 do {
5550 /* Check FD flush status every 5-6msec */
5551 usleep_range(5000, 6000);
5552 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
5553 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
5554 break;
5555 } while (flush_wait_retry--);
5556 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
5557 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
5558 } else {
5559 /* replay sideband filters */
5560 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
5561 if (!disable_atr)
5562 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
5563 clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
5564 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5565 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
5566 }
5567 }
5568 }
5569
5570 /**
5571 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
5572 * @pf: board private structure
5573 **/
5574 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
5575 {
5576 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
5577 }
5578
5579 /* We can see up to 256 filter programming descriptors in flight if the
5580 * filters are being applied very quickly, before the first filter miss
5581 * error shows up on Rx queue 0. Accumulating enough error reports before
5582 * reacting makes sure we don't trigger a flush too often.
5583 */
5584 #define I40E_MAX_FD_PROGRAM_ERROR 256
5585
5586 /**
5587 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
5588 * @pf: board private structure
5589 **/
5590 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
5591 {
5593 /* if interface is down do nothing */
5594 if (test_bit(__I40E_DOWN, &pf->state))
5595 return;
5596
5597 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
5598 return;
5599
5600 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5601 i40e_fdir_flush_and_replay(pf);
5602
5603 i40e_fdir_check_and_reenable(pf);
5605 }
5606
5607 /**
5608 * i40e_vsi_link_event - notify VSI of a link event
5609 * @vsi: vsi to be notified
5610 * @link_up: link up or down
5611 **/
5612 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
5613 {
5614 if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
5615 return;
5616
5617 switch (vsi->type) {
5618 case I40E_VSI_MAIN:
5619 #ifdef I40E_FCOE
5620 case I40E_VSI_FCOE:
5621 #endif
5622 if (!vsi->netdev || !vsi->netdev_registered)
5623 break;
5624
5625 if (link_up) {
5626 netif_carrier_on(vsi->netdev);
5627 netif_tx_wake_all_queues(vsi->netdev);
5628 } else {
5629 netif_carrier_off(vsi->netdev);
5630 netif_tx_stop_all_queues(vsi->netdev);
5631 }
5632 break;
5633
5634 case I40E_VSI_SRIOV:
5635 case I40E_VSI_VMDQ2:
5636 case I40E_VSI_CTRL:
5637 case I40E_VSI_MIRROR:
5638 default:
5639 /* there is no notification for other VSIs */
5640 break;
5641 }
5642 }
5643
5644 /**
5645 * i40e_veb_link_event - notify elements on the veb of a link event
5646 * @veb: veb to be notified
5647 * @link_up: link up or down
5648 **/
5649 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
5650 {
5651 struct i40e_pf *pf;
5652 int i;
5653
5654 if (!veb || !veb->pf)
5655 return;
5656 pf = veb->pf;
5657
5658 /* depth first... */
5659 for (i = 0; i < I40E_MAX_VEB; i++)
5660 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
5661 i40e_veb_link_event(pf->veb[i], link_up);
5662
5663 /* ... now the local VSIs */
5664 for (i = 0; i < pf->num_alloc_vsi; i++)
5665 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
5666 i40e_vsi_link_event(pf->vsi[i], link_up);
5667 }
5668
5669 /**
5670 * i40e_link_event - Update netif_carrier status
5671 * @pf: board private structure
5672 **/
5673 static void i40e_link_event(struct i40e_pf *pf)
5674 {
5675 bool new_link, old_link;
5676 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5677 u8 new_link_speed, old_link_speed;
5678
5679 /* set this to force the get_link_status call to refresh state */
5680 pf->hw.phy.get_link_info = true;
5681
5682 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
5683 new_link = i40e_get_link_status(&pf->hw);
5684 old_link_speed = pf->hw.phy.link_info_old.link_speed;
5685 new_link_speed = pf->hw.phy.link_info.link_speed;
5686
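/* bail if the link state and speed are unchanged and either the VSI is
 * down or the netdev carrier already agrees with the new state
 */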
5687 if (new_link == old_link &&
5688 new_link_speed == old_link_speed &&
5689 (test_bit(__I40E_DOWN, &vsi->state) ||
5690 new_link == netif_carrier_ok(vsi->netdev)))
5691 return;
5692
5693 if (!test_bit(__I40E_DOWN, &vsi->state))
5694 i40e_print_link_message(vsi, new_link);
5695
5696 /* Notify the base of the switch tree connected to
5697 * the link. Floating VEBs are not notified.
5698 */
5699 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
5700 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
5701 else
5702 i40e_vsi_link_event(vsi, new_link);
5703
5704 if (pf->vf)
5705 i40e_vc_notify_link_state(pf);
5706
5707 if (pf->flags & I40E_FLAG_PTP)
5708 i40e_ptp_set_increment(pf);
5709 }
5710
5711 /**
5712 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
5713 * @pf: board private structure
5714 *
5715 * Set the per-queue flags to request a check for stuck queues in the irq
5716 * clean functions, then force interrupts to be sure the irq clean is called.
5717 **/
5718 static void i40e_check_hang_subtask(struct i40e_pf *pf)
5719 {
5720 int i, v;
5721
5722 /* If we're down or resetting, just bail */
5723 if (test_bit(__I40E_DOWN, &pf->state) ||
5724 test_bit(__I40E_CONFIG_BUSY, &pf->state))
5725 return;
5726
5727 /* for each VSI/netdev
5728 * for each Tx queue
5729 * set the check flag
5730 * for each q_vector
5731 * force an interrupt
5732 */
5733 for (v = 0; v < pf->num_alloc_vsi; v++) {
5734 struct i40e_vsi *vsi = pf->vsi[v];
5735 int armed = 0;
5736
5737 if (!pf->vsi[v] ||
5738 test_bit(__I40E_DOWN, &vsi->state) ||
5739 (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
5740 continue;
5741
5742 for (i = 0; i < vsi->num_queue_pairs; i++) {
5743 set_check_for_tx_hang(vsi->tx_rings[i]);
5744 if (test_bit(__I40E_HANG_CHECK_ARMED,
5745 &vsi->tx_rings[i]->state))
5746 armed++;
5747 }
5748
5749 if (armed) {
5750 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
5751 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
5752 (I40E_PFINT_DYN_CTL0_INTENA_MASK |
5753 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
5754 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
5755 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
5756 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
5757 } else {
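/* I40E_PFINT_DYN_CTLN is indexed from MSI-X vector 1 (vector 0 is the
 * misc vector, driven through DYN_CTL0), hence base_vector - 1 here
 */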
5758 u16 vec = vsi->base_vector - 1;
5759 u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
5760 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
5761 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
5762 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
5763 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK);
5764 for (i = 0; i < vsi->num_q_vectors; i++, vec++)
5765 wr32(&vsi->back->hw,
5766 I40E_PFINT_DYN_CTLN(vec), val);
5767 }
5768 i40e_flush(&vsi->back->hw);
5769 }
5770 }
5771 }
5772
5773 /**
5774 * i40e_watchdog_subtask - periodic checks not using event driven response
5775 * @pf: board private structure
5776 **/
5777 static void i40e_watchdog_subtask(struct i40e_pf *pf)
5778 {
5779 int i;
5780
5781 /* if interface is down do nothing */
5782 if (test_bit(__I40E_DOWN, &pf->state) ||
5783 test_bit(__I40E_CONFIG_BUSY, &pf->state))
5784 return;
5785
5786 /* make sure we don't do these things too often */
5787 if (time_before(jiffies, (pf->service_timer_previous +
5788 pf->service_timer_period)))
5789 return;
5790 pf->service_timer_previous = jiffies;
5791
5792 i40e_check_hang_subtask(pf);
5793 i40e_link_event(pf);
5794
5795 /* Update the stats for active netdevs so the network stack
5796 * can look at updated numbers whenever it cares to
5797 */
5798 for (i = 0; i < pf->num_alloc_vsi; i++)
5799 if (pf->vsi[i] && pf->vsi[i]->netdev)
5800 i40e_update_stats(pf->vsi[i]);
5801
5802 /* Update the stats for the active switching components */
5803 for (i = 0; i < I40E_MAX_VEB; i++)
5804 if (pf->veb[i])
5805 i40e_update_veb_stats(pf->veb[i]);
5806
5807 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
5808 }
5809
5810 /**
5811 * i40e_reset_subtask - Set up for resetting the device and driver
5812 * @pf: board private structure
5813 **/
5814 static void i40e_reset_subtask(struct i40e_pf *pf)
5815 {
5816 u32 reset_flags = 0;
5817
5818 rtnl_lock();
5819 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
5820 reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED);
5821 clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
5822 }
5823 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
5824 reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED);
5825 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5826 }
5827 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
5828 reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED);
5829 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
5830 }
5831 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
5832 reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED);
5833 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
5834 }
5835 if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
5836 reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED);
5837 clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
5838 }
5839
5840 /* If there's a recovery already waiting, it takes
5841 * precedence over starting a new reset sequence.
5842 */
5843 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
5844 i40e_handle_reset_warning(pf);
5845 goto unlock;
5846 }
5847
5848 /* If we're already down or resetting, just bail */
5849 if (reset_flags &&
5850 !test_bit(__I40E_DOWN, &pf->state) &&
5851 !test_bit(__I40E_CONFIG_BUSY, &pf->state))
5852 i40e_do_reset(pf, reset_flags);
5853
5854 unlock:
5855 rtnl_unlock();
5856 }
5857
5858 /**
5859 * i40e_handle_link_event - Handle link event
5860 * @pf: board private structure
5861 * @e: event info posted on ARQ
5862 **/
5863 static void i40e_handle_link_event(struct i40e_pf *pf,
5864 struct i40e_arq_event_info *e)
5865 {
5866 struct i40e_hw *hw = &pf->hw;
5867 struct i40e_aqc_get_link_status *status =
5868 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
5869
5870 /* save off old link status information */
5871 hw->phy.link_info_old = hw->phy.link_info;
5872
5873 /* Do a new status request to re-enable LSE reporting
5874 * and load new status information into the hw struct
5875 * This completely ignores any state information
5876 * in the ARQ event info, instead choosing to always
5877 * issue the AQ update link status command.
5878 */
5879 i40e_link_event(pf);
5880
5881 /* check for unqualified module, if link is down */
5882 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
5883 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
5884 (!(status->link_info & I40E_AQ_LINK_UP)))
5885 dev_err(&pf->pdev->dev,
5886 "The driver failed to link because an unqualified module was detected.\n");
5887 }
5888
5889 /**
5890 * i40e_clean_adminq_subtask - Clean the AdminQ rings
5891 * @pf: board private structure
5892 **/
5893 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
5894 {
5895 struct i40e_arq_event_info event;
5896 struct i40e_hw *hw = &pf->hw;
5897 u16 pending, i = 0;
5898 i40e_status ret;
5899 u16 opcode;
5900 u32 oldval;
5901 u32 val;
5902
5903 /* Do not run clean AQ when PF reset fails */
5904 if (test_bit(__I40E_RESET_FAILED, &pf->state))
5905 return;
5906
5907 /* check for error indications */
5908 val = rd32(&pf->hw, pf->hw.aq.arq.len);
5909 oldval = val;
5910 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
5911 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
5912 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
5913 }
5914 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
5915 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
5916 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
5917 }
5918 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
5919 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
5920 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
5921 }
5922 if (oldval != val)
5923 wr32(&pf->hw, pf->hw.aq.arq.len, val);
5924
5925 val = rd32(&pf->hw, pf->hw.aq.asq.len);
5926 oldval = val;
5927 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
5928 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
5929 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
5930 }
5931 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
5932 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
5933 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
5934 }
5935 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
5936 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
5937 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
5938 }
5939 if (oldval != val)
5940 wr32(&pf->hw, pf->hw.aq.asq.len, val);
5941
5942 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
5943 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
5944 if (!event.msg_buf)
5945 return;
5946
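/* process ARQ events up to the per-invocation work limit; anything
 * still pending is picked up on a later service task run
 */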
5947 do {
5948 ret = i40e_clean_arq_element(hw, &event, &pending);
5949 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
5950 break;
5951 else if (ret) {
5952 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
5953 break;
5954 }
5955
5956 opcode = le16_to_cpu(event.desc.opcode);
5957 switch (opcode) {
5959 case i40e_aqc_opc_get_link_status:
5960 i40e_handle_link_event(pf, &event);
5961 break;
5962 case i40e_aqc_opc_send_msg_to_pf:
5963 ret = i40e_vc_process_vf_msg(pf,
5964 le16_to_cpu(event.desc.retval),
5965 le32_to_cpu(event.desc.cookie_high),
5966 le32_to_cpu(event.desc.cookie_low),
5967 event.msg_buf,
5968 event.msg_len);
5969 break;
5970 case i40e_aqc_opc_lldp_update_mib:
5971 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
5972 #ifdef CONFIG_I40E_DCB
5973 rtnl_lock();
5974 ret = i40e_handle_lldp_event(pf, &event);
5975 rtnl_unlock();
5976 #endif /* CONFIG_I40E_DCB */
5977 break;
5978 case i40e_aqc_opc_event_lan_overflow:
5979 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
5980 i40e_handle_lan_overflow_event(pf, &event);
5981 break;
5982 case i40e_aqc_opc_send_msg_to_peer:
5983 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
5984 break;
5985 case i40e_aqc_opc_nvm_erase:
5986 case i40e_aqc_opc_nvm_update:
5987 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n");
5988 break;
5989 default:
5990 dev_info(&pf->pdev->dev,
5991 "ARQ Error: Unknown event 0x%04x received\n",
5992 opcode);
5993 break;
5994 }
5995 } while (pending && (i++ < pf->adminq_work_limit));
5996
5997 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
5998 /* re-enable Admin queue interrupt cause */
5999 val = rd32(hw, I40E_PFINT_ICR0_ENA);
6000 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
6001 wr32(hw, I40E_PFINT_ICR0_ENA, val);
6002 i40e_flush(hw);
6003
6004 kfree(event.msg_buf);
6005 }
6006
6007 /**
6008 * i40e_verify_eeprom - make sure eeprom is good to use
6009 * @pf: board private structure
6010 **/
6011 static void i40e_verify_eeprom(struct i40e_pf *pf)
6012 {
6013 int err;
6014
6015 err = i40e_diag_eeprom_test(&pf->hw);
6016 if (err) {
6017 /* retry in case of garbage read */
6018 err = i40e_diag_eeprom_test(&pf->hw);
6019 if (err) {
6020 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
6021 err);
6022 set_bit(__I40E_BAD_EEPROM, &pf->state);
6023 }
6024 }
6025
6026 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
6027 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
6028 clear_bit(__I40E_BAD_EEPROM, &pf->state);
6029 }
6030 }
6031
6032 /**
6033 * i40e_enable_pf_switch_lb - enable PF switch loopback
6034 * @pf: pointer to the PF structure
6035 *
6036 * Enable switch loopback or die - no point in a return value
6037 **/
6038 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
6039 {
6040 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6041 struct i40e_vsi_context ctxt;
6042 int ret;
6043
6044 ctxt.seid = pf->main_vsi_seid;
6045 ctxt.pf_num = pf->hw.pf_id;
6046 ctxt.vf_num = 0;
6047 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6048 if (ret) {
6049 dev_info(&pf->pdev->dev,
6050 "couldn't get PF vsi config, err %s aq_err %s\n",
6051 i40e_stat_str(&pf->hw, ret),
6052 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6053 return;
6054 }
6055 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6056 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6057 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6058
6059 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6060 if (ret) {
6061 dev_info(&pf->pdev->dev,
6062 "update vsi switch failed, err %s aq_err %s\n",
6063 i40e_stat_str(&pf->hw, ret),
6064 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6065 }
6066 }
6067
6068 /**
6069 * i40e_disable_pf_switch_lb - disable PF switch loopback
6070 * @pf: pointer to the PF structure
6071 *
6072 * Disable switch loopback or die - no point in a return value
6073 **/
6074 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6075 {
6076 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6077 struct i40e_vsi_context ctxt;
6078 int ret;
6079
6080 ctxt.seid = pf->main_vsi_seid;
6081 ctxt.pf_num = pf->hw.pf_id;
6082 ctxt.vf_num = 0;
6083 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6084 if (ret) {
6085 dev_info(&pf->pdev->dev,
6086 "couldn't get PF vsi config, err %s aq_err %s\n",
6087 i40e_stat_str(&pf->hw, ret),
6088 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6089 return;
6090 }
6091 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6092 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6093 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6094
6095 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6096 if (ret) {
6097 dev_info(&pf->pdev->dev,
6098 "update vsi switch failed, err %s aq_err %s\n",
6099 i40e_stat_str(&pf->hw, ret),
6100 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6101 }
6102 }
6103
6104 /**
6105 * i40e_config_bridge_mode - Configure the HW bridge mode
6106 * @veb: pointer to the bridge instance
6107 *
6108 * Configure the loopback mode for the LAN VSI that is downlink to the
6109 * specified HW bridge instance. It is expected this function is called
6110 * when a new HW bridge is instantiated.
6111 **/
6112 static void i40e_config_bridge_mode(struct i40e_veb *veb)
6113 {
6114 struct i40e_pf *pf = veb->pf;
6115
6116 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
6117 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
6118 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
6119 i40e_disable_pf_switch_lb(pf);
6120 else
6121 i40e_enable_pf_switch_lb(pf);
6122 }
6123
6124 /**
6125 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
6126 * @veb: pointer to the VEB instance
6127 *
6128 * This is a recursive function that first builds the attached VSIs then
6129 * recurses to build the next layer of VEBs. We track the connections
6130 * through our own index numbers because the SEIDs from the HW could
6131 * change across the reset.
6132 **/
6133 static int i40e_reconstitute_veb(struct i40e_veb *veb)
6134 {
6135 struct i40e_vsi *ctl_vsi = NULL;
6136 struct i40e_pf *pf = veb->pf;
6137 int v, veb_idx;
6138 int ret;
6139
6140 /* build VSI that owns this VEB, temporarily attached to base VEB */
6141 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
6142 if (pf->vsi[v] &&
6143 pf->vsi[v]->veb_idx == veb->idx &&
6144 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
6145 ctl_vsi = pf->vsi[v];
6146 break;
6147 }
6148 }
6149 if (!ctl_vsi) {
6150 dev_info(&pf->pdev->dev,
6151 "missing owner VSI for veb_idx %d\n", veb->idx);
6152 ret = -ENOENT;
6153 goto end_reconstitute;
6154 }
6155 if (ctl_vsi != pf->vsi[pf->lan_vsi])
6156 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6157 ret = i40e_add_vsi(ctl_vsi);
6158 if (ret) {
6159 dev_info(&pf->pdev->dev,
6160 "rebuild of veb_idx %d owner VSI failed: %d\n",
6161 veb->idx, ret);
6162 goto end_reconstitute;
6163 }
6164 i40e_vsi_reset_stats(ctl_vsi);
6165
6166 /* create the VEB in the switch and move the VSI onto the VEB */
6167 ret = i40e_add_veb(veb, ctl_vsi);
6168 if (ret)
6169 goto end_reconstitute;
6170
6171 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
6172 veb->bridge_mode = BRIDGE_MODE_VEB;
6173 else
6174 veb->bridge_mode = BRIDGE_MODE_VEPA;
6175 i40e_config_bridge_mode(veb);
6176
6177 /* create the remaining VSIs attached to this VEB */
6178 for (v = 0; v < pf->num_alloc_vsi; v++) {
6179 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
6180 continue;
6181
6182 if (pf->vsi[v]->veb_idx == veb->idx) {
6183 struct i40e_vsi *vsi = pf->vsi[v];
6184 vsi->uplink_seid = veb->seid;
6185 ret = i40e_add_vsi(vsi);
6186 if (ret) {
6187 dev_info(&pf->pdev->dev,
6188 "rebuild of vsi_idx %d failed: %d\n",
6189 v, ret);
6190 goto end_reconstitute;
6191 }
6192 i40e_vsi_reset_stats(vsi);
6193 }
6194 }
6195
6196 /* create any VEBs attached to this VEB - RECURSION */
6197 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6198 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
6199 pf->veb[veb_idx]->uplink_seid = veb->seid;
6200 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
6201 if (ret)
6202 break;
6203 }
6204 }
6205
6206 end_reconstitute:
6207 return ret;
6208 }
6209
6210 /**
6211 * i40e_get_capabilities - get info about the HW
6212 * @pf: the PF struct
6213 **/
6214 static int i40e_get_capabilities(struct i40e_pf *pf)
6215 {
6216 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
6217 u16 data_size;
6218 int buf_len;
6219 int err;
6220
6221 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
6222 do {
6223 cap_buf = kzalloc(buf_len, GFP_KERNEL);
6224 if (!cap_buf)
6225 return -ENOMEM;
6226
6227 /* this loads the data into the hw struct for us */
6228 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
6229 &data_size,
6230 i40e_aqc_opc_list_func_capabilities,
6231 NULL);
6232 /* data loaded, buffer no longer needed */
6233 kfree(cap_buf);
6234
6235 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
6236 /* retry with a larger buffer */
6237 buf_len = data_size;
6238 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
6239 dev_info(&pf->pdev->dev,
6240 "capability discovery failed, err %s aq_err %s\n",
6241 i40e_stat_str(&pf->hw, err),
6242 i40e_aq_str(&pf->hw,
6243 pf->hw.aq.asq_last_status));
6244 return -ENODEV;
6245 }
6246 } while (err);
6247
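/* firmware older than 2.22 is assumed to report one fewer usable MSI-X
 * vector than is actually available; compensate here (inferred from
 * this version gate, not from documentation)
 */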
6248 if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
6249 (pf->hw.aq.fw_maj_ver < 2)) {
6250 pf->hw.func_caps.num_msix_vectors++;
6251 pf->hw.func_caps.num_msix_vectors_vf++;
6252 }
6253
6254 if (pf->hw.debug_mask & I40E_DEBUG_USER)
6255 dev_info(&pf->pdev->dev,
6256 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
6257 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
6258 pf->hw.func_caps.num_msix_vectors,
6259 pf->hw.func_caps.num_msix_vectors_vf,
6260 pf->hw.func_caps.fd_filters_guaranteed,
6261 pf->hw.func_caps.fd_filters_best_effort,
6262 pf->hw.func_caps.num_tx_qp,
6263 pf->hw.func_caps.num_vsis);
6264
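/* one main LAN VSI, plus one VSI per VF, plus one for FCoE if supported */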
6265 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
6266 + pf->hw.func_caps.num_vfs)
6267 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
6268 dev_info(&pf->pdev->dev,
6269 "got num_vsis %d, setting num_vsis to %d\n",
6270 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
6271 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
6272 }
6273
6274 return 0;
6275 }
6276
6277 static int i40e_vsi_clear(struct i40e_vsi *vsi);
6278
6279 /**
6280 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
6281 * @pf: board private structure
6282 **/
6283 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
6284 {
6285 struct i40e_vsi *vsi;
6286 int i;
6287
6288 /* quick workaround for an NVM issue that leaves a critical register
6289 * uninitialized
6290 */
6291 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6292 static const u32 hkey[] = {
6293 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6294 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6295 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6296 0x95b3a76d};
6297
6298 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
6299 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
6300 }
6301
6302 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6303 return;
6304
6305 /* find existing VSI and see if it needs configuring */
6306 vsi = NULL;
6307 for (i = 0; i < pf->num_alloc_vsi; i++) {
6308 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6309 vsi = pf->vsi[i];
6310 break;
6311 }
6312 }
6313
6314 /* create a new VSI if none exists */
6315 if (!vsi) {
6316 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
6317 pf->vsi[pf->lan_vsi]->seid, 0);
6318 if (!vsi) {
6319 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
6320 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6321 return;
6322 }
6323 }
6324
6325 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
6326 }
6327
6328 /**
6329 * i40e_fdir_teardown - release the Flow Director resources
6330 * @pf: board private structure
6331 **/
6332 static void i40e_fdir_teardown(struct i40e_pf *pf)
6333 {
6334 int i;
6335
6336 i40e_fdir_filter_exit(pf);
6337 for (i = 0; i < pf->num_alloc_vsi; i++) {
6338 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6339 i40e_vsi_release(pf->vsi[i]);
6340 break;
6341 }
6342 }
6343 }
6344
6345 /**
6346 * i40e_prep_for_reset - prep for the core to reset
6347 * @pf: board private structure
6348 *
6349 * Close up the VFs and other things in prep for PF Reset.
6350 **/
6351 static void i40e_prep_for_reset(struct i40e_pf *pf)
6352 {
6353 struct i40e_hw *hw = &pf->hw;
6354 i40e_status ret = 0;
6355 u32 v;
6356
6357 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
6358 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
6359 return;
6360
6361 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
6362
6363 /* quiesce the VSIs and their queues that are not already DOWN */
6364 i40e_pf_quiesce_all_vsi(pf);
6365
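/* drop the cached switch element IDs; the reset invalidates them and
 * the rebuild will assign fresh SEIDs
 */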
6366 for (v = 0; v < pf->num_alloc_vsi; v++) {
6367 if (pf->vsi[v])
6368 pf->vsi[v]->seid = 0;
6369 }
6370
6371 i40e_shutdown_adminq(&pf->hw);
6372
6373 /* call shutdown HMC */
6374 if (hw->hmc.hmc_obj) {
6375 ret = i40e_shutdown_lan_hmc(hw);
6376 if (ret)
6377 dev_warn(&pf->pdev->dev,
6378 "shutdown_lan_hmc failed: %d\n", ret);
6379 }
6380 }
6381
6382 /**
6383 * i40e_send_version - update firmware with driver version
6384 * @pf: PF struct
6385 */
6386 static void i40e_send_version(struct i40e_pf *pf)
6387 {
6388 struct i40e_driver_version dv;
6389
6390 dv.major_version = DRV_VERSION_MAJOR;
6391 dv.minor_version = DRV_VERSION_MINOR;
6392 dv.build_version = DRV_VERSION_BUILD;
6393 dv.subbuild_version = 0;
6394 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
6395 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
6396 }
6397
6398 /**
6399 * i40e_reset_and_rebuild - reset and rebuild using a saved config
6400 * @pf: board private structure
6401 * @reinit: if the Main VSI needs to be re-initialized.
6402 **/
6403 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
6404 {
6405 struct i40e_hw *hw = &pf->hw;
6406 u8 set_fc_aq_fail = 0;
6407 i40e_status ret;
6408 u32 v;
6409
6410 /* Now we wait for GRST to settle out.
6411 * We don't have to delete the VEBs or VSIs from the hw switch
6412 * because the reset will make them disappear.
6413 */
6414 ret = i40e_pf_reset(hw);
6415 if (ret) {
6416 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
6417 set_bit(__I40E_RESET_FAILED, &pf->state);
6418 goto clear_recovery;
6419 }
6420 pf->pfr_count++;
6421
6422 if (test_bit(__I40E_DOWN, &pf->state))
6423 goto clear_recovery;
6424 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
6425
6426 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
6427 ret = i40e_init_adminq(&pf->hw);
6428 if (ret) {
6429 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
6430 i40e_stat_str(&pf->hw, ret),
6431 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6432 goto clear_recovery;
6433 }
6434
6435 /* re-verify the eeprom if we just had an EMP reset */
6436 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
6437 i40e_verify_eeprom(pf);
6438
6439 i40e_clear_pxe_mode(hw);
6440 ret = i40e_get_capabilities(pf);
6441 if (ret)
6442 goto end_core_reset;
6443
6444 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
6445 hw->func_caps.num_rx_qp,
6446 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
6447 if (ret) {
6448 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
6449 goto end_core_reset;
6450 }
6451 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
6452 if (ret) {
6453 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
6454 goto end_core_reset;
6455 }
6456
6457 #ifdef CONFIG_I40E_DCB
6458 ret = i40e_init_pf_dcb(pf);
6459 if (ret) {
6460 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
6461 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
6462 /* Continue without DCB enabled */
6463 }
6464 #endif /* CONFIG_I40E_DCB */
6465 #ifdef I40E_FCOE
6466 ret = i40e_init_pf_fcoe(pf);
6467 if (ret)
6468 dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);
6469
6470 #endif
6471 /* do basic switch setup */
6472 ret = i40e_setup_pf_switch(pf, reinit);
6473 if (ret)
6474 goto end_core_reset;
6475
6476 /* driver is only interested in link up/down and module qualification
6477 * reports from firmware
6478 */
6479 ret = i40e_aq_set_phy_int_mask(&pf->hw,
6480 I40E_AQ_EVENT_LINK_UPDOWN |
6481 I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
6482 if (ret)
6483 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
6484 i40e_stat_str(&pf->hw, ret),
6485 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6486
6487 /* make sure our flow control settings are restored */
6488 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
6489 if (ret)
6490 dev_info(&pf->pdev->dev, "set fc fail, err %s aq_err %s\n",
6491 i40e_stat_str(&pf->hw, ret),
6492 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6493
6494 /* Rebuild the VSIs and VEBs that existed before reset.
6495 * They are still in our local switch element arrays, so we only
6496 * need to rebuild the switch model in the HW.
6497 *
6498 * If there were VEBs but the reconstitution failed, we'll try
6499 * to recover minimal use by getting the basic PF VSI working.
6500 */
6501 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
6502 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
6503 /* find the one VEB connected to the MAC, and find orphans */
6504 for (v = 0; v < I40E_MAX_VEB; v++) {
6505 if (!pf->veb[v])
6506 continue;
6507
6508 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
6509 pf->veb[v]->uplink_seid == 0) {
6510 ret = i40e_reconstitute_veb(pf->veb[v]);
6511
6512 if (!ret)
6513 continue;
6514
6515 /* If Main VEB failed, we're in deep doodoo,
6516 * so give up rebuilding the switch and set up
6517 * for minimal rebuild of PF VSI.
6518 * If orphan failed, we'll report the error
6519 * but try to keep going.
6520 */
6521 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
6522 dev_info(&pf->pdev->dev,
6523 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
6524 ret);
6525 pf->vsi[pf->lan_vsi]->uplink_seid
6526 = pf->mac_seid;
6527 break;
6528 } else if (pf->veb[v]->uplink_seid == 0) {
6529 dev_info(&pf->pdev->dev,
6530 "rebuild of orphan VEB failed: %d\n",
6531 ret);
6532 }
6533 }
6534 }
6535 }
6536
6537 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
6538 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
6539 /* no VEB, so rebuild only the Main VSI */
6540 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
6541 if (ret) {
6542 dev_info(&pf->pdev->dev,
6543 "rebuild of Main VSI failed: %d\n", ret);
6544 goto end_core_reset;
6545 }
6546 }
6547
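/* with firmware older than 4.33, restart autonegotiation explicitly
 * after the reset; the 75 ms sleep is assumed to give the firmware
 * time to settle first (inferred, not documented)
 */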
6548 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
6549 (pf->hw.aq.fw_maj_ver < 4)) {
6550 msleep(75);
6551 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
6552 if (ret)
6553 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
6554 i40e_stat_str(&pf->hw, ret),
6555 i40e_aq_str(&pf->hw,
6556 pf->hw.aq.asq_last_status));
6557 }
6558 /* reinit the misc interrupt */
6559 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6560 ret = i40e_setup_misc_vector(pf);
6561
6562 /* restart the VSIs that were rebuilt and running before the reset */
6563 i40e_pf_unquiesce_all_vsi(pf);
6564
6565 if (pf->num_alloc_vfs) {
6566 for (v = 0; v < pf->num_alloc_vfs; v++)
6567 i40e_reset_vf(&pf->vf[v], true);
6568 }
6569
6570 /* tell the firmware that we're starting */
6571 i40e_send_version(pf);
6572
6573 end_core_reset:
6574 clear_bit(__I40E_RESET_FAILED, &pf->state);
6575 clear_recovery:
6576 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
6577 }
6578
6579 /**
6580 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
6581 * @pf: board private structure
6582 *
6583 * Close up the VFs and other things in prep for a Core Reset,
6584 * then get ready to rebuild the world.
6585 **/
6586 static void i40e_handle_reset_warning(struct i40e_pf *pf)
6587 {
6588 i40e_prep_for_reset(pf);
6589 i40e_reset_and_rebuild(pf, false);
6590 }
6591
6592 /**
6593 * i40e_handle_mdd_event
6594 * @pf: pointer to the PF structure
6595 *
6596 * Called from the MDD irq handler to identify possibly malicious VFs
6597 **/
6598 static void i40e_handle_mdd_event(struct i40e_pf *pf)
6599 {
6600 struct i40e_hw *hw = &pf->hw;
6601 bool mdd_detected = false;
6602 bool pf_mdd_detected = false;
6603 struct i40e_vf *vf;
6604 u32 reg;
6605 int i;
6606
6607 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
6608 return;
6609
6610 /* find what triggered the MDD event */
6611 reg = rd32(hw, I40E_GL_MDET_TX);
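/* the global MDET registers report absolute queue numbers, so the PF's
 * base queue is subtracted below to get the local queue index
 */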
6612 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
6613 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6614 I40E_GL_MDET_TX_PF_NUM_SHIFT;
6615 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6616 I40E_GL_MDET_TX_VF_NUM_SHIFT;
6617 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6618 I40E_GL_MDET_TX_EVENT_SHIFT;
6619 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6620 I40E_GL_MDET_TX_QUEUE_SHIFT) -
6621 pf->hw.func_caps.base_queue;
6622 if (netif_msg_tx_err(pf))
6623 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
6624 event, queue, pf_num, vf_num);
6625 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
6626 mdd_detected = true;
6627 }
6628 reg = rd32(hw, I40E_GL_MDET_RX);
6629 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6630 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6631 I40E_GL_MDET_RX_FUNCTION_SHIFT;
6632 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6633 I40E_GL_MDET_RX_EVENT_SHIFT;
6634 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6635 I40E_GL_MDET_RX_QUEUE_SHIFT) -
6636 pf->hw.func_caps.base_queue;
6637 if (netif_msg_rx_err(pf))
6638 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
6639 event, queue, func);
6640 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
6641 mdd_detected = true;
6642 }
6643
6644 if (mdd_detected) {
6645 reg = rd32(hw, I40E_PF_MDET_TX);
6646 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6647 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
6648 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
6649 pf_mdd_detected = true;
6650 }
6651 reg = rd32(hw, I40E_PF_MDET_RX);
6652 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6653 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
6654 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
6655 pf_mdd_detected = true;
6656 }
6657 /* Queue belongs to the PF, initiate a reset */
6658 if (pf_mdd_detected) {
6659 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
6660 i40e_service_event_schedule(pf);
6661 }
6662 }
6663
6664 /* see if one of the VFs needs its hand slapped */
6665 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
6666 vf = &(pf->vf[i]);
6667 reg = rd32(hw, I40E_VP_MDET_TX(i));
6668 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6669 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
6670 vf->num_mdd_events++;
6671 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
6672 i);
6673 }
6674
6675 reg = rd32(hw, I40E_VP_MDET_RX(i));
6676 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
6677 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
6678 vf->num_mdd_events++;
6679 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
6680 i);
6681 }
6682
6683 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
6684 dev_info(&pf->pdev->dev,
6685 "Too many MDD events on VF %d, disabled\n", i);
6686 dev_info(&pf->pdev->dev,
6687 "Use PF Control I/F to re-enable the VF\n");
6688 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
6689 }
6690 }
6691
6692 /* re-enable mdd interrupt cause */
6693 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
6694 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
6695 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
6696 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
6697 i40e_flush(hw);
6698 }
6699
6700 #ifdef CONFIG_I40E_VXLAN
6701 /**
6702 * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
6703 * @pf: board private structure
6704 **/
6705 static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
6706 {
6707 struct i40e_hw *hw = &pf->hw;
6708 i40e_status ret;
6709 __be16 port;
6710 int i;
6711
6712 if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
6713 return;
6714
6715 pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
6716
6717 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
6718 if (pf->pending_vxlan_bitmap & BIT_ULL(i)) {
6719 pf->pending_vxlan_bitmap &= ~BIT_ULL(i);
6720 port = pf->vxlan_ports[i];
6721 if (port)
6722 ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
6723 I40E_AQC_TUNNEL_TYPE_VXLAN,
6724 NULL, NULL);
6725 else
6726 ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
6727
6728 if (ret) {
6729 dev_info(&pf->pdev->dev,
6730 "%s vxlan port %d, index %d failed, err %s aq_err %s\n",
6731 port ? "add" : "delete",
6732 ntohs(port), i,
6733 i40e_stat_str(&pf->hw, ret),
6734 i40e_aq_str(&pf->hw,
6735 pf->hw.aq.asq_last_status));
6736 pf->vxlan_ports[i] = 0;
6737 }
6738 }
6739 }
6740 }
6741
6742 #endif
6743 /**
6744 * i40e_service_task - Run the driver's async subtasks
6745 * @work: pointer to work_struct containing our data
6746 **/
6747 static void i40e_service_task(struct work_struct *work)
6748 {
6749 struct i40e_pf *pf = container_of(work,
6750 struct i40e_pf,
6751 service_task);
6752 unsigned long start_time = jiffies;
6753
6754 /* don't bother with service tasks if a reset is in progress */
6755 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
6756 i40e_service_event_complete(pf);
6757 return;
6758 }
6759
6760 i40e_reset_subtask(pf);
6761 i40e_handle_mdd_event(pf);
6762 i40e_vc_process_vflr_event(pf);
6763 i40e_watchdog_subtask(pf);
6764 i40e_fdir_reinit_subtask(pf);
6765 i40e_sync_filters_subtask(pf);
6766 #ifdef CONFIG_I40E_VXLAN
6767 i40e_sync_vxlan_filters_subtask(pf);
6768 #endif
6769 i40e_clean_adminq_subtask(pf);
6770
6771 i40e_service_event_complete(pf);
6772
6773 /* If the tasks have taken longer than one timer cycle or there
6774 * is more work to be done, reschedule the service task now
6775 * rather than wait for the timer to tick again.
6776 */
6777 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
6778 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
6779 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
6780 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
6781 i40e_service_event_schedule(pf);
6782 }
6783
6784 /**
6785 * i40e_service_timer - timer callback
6786 * @data: pointer to PF struct
6787 **/
6788 static void i40e_service_timer(unsigned long data)
6789 {
6790 struct i40e_pf *pf = (struct i40e_pf *)data;
6791
6792 mod_timer(&pf->service_timer,
6793 round_jiffies(jiffies + pf->service_timer_period));
6794 i40e_service_event_schedule(pf);
6795 }
6796
6797 /**
6798 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
6799 * @vsi: the VSI being configured
6800 **/
6801 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
6802 {
6803 struct i40e_pf *pf = vsi->back;
6804
6805 switch (vsi->type) {
6806 case I40E_VSI_MAIN:
6807 vsi->alloc_queue_pairs = pf->num_lan_qps;
6808 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6809 I40E_REQ_DESCRIPTOR_MULTIPLE);
6810 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6811 vsi->num_q_vectors = pf->num_lan_msix;
6812 else
6813 vsi->num_q_vectors = 1;
6814
6815 break;
6816
6817 case I40E_VSI_FDIR:
6818 vsi->alloc_queue_pairs = 1;
6819 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
6820 I40E_REQ_DESCRIPTOR_MULTIPLE);
6821 vsi->num_q_vectors = 1;
6822 break;
6823
6824 case I40E_VSI_VMDQ2:
6825 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
6826 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6827 I40E_REQ_DESCRIPTOR_MULTIPLE);
6828 vsi->num_q_vectors = pf->num_vmdq_msix;
6829 break;
6830
6831 case I40E_VSI_SRIOV:
6832 vsi->alloc_queue_pairs = pf->num_vf_qps;
6833 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6834 I40E_REQ_DESCRIPTOR_MULTIPLE);
6835 break;
6836
6837 #ifdef I40E_FCOE
6838 case I40E_VSI_FCOE:
6839 vsi->alloc_queue_pairs = pf->num_fcoe_qps;
6840 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6841 I40E_REQ_DESCRIPTOR_MULTIPLE);
6842 vsi->num_q_vectors = pf->num_fcoe_msix;
6843 break;
6844
6845 #endif /* I40E_FCOE */
6846 default:
6847 WARN_ON(1);
6848 return -ENODATA;
6849 }
6850
6851 return 0;
6852 }
6853
6854 /**
6855 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
6856 * @vsi: pointer to the VSI being configured
6857 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
6858 *
6859 * On error: returns error code (negative)
6860 * On success: returns 0
6861 **/
6862 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
6863 {
6864 int size;
6865 int ret = 0;
6866
6867 /* allocate memory for both Tx and Rx ring pointers */
6868 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
6869 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
6870 if (!vsi->tx_rings)
6871 return -ENOMEM;
6872 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
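/* rx_rings simply points into the second half of the tx_rings
 * allocation, so only tx_rings is ever freed
 */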
6873
6874 if (alloc_qvectors) {
6875 /* allocate memory for q_vector pointers */
6876 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
6877 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
6878 if (!vsi->q_vectors) {
6879 ret = -ENOMEM;
6880 goto err_vectors;
6881 }
6882 }
6883 return ret;
6884
6885 err_vectors:
6886 kfree(vsi->tx_rings);
6887 return ret;
6888 }
6889
6890 /**
6891 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
6892 * @pf: board private structure
6893 * @type: type of VSI
6894 *
6895 * On error: returns error code (negative)
6896 * On success: returns vsi index in PF (positive)
6897 **/
6898 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
6899 {
6900 int ret = -ENODEV;
6901 struct i40e_vsi *vsi;
6902 int vsi_idx;
6903 int i;
6904
6905 /* Need to protect the allocation of the VSIs at the PF level */
6906 mutex_lock(&pf->switch_mutex);
6907
6908 /* VSI list may be fragmented if VSI creation/destruction has
6909 * been happening. We can afford to do a quick scan to look
6910 * for any free VSIs in the list.
6911 *
6912 * find next empty vsi slot, looping back around if necessary
6913 */
6914 i = pf->next_vsi;
6915 while (i < pf->num_alloc_vsi && pf->vsi[i])
6916 i++;
6917 if (i >= pf->num_alloc_vsi) {
6918 i = 0;
6919 while (i < pf->next_vsi && pf->vsi[i])
6920 i++;
6921 }
6922
6923 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
6924 vsi_idx = i; /* Found one! */
6925 } else {
6926 ret = -ENODEV;
6927 goto unlock_pf; /* out of VSI slots! */
6928 }
6929 pf->next_vsi = ++i;
6930
6931 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
6932 if (!vsi) {
6933 ret = -ENOMEM;
6934 goto unlock_pf;
6935 }
6936 vsi->type = type;
6937 vsi->back = pf;
6938 set_bit(__I40E_DOWN, &vsi->state);
6939 vsi->flags = 0;
6940 vsi->idx = vsi_idx;
6941 vsi->rx_itr_setting = pf->rx_itr_default;
6942 vsi->tx_itr_setting = pf->tx_itr_default;
6943 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
6944 pf->rss_table_size : 64;
6945 vsi->netdev_registered = false;
6946 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
6947 INIT_LIST_HEAD(&vsi->mac_filter_list);
6948 vsi->irqs_ready = false;
6949
6950 ret = i40e_set_num_rings_in_vsi(vsi);
6951 if (ret)
6952 goto err_rings;
6953
6954 ret = i40e_vsi_alloc_arrays(vsi, true);
6955 if (ret)
6956 goto err_rings;
6957
6958 /* Setup default MSIX irq handler for VSI */
6959 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
6960
6961 pf->vsi[vsi_idx] = vsi;
6962 ret = vsi_idx;
6963 goto unlock_pf;
6964
6965 err_rings:
6966 pf->next_vsi = i - 1;
6967 kfree(vsi);
6968 unlock_pf:
6969 mutex_unlock(&pf->switch_mutex);
6970 return ret;
6971 }
6972
6973 /**
6974 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
6975 * @vsi: pointer to the VSI whose arrays are being freed
6976 * @free_qvectors: a bool to specify if q_vectors also need to be freed.
6980 **/
6981 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
6982 {
6983 /* free the ring and vector containers */
6984 if (free_qvectors) {
6985 kfree(vsi->q_vectors);
6986 vsi->q_vectors = NULL;
6987 }
6988 kfree(vsi->tx_rings);
6989 vsi->tx_rings = NULL;
6990 vsi->rx_rings = NULL;
6991 }
6992
6993 /**
6994 * i40e_vsi_clear - Deallocate the VSI provided
6995 * @vsi: the VSI being un-configured
6996 **/
6997 static int i40e_vsi_clear(struct i40e_vsi *vsi)
6998 {
6999 struct i40e_pf *pf;
7000
7001 if (!vsi)
7002 return 0;
7003
7004 if (!vsi->back)
7005 goto free_vsi;
7006 pf = vsi->back;
7007
7008 mutex_lock(&pf->switch_mutex);
7009 if (!pf->vsi[vsi->idx]) {
7010 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
7011 vsi->idx, vsi->idx, vsi, vsi->type);
7012 goto unlock_vsi;
7013 }
7014
7015 if (pf->vsi[vsi->idx] != vsi) {
7016 dev_err(&pf->pdev->dev,
7017 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
7018 pf->vsi[vsi->idx]->idx,
7019 pf->vsi[vsi->idx],
7020 pf->vsi[vsi->idx]->type,
7021 vsi->idx, vsi, vsi->type);
7022 goto unlock_vsi;
7023 }
7024
7025 /* updates the PF for this cleared vsi */
7026 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
7027 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
7028
7029 i40e_vsi_free_arrays(vsi, true);
7030
7031 pf->vsi[vsi->idx] = NULL;
7032 if (vsi->idx < pf->next_vsi)
7033 pf->next_vsi = vsi->idx;
7034
7035 unlock_vsi:
7036 mutex_unlock(&pf->switch_mutex);
7037 free_vsi:
7038 kfree(vsi);
7039
7040 return 0;
7041 }
7042
7043 /**
7044 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
7045 * @vsi: the VSI being cleaned
7046 **/
7047 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
7048 {
7049 int i;
7050
7051 if (vsi->tx_rings && vsi->tx_rings[0]) {
7052 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7053 kfree_rcu(vsi->tx_rings[i], rcu);
7054 vsi->tx_rings[i] = NULL;
7055 vsi->rx_rings[i] = NULL;
7056 }
7057 }
7058 }
7059
7060 /**
7061 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
7062 * @vsi: the VSI being configured
7063 **/
7064 static int i40e_alloc_rings(struct i40e_vsi *vsi)
7065 {
7066 struct i40e_ring *tx_ring, *rx_ring;
7067 struct i40e_pf *pf = vsi->back;
7068 int i;
7069
7070 /* Set basic values in the rings to be used later during open() */
7071 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7072 /* allocate space for both Tx and Rx in one shot */
7073 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
7074 if (!tx_ring)
7075 goto err_out;
7076
7077 tx_ring->queue_index = i;
7078 tx_ring->reg_idx = vsi->base_queue + i;
7079 tx_ring->ring_active = false;
7080 tx_ring->vsi = vsi;
7081 tx_ring->netdev = vsi->netdev;
7082 tx_ring->dev = &pf->pdev->dev;
7083 tx_ring->count = vsi->num_desc;
7084 tx_ring->size = 0;
7085 tx_ring->dcb_tc = 0;
7086 if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
7087 tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
7088 if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
7089 tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM;
7090 vsi->tx_rings[i] = tx_ring;
7091
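/* the Rx ring is the second element of the pair allocated above */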
7092 rx_ring = &tx_ring[1];
7093 rx_ring->queue_index = i;
7094 rx_ring->reg_idx = vsi->base_queue + i;
7095 rx_ring->ring_active = false;
7096 rx_ring->vsi = vsi;
7097 rx_ring->netdev = vsi->netdev;
7098 rx_ring->dev = &pf->pdev->dev;
7099 rx_ring->count = vsi->num_desc;
7100 rx_ring->size = 0;
7101 rx_ring->dcb_tc = 0;
7102 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
7103 set_ring_16byte_desc_enabled(rx_ring);
7104 else
7105 clear_ring_16byte_desc_enabled(rx_ring);
7106 vsi->rx_rings[i] = rx_ring;
7107 }
7108
7109 return 0;
7110
7111 err_out:
7112 i40e_vsi_clear_rings(vsi);
7113 return -ENOMEM;
7114 }
7115
7116 /**
7117 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
7118 * @pf: board private structure
7119 * @vectors: the number of MSI-X vectors to request
7120 *
7121 * Returns the number of vectors reserved, or error
7122 **/
7123 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
7124 {
7125 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
7126 I40E_MIN_MSIX, vectors);
7127 if (vectors < 0) {
7128 dev_info(&pf->pdev->dev,
7129 "MSI-X vector reservation failed: %d\n", vectors);
7130 vectors = 0;
7131 }
7132
7133 return vectors;
7134 }
7135
7136 /**
7137 * i40e_init_msix - Setup the MSIX capability
7138 * @pf: board private structure
7139 *
7140 * Work with the OS to set up the MSIX vectors needed.
7141 *
7142 * Returns the number of vectors reserved or negative on failure
7143 **/
7144 static int i40e_init_msix(struct i40e_pf *pf)
7145 {
7146 struct i40e_hw *hw = &pf->hw;
7147 int vectors_left;
7148 int v_budget, i;
7149 int v_actual;
7150
7151 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7152 return -ENODEV;
7153
7154 /* The number of vectors we'll request will be comprised of:
7155 * - Add 1 for "other" cause for Admin Queue events, etc.
7156 * - The number of LAN queue pairs
7157 * - Queues being used for RSS.
7158 * We don't need as many as max_rss_size vectors;
7159 * use rss_size instead in the calculation, since that
7160 * is governed by the number of CPUs in the system.
7161 * - assumes symmetric Tx/Rx pairing
7162 * - The number of VMDq pairs
7163 #ifdef I40E_FCOE
7164 * - The number of FCOE qps.
7165 #endif
7166 * Once we count this up, try the request.
7167 *
7168 * If we can't get what we want, we'll simplify to nearly nothing
7169 * and try again. If that still fails, we punt.
7170 */
7171 vectors_left = hw->func_caps.num_msix_vectors;
7172 v_budget = 0;
7173
7174 /* reserve one vector for miscellaneous handler */
7175 if (vectors_left) {
7176 v_budget++;
7177 vectors_left--;
7178 }
7179
7180 /* reserve vectors for the main PF traffic queues */
7181 pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
7182 vectors_left -= pf->num_lan_msix;
7183 v_budget += pf->num_lan_msix;
7184
7185 /* reserve one vector for sideband flow director */
7186 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7187 if (vectors_left) {
7188 v_budget++;
7189 vectors_left--;
7190 } else {
7191 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7192 }
7193 }
7194
7195 #ifdef I40E_FCOE
7196 /* can we reserve enough for FCoE? */
7197 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7198 if (!vectors_left)
7199 pf->num_fcoe_msix = 0;
7200 else if (vectors_left >= pf->num_fcoe_qps)
7201 pf->num_fcoe_msix = pf->num_fcoe_qps;
7202 else
7203 pf->num_fcoe_msix = 1;
7204 v_budget += pf->num_fcoe_msix;
7205 vectors_left -= pf->num_fcoe_msix;
7206 }
7207
7208 #endif
7209 /* any vectors left over go for VMDq support */
7210 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
7211 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
7212 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
7213
7214 /* if we're short on vectors for what's desired, we limit
7215 * the queues per vmdq. If this is still more than are
7216 * available, the user will need to change the number of
7217 * queues/vectors used by the PF later with the ethtool
7218 * channels command
7219 */
7220 if (vmdq_vecs < vmdq_vecs_wanted)
7221 pf->num_vmdq_qps = 1;
7222 pf->num_vmdq_msix = pf->num_vmdq_qps;
7223
7224 v_budget += vmdq_vecs;
7225 vectors_left -= vmdq_vecs;
7226 }
7227
7228 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
7229 GFP_KERNEL);
7230 if (!pf->msix_entries)
7231 return -ENOMEM;
7232
7233 for (i = 0; i < v_budget; i++)
7234 pf->msix_entries[i].entry = i;
7235 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
7236
7237 if (v_actual != v_budget) {
7238 /* If we have limited resources, we will start with no vectors
7239 * for the special features and then allocate vectors to some
7240 * of these features based on the policy and at the end disable
7241 * the features that did not get any vectors.
7242 */
7243 #ifdef I40E_FCOE
7244 pf->num_fcoe_qps = 0;
7245 pf->num_fcoe_msix = 0;
7246 #endif
7247 pf->num_vmdq_msix = 0;
7248 }
7249
7250 if (v_actual < I40E_MIN_MSIX) {
7251 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
7252 kfree(pf->msix_entries);
7253 pf->msix_entries = NULL;
7254 return -ENODEV;
7255
7256 } else if (v_actual == I40E_MIN_MSIX) {
7257 /* Adjust for minimal MSIX use */
7258 pf->num_vmdq_vsis = 0;
7259 pf->num_vmdq_qps = 0;
7260 pf->num_lan_qps = 1;
7261 pf->num_lan_msix = 1;
7262
7263 } else if (v_actual != v_budget) {
7264 int vec;
7265
7266 /* reserve the misc vector */
7267 vec = v_actual - 1;
7268
7269 /* Scale vector usage down */
7270 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
7271 pf->num_vmdq_vsis = 1;
7272 pf->num_vmdq_qps = 1;
7273 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7274
7275 /* partition out the remaining vectors */
7276 switch (vec) {
7277 case 2:
7278 pf->num_lan_msix = 1;
7279 break;
7280 case 3:
7281 #ifdef I40E_FCOE
7282 /* give one vector to FCoE */
7283 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7284 pf->num_lan_msix = 1;
7285 pf->num_fcoe_msix = 1;
7286 }
7287 #else
7288 pf->num_lan_msix = 2;
7289 #endif
7290 break;
7291 default:
7292 #ifdef I40E_FCOE
7293 /* give one vector to FCoE */
7294 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7295 pf->num_fcoe_msix = 1;
7296 vec--;
7297 }
7298 #endif
7299 /* give the rest to the PF */
7300 pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps);
7301 break;
7302 }
7303 }
7304
7305 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
7306 (pf->num_vmdq_msix == 0)) {
7307 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
7308 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
7309 }
7310 #ifdef I40E_FCOE
7311
7312 if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
7313 dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
7314 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
7315 }
7316 #endif
7317 return v_actual;
7318 }
7319
7320 /**
7321 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
7322 * @vsi: the VSI being configured
7323 * @v_idx: index of the vector in the vsi struct
7324 *
7325 * We allocate one q_vector. If allocation fails we return -ENOMEM.
7326 **/
7327 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
7328 {
7329 struct i40e_q_vector *q_vector;
7330
7331 /* allocate q_vector */
7332 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
7333 if (!q_vector)
7334 return -ENOMEM;
7335
7336 q_vector->vsi = vsi;
7337 q_vector->v_idx = v_idx;
7338 cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
7339 if (vsi->netdev)
7340 netif_napi_add(vsi->netdev, &q_vector->napi,
7341 i40e_napi_poll, NAPI_POLL_WEIGHT);
7342
7343 q_vector->rx.latency_range = I40E_LOW_LATENCY;
7344 q_vector->tx.latency_range = I40E_LOW_LATENCY;
7345
7346 /* tie q_vector and vsi together */
7347 vsi->q_vectors[v_idx] = q_vector;
7348
7349 return 0;
7350 }
7351
7352 /**
7353 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
7354 * @vsi: the VSI being configured
7355 *
7356 * We allocate one q_vector per queue interrupt. If allocation fails we
7357 * return -ENOMEM.
7358 **/
7359 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
7360 {
7361 struct i40e_pf *pf = vsi->back;
7362 int v_idx, num_q_vectors;
7363 int err;
7364
7365 /* if not MSIX, give the one vector only to the LAN VSI */
7366 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7367 num_q_vectors = vsi->num_q_vectors;
7368 else if (vsi == pf->vsi[pf->lan_vsi])
7369 num_q_vectors = 1;
7370 else
7371 return -EINVAL;
7372
7373 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
7374 err = i40e_vsi_alloc_q_vector(vsi, v_idx);
7375 if (err)
7376 goto err_out;
7377 }
7378
7379 return 0;
7380
7381 err_out:
7382 while (v_idx--)
7383 i40e_free_q_vector(vsi, v_idx);
7384
7385 return err;
7386 }
7387
7388 /**
7389 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
7390 * @pf: board private structure to initialize
7391 **/
7392 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
7393 {
7394 int vectors = 0;
7395 ssize_t size;
7396
7397 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
7398 vectors = i40e_init_msix(pf);
7399 if (vectors < 0) {
7400 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
7401 #ifdef I40E_FCOE
7402 I40E_FLAG_FCOE_ENABLED |
7403 #endif
7404 I40E_FLAG_RSS_ENABLED |
7405 I40E_FLAG_DCB_CAPABLE |
7406 I40E_FLAG_SRIOV_ENABLED |
7407 I40E_FLAG_FD_SB_ENABLED |
7408 I40E_FLAG_FD_ATR_ENABLED |
7409 I40E_FLAG_VMDQ_ENABLED);
7410
7411 /* rework the queue expectations without MSIX */
7412 i40e_determine_queue_usage(pf);
7413 }
7414 }
7415
7416 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
7417 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
7418 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
7419 vectors = pci_enable_msi(pf->pdev);
7420 if (vectors < 0) {
7421 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
7422 vectors);
7423 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
7424 }
7425 vectors = 1; /* one MSI or Legacy vector */
7426 }
7427
7428 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
7429 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
7430
7431 /* set up vector assignment tracking */
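/* Note: the lump tracker ends in a flexible array of u16 slots, one
 * per vector; the i40e_get_lump() call below reserves the first slot
 * for the misc/other-causes interrupt, so queue vectors are handed
 * out starting at index 1.
 */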
7432 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
7433 pf->irq_pile = kzalloc(size, GFP_KERNEL);
7434 if (!pf->irq_pile) {
7435 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
7436 return -ENOMEM;
7437 }
7438 pf->irq_pile->num_entries = vectors;
7439 pf->irq_pile->search_hint = 0;
7440
7441 /* track first vector for misc interrupts, ignore return */
7442 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
7443
7444 return 0;
7445 }
7446
7447 /**
7448 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
7449 * @pf: board private structure
7450 *
* This sets up the handler for MSI-X vector 0, which is used to manage
* the non-queue interrupts, e.g. AdminQ and errors. This is not used
* when in MSI or Legacy interrupt mode.
7454 **/
7455 static int i40e_setup_misc_vector(struct i40e_pf *pf)
7456 {
7457 struct i40e_hw *hw = &pf->hw;
7458 int err = 0;
7459
7460 /* Only request the irq if this is the first time through, and
7461 * not when we're rebuilding after a Reset
7462 */
7463 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7464 err = request_irq(pf->msix_entries[0].vector,
7465 i40e_intr, 0, pf->int_name, pf);
7466 if (err) {
7467 dev_info(&pf->pdev->dev,
7468 "request_irq for %s failed: %d\n",
7469 pf->int_name, err);
7470 return -EFAULT;
7471 }
7472 }
7473
7474 i40e_enable_misc_int_causes(pf);
7475
7476 /* associate no queues to the misc vector */
7477 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
7478 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
7479
7480 i40e_flush(hw);
7481
7482 i40e_irq_dynamic_enable_icr0(pf);
7483
7484 return err;
7485 }
7486
7487 /**
7488 * i40e_config_rss_aq - Prepare for RSS using AQ commands
7489 * @vsi: vsi structure
7490 * @seed: RSS hash seed
7491 **/
7492 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
7493 {
7494 struct i40e_aqc_get_set_rss_key_data rss_key;
7495 struct i40e_pf *pf = vsi->back;
7496 struct i40e_hw *hw = &pf->hw;
7497 bool pf_lut = false;
7498 u8 *rss_lut;
7499 int ret, i;
7500
7501 memset(&rss_key, 0, sizeof(rss_key));
7502 memcpy(&rss_key, seed, sizeof(rss_key));
7503
7504 rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL);
7505 if (!rss_lut)
7506 return -ENOMEM;
7507
/* Populate the LUT with the maximum number of queues in round-robin fashion */
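/* e.g. with rss_size = 4 the LUT reads 0, 1, 2, 3, 0, 1, 2, 3, ... */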
7509 for (i = 0; i < vsi->rss_table_size; i++)
7510 rss_lut[i] = i % vsi->rss_size;
7511
ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
if (ret) {
dev_info(&pf->pdev->dev,
"Cannot set RSS key, err %s aq_err %s\n",
i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
kfree(rss_lut);
return ret;
}
7520
7521 if (vsi->type == I40E_VSI_MAIN)
7522 pf_lut = true;
7523
7524 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
7525 vsi->rss_table_size);
7526 if (ret)
7527 dev_info(&pf->pdev->dev,
7528 "Cannot set RSS lut, err %s aq_err %s\n",
7529 i40e_stat_str(&pf->hw, ret),
7530 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7531
kfree(rss_lut);
return ret;
7533 }
7534
7535 /**
7536 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
7537 * @vsi: VSI structure
7538 **/
7539 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
7540 {
7541 u8 seed[I40E_HKEY_ARRAY_SIZE];
7542 struct i40e_pf *pf = vsi->back;
7543
7544 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
7545 vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
7546
7547 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
7548 return i40e_config_rss_aq(vsi, seed);
7549
7550 return 0;
7551 }
7552
7553 /**
7554 * i40e_config_rss_reg - Prepare for RSS if used
7555 * @pf: board private structure
7556 * @seed: RSS hash seed
7557 **/
7558 static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed)
7559 {
7560 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
7561 struct i40e_hw *hw = &pf->hw;
7562 u32 *seed_dw = (u32 *)seed;
7563 u32 current_queue = 0;
7564 u32 lut = 0;
7565 int i, j;
7566
7567 /* Fill out hash function seed */
7568 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7569 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
7570
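/* Each 32-bit HLUT register packs four 8-bit queue indices, walking
 * the queues round-robin and wrapping at vsi->rss_size; e.g. with
 * rss_size = 2 every register is written as 0x01000100.
 */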
7571 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
7572 lut = 0;
7573 for (j = 0; j < 4; j++) {
7574 if (current_queue == vsi->rss_size)
7575 current_queue = 0;
7576 lut |= ((current_queue) << (8 * j));
7577 current_queue++;
7578 }
7579 wr32(&pf->hw, I40E_PFQF_HLUT(i), lut);
7580 }
7581 i40e_flush(hw);
7582
7583 return 0;
7584 }
7585
7586 /**
7587 * i40e_config_rss - Prepare for RSS if used
7588 * @pf: board private structure
7589 **/
7590 static int i40e_config_rss(struct i40e_pf *pf)
7591 {
7592 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
7593 u8 seed[I40E_HKEY_ARRAY_SIZE];
7594 struct i40e_hw *hw = &pf->hw;
7595 u32 reg_val;
7596 u64 hena;
7597
7598 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
7599
7600 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
7601 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
7602 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
7603 hena |= i40e_pf_get_default_rss_hena(pf);
7604
7605 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
7606 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
7607
7608 vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
7609
7610 /* Determine the RSS table size based on the hardware capabilities */
7611 reg_val = rd32(hw, I40E_PFQF_CTL_0);
7612 reg_val = (pf->rss_table_size == 512) ?
7613 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
7614 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
7615 wr32(hw, I40E_PFQF_CTL_0, reg_val);
7616
7617 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
7618 return i40e_config_rss_aq(pf->vsi[pf->lan_vsi], seed);
7619 else
7620 return i40e_config_rss_reg(pf, seed);
7621 }
7622
7623 /**
7624 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
7625 * @pf: board private structure
7626 * @queue_count: the requested queue count for rss.
7627 *
7628 * returns 0 if rss is not enabled, if enabled returns the final rss queue
7629 * count which may be different from the requested queue count.
7630 **/
7631 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
7632 {
7633 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
7634 int new_rss_size;
7635
7636 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
7637 return 0;
7638
7639 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
7640
7641 if (queue_count != vsi->num_queue_pairs) {
7642 vsi->req_queue_pairs = queue_count;
7643 i40e_prep_for_reset(pf);
7644
7645 pf->rss_size = new_rss_size;
7646
7647 i40e_reset_and_rebuild(pf, true);
7648 i40e_config_rss(pf);
7649 }
7650 dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size);
7651 return pf->rss_size;
7652 }
7653
7654 /**
7655 * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
7656 * @pf: board private structure
7657 **/
7658 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
7659 {
7660 i40e_status status;
7661 bool min_valid, max_valid;
7662 u32 max_bw, min_bw;
7663
7664 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
7665 &min_valid, &max_valid);
7666
7667 if (!status) {
7668 if (min_valid)
7669 pf->npar_min_bw = min_bw;
7670 if (max_valid)
7671 pf->npar_max_bw = max_bw;
7672 }
7673
7674 return status;
7675 }
7676
7677 /**
7678 * i40e_set_npar_bw_setting - Set BW settings for this PF partition
7679 * @pf: board private structure
7680 **/
7681 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
7682 {
7683 struct i40e_aqc_configure_partition_bw_data bw_data;
7684 i40e_status status;
7685
7686 /* Set the valid bit for this PF */
7687 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
7688 bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
7689 bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
7690
7691 /* Set the new bandwidths */
7692 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
7693
7694 return status;
7695 }
7696
7697 /**
7698 * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
7699 * @pf: board private structure
7700 **/
7701 i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
7702 {
7703 /* Commit temporary BW setting to permanent NVM image */
7704 enum i40e_admin_queue_err last_aq_status;
7705 i40e_status ret;
7706 u16 nvm_word;
7707
7708 if (pf->hw.partition_id != 1) {
dev_info(&pf->pdev->dev,
"Commit BW only works on partition 1! This is partition %d\n",
pf->hw.partition_id);
7712 ret = I40E_NOT_SUPPORTED;
7713 goto bw_commit_out;
7714 }
7715
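/* The sequence below is a read-modify-write of SW compatibility
 * word 1: acquire the NVM for read, fetch the word, release, then
 * re-acquire for write and write the same word back unchanged - the
 * update itself is what flushes the shadow (alt) RAM to the NVM.
 */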
7716 /* Acquire NVM for read access */
7717 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
7718 last_aq_status = pf->hw.aq.asq_last_status;
7719 if (ret) {
7720 dev_info(&pf->pdev->dev,
7721 "Cannot acquire NVM for read access, err %s aq_err %s\n",
7722 i40e_stat_str(&pf->hw, ret),
7723 i40e_aq_str(&pf->hw, last_aq_status));
7724 goto bw_commit_out;
7725 }
7726
7727 /* Read word 0x10 of NVM - SW compatibility word 1 */
7728 ret = i40e_aq_read_nvm(&pf->hw,
7729 I40E_SR_NVM_CONTROL_WORD,
7730 0x10, sizeof(nvm_word), &nvm_word,
7731 false, NULL);
7732 /* Save off last admin queue command status before releasing
7733 * the NVM
7734 */
7735 last_aq_status = pf->hw.aq.asq_last_status;
7736 i40e_release_nvm(&pf->hw);
7737 if (ret) {
7738 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
7739 i40e_stat_str(&pf->hw, ret),
7740 i40e_aq_str(&pf->hw, last_aq_status));
7741 goto bw_commit_out;
7742 }
7743
7744 /* Wait a bit for NVM release to complete */
7745 msleep(50);
7746
7747 /* Acquire NVM for write access */
7748 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
7749 last_aq_status = pf->hw.aq.asq_last_status;
7750 if (ret) {
7751 dev_info(&pf->pdev->dev,
7752 "Cannot acquire NVM for write access, err %s aq_err %s\n",
7753 i40e_stat_str(&pf->hw, ret),
7754 i40e_aq_str(&pf->hw, last_aq_status));
7755 goto bw_commit_out;
7756 }
7757 /* Write it back out unchanged to initiate update NVM,
7758 * which will force a write of the shadow (alt) RAM to
7759 * the NVM - thus storing the bandwidth values permanently.
7760 */
7761 ret = i40e_aq_update_nvm(&pf->hw,
7762 I40E_SR_NVM_CONTROL_WORD,
7763 0x10, sizeof(nvm_word),
7764 &nvm_word, true, NULL);
7765 /* Save off last admin queue command status before releasing
7766 * the NVM
7767 */
7768 last_aq_status = pf->hw.aq.asq_last_status;
7769 i40e_release_nvm(&pf->hw);
7770 if (ret)
7771 dev_info(&pf->pdev->dev,
7772 "BW settings NOT SAVED, err %s aq_err %s\n",
7773 i40e_stat_str(&pf->hw, ret),
7774 i40e_aq_str(&pf->hw, last_aq_status));
7775 bw_commit_out:
7776
7777 return ret;
7778 }
7779
7780 /**
7781 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
7782 * @pf: board private structure to initialize
7783 *
7784 * i40e_sw_init initializes the Adapter private data structure.
7785 * Fields are initialized based on PCI device information and
7786 * OS network device settings (MTU size).
7787 **/
7788 static int i40e_sw_init(struct i40e_pf *pf)
7789 {
7790 int err = 0;
7791 int size;
7792
7793 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
7794 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
7795 pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
7796 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
7797 if (I40E_DEBUG_USER & debug)
7798 pf->hw.debug_mask = debug;
7799 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
7800 I40E_DEFAULT_MSG_ENABLE);
7801 }
7802
7803 /* Set default capability flags */
7804 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
7805 I40E_FLAG_MSI_ENABLED |
7806 I40E_FLAG_MSIX_ENABLED;
7807
7808 if (iommu_present(&pci_bus_type))
7809 pf->flags |= I40E_FLAG_RX_PS_ENABLED;
7810 else
7811 pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
7812
7813 /* Set default ITR */
7814 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
7815 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
7816
7817 /* Depending on PF configurations, it is possible that the RSS
7818 * maximum might end up larger than the available queues
7819 */
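/* e.g. a (hypothetical) rss_table_entry_width of 7 yields
 * rss_size_max = 128, clamped below to num_tx_qp and, when RSS is
 * supported, to the number of online CPUs.
 */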
7820 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
7821 pf->rss_size = 1;
7822 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
7823 pf->rss_size_max = min_t(int, pf->rss_size_max,
7824 pf->hw.func_caps.num_tx_qp);
7825 if (pf->hw.func_caps.rss) {
7826 pf->flags |= I40E_FLAG_RSS_ENABLED;
7827 pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
7828 }
7829
7830 /* MFP mode enabled */
7831 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
7832 pf->flags |= I40E_FLAG_MFP_ENABLED;
7833 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
7834 if (i40e_get_npar_bw_setting(pf))
7835 dev_warn(&pf->pdev->dev,
7836 "Could not get NPAR bw settings\n");
7837 else
7838 dev_info(&pf->pdev->dev,
7839 "Min BW = %8.8x, Max BW = %8.8x\n",
7840 pf->npar_min_bw, pf->npar_max_bw);
7841 }
7842
7843 /* FW/NVM is not yet fixed in this regard */
7844 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
7845 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
7846 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
7847 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
7848 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
7849 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7850 } else {
7851 dev_info(&pf->pdev->dev,
7852 "Flow Director Sideband mode Disabled in MFP mode\n");
7853 }
7854 pf->fdir_pf_filter_count =
7855 pf->hw.func_caps.fd_filters_guaranteed;
7856 pf->hw.fdir_shared_filter_count =
7857 pf->hw.func_caps.fd_filters_best_effort;
7858 }
7859
7860 if (pf->hw.func_caps.vmdq) {
7861 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
7862 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
7863 }
7864
7865 #ifdef I40E_FCOE
7866 err = i40e_init_pf_fcoe(pf);
7867 if (err)
7868 dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);
7869
7870 #endif /* I40E_FCOE */
7871 #ifdef CONFIG_PCI_IOV
7872 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
7873 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
7874 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
7875 pf->num_req_vfs = min_t(int,
7876 pf->hw.func_caps.num_vfs,
7877 I40E_MAX_VF_COUNT);
7878 }
7879 #endif /* CONFIG_PCI_IOV */
7880 if (pf->hw.mac.type == I40E_MAC_X722) {
7881 pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
7882 I40E_FLAG_128_QP_RSS_CAPABLE |
7883 I40E_FLAG_HW_ATR_EVICT_CAPABLE |
7884 I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
7885 I40E_FLAG_WB_ON_ITR_CAPABLE |
7886 I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE;
7887 }
7888 pf->eeprom_version = 0xDEAD;
7889 pf->lan_veb = I40E_NO_VEB;
7890 pf->lan_vsi = I40E_NO_VSI;
7891
7892 /* set up queue assignment tracking */
7893 size = sizeof(struct i40e_lump_tracking)
7894 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
7895 pf->qp_pile = kzalloc(size, GFP_KERNEL);
7896 if (!pf->qp_pile) {
7897 err = -ENOMEM;
7898 goto sw_init_done;
7899 }
7900 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
7901 pf->qp_pile->search_hint = 0;
7902
7903 pf->tx_timeout_recovery_level = 1;
7904
7905 mutex_init(&pf->switch_mutex);
7906
7907 /* If NPAR is enabled nudge the Tx scheduler */
7908 if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
7909 i40e_set_npar_bw_setting(pf);
7910
7911 sw_init_done:
7912 return err;
7913 }
7914
7915 /**
7916 * i40e_set_ntuple - set the ntuple feature flag and take action
7917 * @pf: board private structure to initialize
7918 * @features: the feature set that the stack is suggesting
7919 *
7920 * returns a bool to indicate if reset needs to happen
7921 **/
7922 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
7923 {
7924 bool need_reset = false;
7925
7926 /* Check if Flow Director n-tuple support was enabled or disabled. If
7927 * the state changed, we need to reset.
7928 */
7929 if (features & NETIF_F_NTUPLE) {
7930 /* Enable filters and mark for reset */
7931 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
7932 need_reset = true;
7933 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7934 } else {
7935 /* turn off filters, mark for reset and clear SW filter list */
7936 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7937 need_reset = true;
7938 i40e_fdir_filter_exit(pf);
7939 }
7940 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7941 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
7942 /* reset fd counters */
7943 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
7944 pf->fdir_pf_active_filters = 0;
7945 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
/* if ATR was auto disabled it can be re-enabled. */
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
}
7952 }
7953 return need_reset;
7954 }
7955
7956 /**
7957 * i40e_set_features - set the netdev feature flags
7958 * @netdev: ptr to the netdev being adjusted
7959 * @features: the feature set that the stack is suggesting
7960 **/
7961 static int i40e_set_features(struct net_device *netdev,
7962 netdev_features_t features)
7963 {
7964 struct i40e_netdev_priv *np = netdev_priv(netdev);
7965 struct i40e_vsi *vsi = np->vsi;
7966 struct i40e_pf *pf = vsi->back;
7967 bool need_reset;
7968
7969 if (features & NETIF_F_HW_VLAN_CTAG_RX)
7970 i40e_vlan_stripping_enable(vsi);
7971 else
7972 i40e_vlan_stripping_disable(vsi);
7973
7974 need_reset = i40e_set_ntuple(pf, features);
7975
7976 if (need_reset)
7977 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
7978
7979 return 0;
7980 }
7981
7982 #ifdef CONFIG_I40E_VXLAN
7983 /**
* i40e_get_vxlan_port_idx - Look up a possibly offloaded UDP port for Rx
7985 * @pf: board private structure
7986 * @port: The UDP port to look up
7987 *
7988 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
7989 **/
7990 static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
7991 {
7992 u8 i;
7993
7994 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7995 if (pf->vxlan_ports[i] == port)
7996 return i;
7997 }
7998
7999 return i;
8000 }
8001
8002 /**
8003 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
8004 * @netdev: This physical port's netdev
8005 * @sa_family: Socket Family that VXLAN is notifying us about
8006 * @port: New UDP port number that VXLAN started listening to
8007 **/
8008 static void i40e_add_vxlan_port(struct net_device *netdev,
8009 sa_family_t sa_family, __be16 port)
8010 {
8011 struct i40e_netdev_priv *np = netdev_priv(netdev);
8012 struct i40e_vsi *vsi = np->vsi;
8013 struct i40e_pf *pf = vsi->back;
8014 u8 next_idx;
8015 u8 idx;
8016
8017 if (sa_family == AF_INET6)
8018 return;
8019
8020 idx = i40e_get_vxlan_port_idx(pf, port);
8021
8022 /* Check if port already exists */
8023 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8024 netdev_info(netdev, "vxlan port %d already offloaded\n",
8025 ntohs(port));
8026 return;
8027 }
8028
8029 /* Now check if there is space to add the new port */
8030 next_idx = i40e_get_vxlan_port_idx(pf, 0);
8031
8032 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8033 netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
8034 ntohs(port));
8035 return;
8036 }
8037
8038 /* New port: add it and mark its index in the bitmap */
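/* HW programming is deferred: the sync flag set below lets the
 * periodic service task push the pending port to the device via AQ.
 */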
8039 pf->vxlan_ports[next_idx] = port;
8040 pf->pending_vxlan_bitmap |= BIT_ULL(next_idx);
8041 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
8042
8043 dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port));
8044 }
8045
8046 /**
8047 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
8048 * @netdev: This physical port's netdev
8049 * @sa_family: Socket Family that VXLAN is notifying us about
8050 * @port: UDP port number that VXLAN stopped listening to
8051 **/
8052 static void i40e_del_vxlan_port(struct net_device *netdev,
8053 sa_family_t sa_family, __be16 port)
8054 {
8055 struct i40e_netdev_priv *np = netdev_priv(netdev);
8056 struct i40e_vsi *vsi = np->vsi;
8057 struct i40e_pf *pf = vsi->back;
8058 u8 idx;
8059
8060 if (sa_family == AF_INET6)
8061 return;
8062
8063 idx = i40e_get_vxlan_port_idx(pf, port);
8064
8065 /* Check if port already exists */
8066 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8067 /* if port exists, set it to 0 (mark for deletion)
8068 * and make it pending
8069 */
8070 pf->vxlan_ports[idx] = 0;
8071 pf->pending_vxlan_bitmap |= BIT_ULL(idx);
8072 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
8073
8074 dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
8075 ntohs(port));
8076 } else {
8077 netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
8078 ntohs(port));
8079 }
8080 }
8081
8082 #endif
8083 static int i40e_get_phys_port_id(struct net_device *netdev,
8084 struct netdev_phys_item_id *ppid)
8085 {
8086 struct i40e_netdev_priv *np = netdev_priv(netdev);
8087 struct i40e_pf *pf = np->vsi->back;
8088 struct i40e_hw *hw = &pf->hw;
8089
8090 if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
8091 return -EOPNOTSUPP;
8092
8093 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
8094 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
8095
8096 return 0;
8097 }
8098
8099 /**
8100 * i40e_ndo_fdb_add - add an entry to the hardware database
8101 * @ndm: the input from the stack
8102 * @tb: pointer to array of nladdr (unused)
8103 * @dev: the net device pointer
* @addr: the MAC address entry being added
* @vid: VLAN ID of the entry (0 is the only value currently supported)
* @flags: instructions from stack about fdb operation
8106 */
8107 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
8108 struct net_device *dev,
8109 const unsigned char *addr, u16 vid,
8110 u16 flags)
8111 {
8112 struct i40e_netdev_priv *np = netdev_priv(dev);
8113 struct i40e_pf *pf = np->vsi->back;
8114 int err = 0;
8115
8116 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
8117 return -EOPNOTSUPP;
8118
8119 if (vid) {
8120 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
8121 return -EINVAL;
8122 }
8123
/* Hardware does not support aging addresses, so if an
 * ndm_state is given, only allow permanent addresses
 */
8127 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
8128 netdev_info(dev, "FDB only supports static addresses\n");
8129 return -EINVAL;
8130 }
8131
8132 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
8133 err = dev_uc_add_excl(dev, addr);
8134 else if (is_multicast_ether_addr(addr))
8135 err = dev_mc_add_excl(dev, addr);
8136 else
8137 err = -EINVAL;
8138
8139 /* Only return duplicate errors if NLM_F_EXCL is set */
8140 if (err == -EEXIST && !(flags & NLM_F_EXCL))
8141 err = 0;
8142
8143 return err;
8144 }
8145
8146 /**
8147 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
8148 * @dev: the netdev being configured
* @nlh: RTNL message
* @flags: netlink flags passed in (unused)
8150 *
8151 * Inserts a new hardware bridge if not already created and
8152 * enables the bridging mode requested (VEB or VEPA). If the
8153 * hardware bridge has already been inserted and the request
8154 * is to change the mode then that requires a PF reset to
8155 * allow rebuild of the components with required hardware
8156 * bridge mode enabled.
8157 **/
8158 static int i40e_ndo_bridge_setlink(struct net_device *dev,
8159 struct nlmsghdr *nlh,
8160 u16 flags)
8161 {
8162 struct i40e_netdev_priv *np = netdev_priv(dev);
8163 struct i40e_vsi *vsi = np->vsi;
8164 struct i40e_pf *pf = vsi->back;
8165 struct i40e_veb *veb = NULL;
8166 struct nlattr *attr, *br_spec;
8167 int i, rem;
8168
8169 /* Only for PF VSI for now */
8170 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8171 return -EOPNOTSUPP;
8172
8173 /* Find the HW bridge for PF VSI */
8174 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8175 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8176 veb = pf->veb[i];
8177 }
8178
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (!br_spec)
return -EINVAL;

nla_for_each_nested(attr, br_spec, rem) {
8182 __u16 mode;
8183
8184 if (nla_type(attr) != IFLA_BRIDGE_MODE)
8185 continue;
8186
8187 mode = nla_get_u16(attr);
8188 if ((mode != BRIDGE_MODE_VEPA) &&
8189 (mode != BRIDGE_MODE_VEB))
8190 return -EINVAL;
8191
8192 /* Insert a new HW bridge */
8193 if (!veb) {
8194 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8195 vsi->tc_config.enabled_tc);
8196 if (veb) {
8197 veb->bridge_mode = mode;
8198 i40e_config_bridge_mode(veb);
8199 } else {
8200 /* No Bridge HW offload available */
8201 return -ENOENT;
8202 }
8203 break;
8204 } else if (mode != veb->bridge_mode) {
8205 /* Existing HW bridge but different mode needs reset */
8206 veb->bridge_mode = mode;
8207 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
8208 if (mode == BRIDGE_MODE_VEB)
8209 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
8210 else
8211 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
8212 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8213 break;
8214 }
8215 }
8216
8217 return 0;
8218 }
8219
8220 /**
8221 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
8222 * @skb: skb buff
8223 * @pid: process id
8224 * @seq: RTNL message seq #
8225 * @dev: the netdev being configured
* @filter_mask: unused
* @nlflags: netlink flags passed in
*
* Return the mode in which the hardware bridge is operating,
* i.e. VEB or VEPA.
8230 **/
8231 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8232 struct net_device *dev,
8233 u32 filter_mask, int nlflags)
8234 {
8235 struct i40e_netdev_priv *np = netdev_priv(dev);
8236 struct i40e_vsi *vsi = np->vsi;
8237 struct i40e_pf *pf = vsi->back;
8238 struct i40e_veb *veb = NULL;
8239 int i;
8240
8241 /* Only for PF VSI for now */
8242 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8243 return -EOPNOTSUPP;
8244
8245 /* Find the HW bridge for the PF VSI */
8246 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8247 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8248 veb = pf->veb[i];
8249 }
8250
8251 if (!veb)
8252 return 0;
8253
8254 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
8255 nlflags, 0, 0, filter_mask, NULL);
8256 }
8257
8258 #define I40E_MAX_TUNNEL_HDR_LEN 80
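/* The tunnel header length is measured as the span from the outer
 * transport header to the inner MAC header; encapsulated frames whose
 * headers exceed this limit lose checksum and GSO offloads below.
 */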
8259 /**
8260 * i40e_features_check - Validate encapsulated packet conforms to limits
8261 * @skb: skb buff
* @dev: This physical port's netdev
8263 * @features: Offload features that the stack believes apply
8264 **/
8265 static netdev_features_t i40e_features_check(struct sk_buff *skb,
8266 struct net_device *dev,
8267 netdev_features_t features)
8268 {
8269 if (skb->encapsulation &&
8270 (skb_inner_mac_header(skb) - skb_transport_header(skb) >
8271 I40E_MAX_TUNNEL_HDR_LEN))
8272 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
8273
8274 return features;
8275 }
8276
8277 static const struct net_device_ops i40e_netdev_ops = {
8278 .ndo_open = i40e_open,
8279 .ndo_stop = i40e_close,
8280 .ndo_start_xmit = i40e_lan_xmit_frame,
8281 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
8282 .ndo_set_rx_mode = i40e_set_rx_mode,
8283 .ndo_validate_addr = eth_validate_addr,
8284 .ndo_set_mac_address = i40e_set_mac,
8285 .ndo_change_mtu = i40e_change_mtu,
8286 .ndo_do_ioctl = i40e_ioctl,
8287 .ndo_tx_timeout = i40e_tx_timeout,
8288 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
8289 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
8290 #ifdef CONFIG_NET_POLL_CONTROLLER
8291 .ndo_poll_controller = i40e_netpoll,
8292 #endif
8293 .ndo_setup_tc = i40e_setup_tc,
8294 #ifdef I40E_FCOE
8295 .ndo_fcoe_enable = i40e_fcoe_enable,
8296 .ndo_fcoe_disable = i40e_fcoe_disable,
8297 #endif
8298 .ndo_set_features = i40e_set_features,
8299 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
8300 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
8301 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
8302 .ndo_get_vf_config = i40e_ndo_get_vf_config,
8303 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
8304 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
8305 #ifdef CONFIG_I40E_VXLAN
8306 .ndo_add_vxlan_port = i40e_add_vxlan_port,
8307 .ndo_del_vxlan_port = i40e_del_vxlan_port,
8308 #endif
8309 .ndo_get_phys_port_id = i40e_get_phys_port_id,
8310 .ndo_fdb_add = i40e_ndo_fdb_add,
8311 .ndo_features_check = i40e_features_check,
8312 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
8313 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
8314 };
8315
8316 /**
8317 * i40e_config_netdev - Setup the netdev flags
8318 * @vsi: the VSI being configured
8319 *
8320 * Returns 0 on success, negative value on failure
8321 **/
8322 static int i40e_config_netdev(struct i40e_vsi *vsi)
8323 {
8324 u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
8325 struct i40e_pf *pf = vsi->back;
8326 struct i40e_hw *hw = &pf->hw;
8327 struct i40e_netdev_priv *np;
8328 struct net_device *netdev;
8329 u8 mac_addr[ETH_ALEN];
8330 int etherdev_size;
8331
8332 etherdev_size = sizeof(struct i40e_netdev_priv);
8333 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
8334 if (!netdev)
8335 return -ENOMEM;
8336
8337 vsi->netdev = netdev;
8338 np = netdev_priv(netdev);
8339 np->vsi = vsi;
8340
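/* offloads advertised for encapsulated (e.g. VXLAN) Tx traffic */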
8341 netdev->hw_enc_features |= NETIF_F_IP_CSUM |
8342 NETIF_F_GSO_UDP_TUNNEL |
8343 NETIF_F_TSO;
8344
8345 netdev->features = NETIF_F_SG |
8346 NETIF_F_IP_CSUM |
8347 NETIF_F_SCTP_CSUM |
8348 NETIF_F_HIGHDMA |
8349 NETIF_F_GSO_UDP_TUNNEL |
8350 NETIF_F_HW_VLAN_CTAG_TX |
8351 NETIF_F_HW_VLAN_CTAG_RX |
8352 NETIF_F_HW_VLAN_CTAG_FILTER |
8353 NETIF_F_IPV6_CSUM |
8354 NETIF_F_TSO |
8355 NETIF_F_TSO_ECN |
8356 NETIF_F_TSO6 |
8357 NETIF_F_RXCSUM |
8358 NETIF_F_RXHASH |
8359 0;
8360
8361 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
8362 netdev->features |= NETIF_F_NTUPLE;
8363
8364 /* copy netdev features into list of user selectable features */
8365 netdev->hw_features |= netdev->features;
8366
8367 if (vsi->type == I40E_VSI_MAIN) {
8368 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
8369 ether_addr_copy(mac_addr, hw->mac.perm_addr);
/* The following steps are necessary to prevent reception
 * of tagged packets - some older NVM configurations load a
 * default MAC-VLAN filter that accepts any tagged packet,
 * which must be replaced by a normal filter.
 */
8375 if (!i40e_rm_default_mac_filter(vsi, mac_addr))
8376 i40e_add_filter(vsi, mac_addr,
8377 I40E_VLAN_ANY, false, true);
8378 } else {
8379 /* relate the VSI_VMDQ name to the VSI_MAIN name */
8380 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
8381 pf->vsi[pf->lan_vsi]->netdev->name);
8382 random_ether_addr(mac_addr);
8383 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
8384 }
8385 i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
8386
8387 ether_addr_copy(netdev->dev_addr, mac_addr);
8388 ether_addr_copy(netdev->perm_addr, mac_addr);
8389 /* vlan gets same features (except vlan offload)
8390 * after any tweaks for specific VSI types
8391 */
8392 netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
8393 NETIF_F_HW_VLAN_CTAG_RX |
8394 NETIF_F_HW_VLAN_CTAG_FILTER);
8395 netdev->priv_flags |= IFF_UNICAST_FLT;
8396 netdev->priv_flags |= IFF_SUPP_NOFCS;
8397 /* Setup netdev TC information */
8398 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
8399
8400 netdev->netdev_ops = &i40e_netdev_ops;
8401 netdev->watchdog_timeo = 5 * HZ;
8402 i40e_set_ethtool_ops(netdev);
8403 #ifdef I40E_FCOE
8404 i40e_fcoe_config_netdev(netdev, vsi);
8405 #endif
8406
8407 return 0;
8408 }
8409
8410 /**
8411 * i40e_vsi_delete - Delete a VSI from the switch
8412 * @vsi: the VSI being removed
8413 *
8414 * Returns 0 on success, negative value on failure
8415 **/
8416 static void i40e_vsi_delete(struct i40e_vsi *vsi)
8417 {
/* removing the default VSI is not allowed */
8419 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
8420 return;
8421
8422 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
8423 }
8424
8425 /**
8426 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
8427 * @vsi: the VSI being queried
8428 *
* Returns 1 if the HW bridge mode is VEB and 0 if it is VEPA
8430 **/
8431 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
8432 {
8433 struct i40e_veb *veb;
8434 struct i40e_pf *pf = vsi->back;
8435
8436 /* Uplink is not a bridge so default to VEB */
8437 if (vsi->veb_idx == I40E_NO_VEB)
8438 return 1;
8439
8440 veb = pf->veb[vsi->veb_idx];
8441 /* Uplink is a bridge in VEPA mode */
8442 if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA))
8443 return 0;
8444
8445 /* Uplink is a bridge in VEB mode */
8446 return 1;
8447 }
8448
8449 /**
8450 * i40e_add_vsi - Add a VSI to the switch
8451 * @vsi: the VSI being configured
8452 *
8453 * This initializes a VSI context depending on the VSI type to be added and
8454 * passes it down to the add_vsi aq command.
8455 **/
8456 static int i40e_add_vsi(struct i40e_vsi *vsi)
8457 {
8458 int ret = -ENODEV;
8459 struct i40e_mac_filter *f, *ftmp;
8460 struct i40e_pf *pf = vsi->back;
8461 struct i40e_hw *hw = &pf->hw;
8462 struct i40e_vsi_context ctxt;
8463 u8 enabled_tc = 0x1; /* TC0 enabled */
8464 int f_count = 0;
8465
8466 memset(&ctxt, 0, sizeof(ctxt));
8467 switch (vsi->type) {
8468 case I40E_VSI_MAIN:
8469 /* The PF's main VSI is already setup as part of the
8470 * device initialization, so we'll not bother with
8471 * the add_vsi call, but we will retrieve the current
8472 * VSI context.
8473 */
8474 ctxt.seid = pf->main_vsi_seid;
8475 ctxt.pf_num = pf->hw.pf_id;
8476 ctxt.vf_num = 0;
8477 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
8478 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8479 if (ret) {
8480 dev_info(&pf->pdev->dev,
8481 "couldn't get PF vsi config, err %s aq_err %s\n",
8482 i40e_stat_str(&pf->hw, ret),
8483 i40e_aq_str(&pf->hw,
8484 pf->hw.aq.asq_last_status));
8485 return -ENOENT;
8486 }
8487 vsi->info = ctxt.info;
8488 vsi->info.valid_sections = 0;
8489
8490 vsi->seid = ctxt.seid;
8491 vsi->id = ctxt.vsi_number;
8492
8493 enabled_tc = i40e_pf_get_tc_map(pf);
8494
8495 /* MFP mode setup queue map and update VSI */
8496 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
8497 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
8498 memset(&ctxt, 0, sizeof(ctxt));
8499 ctxt.seid = pf->main_vsi_seid;
8500 ctxt.pf_num = pf->hw.pf_id;
8501 ctxt.vf_num = 0;
8502 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
8503 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
8504 if (ret) {
8505 dev_info(&pf->pdev->dev,
8506 "update vsi failed, err %s aq_err %s\n",
8507 i40e_stat_str(&pf->hw, ret),
8508 i40e_aq_str(&pf->hw,
8509 pf->hw.aq.asq_last_status));
8510 ret = -ENOENT;
8511 goto err;
8512 }
8513 /* update the local VSI info queue map */
8514 i40e_vsi_update_queue_map(vsi, &ctxt);
8515 vsi->info.valid_sections = 0;
8516 } else {
8517 /* Default/Main VSI is only enabled for TC0
8518 * reconfigure it to enable all TCs that are
8519 * available on the port in SFP mode.
8520 * For MFP case the iSCSI PF would use this
8521 * flow to enable LAN+iSCSI TC.
8522 */
8523 ret = i40e_vsi_config_tc(vsi, enabled_tc);
8524 if (ret) {
8525 dev_info(&pf->pdev->dev,
8526 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
8527 enabled_tc,
8528 i40e_stat_str(&pf->hw, ret),
8529 i40e_aq_str(&pf->hw,
8530 pf->hw.aq.asq_last_status));
8531 ret = -ENOENT;
8532 }
8533 }
8534 break;
8535
8536 case I40E_VSI_FDIR:
8537 ctxt.pf_num = hw->pf_id;
8538 ctxt.vf_num = 0;
8539 ctxt.uplink_seid = vsi->uplink_seid;
8540 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
8541 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8542 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
8543 (i40e_is_vsi_uplink_mode_veb(vsi))) {
8544 ctxt.info.valid_sections |=
8545 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8546 ctxt.info.switch_id =
8547 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8548 }
8549 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
8550 break;
8551
8552 case I40E_VSI_VMDQ2:
8553 ctxt.pf_num = hw->pf_id;
8554 ctxt.vf_num = 0;
8555 ctxt.uplink_seid = vsi->uplink_seid;
8556 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
8557 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
8558
8559 /* This VSI is connected to VEB so the switch_id
8560 * should be set to zero by default.
8561 */
8562 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
8563 ctxt.info.valid_sections |=
8564 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8565 ctxt.info.switch_id =
8566 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8567 }
8568
8569 /* Setup the VSI tx/rx queue map for TC0 only for now */
8570 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
8571 break;
8572
8573 case I40E_VSI_SRIOV:
8574 ctxt.pf_num = hw->pf_id;
8575 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
8576 ctxt.uplink_seid = vsi->uplink_seid;
8577 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
8578 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
8579
8580 /* This VSI is connected to VEB so the switch_id
8581 * should be set to zero by default.
8582 */
8583 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
8584 ctxt.info.valid_sections |=
8585 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8586 ctxt.info.switch_id =
8587 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8588 }
8589
8590 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
8591 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
8592 if (pf->vf[vsi->vf_id].spoofchk) {
8593 ctxt.info.valid_sections |=
8594 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
8595 ctxt.info.sec_flags |=
8596 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
8597 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
8598 }
8599 /* Setup the VSI tx/rx queue map for TC0 only for now */
8600 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
8601 break;
8602
8603 #ifdef I40E_FCOE
8604 case I40E_VSI_FCOE:
8605 ret = i40e_fcoe_vsi_init(vsi, &ctxt);
8606 if (ret) {
8607 dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
8608 return ret;
8609 }
8610 break;
8611
8612 #endif /* I40E_FCOE */
8613 default:
8614 return -ENODEV;
8615 }
8616
8617 if (vsi->type != I40E_VSI_MAIN) {
8618 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
8619 if (ret) {
8620 dev_info(&vsi->back->pdev->dev,
8621 "add vsi failed, err %s aq_err %s\n",
8622 i40e_stat_str(&pf->hw, ret),
8623 i40e_aq_str(&pf->hw,
8624 pf->hw.aq.asq_last_status));
8625 ret = -ENOENT;
8626 goto err;
8627 }
8628 vsi->info = ctxt.info;
8629 vsi->info.valid_sections = 0;
8630 vsi->seid = ctxt.seid;
8631 vsi->id = ctxt.vsi_number;
8632 }
8633
8634 /* If macvlan filters already exist, force them to get loaded */
8635 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
8636 f->changed = true;
8637 f_count++;
8638
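/* A locally administered address (LAA) on the main VSI replaces the
 * HW default: remove the old perfect-match filter (retrying with the
 * older-FW flag variant on failure) and write the new MAC via AQ.
 */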
8639 if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
8640 struct i40e_aqc_remove_macvlan_element_data element;
8641
8642 memset(&element, 0, sizeof(element));
8643 ether_addr_copy(element.mac_addr, f->macaddr);
8644 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
8645 ret = i40e_aq_remove_macvlan(hw, vsi->seid,
8646 &element, 1, NULL);
8647 if (ret) {
8648 /* some older FW has a different default */
8649 element.flags |=
8650 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
8651 i40e_aq_remove_macvlan(hw, vsi->seid,
8652 &element, 1, NULL);
8653 }
8654
8655 i40e_aq_mac_address_write(hw,
8656 I40E_AQC_WRITE_TYPE_LAA_WOL,
8657 f->macaddr, NULL);
8658 }
8659 }
8660 if (f_count) {
8661 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
8662 pf->flags |= I40E_FLAG_FILTER_SYNC;
8663 }
8664
8665 /* Update VSI BW information */
8666 ret = i40e_vsi_get_bw_info(vsi);
8667 if (ret) {
8668 dev_info(&pf->pdev->dev,
8669 "couldn't get vsi bw info, err %s aq_err %s\n",
8670 i40e_stat_str(&pf->hw, ret),
8671 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8672 /* VSI is already added so not tearing that up */
8673 ret = 0;
8674 }
8675
8676 err:
8677 return ret;
8678 }
8679
8680 /**
8681 * i40e_vsi_release - Delete a VSI and free its resources
8682 * @vsi: the VSI being removed
8683 *
8684 * Returns 0 on success or < 0 on error
8685 **/
8686 int i40e_vsi_release(struct i40e_vsi *vsi)
8687 {
8688 struct i40e_mac_filter *f, *ftmp;
8689 struct i40e_veb *veb = NULL;
8690 struct i40e_pf *pf;
8691 u16 uplink_seid;
8692 int i, n;
8693
8694 pf = vsi->back;
8695
8696 /* release of a VEB-owner or last VSI is not allowed */
8697 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
8698 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
8699 vsi->seid, vsi->uplink_seid);
8700 return -ENODEV;
8701 }
8702 if (vsi == pf->vsi[pf->lan_vsi] &&
8703 !test_bit(__I40E_DOWN, &pf->state)) {
8704 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
8705 return -ENODEV;
8706 }
8707
8708 uplink_seid = vsi->uplink_seid;
8709 if (vsi->type != I40E_VSI_SRIOV) {
8710 if (vsi->netdev_registered) {
8711 vsi->netdev_registered = false;
8712 if (vsi->netdev) {
8713 /* results in a call to i40e_close() */
8714 unregister_netdev(vsi->netdev);
8715 }
8716 } else {
8717 i40e_vsi_close(vsi);
8718 }
8719 i40e_vsi_disable_irq(vsi);
8720 }
8721
8722 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
8723 i40e_del_filter(vsi, f->macaddr, f->vlan,
8724 f->is_vf, f->is_netdev);
8725 i40e_sync_vsi_filters(vsi);
8726
8727 i40e_vsi_delete(vsi);
8728 i40e_vsi_free_q_vectors(vsi);
8729 if (vsi->netdev) {
8730 free_netdev(vsi->netdev);
8731 vsi->netdev = NULL;
8732 }
8733 i40e_vsi_clear_rings(vsi);
8734 i40e_vsi_clear(vsi);
8735
8736 /* If this was the last thing on the VEB, except for the
8737 * controlling VSI, remove the VEB, which puts the controlling
8738 * VSI onto the next level down in the switch.
8739 *
8740 * Well, okay, there's one more exception here: don't remove
8741 * the orphan VEBs yet. We'll wait for an explicit remove request
8742 * from up the network stack.
8743 */
8744 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
8745 if (pf->vsi[i] &&
8746 pf->vsi[i]->uplink_seid == uplink_seid &&
8747 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
8748 n++; /* count the VSIs */
8749 }
8750 }
8751 for (i = 0; i < I40E_MAX_VEB; i++) {
8752 if (!pf->veb[i])
8753 continue;
8754 if (pf->veb[i]->uplink_seid == uplink_seid)
8755 n++; /* count the VEBs */
8756 if (pf->veb[i]->seid == uplink_seid)
8757 veb = pf->veb[i];
8758 }
8759 if (n == 0 && veb && veb->uplink_seid != 0)
8760 i40e_veb_release(veb);
8761
8762 return 0;
8763 }
8764
8765 /**
8766 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
8767 * @vsi: ptr to the VSI
8768 *
8769 * This should only be called after i40e_vsi_mem_alloc() which allocates the
8770 * corresponding SW VSI structure and initializes num_queue_pairs for the
8771 * newly allocated VSI.
8772 *
8773 * Returns 0 on success or negative on failure
8774 **/
8775 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
8776 {
8777 int ret = -ENOENT;
8778 struct i40e_pf *pf = vsi->back;
8779
8780 if (vsi->q_vectors[0]) {
8781 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
8782 vsi->seid);
8783 return -EEXIST;
8784 }
8785
8786 if (vsi->base_vector) {
8787 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
8788 vsi->seid, vsi->base_vector);
8789 return -EEXIST;
8790 }
8791
8792 ret = i40e_vsi_alloc_q_vectors(vsi);
8793 if (ret) {
8794 dev_info(&pf->pdev->dev,
8795 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
8796 vsi->num_q_vectors, vsi->seid, ret);
8797 vsi->num_q_vectors = 0;
8798 goto vector_setup_out;
8799 }
8800
8801 if (vsi->num_q_vectors)
8802 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
8803 vsi->num_q_vectors, vsi->idx);
8804 if (vsi->base_vector < 0) {
8805 dev_info(&pf->pdev->dev,
8806 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
8807 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
8808 i40e_vsi_free_q_vectors(vsi);
8809 ret = -ENOENT;
8810 goto vector_setup_out;
8811 }
8812
8813 vector_setup_out:
8814 return ret;
8815 }
8816
8817 /**
* i40e_vsi_reinit_setup - release and reallocate resources for a VSI
8819 * @vsi: pointer to the vsi.
8820 *
8821 * This re-allocates a vsi's queue resources.
8822 *
8823 * Returns pointer to the successfully allocated and configured VSI sw struct
8824 * on success, otherwise returns NULL on failure.
8825 **/
8826 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
8827 {
8828 struct i40e_pf *pf = vsi->back;
8829 u8 enabled_tc;
8830 int ret;
8831
8832 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
8833 i40e_vsi_clear_rings(vsi);
8834
8835 i40e_vsi_free_arrays(vsi, false);
8836 i40e_set_num_rings_in_vsi(vsi);
8837 ret = i40e_vsi_alloc_arrays(vsi, false);
8838 if (ret)
8839 goto err_vsi;
8840
8841 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
8842 if (ret < 0) {
8843 dev_info(&pf->pdev->dev,
8844 "failed to get tracking for %d queues for VSI %d err %d\n",
8845 vsi->alloc_queue_pairs, vsi->seid, ret);
8846 goto err_vsi;
8847 }
8848 vsi->base_queue = ret;
8849
8850 /* Update the FW view of the VSI. Force a reset of TC and queue
8851 * layout configurations.
8852 */
8853 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
8854 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
8855 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
8856 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
8857
8858 /* assign it some queues */
8859 ret = i40e_alloc_rings(vsi);
8860 if (ret)
8861 goto err_rings;
8862
8863 /* map all of the rings to the q_vectors */
8864 i40e_vsi_map_rings_to_vectors(vsi);
8865 return vsi;
8866
8867 err_rings:
8868 i40e_vsi_free_q_vectors(vsi);
8869 if (vsi->netdev_registered) {
8870 vsi->netdev_registered = false;
8871 unregister_netdev(vsi->netdev);
8872 free_netdev(vsi->netdev);
8873 vsi->netdev = NULL;
8874 }
8875 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
8876 err_vsi:
8877 i40e_vsi_clear(vsi);
8878 return NULL;
8879 }
8880
8881 /**
8882 * i40e_vsi_setup - Set up a VSI by a given type
8883 * @pf: board private structure
8884 * @type: VSI type
8885 * @uplink_seid: the switch element to link to
8886 * @param1: usage depends upon VSI type. For VF types, indicates VF id
8887 *
* This allocates the sw VSI structure and its queue resources, then adds the
* VSI to the identified VEB.
*
* Returns pointer to the successfully allocated and configured VSI sw struct on
8892 * success, otherwise returns NULL on failure.
8893 **/
8894 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
8895 u16 uplink_seid, u32 param1)
8896 {
8897 struct i40e_vsi *vsi = NULL;
8898 struct i40e_veb *veb = NULL;
8899 int ret, i;
8900 int v_idx;
8901
8902 /* The requested uplink_seid must be either
8903 * - the PF's port seid
8904 * no VEB is needed because this is the PF
8905 * or this is a Flow Director special case VSI
8906 * - seid of an existing VEB
8907 * - seid of a VSI that owns an existing VEB
8908 * - seid of a VSI that doesn't own a VEB
8909 * a new VEB is created and the VSI becomes the owner
8910 * - seid of the PF VSI, which is what creates the first VEB
8911 * this is a special case of the previous
8912 *
8913 * Find which uplink_seid we were given and create a new VEB if needed
8914 */
8915 for (i = 0; i < I40E_MAX_VEB; i++) {
8916 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
8917 veb = pf->veb[i];
8918 break;
8919 }
8920 }
8921
8922 if (!veb && uplink_seid != pf->mac_seid) {
8923
8924 for (i = 0; i < pf->num_alloc_vsi; i++) {
8925 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
8926 vsi = pf->vsi[i];
8927 break;
8928 }
8929 }
8930 if (!vsi) {
8931 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
8932 uplink_seid);
8933 return NULL;
8934 }
8935
8936 if (vsi->uplink_seid == pf->mac_seid)
8937 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
8938 vsi->tc_config.enabled_tc);
8939 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
8940 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8941 vsi->tc_config.enabled_tc);
8942 if (veb) {
8943 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
8944 dev_info(&vsi->back->pdev->dev,
8945 "%s: New VSI creation error, uplink seid of LAN VSI expected.\n",
8946 __func__);
8947 return NULL;
8948 }
8949 /* We come up by default in VEPA mode if SRIOV is not
8950 * already enabled, in which case we can't force VEPA
8951 * mode.
8952 */
8953 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
8954 veb->bridge_mode = BRIDGE_MODE_VEPA;
8955 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
8956 }
8957 i40e_config_bridge_mode(veb);
8958 }
8959 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8960 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8961 veb = pf->veb[i];
8962 }
8963 if (!veb) {
8964 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
8965 return NULL;
8966 }
8967
8968 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
8969 uplink_seid = veb->seid;
8970 }
8971
8972 /* get vsi sw struct */
8973 v_idx = i40e_vsi_mem_alloc(pf, type);
8974 if (v_idx < 0)
8975 goto err_alloc;
8976 vsi = pf->vsi[v_idx];
8977 if (!vsi)
8978 goto err_alloc;
8979 vsi->type = type;
8980 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
8981
8982 if (type == I40E_VSI_MAIN)
8983 pf->lan_vsi = v_idx;
8984 else if (type == I40E_VSI_SRIOV)
8985 vsi->vf_id = param1;
8986 /* assign it some queues */
8987 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
8988 vsi->idx);
8989 if (ret < 0) {
8990 dev_info(&pf->pdev->dev,
8991 "failed to get tracking for %d queues for VSI %d err=%d\n",
8992 vsi->alloc_queue_pairs, vsi->seid, ret);
8993 goto err_vsi;
8994 }
8995 vsi->base_queue = ret;
8996
8997 /* get a VSI from the hardware */
8998 vsi->uplink_seid = uplink_seid;
8999 ret = i40e_add_vsi(vsi);
9000 if (ret)
9001 goto err_vsi;
9002
9003 switch (vsi->type) {
9004 /* setup the netdev if needed */
9005 case I40E_VSI_MAIN:
9006 case I40E_VSI_VMDQ2:
9007 case I40E_VSI_FCOE:
9008 ret = i40e_config_netdev(vsi);
9009 if (ret)
9010 goto err_netdev;
9011 ret = register_netdev(vsi->netdev);
9012 if (ret)
9013 goto err_netdev;
9014 vsi->netdev_registered = true;
9015 netif_carrier_off(vsi->netdev);
9016 #ifdef CONFIG_I40E_DCB
9017 /* Setup DCB netlink interface */
9018 i40e_dcbnl_setup(vsi);
9019 #endif /* CONFIG_I40E_DCB */
9020 /* fall through */
9021
9022 case I40E_VSI_FDIR:
9023 /* set up vectors and rings if needed */
9024 ret = i40e_vsi_setup_vectors(vsi);
9025 if (ret)
9026 goto err_msix;
9027
9028 ret = i40e_alloc_rings(vsi);
9029 if (ret)
9030 goto err_rings;
9031
9032 /* map all of the rings to the q_vectors */
9033 i40e_vsi_map_rings_to_vectors(vsi);
9034
9035 i40e_vsi_reset_stats(vsi);
9036 break;
9037
9038 default:
9039 /* no netdev or rings for the other VSI types */
9040 break;
9041 }
9042
9043 if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
9044 (vsi->type == I40E_VSI_VMDQ2)) {
9045 ret = i40e_vsi_config_rss(vsi);
9046 }
9047 return vsi;
9048
9049 err_rings:
9050 i40e_vsi_free_q_vectors(vsi);
9051 err_msix:
9052 if (vsi->netdev_registered) {
9053 vsi->netdev_registered = false;
9054 unregister_netdev(vsi->netdev);
9055 free_netdev(vsi->netdev);
9056 vsi->netdev = NULL;
9057 }
9058 err_netdev:
9059 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
9060 err_vsi:
9061 i40e_vsi_clear(vsi);
9062 err_alloc:
9063 return NULL;
9064 }
9065
9066 /**
9067 * i40e_veb_get_bw_info - Query VEB BW information
9068 * @veb: the veb to query
9069 *
9070 * Query the Tx scheduler BW configuration data for given VEB
9071 **/
9072 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
9073 {
9074 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
9075 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
9076 struct i40e_pf *pf = veb->pf;
9077 struct i40e_hw *hw = &pf->hw;
9078 u32 tc_bw_max;
9079 int ret = 0;
9080 int i;
9081
9082 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
9083 &bw_data, NULL);
9084 if (ret) {
9085 dev_info(&pf->pdev->dev,
9086 "query veb bw config failed, err %s aq_err %s\n",
9087 i40e_stat_str(&pf->hw, ret),
9088 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
9089 goto out;
9090 }
9091
9092 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
9093 &ets_data, NULL);
9094 if (ret) {
9095 dev_info(&pf->pdev->dev,
9096 "query veb bw ets config failed, err %s aq_err %s\n",
9097 i40e_stat_str(&pf->hw, ret),
9098 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
9099 goto out;
9100 }
9101
9102 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
9103 veb->bw_max_quanta = ets_data.tc_bw_max;
9104 veb->is_abs_credits = bw_data.absolute_credits_enable;
9105 veb->enabled_tc = ets_data.tc_valid_bits;
9106 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
9107 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
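/* per the shift/mask below, each TC's max quanta occupies a 4-bit
 * field of tc_bw_max, with only the low 3 bits used
 */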
9108 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9109 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
9110 veb->bw_tc_limit_credits[i] =
9111 le16_to_cpu(bw_data.tc_bw_limits[i]);
9112 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
9113 }
9114
9115 out:
9116 return ret;
9117 }
9118
9119 /**
9120 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
9121 * @pf: board private structure
9122 *
9123 * On error: returns error code (negative)
* On success: returns VEB index in PF (positive)
9125 **/
9126 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
9127 {
9128 int ret = -ENOENT;
9129 struct i40e_veb *veb;
9130 int i;
9131
9132 /* Need to protect the allocation of switch elements at the PF level */
9133 mutex_lock(&pf->switch_mutex);
9134
9135 /* VEB list may be fragmented if VEB creation/destruction has
9136 * been happening. We can afford to do a quick scan to look
9137 * for any free slots in the list.
9138 *
9139 * find next empty veb slot, looping back around if necessary
9140 */
9141 i = 0;
9142 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
9143 i++;
9144 if (i >= I40E_MAX_VEB) {
9145 ret = -ENOMEM;
9146 goto err_alloc_veb; /* out of VEB slots! */
9147 }
9148
9149 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
9150 if (!veb) {
9151 ret = -ENOMEM;
9152 goto err_alloc_veb;
9153 }
9154 veb->pf = pf;
9155 veb->idx = i;
9156 veb->enabled_tc = 1;
9157
9158 pf->veb[i] = veb;
9159 ret = i;
9160 err_alloc_veb:
9161 mutex_unlock(&pf->switch_mutex);
9162 return ret;
9163 }
9164
9165 /**
9166 * i40e_switch_branch_release - Delete a branch of the switch tree
9167 * @branch: where to start deleting
9168 *
9169 * This uses recursion to find the tips of the branch to be
9170 * removed, deleting until we get back to and can delete this VEB.
9171 **/
9172 static void i40e_switch_branch_release(struct i40e_veb *branch)
9173 {
9174 struct i40e_pf *pf = branch->pf;
9175 u16 branch_seid = branch->seid;
9176 u16 veb_idx = branch->idx;
9177 int i;
9178
9179 /* release any VEBs on this VEB - RECURSION */
9180 for (i = 0; i < I40E_MAX_VEB; i++) {
9181 if (!pf->veb[i])
9182 continue;
9183 if (pf->veb[i]->uplink_seid == branch->seid)
9184 i40e_switch_branch_release(pf->veb[i]);
9185 }
9186
9187 /* Release the VSIs on this VEB, but not the owner VSI.
9188 *
9189 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
9190 * the VEB itself, so don't use (*branch) after this loop.
9191 */
9192 for (i = 0; i < pf->num_alloc_vsi; i++) {
9193 if (!pf->vsi[i])
9194 continue;
9195 if (pf->vsi[i]->uplink_seid == branch_seid &&
9196 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
9197 i40e_vsi_release(pf->vsi[i]);
9198 }
9199 }
9200
9201 /* There's one corner case where the VEB might not have been
9202 * removed, so double check it here and remove it if needed.
9203 * This case happens if the veb was created from the debugfs
9204 * commands and no VSIs were added to it.
9205 */
9206 if (pf->veb[veb_idx])
9207 i40e_veb_release(pf->veb[veb_idx]);
9208 }
9209
9210 /**
9211 * i40e_veb_clear - remove veb struct
9212 * @veb: the veb to remove
9213 **/
9214 static void i40e_veb_clear(struct i40e_veb *veb)
9215 {
9216 if (!veb)
9217 return;
9218
9219 if (veb->pf) {
9220 struct i40e_pf *pf = veb->pf;
9221
9222 mutex_lock(&pf->switch_mutex);
9223 if (pf->veb[veb->idx] == veb)
9224 pf->veb[veb->idx] = NULL;
9225 mutex_unlock(&pf->switch_mutex);
9226 }
9227
9228 kfree(veb);
9229 }
9230
9231 /**
9232 * i40e_veb_release - Delete a VEB and free its resources
9233 * @veb: the VEB being removed
9234 **/
9235 void i40e_veb_release(struct i40e_veb *veb)
9236 {
9237 struct i40e_vsi *vsi = NULL;
9238 struct i40e_pf *pf;
9239 int i, n = 0;
9240
9241 pf = veb->pf;
9242
9243 /* find the remaining VSI and check for extras */
9244 for (i = 0; i < pf->num_alloc_vsi; i++) {
9245 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
9246 n++;
9247 vsi = pf->vsi[i];
9248 }
9249 }
9250 if (n != 1) {
9251 dev_info(&pf->pdev->dev,
9252 "can't remove VEB %d with %d VSIs left\n",
9253 veb->seid, n);
9254 return;
9255 }
9256
9257 /* move the remaining VSI to uplink veb */
9258 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
9259 if (veb->uplink_seid) {
9260 vsi->uplink_seid = veb->uplink_seid;
9261 if (veb->uplink_seid == pf->mac_seid)
9262 vsi->veb_idx = I40E_NO_VEB;
9263 else
9264 vsi->veb_idx = veb->veb_idx;
9265 } else {
9266 /* floating VEB */
9267 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
9268 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
9269 }
9270
9271 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
9272 i40e_veb_clear(veb);
9273 }
9274
9275 /**
9276 * i40e_add_veb - create the VEB in the switch
9277 * @veb: the VEB to be instantiated
9278 * @vsi: the controlling VSI
9279 **/
9280 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
9281 {
9282 struct i40e_pf *pf = veb->pf;
9283 bool is_default = false;
9284 bool is_cloud = false;
9285 int ret;
9286
9287 /* get a VEB from the hardware */
9288 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
9289 veb->enabled_tc, is_default,
9290 is_cloud, &veb->seid, NULL);
9291 if (ret) {
9292 dev_info(&pf->pdev->dev,
9293 "couldn't add VEB, err %s aq_err %s\n",
9294 i40e_stat_str(&pf->hw, ret),
9295 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9296 return -EPERM;
9297 }
9298
9299 /* get statistics counter */
9300 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
9301 &veb->stats_idx, NULL, NULL, NULL);
9302 if (ret) {
9303 dev_info(&pf->pdev->dev,
9304 "couldn't get VEB statistics idx, err %s aq_err %s\n",
9305 i40e_stat_str(&pf->hw, ret),
9306 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9307 return -EPERM;
9308 }
9309 ret = i40e_veb_get_bw_info(veb);
9310 if (ret) {
9311 dev_info(&pf->pdev->dev,
9312 "couldn't get VEB bw info, err %s aq_err %s\n",
9313 i40e_stat_str(&pf->hw, ret),
9314 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9315 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
9316 return -ENOENT;
9317 }
9318
9319 vsi->uplink_seid = veb->seid;
9320 vsi->veb_idx = veb->idx;
9321 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
9322
9323 return 0;
9324 }
9325
9326 /**
9327 * i40e_veb_setup - Set up a VEB
9328 * @pf: board private structure
9329 * @flags: VEB setup flags
9330 * @uplink_seid: the switch element to link to
9331 * @vsi_seid: the initial VSI seid
9332 * @enabled_tc: Enabled TC bit-map
9333 *
9334 * This allocates the sw VEB structure and links it into the switch
9335 * It is possible and legal for this to be a duplicate of an already
9336 * existing VEB. It is also possible for both uplink and vsi seids
9337 * to be zero, in order to create a floating VEB.
9338 *
9339 * Returns pointer to the successfully allocated VEB sw struct on
9340 * success, otherwise returns NULL on failure.
9341 **/
9342 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
9343 u16 uplink_seid, u16 vsi_seid,
9344 u8 enabled_tc)
9345 {
9346 struct i40e_veb *veb, *uplink_veb = NULL;
9347 int vsi_idx, veb_idx;
9348 int ret;
9349
9350 /* if one seid is 0, the other must be 0 to create a floating relay */
9351 if ((uplink_seid == 0 || vsi_seid == 0) &&
9352 (uplink_seid + vsi_seid != 0)) {
9353 dev_info(&pf->pdev->dev,
9354 "one, not both seid's are 0: uplink=%d vsi=%d\n",
9355 uplink_seid, vsi_seid);
9356 return NULL;
9357 }
9358
9359 /* make sure there is such a vsi and uplink */
9360 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
9361 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
9362 break;
9363 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
9364 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
9365 vsi_seid);
9366 return NULL;
9367 }
9368
9369 if (uplink_seid && uplink_seid != pf->mac_seid) {
9370 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
9371 if (pf->veb[veb_idx] &&
9372 pf->veb[veb_idx]->seid == uplink_seid) {
9373 uplink_veb = pf->veb[veb_idx];
9374 break;
9375 }
9376 }
9377 if (!uplink_veb) {
9378 dev_info(&pf->pdev->dev,
9379 "uplink seid %d not found\n", uplink_seid);
9380 return NULL;
9381 }
9382 }
9383
9384 /* get veb sw struct */
9385 veb_idx = i40e_veb_mem_alloc(pf);
9386 if (veb_idx < 0)
9387 goto err_alloc;
9388 veb = pf->veb[veb_idx];
9389 veb->flags = flags;
9390 veb->uplink_seid = uplink_seid;
9391 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
9392 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
9393
9394 /* create the VEB in the switch */
9395 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
9396 if (ret)
9397 goto err_veb;
9398 if (vsi_idx == pf->lan_vsi)
9399 pf->lan_veb = veb->idx;
9400
9401 return veb;
9402
9403 err_veb:
9404 i40e_veb_clear(veb);
9405 err_alloc:
9406 return NULL;
9407 }
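
/* Usage sketch (illustrative): a caller links a new relay between a VSI
 * and that VSI's current uplink, e.g. when a VSI needs a VEB of its own
 * (compare the setup path around i40e_vsi_setup()).
 *
 *	veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
 *			     vsi->tc_config.enabled_tc);
 *	if (!veb)
 *		dev_info(&pf->pdev->dev, "couldn't add VEB\n");
 */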
9408
9409 /**
9410 * i40e_setup_pf_switch_element - set PF vars based on switch type
9411 * @pf: board private structure
9412 * @ele: element we are building info from
9413 * @num_reported: total number of elements
9414 * @printconfig: should we print the contents
9415 *
9416 * Helper function for extracting a few useful SEID values.
9417 **/
9418 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
9419 struct i40e_aqc_switch_config_element_resp *ele,
9420 u16 num_reported, bool printconfig)
9421 {
9422 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
9423 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
9424 u8 element_type = ele->element_type;
9425 u16 seid = le16_to_cpu(ele->seid);
9426
9427 if (printconfig)
9428 dev_info(&pf->pdev->dev,
9429 "type=%d seid=%d uplink=%d downlink=%d\n",
9430 element_type, seid, uplink_seid, downlink_seid);
9431
9432 switch (element_type) {
9433 case I40E_SWITCH_ELEMENT_TYPE_MAC:
9434 pf->mac_seid = seid;
9435 break;
9436 case I40E_SWITCH_ELEMENT_TYPE_VEB:
9437 /* Main VEB? */
9438 if (uplink_seid != pf->mac_seid)
9439 break;
9440 if (pf->lan_veb == I40E_NO_VEB) {
9441 int v;
9442
9443 /* find existing or else empty VEB */
9444 for (v = 0; v < I40E_MAX_VEB; v++) {
9445 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
9446 pf->lan_veb = v;
9447 break;
9448 }
9449 }
9450 if (pf->lan_veb == I40E_NO_VEB) {
9451 v = i40e_veb_mem_alloc(pf);
9452 if (v < 0)
9453 break;
9454 pf->lan_veb = v;
9455 }
9456 }
9457
9458 pf->veb[pf->lan_veb]->seid = seid;
9459 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
9460 pf->veb[pf->lan_veb]->pf = pf;
9461 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
9462 break;
9463 case I40E_SWITCH_ELEMENT_TYPE_VSI:
9464 if (num_reported != 1)
9465 break;
9466 /* This is immediately after a reset so we can assume this is
9467 * the PF's VSI
9468 */
9469 pf->mac_seid = uplink_seid;
9470 pf->pf_seid = downlink_seid;
9471 pf->main_vsi_seid = seid;
9472 if (printconfig)
9473 dev_info(&pf->pdev->dev,
9474 "pf_seid=%d main_vsi_seid=%d\n",
9475 pf->pf_seid, pf->main_vsi_seid);
9476 break;
9477 case I40E_SWITCH_ELEMENT_TYPE_PF:
9478 case I40E_SWITCH_ELEMENT_TYPE_VF:
9479 case I40E_SWITCH_ELEMENT_TYPE_EMP:
9480 case I40E_SWITCH_ELEMENT_TYPE_BMC:
9481 case I40E_SWITCH_ELEMENT_TYPE_PE:
9482 case I40E_SWITCH_ELEMENT_TYPE_PA:
9483 /* ignore these for now */
9484 break;
9485 default:
9486 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
9487 element_type, seid);
9488 break;
9489 }
9490 }
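
/* Example of the printconfig output above, with made-up SEID values:
 *
 *	type=2 seid=392 uplink=2 downlink=512
 *
 * i.e. the element type followed by the element's own SEID and its
 * uplink/downlink connections as reported in the AQ response.
 */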
9491
9492 /**
9493 * i40e_fetch_switch_configuration - Get switch config from firmware
9494 * @pf: board private structure
9495 * @printconfig: should we print the contents
9496 *
9497 * Get the current switch configuration from the device and
9498 * extract a few useful SEID values.
9499 **/
9500 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
9501 {
9502 struct i40e_aqc_get_switch_config_resp *sw_config;
9503 u16 next_seid = 0;
9504 int ret = 0;
9505 u8 *aq_buf;
9506 int i;
9507
9508 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
9509 if (!aq_buf)
9510 return -ENOMEM;
9511
9512 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
9513 do {
9514 u16 num_reported, num_total;
9515
9516 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
9517 I40E_AQ_LARGE_BUF,
9518 &next_seid, NULL);
9519 if (ret) {
9520 dev_info(&pf->pdev->dev,
9521 "get switch config failed err %s aq_err %s\n",
9522 i40e_stat_str(&pf->hw, ret),
9523 i40e_aq_str(&pf->hw,
9524 pf->hw.aq.asq_last_status));
9525 kfree(aq_buf);
9526 return -ENOENT;
9527 }
9528
9529 num_reported = le16_to_cpu(sw_config->header.num_reported);
9530 num_total = le16_to_cpu(sw_config->header.num_total);
9531
9532 if (printconfig)
9533 dev_info(&pf->pdev->dev,
9534 "header: %d reported %d total\n",
9535 num_reported, num_total);
9536
9537 for (i = 0; i < num_reported; i++) {
9538 struct i40e_aqc_switch_config_element_resp *ele =
9539 &sw_config->element[i];
9540
9541 i40e_setup_pf_switch_element(pf, ele, num_reported,
9542 printconfig);
9543 }
9544 } while (next_seid != 0);
9545
9546 kfree(aq_buf);
9547 return ret;
9548 }
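
/* Usage sketch (illustrative): passing printconfig=true dumps every
 * reported element while the do/while loop above pages through the AQ
 * response until the firmware returns a next_seid of 0.
 *
 *	if (i40e_fetch_switch_configuration(pf, true))
 *		dev_info(&pf->pdev->dev, "couldn't fetch switch config\n");
 */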
9549
9550 /**
9551 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
9552 * @pf: board private structure
9553 * @reinit: if the Main VSI needs to be re-initialized.
9554 *
9555 * Returns 0 on success, negative value on failure
9556 **/
9557 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
9558 {
9559 int ret;
9560
9561 /* find out what's out there already */
9562 ret = i40e_fetch_switch_configuration(pf, false);
9563 if (ret) {
9564 dev_info(&pf->pdev->dev,
9565 "couldn't fetch switch config, err %s aq_err %s\n",
9566 i40e_stat_str(&pf->hw, ret),
9567 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9568 return ret;
9569 }
9570 i40e_pf_reset_stats(pf);
9571
9572 /* first time setup */
9573 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
9574 struct i40e_vsi *vsi = NULL;
9575 u16 uplink_seid;
9576
9577 /* Set up the PF VSI associated with the PF's main VSI
9578 * that is already in the HW switch
9579 */
9580 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
9581 uplink_seid = pf->veb[pf->lan_veb]->seid;
9582 else
9583 uplink_seid = pf->mac_seid;
9584 if (pf->lan_vsi == I40E_NO_VSI)
9585 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
9586 else if (reinit)
9587 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
9588 if (!vsi) {
9589 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
9590 i40e_fdir_teardown(pf);
9591 return -EAGAIN;
9592 }
9593 } else {
9594 /* force a reset of TC and queue layout configurations */
9595 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
9596 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
9597 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
9598 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
9599 }
9600 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
9601
9602 i40e_fdir_sb_setup(pf);
9603
9604 /* Setup static PF queue filter control settings */
9605 ret = i40e_setup_pf_filter_control(pf);
9606 if (ret) {
9607 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
9608 ret);
9609 /* Failure here should not abort the remaining setup steps */
9610 }
9611
9612 /* enable RSS in the HW, even for only one queue, as the stack can use
9613 * the hash
9614 */
9615 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
9616 i40e_config_rss(pf);
9617
9618 /* fill in link information and enable LSE reporting */
9619 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
9620 i40e_link_event(pf);
9621
9622 /* Initialize user-specific link properties */
9623 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
9624 I40E_AQ_AN_COMPLETED) ? true : false);
9625
9626 i40e_ptp_init(pf);
9627
9628 return ret;
9629 }
9630
9631 /**
9632 * i40e_determine_queue_usage - Work out queue distribution
9633 * @pf: board private structure
9634 **/
9635 static void i40e_determine_queue_usage(struct i40e_pf *pf)
9636 {
9637 int queues_left;
9638
9639 pf->num_lan_qps = 0;
9640 #ifdef I40E_FCOE
9641 pf->num_fcoe_qps = 0;
9642 #endif
9643
9644 /* Find the max queues to be put into basic use. We'll always be
9645 * using TC0, whether or not DCB is running, and TC0 will get the
9646 * big RSS set.
9647 */
9648 queues_left = pf->hw.func_caps.num_tx_qp;
9649
9650 if ((queues_left == 1) ||
9651 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
9652 /* one qp for PF, no queues for anything else */
9653 queues_left = 0;
9654 pf->rss_size = pf->num_lan_qps = 1;
9655
9656 /* make sure all the fancies are disabled */
9657 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
9658 #ifdef I40E_FCOE
9659 I40E_FLAG_FCOE_ENABLED |
9660 #endif
9661 I40E_FLAG_FD_SB_ENABLED |
9662 I40E_FLAG_FD_ATR_ENABLED |
9663 I40E_FLAG_DCB_CAPABLE |
9664 I40E_FLAG_SRIOV_ENABLED |
9665 I40E_FLAG_VMDQ_ENABLED);
9666 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
9667 I40E_FLAG_FD_SB_ENABLED |
9668 I40E_FLAG_FD_ATR_ENABLED |
9669 I40E_FLAG_DCB_CAPABLE))) {
9670 /* one qp for PF */
9671 pf->rss_size = pf->num_lan_qps = 1;
9672 queues_left -= pf->num_lan_qps;
9673
9674 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
9675 #ifdef I40E_FCOE
9676 I40E_FLAG_FCOE_ENABLED |
9677 #endif
9678 I40E_FLAG_FD_SB_ENABLED |
9679 I40E_FLAG_FD_ATR_ENABLED |
9680 I40E_FLAG_DCB_ENABLED |
9681 I40E_FLAG_VMDQ_ENABLED);
9682 } else {
9683 /* Not enough queues for all TCs */
9684 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
9685 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
9686 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9687 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
9688 }
9689 pf->num_lan_qps = max_t(int, pf->rss_size_max,
9690 num_online_cpus());
9691 pf->num_lan_qps = min_t(int, pf->num_lan_qps,
9692 pf->hw.func_caps.num_tx_qp);
9693
9694 queues_left -= pf->num_lan_qps;
9695 }
9696
9697 #ifdef I40E_FCOE
9698 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
9699 if (I40E_DEFAULT_FCOE <= queues_left) {
9700 pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
9701 } else if (I40E_MINIMUM_FCOE <= queues_left) {
9702 pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
9703 } else {
9704 pf->num_fcoe_qps = 0;
9705 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
9706 dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
9707 }
9708
9709 queues_left -= pf->num_fcoe_qps;
9710 }
9711
9712 #endif
9713 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9714 if (queues_left > 1) {
9715 queues_left -= 1; /* save 1 queue for FD */
9716 } else {
9717 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
9718 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
9719 }
9720 }
9721
9722 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
9723 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
9724 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
9725 (queues_left / pf->num_vf_qps));
9726 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
9727 }
9728
9729 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
9730 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
9731 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
9732 (queues_left / pf->num_vmdq_qps));
9733 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
9734 }
9735
9736 pf->queues_left = queues_left;
9737 #ifdef I40E_FCOE
9738 dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
9739 #endif
9740 }
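
/* Worked example (hypothetical capabilities): with num_tx_qp = 64, MSI-X
 * and RSS enabled, 8 online CPUs and rss_size_max = 4, the code above
 * picks num_lan_qps = min(max(4, 8), 64) = 8, leaving 56 queues. Flow
 * Director then reserves 1, and 4 requested VFs at 4 qps each take 16
 * more, so pf->queues_left ends up at 39 for VMDq and later use.
 */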
9741
9742 /**
9743 * i40e_setup_pf_filter_control - Setup PF static filter control
9744 * @pf: PF to be setup
9745 *
9746 * i40e_setup_pf_filter_control sets up a PF's initial filter control
9747 * settings. If PE/FCoE are enabled then it will also set the per PF
9748 * based filter sizes required for them. It also enables Flow director,
9749 * ethertype and macvlan type filter settings for the pf.
9750 *
9751 * Returns 0 on success, negative on failure
9752 **/
9753 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
9754 {
9755 struct i40e_filter_control_settings *settings = &pf->filter_settings;
9756
9757 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
9758
9759 /* Flow Director is enabled */
9760 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
9761 settings->enable_fdir = true;
9762
9763 /* Ethtype and MACVLAN filters enabled for PF */
9764 settings->enable_ethtype = true;
9765 settings->enable_macvlan = true;
9766
9767 if (i40e_set_filter_control(&pf->hw, settings))
9768 return -ENOENT;
9769
9770 return 0;
9771 }
9772
9773 #define INFO_STRING_LEN 255
9774 static void i40e_print_features(struct i40e_pf *pf)
9775 {
9776 struct i40e_hw *hw = &pf->hw;
9777 char *buf, *string;
9778
9779 string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
9780 if (!string) {
9781 dev_err(&pf->pdev->dev, "Features string allocation failed\n");
9782 return;
9783 }
9784
9785 buf = string;
9786
9787 buf += sprintf(buf, "Features: PF-id[%d] ", hw->pf_id);
9788 #ifdef CONFIG_PCI_IOV
9789 buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
9790 #endif
9791 buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ",
9792 pf->hw.func_caps.num_vsis,
9793 pf->vsi[pf->lan_vsi]->num_queue_pairs,
9794 pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
9795
9796 if (pf->flags & I40E_FLAG_RSS_ENABLED)
9797 buf += sprintf(buf, "RSS ");
9798 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
9799 buf += sprintf(buf, "FD_ATR ");
9800 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9801 buf += sprintf(buf, "FD_SB ");
9802 buf += sprintf(buf, "NTUPLE ");
9803 }
9804 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
9805 buf += sprintf(buf, "DCB ");
9806 if (pf->flags & I40E_FLAG_PTP)
9807 buf += sprintf(buf, "PTP ");
9808 #ifdef I40E_FCOE
9809 if (pf->flags & I40E_FLAG_FCOE_ENABLED)
9810 buf += sprintf(buf, "FCOE ");
9811 #endif
9812
9813 BUG_ON(buf > (string + INFO_STRING_LEN));
9814 dev_info(&pf->pdev->dev, "%s\n", string);
9815 kfree(string);
9816 }
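
/* Example of the resulting summary line (hypothetical values):
 *
 *	Features: PF-id[0] VFs: 32 VSIs: 66 QP: 8 RX: 1BUF RSS FD_ATR FD_SB NTUPLE PTP
 *
 * where each feature token is appended only when the corresponding
 * pf->flags bit is set.
 */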
9817
9818 /**
9819 * i40e_probe - Device initialization routine
9820 * @pdev: PCI device information struct
9821 * @ent: entry in i40e_pci_tbl
9822 *
9823 * i40e_probe initializes a PF identified by a pci_dev structure.
9824 * The OS initialization, configuring of the PF private structure,
9825 * and a hardware reset occur.
9826 *
9827 * Returns 0 on success, negative on failure
9828 **/
9829 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9830 {
9831 struct i40e_aq_get_phy_abilities_resp abilities;
9832 unsigned long ioremap_len;
9833 struct i40e_pf *pf;
9834 struct i40e_hw *hw;
9835 static u16 pfs_found;
9836 u16 link_status;
9837 int err = 0;
9838 u32 len;
9839 u32 i;
9840
9841 err = pci_enable_device_mem(pdev);
9842 if (err)
9843 return err;
9844
9845 /* set up for high or low dma */
9846 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9847 if (err) {
9848 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9849 if (err) {
9850 dev_err(&pdev->dev,
9851 "DMA configuration failed: 0x%x\n", err);
9852 goto err_dma;
9853 }
9854 }
9855
9856 /* set up pci connections */
9857 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
9858 IORESOURCE_MEM), i40e_driver_name);
9859 if (err) {
9860 dev_info(&pdev->dev,
9861 "pci_request_selected_regions failed %d\n", err);
9862 goto err_pci_reg;
9863 }
9864
9865 pci_enable_pcie_error_reporting(pdev);
9866 pci_set_master(pdev);
9867
9868 /* Now that we have a PCI connection, we need to do the
9869 * low level device setup. This is primarily setting up
9870 * the Admin Queue structures and then querying for the
9871 * device's current profile information.
9872 */
9873 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
9874 if (!pf) {
9875 err = -ENOMEM;
9876 goto err_pf_alloc;
9877 }
9878 pf->next_vsi = 0;
9879 pf->pdev = pdev;
9880 set_bit(__I40E_DOWN, &pf->state);
9881
9882 hw = &pf->hw;
9883 hw->back = pf;
9884
9885 ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0),
9886 I40E_MAX_CSR_SPACE);
9887
9888 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len);
9889 if (!hw->hw_addr) {
9890 err = -EIO;
9891 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
9892 (unsigned int)pci_resource_start(pdev, 0),
9893 (unsigned int)pci_resource_len(pdev, 0), err);
9894 goto err_ioremap;
9895 }
9896 hw->vendor_id = pdev->vendor;
9897 hw->device_id = pdev->device;
9898 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
9899 hw->subsystem_vendor_id = pdev->subsystem_vendor;
9900 hw->subsystem_device_id = pdev->subsystem_device;
9901 hw->bus.device = PCI_SLOT(pdev->devfn);
9902 hw->bus.func = PCI_FUNC(pdev->devfn);
9903 pf->instance = pfs_found;
9904
9905 if (debug != -1) {
9906 /* a user-specified debug level overrides the hardware default */
9907 pf->msg_enable = debug;
9908 }
9909
9910 /* do a special CORER for clearing PXE mode once at init */
9911 if (hw->revision_id == 0 &&
9912 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
9913 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
9914 i40e_flush(hw);
9915 msleep(200);
9916 pf->corer_count++;
9917
9918 i40e_clear_pxe_mode(hw);
9919 }
9920
9921 /* Reset here to make sure all is clean and to define PF 'n' */
9922 i40e_clear_hw(hw);
9923 err = i40e_pf_reset(hw);
9924 if (err) {
9925 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
9926 goto err_pf_reset;
9927 }
9928 pf->pfr_count++;
9929
9930 hw->aq.num_arq_entries = I40E_AQ_LEN;
9931 hw->aq.num_asq_entries = I40E_AQ_LEN;
9932 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
9933 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
9934 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
9935
9936 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
9937 "%s-%s:misc",
9938 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
9939
9940 err = i40e_init_shared_code(hw);
9941 if (err) {
9942 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
9943 err);
9944 goto err_pf_reset;
9945 }
9946
9947 /* set up a default setting for link flow control */
9948 pf->hw.fc.requested_mode = I40E_FC_NONE;
9949
9950 err = i40e_init_adminq(hw);
9951 dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
9952 if (err) {
9953 dev_info(&pdev->dev,
9954 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
9955 goto err_pf_reset;
9956 }
9957
9958 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
9959 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
9960 dev_info(&pdev->dev,
9961 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
9962 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
9963 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
9964 dev_info(&pdev->dev,
9965 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
9966
9967 i40e_verify_eeprom(pf);
9968
9969 /* Rev 0 hardware was never productized */
9970 if (hw->revision_id < 1)
9971 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
9972
9973 i40e_clear_pxe_mode(hw);
9974 err = i40e_get_capabilities(pf);
9975 if (err)
9976 goto err_adminq_setup;
9977
9978 err = i40e_sw_init(pf);
9979 if (err) {
9980 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
9981 goto err_sw_init;
9982 }
9983
9984 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
9985 hw->func_caps.num_rx_qp,
9986 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
9987 if (err) {
9988 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
9989 goto err_init_lan_hmc;
9990 }
9991
9992 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
9993 if (err) {
9994 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
9995 err = -ENOENT;
9996 goto err_configure_lan_hmc;
9997 }
9998
9999 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
10000 * Ignore the return code: if LLDP was already disabled via the
10001 * hardware settings, this call will fail.
10002 */
10003 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
10004 (pf->hw.aq.fw_maj_ver < 4)) {
10005 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
10006 i40e_aq_stop_lldp(hw, true, NULL);
10007 }
10008
10009 i40e_get_mac_addr(hw, hw->mac.addr);
10010 if (!is_valid_ether_addr(hw->mac.addr)) {
10011 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
10012 err = -EIO;
10013 goto err_mac_addr;
10014 }
10015 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
10016 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
10017 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
10018 if (is_valid_ether_addr(hw->mac.port_addr))
10019 pf->flags |= I40E_FLAG_PORT_ID_VALID;
10020 #ifdef I40E_FCOE
10021 err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
10022 if (err)
10023 dev_info(&pdev->dev,
10024 "(non-fatal) SAN MAC retrieval failed: %d\n", err);
10025 if (!is_valid_ether_addr(hw->mac.san_addr)) {
10026 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
10027 hw->mac.san_addr);
10028 ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
10029 }
10030 dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
10031 #endif /* I40E_FCOE */
10032
10033 pci_set_drvdata(pdev, pf);
10034 pci_save_state(pdev);
10035 #ifdef CONFIG_I40E_DCB
10036 err = i40e_init_pf_dcb(pf);
10037 if (err) {
10038 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
10039 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10040 /* Continue without DCB enabled */
10041 }
10042 #endif /* CONFIG_I40E_DCB */
10043
10044 /* set up periodic task facility */
10045 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
10046 pf->service_timer_period = HZ;
10047
10048 INIT_WORK(&pf->service_task, i40e_service_task);
10049 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
10050 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
10051 pf->link_check_timeout = jiffies;
10052
10053 /* WoL defaults to disabled */
10054 pf->wol_en = false;
10055 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
10056
10057 /* set up the main switch operations */
10058 i40e_determine_queue_usage(pf);
10059 err = i40e_init_interrupt_scheme(pf);
10060 if (err)
10061 goto err_switch_setup;
10062
10063 /* The number of VSIs reported by the FW is the minimum guaranteed
10064 * to us; HW supports far more and we share the remaining pool with
10065 * the other PFs. We allocate space for more than the guarantee with
10066 * the understanding that we might not get them all later.
10067 */
10068 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
10069 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
10070 else
10071 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
10072
10073 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
10074 len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
10075 pf->vsi = kzalloc(len, GFP_KERNEL);
10076 if (!pf->vsi) {
10077 err = -ENOMEM;
10078 goto err_switch_setup;
10079 }
10080
10081 #ifdef CONFIG_PCI_IOV
10082 /* prep for VF support */
10083 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10084 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10085 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
10086 if (pci_num_vf(pdev))
10087 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
10088 }
10089 #endif
10090 err = i40e_setup_pf_switch(pf, false);
10091 if (err) {
10092 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
10093 goto err_vsis;
10094 }
10095 /* if FDIR VSI was set up, start it now */
10096 for (i = 0; i < pf->num_alloc_vsi; i++) {
10097 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
10098 i40e_vsi_open(pf->vsi[i]);
10099 break;
10100 }
10101 }
10102
10103 /* driver is only interested in link up/down and module qualification
10104 * reports from firmware
10105 */
10106 err = i40e_aq_set_phy_int_mask(&pf->hw,
10107 I40E_AQ_EVENT_LINK_UPDOWN |
10108 I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
10109 if (err)
10110 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
10111 i40e_stat_str(&pf->hw, err),
10112 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10113
10114 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
10115 (pf->hw.aq.fw_maj_ver < 4)) {
10116 msleep(75);
10117 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
10118 if (err)
10119 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
10120 i40e_stat_str(&pf->hw, err),
10121 i40e_aq_str(&pf->hw,
10122 pf->hw.aq.asq_last_status));
10123 }
10124 /* The main driver is (mostly) up and happy. We need to set this state
10125 * before setting up the misc vector or we get a race and the vector
10126 * ends up disabled forever.
10127 */
10128 clear_bit(__I40E_DOWN, &pf->state);
10129
10130 /* In case of MSIX we are going to setup the misc vector right here
10131 * to handle admin queue events etc. In case of legacy and MSI
10132 * the misc functionality and queue processing is combined in
10133 * the same vector and that gets setup at open.
10134 */
10135 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
10136 err = i40e_setup_misc_vector(pf);
10137 if (err) {
10138 dev_info(&pdev->dev,
10139 "setup of misc vector failed: %d\n", err);
10140 goto err_vsis;
10141 }
10142 }
10143
10144 #ifdef CONFIG_PCI_IOV
10145 /* prep for VF support */
10146 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10147 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10148 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
10149 u32 val;
10150
10151 /* disable link interrupts for VFs */
10152 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
10153 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
10154 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
10155 i40e_flush(hw);
10156
10157 if (pci_num_vf(pdev)) {
10158 dev_info(&pdev->dev,
10159 "Active VFs found, allocating resources.\n");
10160 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
10161 if (err)
10162 dev_info(&pdev->dev,
10163 "Error %d allocating resources for existing VFs\n",
10164 err);
10165 }
10166 }
10167 #endif /* CONFIG_PCI_IOV */
10168
10169 pfs_found++;
10170
10171 i40e_dbg_pf_init(pf);
10172
10173 /* tell the firmware that we're starting */
10174 i40e_send_version(pf);
10175
10176 /* since everything's happy, start the service_task timer */
10177 mod_timer(&pf->service_timer,
10178 round_jiffies(jiffies + pf->service_timer_period));
10179
10180 #ifdef I40E_FCOE
10181 /* create FCoE interface */
10182 i40e_fcoe_vsi_setup(pf);
10183
10184 #endif
10185 /* Get the negotiated link width and speed from PCI config space */
10186 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
10187
10188 i40e_set_pci_config_data(hw, link_status);
10189
10190 dev_info(&pdev->dev, "PCI-Express: %s %s\n",
10191 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
10192 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
10193 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
10194 "Unknown"),
10195 (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
10196 hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
10197 hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
10198 hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
10199 "Unknown"));
10200
10201 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
10202 hw->bus.speed < i40e_bus_speed_8000) {
10203 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
10204 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
10205 }
10206
10207 /* get the requested speeds from the fw */
10208 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
10209 if (err)
10210 dev_info(&pf->pdev->dev,
10211 "get phy capabilities failed, err %s aq_err %s, advertised speed settings may not be correct\n",
10212 i40e_stat_str(&pf->hw, err),
10213 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10214 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
10215
10216 /* print a string summarizing features */
10217 i40e_print_features(pf);
10218
10219 return 0;
10220
10221 /* Unwind what we've done if something failed in the setup */
10222 err_vsis:
10223 set_bit(__I40E_DOWN, &pf->state);
10224 i40e_clear_interrupt_scheme(pf);
10225 kfree(pf->vsi);
10226 err_switch_setup:
10227 i40e_reset_interrupt_capability(pf);
10228 del_timer_sync(&pf->service_timer);
10229 err_mac_addr:
10230 err_configure_lan_hmc:
10231 (void)i40e_shutdown_lan_hmc(hw);
10232 err_init_lan_hmc:
10233 kfree(pf->qp_pile);
10234 err_sw_init:
10235 err_adminq_setup:
10236 (void)i40e_shutdown_adminq(hw);
10237 err_pf_reset:
10238 iounmap(hw->hw_addr);
10239 err_ioremap:
10240 kfree(pf);
10241 err_pf_alloc:
10242 pci_disable_pcie_error_reporting(pdev);
10243 pci_release_selected_regions(pdev,
10244 pci_select_bars(pdev, IORESOURCE_MEM));
10245 err_pci_reg:
10246 err_dma:
10247 pci_disable_device(pdev);
10248 return err;
10249 }
10250
10251 /**
10252 * i40e_remove - Device removal routine
10253 * @pdev: PCI device information struct
10254 *
10255 * i40e_remove is called by the PCI subsystem to alert the driver
10256 * that it should release a PCI device. This could be caused by a
10257 * Hot-Plug event, or because the driver is going to be removed from
10258 * memory.
10259 **/
10260 static void i40e_remove(struct pci_dev *pdev)
10261 {
10262 struct i40e_pf *pf = pci_get_drvdata(pdev);
10263 i40e_status ret_code;
10264 int i;
10265
10266 i40e_dbg_pf_exit(pf);
10267
10268 i40e_ptp_stop(pf);
10269
10270 /* no more scheduling of any task */
10271 set_bit(__I40E_DOWN, &pf->state);
10272 del_timer_sync(&pf->service_timer);
10273 cancel_work_sync(&pf->service_task);
10274 i40e_fdir_teardown(pf);
10275
10276 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
10277 i40e_free_vfs(pf);
10278 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
10279 }
10280
10283 /* If there is a switch structure or any orphans, remove them.
10284 * This will leave only the PF's VSI remaining.
10285 */
10286 for (i = 0; i < I40E_MAX_VEB; i++) {
10287 if (!pf->veb[i])
10288 continue;
10289
10290 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
10291 pf->veb[i]->uplink_seid == 0)
10292 i40e_switch_branch_release(pf->veb[i]);
10293 }
10294
10295 /* Now we can shutdown the PF's VSI, just before we kill
10296 * adminq and hmc.
10297 */
10298 if (pf->vsi[pf->lan_vsi])
10299 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
10300
10301 /* shutdown and destroy the HMC */
10302 if (pf->hw.hmc.hmc_obj) {
10303 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
10304 if (ret_code)
10305 dev_warn(&pdev->dev,
10306 "Failed to destroy the HMC resources: %d\n",
10307 ret_code);
10308 }
10309
10310 /* shutdown the adminq */
10311 ret_code = i40e_shutdown_adminq(&pf->hw);
10312 if (ret_code)
10313 dev_warn(&pdev->dev,
10314 "Failed to destroy the Admin Queue resources: %d\n",
10315 ret_code);
10316
10317 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
10318 i40e_clear_interrupt_scheme(pf);
10319 for (i = 0; i < pf->num_alloc_vsi; i++) {
10320 if (pf->vsi[i]) {
10321 i40e_vsi_clear_rings(pf->vsi[i]);
10322 i40e_vsi_clear(pf->vsi[i]);
10323 pf->vsi[i] = NULL;
10324 }
10325 }
10326
10327 for (i = 0; i < I40E_MAX_VEB; i++) {
10328 kfree(pf->veb[i]);
10329 pf->veb[i] = NULL;
10330 }
10331
10332 kfree(pf->qp_pile);
10333 kfree(pf->vsi);
10334
10335 iounmap(pf->hw.hw_addr);
10336 kfree(pf);
10337 pci_release_selected_regions(pdev,
10338 pci_select_bars(pdev, IORESOURCE_MEM));
10339
10340 pci_disable_pcie_error_reporting(pdev);
10341 pci_disable_device(pdev);
10342 }
10343
10344 /**
10345 * i40e_pci_error_detected - warning that something funky happened in PCI land
10346 * @pdev: PCI device information struct
10347 *
10348 * Called to warn that something happened and the error handling steps
10349 * are in progress. Allows the driver to quiesce things, be ready for
10350 * remediation.
10351 **/
10352 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
10353 enum pci_channel_state error)
10354 {
10355 struct i40e_pf *pf = pci_get_drvdata(pdev);
10356
10357 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
10358
10359 /* shutdown all operations */
10360 if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
10361 rtnl_lock();
10362 i40e_prep_for_reset(pf);
10363 rtnl_unlock();
10364 }
10365
10366 /* Request a slot reset */
10367 return PCI_ERS_RESULT_NEED_RESET;
10368 }
10369
10370 /**
10371 * i40e_pci_error_slot_reset - a PCI slot reset just happened
10372 * @pdev: PCI device information struct
10373 *
10374 * Called to find if the driver can work with the device now that
10375 * the pci slot has been reset. If a basic connection seems good
10376 * (registers are readable and have sane content) then return a
10377 * happy little PCI_ERS_RESULT_xxx.
10378 **/
10379 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
10380 {
10381 struct i40e_pf *pf = pci_get_drvdata(pdev);
10382 pci_ers_result_t result;
10383 int err;
10384 u32 reg;
10385
10386 dev_info(&pdev->dev, "%s\n", __func__);
10387 if (pci_enable_device_mem(pdev)) {
10388 dev_info(&pdev->dev,
10389 "Cannot re-enable PCI device after reset.\n");
10390 result = PCI_ERS_RESULT_DISCONNECT;
10391 } else {
10392 pci_set_master(pdev);
10393 pci_restore_state(pdev);
10394 pci_save_state(pdev);
10395 pci_wake_from_d3(pdev, false);
10396
10397 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
10398 if (reg == 0)
10399 result = PCI_ERS_RESULT_RECOVERED;
10400 else
10401 result = PCI_ERS_RESULT_DISCONNECT;
10402 }
10403
10404 err = pci_cleanup_aer_uncorrect_error_status(pdev);
10405 if (err) {
10406 dev_info(&pdev->dev,
10407 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
10408 err);
10409 /* non-fatal, continue */
10410 }
10411
10412 return result;
10413 }
10414
10415 /**
10416 * i40e_pci_error_resume - restart operations after PCI error recovery
10417 * @pdev: PCI device information struct
10418 *
10419 * Called to allow the driver to bring things back up after PCI error
10420 * and/or reset recovery has finished.
10421 **/
10422 static void i40e_pci_error_resume(struct pci_dev *pdev)
10423 {
10424 struct i40e_pf *pf = pci_get_drvdata(pdev);
10425
10426 dev_info(&pdev->dev, "%s\n", __func__);
10427 if (test_bit(__I40E_SUSPENDED, &pf->state))
10428 return;
10429
10430 rtnl_lock();
10431 i40e_handle_reset_warning(pf);
10432 rtnl_unlock();
10433 }
10434
10435 /**
10436 * i40e_shutdown - PCI callback for shutting down
10437 * @pdev: PCI device information struct
10438 **/
10439 static void i40e_shutdown(struct pci_dev *pdev)
10440 {
10441 struct i40e_pf *pf = pci_get_drvdata(pdev);
10442 struct i40e_hw *hw = &pf->hw;
10443
10444 set_bit(__I40E_SUSPENDED, &pf->state);
10445 set_bit(__I40E_DOWN, &pf->state);
10446 rtnl_lock();
10447 i40e_prep_for_reset(pf);
10448 rtnl_unlock();
10449
10450 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
10451 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
10452
10453 i40e_clear_interrupt_scheme(pf);
10454
10455 if (system_state == SYSTEM_POWER_OFF) {
10456 pci_wake_from_d3(pdev, pf->wol_en);
10457 pci_set_power_state(pdev, PCI_D3hot);
10458 }
10459 }
10460
10461 #ifdef CONFIG_PM
10462 /**
10463 * i40e_suspend - PCI callback for moving to D3
10464 * @pdev: PCI device information struct
10465 **/
10466 static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
10467 {
10468 struct i40e_pf *pf = pci_get_drvdata(pdev);
10469 struct i40e_hw *hw = &pf->hw;
10470
10471 set_bit(__I40E_SUSPENDED, &pf->state);
10472 set_bit(__I40E_DOWN, &pf->state);
10473 del_timer_sync(&pf->service_timer);
10474 cancel_work_sync(&pf->service_task);
10475 i40e_fdir_teardown(pf);
10476
10477 rtnl_lock();
10478 i40e_prep_for_reset(pf);
10479 rtnl_unlock();
10480
10481 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
10482 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
10483
10484 pci_wake_from_d3(pdev, pf->wol_en);
10485 pci_set_power_state(pdev, PCI_D3hot);
10486
10487 return 0;
10488 }
10489
10490 /**
10491 * i40e_resume - PCI callback for waking up from D3
10492 * @pdev: PCI device information struct
10493 **/
10494 static int i40e_resume(struct pci_dev *pdev)
10495 {
10496 struct i40e_pf *pf = pci_get_drvdata(pdev);
10497 u32 err;
10498
10499 pci_set_power_state(pdev, PCI_D0);
10500 pci_restore_state(pdev);
10501 /* pci_restore_state() clears dev->state_saved, so
10502 * call pci_save_state() again to restore it.
10503 */
10504 pci_save_state(pdev);
10505
10506 err = pci_enable_device_mem(pdev);
10507 if (err) {
10508 dev_err(&pdev->dev,
10509 "%s: Cannot enable PCI device from suspend\n",
10510 __func__);
10511 return err;
10512 }
10513 pci_set_master(pdev);
10514
10515 /* no wakeup events while running */
10516 pci_wake_from_d3(pdev, false);
10517
10518 /* handling the reset will rebuild the device state */
10519 if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
10520 clear_bit(__I40E_DOWN, &pf->state);
10521 rtnl_lock();
10522 i40e_reset_and_rebuild(pf, false);
10523 rtnl_unlock();
10524 }
10525
10526 return 0;
10527 }
10528
10529 #endif
10530 static const struct pci_error_handlers i40e_err_handler = {
10531 .error_detected = i40e_pci_error_detected,
10532 .slot_reset = i40e_pci_error_slot_reset,
10533 .resume = i40e_pci_error_resume,
10534 };
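
/* Recovery flow (informational): on a recoverable PCI error the AER core
 * invokes these handlers in order - .error_detected to quiesce the device,
 * .slot_reset once the link has been reset, and .resume after recovery
 * completes, at which point i40e_pci_error_resume() rebuilds the switch.
 */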
10535
10536 static struct pci_driver i40e_driver = {
10537 .name = i40e_driver_name,
10538 .id_table = i40e_pci_tbl,
10539 .probe = i40e_probe,
10540 .remove = i40e_remove,
10541 #ifdef CONFIG_PM
10542 .suspend = i40e_suspend,
10543 .resume = i40e_resume,
10544 #endif
10545 .shutdown = i40e_shutdown,
10546 .err_handler = &i40e_err_handler,
10547 .sriov_configure = i40e_pci_sriov_configure,
10548 };
10549
10550 /**
10551 * i40e_init_module - Driver registration routine
10552 *
10553 * i40e_init_module is the first routine called when the driver is
10554 * loaded. All it does is register with the PCI subsystem.
10555 **/
10556 static int __init i40e_init_module(void)
10557 {
10558 pr_info("%s: %s - version %s\n", i40e_driver_name,
10559 i40e_driver_string, i40e_driver_version_str);
10560 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
10561
10562 i40e_dbg_init();
10563 return pci_register_driver(&i40e_driver);
10564 }
10565 module_init(i40e_init_module);
10566
10567 /**
10568 * i40e_exit_module - Driver exit cleanup routine
10569 *
10570 * i40e_exit_module is called just before the driver is removed
10571 * from memory.
10572 **/
10573 static void __exit i40e_exit_module(void)
10574 {
10575 pci_unregister_driver(&i40e_driver);
10576 i40e_dbg_exit();
10577 }
10578 module_exit(i40e_exit_module);
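
/* Usage note (informational): the driver normally binds automatically via
 * the PCI ID table, but can be loaded and unloaded by hand:
 *
 *	# modprobe i40e
 *	# modprobe -r i40e
 */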