1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
20 *
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 ******************************************************************************/
26
27/* Local includes */
28#include "i40e.h"
29#include "i40e_diag.h"
30#ifdef CONFIG_I40E_VXLAN
31#include <net/vxlan.h>
32#endif
33
34const char i40e_driver_name[] = "i40e";
35static const char i40e_driver_string[] =
36 "Intel(R) Ethernet Connection XL710 Network Driver";
37
38#define DRV_KERN "-k"
39
40#define DRV_VERSION_MAJOR 0
41#define DRV_VERSION_MINOR 3
42#define DRV_VERSION_BUILD 46
43#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
44 __stringify(DRV_VERSION_MINOR) "." \
45 __stringify(DRV_VERSION_BUILD) DRV_KERN
46const char i40e_driver_version_str[] = DRV_VERSION;
47static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
48
49/* a few forward declarations */
50static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
51static void i40e_handle_reset_warning(struct i40e_pf *pf);
52static int i40e_add_vsi(struct i40e_vsi *vsi);
53static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
54static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
55static int i40e_setup_misc_vector(struct i40e_pf *pf);
56static void i40e_determine_queue_usage(struct i40e_pf *pf);
57static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
58static void i40e_fdir_sb_setup(struct i40e_pf *pf);
59static int i40e_veb_get_bw_info(struct i40e_veb *veb);
60
61/* i40e_pci_tbl - PCI Device ID Table
62 *
63 * Last entry must be all 0s
64 *
65 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
66 * Class, Class Mask, private data (not used) }
67 */
68static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
69 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
70 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X710), 0},
71 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
72 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
73 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
74 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
75 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_D), 0},
76 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
77 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
78 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
79 /* required last entry */
80 {0, }
81};
82MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
83
84#define I40E_MAX_VF_COUNT 128
85static int debug = -1;
86module_param(debug, int, 0);
87MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
88
89MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
90MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
91MODULE_LICENSE("GPL");
92MODULE_VERSION(DRV_VERSION);
93
94/**
95 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
96 * @hw: pointer to the HW structure
97 * @mem: ptr to mem struct to fill out
98 * @size: size of memory requested
99 * @alignment: what to align the allocation to
100 **/
101int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
102 u64 size, u32 alignment)
103{
104 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
105
106 mem->size = ALIGN(size, alignment);
107 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
108 &mem->pa, GFP_KERNEL);
109 if (!mem->va)
110 return -ENOMEM;
111
112 return 0;
113}
114
115/**
116 * i40e_free_dma_mem_d - OS specific memory free for shared code
117 * @hw: pointer to the HW structure
118 * @mem: ptr to mem struct to free
119 **/
120int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
121{
122 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
123
124 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
125 mem->va = NULL;
126 mem->pa = 0;
127 mem->size = 0;
128
129 return 0;
130}
131
132/**
133 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
134 * @hw: pointer to the HW structure
135 * @mem: ptr to mem struct to fill out
136 * @size: size of memory requested
137 **/
138int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
139 u32 size)
140{
141 mem->size = size;
142 mem->va = kzalloc(size, GFP_KERNEL);
143
144 if (!mem->va)
145 return -ENOMEM;
146
147 return 0;
148}
149
150/**
151 * i40e_free_virt_mem_d - OS specific memory free for shared code
152 * @hw: pointer to the HW structure
153 * @mem: ptr to mem struct to free
154 **/
155int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
156{
157 /* it's ok to kfree a NULL pointer */
158 kfree(mem->va);
159 mem->va = NULL;
160 mem->size = 0;
161
162 return 0;
163}
164
165/**
166 * i40e_get_lump - find a lump of free generic resource
167 * @pf: board private structure
168 * @pile: the pile of resource to search
169 * @needed: the number of items needed
170 * @id: an owner id to stick on the items assigned
171 *
172 * Returns the base item index of the lump, or negative for error
173 *
174 * The search_hint trick and lack of advanced fit-finding only work
175 * because we're highly likely to have all the same size lump requests.
176 * Linear search time and any fragmentation should be minimal.
177 **/
178static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
179 u16 needed, u16 id)
180{
181 int ret = -ENOMEM;
182 int i, j;
183
184 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
185 dev_info(&pf->pdev->dev,
186 "param err: pile=%p needed=%d id=0x%04x\n",
187 pile, needed, id);
188 return -EINVAL;
189 }
190
191 /* start the linear search with an imperfect hint */
192 i = pile->search_hint;
193 while (i < pile->num_entries) {
194 /* skip already allocated entries */
195 if (pile->list[i] & I40E_PILE_VALID_BIT) {
196 i++;
197 continue;
198 }
199
200 /* do we have enough in this lump? */
201 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
202 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
203 break;
204 }
205
206 if (j == needed) {
207 /* there was enough, so assign it to the requestor */
208 for (j = 0; j < needed; j++)
209 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
210 ret = i;
211 pile->search_hint = i + j;
212 break;
213 } else {
214 /* not enough, so skip over it and continue looking */
215 i += j;
216 }
217 }
218
219 return ret;
220}
221
222/**
223 * i40e_put_lump - return a lump of generic resource
224 * @pile: the pile of resource to search
225 * @index: the base item index
226 * @id: the owner id of the items assigned
227 *
228 * Returns the count of items in the lump
229 **/
230static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
231{
232 int valid_id = (id | I40E_PILE_VALID_BIT);
233 int count = 0;
234 int i;
235
236 if (!pile || index >= pile->num_entries)
237 return -EINVAL;
238
239 for (i = index;
240 i < pile->num_entries && pile->list[i] == valid_id;
241 i++) {
242 pile->list[i] = 0;
243 count++;
244 }
245
246 if (count && index < pile->search_hint)
247 pile->search_hint = index;
248
249 return count;
250}
251
252/**
253 * i40e_service_event_schedule - Schedule the service task to wake up
254 * @pf: board private structure
255 *
256 * If not already scheduled, this puts the task into the work queue
257 **/
258static void i40e_service_event_schedule(struct i40e_pf *pf)
259{
260 if (!test_bit(__I40E_DOWN, &pf->state) &&
261 !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
262 !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
263 schedule_work(&pf->service_task);
264}
265
266/**
267 * i40e_tx_timeout - Respond to a Tx Hang
268 * @netdev: network interface device structure
269 *
270 * If any port has noticed a Tx timeout, it is likely that the whole
271 * device is munged, not just the one netdev port, so go for the full
272 * reset.
273 **/
274static void i40e_tx_timeout(struct net_device *netdev)
275{
276 struct i40e_netdev_priv *np = netdev_priv(netdev);
277 struct i40e_vsi *vsi = np->vsi;
278 struct i40e_pf *pf = vsi->back;
279
280 pf->tx_timeout_count++;
281
282 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
283 pf->tx_timeout_recovery_level = 0;
284 pf->tx_timeout_last_recovery = jiffies;
285 netdev_info(netdev, "tx_timeout recovery level %d\n",
286 pf->tx_timeout_recovery_level);
287
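	/* escalate the recovery action one level each time the timeout recurs
	 * within 20 seconds of the last recovery attempt
	 */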
288 switch (pf->tx_timeout_recovery_level) {
289 case 0:
290 /* disable and re-enable queues for the VSI */
291 if (in_interrupt()) {
292 set_bit(__I40E_REINIT_REQUESTED, &pf->state);
293 set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
294 } else {
295 i40e_vsi_reinit_locked(vsi);
296 }
297 break;
298 case 1:
299 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
300 break;
301 case 2:
302 set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
303 break;
304 case 3:
305 set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
306 break;
307 default:
308 netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
309 set_bit(__I40E_DOWN, &vsi->state);
310 i40e_down(vsi);
311 break;
312 }
313 i40e_service_event_schedule(pf);
314 pf->tx_timeout_recovery_level++;
315}
316
317/**
318 * i40e_release_rx_desc - Store the new tail and head values
319 * @rx_ring: ring to bump
320 * @val: new head index
321 **/
322static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
323{
324 rx_ring->next_to_use = val;
325
326 /* Force memory writes to complete before letting h/w
327 * know there are new descriptors to fetch. (Only
328 * applicable for weak-ordered memory model archs,
329 * such as IA-64).
330 */
331 wmb();
332 writel(val, rx_ring->tail);
333}
334
335/**
336 * i40e_get_vsi_stats_struct - Get System Network Statistics
337 * @vsi: the VSI we care about
338 *
339 * Returns the address of the device statistics structure.
340 * The statistics are actually updated from the service task.
341 **/
342struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
343{
344 return &vsi->net_stats;
345}
346
347/**
348 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
349 * @netdev: network interface device structure
350 *
351 * Returns the address of the device statistics structure.
352 * The statistics are actually updated from the service task.
353 **/
354static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
355 struct net_device *netdev,
356 struct rtnl_link_stats64 *stats)
357{
358 struct i40e_netdev_priv *np = netdev_priv(netdev);
359 struct i40e_ring *tx_ring, *rx_ring;
360 struct i40e_vsi *vsi = np->vsi;
361 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
362 int i;
363
364 if (test_bit(__I40E_DOWN, &vsi->state))
365 return stats;
366
367 if (!vsi->tx_rings)
368 return stats;
369
370 rcu_read_lock();
371 for (i = 0; i < vsi->num_queue_pairs; i++) {
372 u64 bytes, packets;
373 unsigned int start;
374
375 tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
376 if (!tx_ring)
377 continue;
378
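		/* u64_stats seqcount: retry the snapshot if the ring updated
		 * its counters while we were reading them
		 */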
379 do {
380 start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
381 packets = tx_ring->stats.packets;
382 bytes = tx_ring->stats.bytes;
383 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
384
385 stats->tx_packets += packets;
386 stats->tx_bytes += bytes;
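		/* the Rx ring is allocated in the same block, immediately
		 * after the Tx ring, so it can be reached by pointer arithmetic
		 */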
387 rx_ring = &tx_ring[1];
388
389 do {
390 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
391 packets = rx_ring->stats.packets;
392 bytes = rx_ring->stats.bytes;
393 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
394
395 stats->rx_packets += packets;
396 stats->rx_bytes += bytes;
397 }
398 rcu_read_unlock();
399
400 /* following stats updated by i40e_watchdog_subtask() */
401 stats->multicast = vsi_stats->multicast;
402 stats->tx_errors = vsi_stats->tx_errors;
403 stats->tx_dropped = vsi_stats->tx_dropped;
404 stats->rx_errors = vsi_stats->rx_errors;
405 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
406 stats->rx_length_errors = vsi_stats->rx_length_errors;
407
408 return stats;
409}
410
411/**
412 * i40e_vsi_reset_stats - Resets all stats of the given vsi
413 * @vsi: the VSI to have its stats reset
414 **/
415void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
416{
417 struct rtnl_link_stats64 *ns;
418 int i;
419
420 if (!vsi)
421 return;
422
423 ns = i40e_get_vsi_stats_struct(vsi);
424 memset(ns, 0, sizeof(*ns));
425 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
426 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
427 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
428 if (vsi->rx_rings && vsi->rx_rings[0]) {
429 for (i = 0; i < vsi->num_queue_pairs; i++) {
430 memset(&vsi->rx_rings[i]->stats, 0,
431 sizeof(vsi->rx_rings[i]->stats));
432 memset(&vsi->rx_rings[i]->rx_stats, 0,
433 sizeof(vsi->rx_rings[i]->rx_stats));
434 memset(&vsi->tx_rings[i]->stats, 0,
435 sizeof(vsi->tx_rings[i]->stats));
436 memset(&vsi->tx_rings[i]->tx_stats, 0,
437 sizeof(vsi->tx_rings[i]->tx_stats));
438 }
439 }
440 vsi->stat_offsets_loaded = false;
441}
442
443/**
444 * i40e_pf_reset_stats - Reset all of the stats for the given pf
445 * @pf: the PF to be reset
446 **/
447void i40e_pf_reset_stats(struct i40e_pf *pf)
448{
449 memset(&pf->stats, 0, sizeof(pf->stats));
450 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
451 pf->stat_offsets_loaded = false;
452}
453
454/**
455 * i40e_stat_update48 - read and update a 48 bit stat from the chip
456 * @hw: ptr to the hardware info
457 * @hireg: the high 32 bit reg to read
458 * @loreg: the low 32 bit reg to read
459 * @offset_loaded: has the initial offset been loaded yet
460 * @offset: ptr to current offset value
461 * @stat: ptr to the stat
462 *
463 * Since the device stats are not reset at PFReset, they likely will not
464 * be zeroed when the driver starts. We'll save the first values read
465 * and use them as offsets to be subtracted from the raw values in order
466 * to report stats that count from zero. In the process, we also manage
467 * the potential roll-over.
468 **/
469static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
470 bool offset_loaded, u64 *offset, u64 *stat)
471{
472 u64 new_data;
473
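	/* the QEMU-emulated device cannot do a single 64-bit register read,
	 * so assemble the 48-bit counter from two 32-bit reads there
	 */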
474 if (hw->device_id == I40E_DEV_ID_QEMU) {
475 new_data = rd32(hw, loreg);
476 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
477 } else {
478 new_data = rd64(hw, loreg);
479 }
480 if (!offset_loaded)
481 *offset = new_data;
482 if (likely(new_data >= *offset))
483 *stat = new_data - *offset;
484 else
485 *stat = (new_data + ((u64)1 << 48)) - *offset;
486 *stat &= 0xFFFFFFFFFFFFULL;
487}
488
489/**
490 * i40e_stat_update32 - read and update a 32 bit stat from the chip
491 * @hw: ptr to the hardware info
492 * @reg: the hw reg to read
493 * @offset_loaded: has the initial offset been loaded yet
494 * @offset: ptr to current offset value
495 * @stat: ptr to the stat
496 **/
497static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
498 bool offset_loaded, u64 *offset, u64 *stat)
499{
500 u32 new_data;
501
502 new_data = rd32(hw, reg);
503 if (!offset_loaded)
504 *offset = new_data;
505 if (likely(new_data >= *offset))
506 *stat = (u32)(new_data - *offset);
507 else
508 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
509}
510
511/**
512 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
513 * @vsi: the VSI to be updated
514 **/
515void i40e_update_eth_stats(struct i40e_vsi *vsi)
516{
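	/* per-VSI counters live in the GLV_* register block selected by the
	 * stat counter index assigned to this VSI
	 */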
517 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
518 struct i40e_pf *pf = vsi->back;
519 struct i40e_hw *hw = &pf->hw;
520 struct i40e_eth_stats *oes;
521 struct i40e_eth_stats *es; /* device's eth stats */
522
523 es = &vsi->eth_stats;
524 oes = &vsi->eth_stats_offsets;
525
526 /* Gather up the stats that the hw collects */
527 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
528 vsi->stat_offsets_loaded,
529 &oes->tx_errors, &es->tx_errors);
530 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
531 vsi->stat_offsets_loaded,
532 &oes->rx_discards, &es->rx_discards);
533
534 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
535 I40E_GLV_GORCL(stat_idx),
536 vsi->stat_offsets_loaded,
537 &oes->rx_bytes, &es->rx_bytes);
538 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
539 I40E_GLV_UPRCL(stat_idx),
540 vsi->stat_offsets_loaded,
541 &oes->rx_unicast, &es->rx_unicast);
542 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
543 I40E_GLV_MPRCL(stat_idx),
544 vsi->stat_offsets_loaded,
545 &oes->rx_multicast, &es->rx_multicast);
546 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
547 I40E_GLV_BPRCL(stat_idx),
548 vsi->stat_offsets_loaded,
549 &oes->rx_broadcast, &es->rx_broadcast);
550
551 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
552 I40E_GLV_GOTCL(stat_idx),
553 vsi->stat_offsets_loaded,
554 &oes->tx_bytes, &es->tx_bytes);
555 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
556 I40E_GLV_UPTCL(stat_idx),
557 vsi->stat_offsets_loaded,
558 &oes->tx_unicast, &es->tx_unicast);
559 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
560 I40E_GLV_MPTCL(stat_idx),
561 vsi->stat_offsets_loaded,
562 &oes->tx_multicast, &es->tx_multicast);
563 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
564 I40E_GLV_BPTCL(stat_idx),
565 vsi->stat_offsets_loaded,
566 &oes->tx_broadcast, &es->tx_broadcast);
567 vsi->stat_offsets_loaded = true;
568}
569
570/**
571 * i40e_update_veb_stats - Update Switch component statistics
572 * @veb: the VEB being updated
573 **/
574static void i40e_update_veb_stats(struct i40e_veb *veb)
575{
576 struct i40e_pf *pf = veb->pf;
577 struct i40e_hw *hw = &pf->hw;
578 struct i40e_eth_stats *oes;
579 struct i40e_eth_stats *es; /* device's eth stats */
580 int idx = 0;
581
582 idx = veb->stats_idx;
583 es = &veb->stats;
584 oes = &veb->stats_offsets;
585
586 /* Gather up the stats that the hw collects */
587 i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
588 veb->stat_offsets_loaded,
589 &oes->tx_discards, &es->tx_discards);
590 if (hw->revision_id > 0)
591 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
592 veb->stat_offsets_loaded,
593 &oes->rx_unknown_protocol,
594 &es->rx_unknown_protocol);
595 i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
596 veb->stat_offsets_loaded,
597 &oes->rx_bytes, &es->rx_bytes);
598 i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
599 veb->stat_offsets_loaded,
600 &oes->rx_unicast, &es->rx_unicast);
601 i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
602 veb->stat_offsets_loaded,
603 &oes->rx_multicast, &es->rx_multicast);
604 i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
605 veb->stat_offsets_loaded,
606 &oes->rx_broadcast, &es->rx_broadcast);
607
608 i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
609 veb->stat_offsets_loaded,
610 &oes->tx_bytes, &es->tx_bytes);
611 i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
612 veb->stat_offsets_loaded,
613 &oes->tx_unicast, &es->tx_unicast);
614 i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
615 veb->stat_offsets_loaded,
616 &oes->tx_multicast, &es->tx_multicast);
617 i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
618 veb->stat_offsets_loaded,
619 &oes->tx_broadcast, &es->tx_broadcast);
620 veb->stat_offsets_loaded = true;
621}
622
623/**
624 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
625 * @pf: the corresponding PF
626 *
627 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
628 **/
629static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
630{
631 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
632 struct i40e_hw_port_stats *nsd = &pf->stats;
633 struct i40e_hw *hw = &pf->hw;
634 u64 xoff = 0;
635 u16 i, v;
636
637 if ((hw->fc.current_mode != I40E_FC_FULL) &&
638 (hw->fc.current_mode != I40E_FC_RX_PAUSE))
639 return;
640
641 xoff = nsd->link_xoff_rx;
642 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
643 pf->stat_offsets_loaded,
644 &osd->link_xoff_rx, &nsd->link_xoff_rx);
645
646 /* No new LFC xoff rx */
647 if (!(nsd->link_xoff_rx - xoff))
648 return;
649
650 /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
651 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
652 struct i40e_vsi *vsi = pf->vsi[v];
653
654 if (!vsi)
655 continue;
656
657 for (i = 0; i < vsi->num_queue_pairs; i++) {
658 struct i40e_ring *ring = vsi->tx_rings[i];
659 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
660 }
661 }
662}
663
664/**
665 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
666 * @pf: the corresponding PF
667 *
668 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
669 **/
670static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
671{
672 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
673 struct i40e_hw_port_stats *nsd = &pf->stats;
674 bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
675 struct i40e_dcbx_config *dcb_cfg;
676 struct i40e_hw *hw = &pf->hw;
677 u16 i, v;
678 u8 tc;
679
680 dcb_cfg = &hw->local_dcbx_config;
681
682 /* See if DCB enabled with PFC TC */
683 if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
684 !(dcb_cfg->pfc.pfcenable)) {
685 i40e_update_link_xoff_rx(pf);
686 return;
687 }
688
689 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
690 u64 prio_xoff = nsd->priority_xoff_rx[i];
691 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
692 pf->stat_offsets_loaded,
693 &osd->priority_xoff_rx[i],
694 &nsd->priority_xoff_rx[i]);
695
696 /* No new PFC xoff rx */
697 if (!(nsd->priority_xoff_rx[i] - prio_xoff))
698 continue;
699 /* Get the TC for given priority */
700 tc = dcb_cfg->etscfg.prioritytable[i];
701 xoff[tc] = true;
702 }
703
704 /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
705 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
706 struct i40e_vsi *vsi = pf->vsi[v];
707
708 if (!vsi)
709 continue;
710
711 for (i = 0; i < vsi->num_queue_pairs; i++) {
712 struct i40e_ring *ring = vsi->tx_rings[i];
713
714 tc = ring->dcb_tc;
715 if (xoff[tc])
716 clear_bit(__I40E_HANG_CHECK_ARMED,
717 &ring->state);
718 }
719 }
720}
721
722/**
723 * i40e_update_stats - Update the board statistics counters.
724 * @vsi: the VSI to be updated
725 *
726 * There are a few instances where we store the same stat in a
727 * couple of different structs. This is partly because we have
728 * the netdev stats that need to be filled out, which is slightly
729 * different from the "eth_stats" defined by the chip and used in
730 * VF communications. We sort it all out here in a central place.
731 **/
732void i40e_update_stats(struct i40e_vsi *vsi)
733{
734 struct i40e_pf *pf = vsi->back;
735 struct i40e_hw *hw = &pf->hw;
736 struct rtnl_link_stats64 *ons;
737 struct rtnl_link_stats64 *ns; /* netdev stats */
738 struct i40e_eth_stats *oes;
739 struct i40e_eth_stats *es; /* device's eth stats */
740 u32 tx_restart, tx_busy;
741 u32 rx_page, rx_buf;
742 u64 rx_p, rx_b;
743 u64 tx_p, tx_b;
744 u32 val;
745 int i;
746 u16 q;
747
748 if (test_bit(__I40E_DOWN, &vsi->state) ||
749 test_bit(__I40E_CONFIG_BUSY, &pf->state))
750 return;
751
752 ns = i40e_get_vsi_stats_struct(vsi);
753 ons = &vsi->net_stats_offsets;
754 es = &vsi->eth_stats;
755 oes = &vsi->eth_stats_offsets;
756
757 /* Gather up the netdev and vsi stats that the driver collects
758 * on the fly during packet processing
759 */
760 rx_b = rx_p = 0;
761 tx_b = tx_p = 0;
762 tx_restart = tx_busy = 0;
763 rx_page = 0;
764 rx_buf = 0;
765 rcu_read_lock();
766 for (q = 0; q < vsi->num_queue_pairs; q++) {
767 struct i40e_ring *p;
768 u64 bytes, packets;
769 unsigned int start;
770
771 /* locate Tx ring */
772 p = ACCESS_ONCE(vsi->tx_rings[q]);
773
774 do {
775 start = u64_stats_fetch_begin_irq(&p->syncp);
776 packets = p->stats.packets;
777 bytes = p->stats.bytes;
778 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
779 tx_b += bytes;
780 tx_p += packets;
781 tx_restart += p->tx_stats.restart_queue;
782 tx_busy += p->tx_stats.tx_busy;
783
784 /* Rx queue is part of the same block as Tx queue */
785 p = &p[1];
786 do {
787 start = u64_stats_fetch_begin_irq(&p->syncp);
788 packets = p->stats.packets;
789 bytes = p->stats.bytes;
790 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
791 rx_b += bytes;
792 rx_p += packets;
793 rx_buf += p->rx_stats.alloc_buff_failed;
794 rx_page += p->rx_stats.alloc_page_failed;
795 }
796 rcu_read_unlock();
797 vsi->tx_restart = tx_restart;
798 vsi->tx_busy = tx_busy;
799 vsi->rx_page_failed = rx_page;
800 vsi->rx_buf_failed = rx_buf;
801
802 ns->rx_packets = rx_p;
803 ns->rx_bytes = rx_b;
804 ns->tx_packets = tx_p;
805 ns->tx_bytes = tx_b;
806
807 i40e_update_eth_stats(vsi);
808 /* update netdev stats from eth stats */
809 ons->rx_errors = oes->rx_errors;
810 ns->rx_errors = es->rx_errors;
811 ons->tx_errors = oes->tx_errors;
812 ns->tx_errors = es->tx_errors;
813 ons->multicast = oes->rx_multicast;
814 ns->multicast = es->rx_multicast;
815 ons->tx_dropped = oes->tx_discards;
816 ns->tx_dropped = es->tx_discards;
817
818 /* Get the port data only if this is the main PF VSI */
819 if (vsi == pf->vsi[pf->lan_vsi]) {
820 struct i40e_hw_port_stats *nsd = &pf->stats;
821 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
822
823 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
824 I40E_GLPRT_GORCL(hw->port),
825 pf->stat_offsets_loaded,
826 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
827 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
828 I40E_GLPRT_GOTCL(hw->port),
829 pf->stat_offsets_loaded,
830 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
831 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
832 pf->stat_offsets_loaded,
833 &osd->eth.rx_discards,
834 &nsd->eth.rx_discards);
835 i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
836 pf->stat_offsets_loaded,
837 &osd->eth.tx_discards,
838 &nsd->eth.tx_discards);
839 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
840 I40E_GLPRT_MPRCL(hw->port),
841 pf->stat_offsets_loaded,
842 &osd->eth.rx_multicast,
843 &nsd->eth.rx_multicast);
844
845 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
846 pf->stat_offsets_loaded,
847 &osd->tx_dropped_link_down,
848 &nsd->tx_dropped_link_down);
849
850 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
851 pf->stat_offsets_loaded,
852 &osd->crc_errors, &nsd->crc_errors);
853 ns->rx_crc_errors = nsd->crc_errors;
854
855 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
856 pf->stat_offsets_loaded,
857 &osd->illegal_bytes, &nsd->illegal_bytes);
858 ns->rx_errors = nsd->crc_errors
859 + nsd->illegal_bytes;
860
861 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
862 pf->stat_offsets_loaded,
863 &osd->mac_local_faults,
864 &nsd->mac_local_faults);
865 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
866 pf->stat_offsets_loaded,
867 &osd->mac_remote_faults,
868 &nsd->mac_remote_faults);
869
870 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
871 pf->stat_offsets_loaded,
872 &osd->rx_length_errors,
873 &nsd->rx_length_errors);
874 ns->rx_length_errors = nsd->rx_length_errors;
875
876 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
877 pf->stat_offsets_loaded,
878 &osd->link_xon_rx, &nsd->link_xon_rx);
879 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
880 pf->stat_offsets_loaded,
881 &osd->link_xon_tx, &nsd->link_xon_tx);
882 i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
883 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
884 pf->stat_offsets_loaded,
885 &osd->link_xoff_tx, &nsd->link_xoff_tx);
886
887 for (i = 0; i < 8; i++) {
888 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
889 pf->stat_offsets_loaded,
890 &osd->priority_xon_rx[i],
891 &nsd->priority_xon_rx[i]);
892 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
893 pf->stat_offsets_loaded,
894 &osd->priority_xon_tx[i],
895 &nsd->priority_xon_tx[i]);
896 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
897 pf->stat_offsets_loaded,
898 &osd->priority_xoff_tx[i],
899 &nsd->priority_xoff_tx[i]);
900 i40e_stat_update32(hw,
901 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
902 pf->stat_offsets_loaded,
903 &osd->priority_xon_2_xoff[i],
904 &nsd->priority_xon_2_xoff[i]);
905 }
906
907 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
908 I40E_GLPRT_PRC64L(hw->port),
909 pf->stat_offsets_loaded,
910 &osd->rx_size_64, &nsd->rx_size_64);
911 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
912 I40E_GLPRT_PRC127L(hw->port),
913 pf->stat_offsets_loaded,
914 &osd->rx_size_127, &nsd->rx_size_127);
915 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
916 I40E_GLPRT_PRC255L(hw->port),
917 pf->stat_offsets_loaded,
918 &osd->rx_size_255, &nsd->rx_size_255);
919 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
920 I40E_GLPRT_PRC511L(hw->port),
921 pf->stat_offsets_loaded,
922 &osd->rx_size_511, &nsd->rx_size_511);
923 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
924 I40E_GLPRT_PRC1023L(hw->port),
925 pf->stat_offsets_loaded,
926 &osd->rx_size_1023, &nsd->rx_size_1023);
927 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
928 I40E_GLPRT_PRC1522L(hw->port),
929 pf->stat_offsets_loaded,
930 &osd->rx_size_1522, &nsd->rx_size_1522);
931 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
932 I40E_GLPRT_PRC9522L(hw->port),
933 pf->stat_offsets_loaded,
934 &osd->rx_size_big, &nsd->rx_size_big);
935
936 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
937 I40E_GLPRT_PTC64L(hw->port),
938 pf->stat_offsets_loaded,
939 &osd->tx_size_64, &nsd->tx_size_64);
940 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
941 I40E_GLPRT_PTC127L(hw->port),
942 pf->stat_offsets_loaded,
943 &osd->tx_size_127, &nsd->tx_size_127);
944 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
945 I40E_GLPRT_PTC255L(hw->port),
946 pf->stat_offsets_loaded,
947 &osd->tx_size_255, &nsd->tx_size_255);
948 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
949 I40E_GLPRT_PTC511L(hw->port),
950 pf->stat_offsets_loaded,
951 &osd->tx_size_511, &nsd->tx_size_511);
952 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
953 I40E_GLPRT_PTC1023L(hw->port),
954 pf->stat_offsets_loaded,
955 &osd->tx_size_1023, &nsd->tx_size_1023);
956 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
957 I40E_GLPRT_PTC1522L(hw->port),
958 pf->stat_offsets_loaded,
959 &osd->tx_size_1522, &nsd->tx_size_1522);
960 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
961 I40E_GLPRT_PTC9522L(hw->port),
962 pf->stat_offsets_loaded,
963 &osd->tx_size_big, &nsd->tx_size_big);
964
965 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
966 pf->stat_offsets_loaded,
967 &osd->rx_undersize, &nsd->rx_undersize);
968 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
969 pf->stat_offsets_loaded,
970 &osd->rx_fragments, &nsd->rx_fragments);
971 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
972 pf->stat_offsets_loaded,
973 &osd->rx_oversize, &nsd->rx_oversize);
974 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
975 pf->stat_offsets_loaded,
976 &osd->rx_jabber, &nsd->rx_jabber);
977
978 val = rd32(hw, I40E_PRTPM_EEE_STAT);
979 nsd->tx_lpi_status =
980 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
981 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
982 nsd->rx_lpi_status =
983 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
984 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
985 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
986 pf->stat_offsets_loaded,
987 &osd->tx_lpi_count, &nsd->tx_lpi_count);
988 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
989 pf->stat_offsets_loaded,
990 &osd->rx_lpi_count, &nsd->rx_lpi_count);
991 }
992
993 pf->stat_offsets_loaded = true;
994}
995
996/**
997 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
998 * @vsi: the VSI to be searched
999 * @macaddr: the MAC address
1000 * @vlan: the vlan
1001 * @is_vf: make sure it's a vf filter, else doesn't matter
1002 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1003 *
1004 * Returns ptr to the filter object or NULL
1005 **/
1006static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1007 u8 *macaddr, s16 vlan,
1008 bool is_vf, bool is_netdev)
1009{
1010 struct i40e_mac_filter *f;
1011
1012 if (!vsi || !macaddr)
1013 return NULL;
1014
1015 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1016 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1017 (vlan == f->vlan) &&
1018 (!is_vf || f->is_vf) &&
1019 (!is_netdev || f->is_netdev))
1020 return f;
1021 }
1022 return NULL;
1023}
1024
1025/**
1026 * i40e_find_mac - Find a mac addr in the macvlan filters list
1027 * @vsi: the VSI to be searched
1028 * @macaddr: the MAC address we are searching for
1029 * @is_vf: make sure it's a vf filter, else doesn't matter
1030 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1031 *
1032 * Returns the first filter with the provided MAC address or NULL if
1033 * MAC address was not found
1034 **/
1035struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
1036 bool is_vf, bool is_netdev)
1037{
1038 struct i40e_mac_filter *f;
1039
1040 if (!vsi || !macaddr)
1041 return NULL;
1042
1043 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1044 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1045 (!is_vf || f->is_vf) &&
1046 (!is_netdev || f->is_netdev))
1047 return f;
1048 }
1049 return NULL;
1050}
1051
1052/**
1053 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1054 * @vsi: the VSI to be searched
1055 *
1056 * Returns true if VSI is in vlan mode or false otherwise
1057 **/
1058bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1059{
1060 struct i40e_mac_filter *f;
1061
1062 /* Only if every filter has vlan == -1 is the VSI not in vlan mode,
1063 * so we have to walk the whole list to be sure
1064 */
1065 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1066 if (f->vlan >= 0)
1067 return true;
1068 }
1069
1070 return false;
1071}
1072
1073/**
1074 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
1075 * @vsi: the VSI to be searched
1076 * @macaddr: the mac address to be filtered
1077 * @is_vf: true if it is a vf
1078 * @is_netdev: true if it is a netdev
1079 *
1080 * Goes through all the macvlan filters and adds a
1081 * macvlan filter for each unique vlan that already exists
1082 *
1083 * Returns first filter found on success, else NULL
1084 **/
1085struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
1086 bool is_vf, bool is_netdev)
1087{
1088 struct i40e_mac_filter *f;
1089
1090 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1091 if (!i40e_find_filter(vsi, macaddr, f->vlan,
1092 is_vf, is_netdev)) {
1093 if (!i40e_add_filter(vsi, macaddr, f->vlan,
1094 is_vf, is_netdev))
1095 return NULL;
1096 }
1097 }
1098
1099 return list_first_entry_or_null(&vsi->mac_filter_list,
1100 struct i40e_mac_filter, list);
1101}
1102
1103/**
1104 * i40e_add_filter - Add a mac/vlan filter to the VSI
1105 * @vsi: the VSI to be searched
1106 * @macaddr: the MAC address
1107 * @vlan: the vlan
1108 * @is_vf: make sure it's a vf filter, else doesn't matter
1109 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1110 *
1111 * Returns ptr to the filter object or NULL when no memory available.
1112 **/
1113struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1114 u8 *macaddr, s16 vlan,
1115 bool is_vf, bool is_netdev)
1116{
1117 struct i40e_mac_filter *f;
1118
1119 if (!vsi || !macaddr)
1120 return NULL;
1121
1122 f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1123 if (!f) {
1124 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1125 if (!f)
1126 goto add_filter_out;
1127
1128 memcpy(f->macaddr, macaddr, ETH_ALEN);
1129 f->vlan = vlan;
1130 f->changed = true;
1131
1132 INIT_LIST_HEAD(&f->list);
1133 list_add(&f->list, &vsi->mac_filter_list);
1134 }
1135
1136 /* increment counter and add a new flag if needed */
1137 if (is_vf) {
1138 if (!f->is_vf) {
1139 f->is_vf = true;
1140 f->counter++;
1141 }
1142 } else if (is_netdev) {
1143 if (!f->is_netdev) {
1144 f->is_netdev = true;
1145 f->counter++;
1146 }
1147 } else {
1148 f->counter++;
1149 }
1150
1151 /* changed tells sync_filters_subtask to
1152 * push the filter down to the firmware
1153 */
1154 if (f->changed) {
1155 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1156 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1157 }
1158
1159add_filter_out:
1160 return f;
1161}
1162
1163/**
1164 * i40e_del_filter - Remove a mac/vlan filter from the VSI
1165 * @vsi: the VSI to be searched
1166 * @macaddr: the MAC address
1167 * @vlan: the vlan
1168 * @is_vf: make sure it's a vf filter, else doesn't matter
1169 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1170 **/
1171void i40e_del_filter(struct i40e_vsi *vsi,
1172 u8 *macaddr, s16 vlan,
1173 bool is_vf, bool is_netdev)
1174{
1175 struct i40e_mac_filter *f;
1176
1177 if (!vsi || !macaddr)
1178 return;
1179
1180 f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1181 if (!f || f->counter == 0)
1182 return;
1183
1184 if (is_vf) {
1185 if (f->is_vf) {
1186 f->is_vf = false;
1187 f->counter--;
1188 }
1189 } else if (is_netdev) {
1190 if (f->is_netdev) {
1191 f->is_netdev = false;
1192 f->counter--;
1193 }
1194 } else {
1195 /* make sure we don't remove a filter in use by vf or netdev */
1196 int min_f = 0;
1197 min_f += (f->is_vf ? 1 : 0);
1198 min_f += (f->is_netdev ? 1 : 0);
1199
1200 if (f->counter > min_f)
1201 f->counter--;
1202 }
1203
1204 /* counter == 0 tells sync_filters_subtask to
1205 * remove the filter from the firmware's list
1206 */
1207 if (f->counter == 0) {
1208 f->changed = true;
1209 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1210 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1211 }
1212}
1213
1214/**
1215 * i40e_set_mac - NDO callback to set mac address
1216 * @netdev: network interface device structure
1217 * @p: pointer to an address structure
1218 *
1219 * Returns 0 on success, negative on failure
1220 **/
1221static int i40e_set_mac(struct net_device *netdev, void *p)
1222{
1223 struct i40e_netdev_priv *np = netdev_priv(netdev);
1224 struct i40e_vsi *vsi = np->vsi;
1225 struct sockaddr *addr = p;
1226 struct i40e_mac_filter *f;
1227
1228 if (!is_valid_ether_addr(addr->sa_data))
1229 return -EADDRNOTAVAIL;
1230
1231 netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);
1232
1233 if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
1234 return 0;
1235
1236 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
1237 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
1238 return -EADDRNOTAVAIL;
1239
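	/* for the main VSI, also hand the new address to the firmware as a
	 * locally administered address and mirror it into hw.mac.addr
	 */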
1240 if (vsi->type == I40E_VSI_MAIN) {
1241 i40e_status ret;
1242 ret = i40e_aq_mac_address_write(&vsi->back->hw,
1243 I40E_AQC_WRITE_TYPE_LAA_ONLY,
1244 addr->sa_data, NULL);
1245 if (ret) {
1246 netdev_info(netdev,
1247 "Addr change for Main VSI failed: %d\n",
1248 ret);
1249 return -EADDRNOTAVAIL;
1250 }
1251
1252 memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
1253 }
1254
1255 /* In order to be sure to not drop any packets, add the new address
1256 * then delete the old one.
1257 */
1258 f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
1259 if (!f)
1260 return -ENOMEM;
1261
1262 i40e_sync_vsi_filters(vsi);
1263 i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
1264 i40e_sync_vsi_filters(vsi);
1265
1266 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1267
1268 return 0;
1269}
1270
1271/**
1272 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1273 * @vsi: the VSI being setup
1274 * @ctxt: VSI context structure
1275 * @enabled_tc: Enabled TCs bitmap
1276 * @is_add: True if called before Add VSI
1277 *
1278 * Setup VSI queue mapping for enabled traffic classes.
1279 **/
1280static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1281 struct i40e_vsi_context *ctxt,
1282 u8 enabled_tc,
1283 bool is_add)
1284{
1285 struct i40e_pf *pf = vsi->back;
1286 u16 sections = 0;
1287 u8 netdev_tc = 0;
1288 u16 numtc = 0;
1289 u16 qcount;
1290 u8 offset;
1291 u16 qmap;
1292 int i;
1293 u16 num_tc_qps = 0;
1294
1295 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1296 offset = 0;
1297
1298 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1299 /* Find numtc from enabled TC bitmap */
1300 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1301 if (enabled_tc & (1 << i)) /* TC is enabled */
1302 numtc++;
1303 }
1304 if (!numtc) {
1305 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1306 numtc = 1;
1307 }
1308 } else {
1309 /* At least TC0 is enabled in case of non-DCB case */
1310 numtc = 1;
1311 }
1312
1313 vsi->tc_config.numtc = numtc;
1314 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1315 /* Number of queues per enabled TC */
1316 num_tc_qps = rounddown_pow_of_two(vsi->alloc_queue_pairs/numtc);
1317 num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
1318
1319 /* Setup queue offset/count for all TCs for given VSI */
1320 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1321 /* See if the given TC is enabled for the given VSI */
1322 if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
1323 int pow, num_qps;
1324
1325 switch (vsi->type) {
1326 case I40E_VSI_MAIN:
1327 qcount = min_t(int, pf->rss_size, num_tc_qps);
1328 break;
1329 case I40E_VSI_FDIR:
1330 case I40E_VSI_SRIOV:
1331 case I40E_VSI_VMDQ2:
1332 default:
1333 qcount = num_tc_qps;
1334 WARN_ON(i != 0);
1335 break;
1336 }
1337 vsi->tc_config.tc_info[i].qoffset = offset;
1338 vsi->tc_config.tc_info[i].qcount = qcount;
1339
1340 /* find the power-of-2 of the number of queue pairs */
1341 num_qps = qcount;
1342 pow = 0;
1343 while (num_qps && ((1 << pow) < qcount)) {
1344 pow++;
1345 num_qps >>= 1;
1346 }
1347
1348 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
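			/* encode this TC's first queue and its power-of-2
			 * queue count into the hardware queue map entry
			 */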
1349 qmap =
1350 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1351 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1352
1353 offset += qcount;
1354 } else {
1355 /* TC is not enabled so set the offset to
1356 * default queue and allocate one queue
1357 * for the given TC.
1358 */
1359 vsi->tc_config.tc_info[i].qoffset = 0;
1360 vsi->tc_config.tc_info[i].qcount = 1;
1361 vsi->tc_config.tc_info[i].netdev_tc = 0;
1362
1363 qmap = 0;
1364 }
1365 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1366 }
1367
1368 /* Set actual Tx/Rx queue pairs */
1369 vsi->num_queue_pairs = offset;
1370
1371 /* Scheduler section valid can only be set for ADD VSI */
1372 if (is_add) {
1373 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1374
1375 ctxt->info.up_enable_bits = enabled_tc;
1376 }
1377 if (vsi->type == I40E_VSI_SRIOV) {
1378 ctxt->info.mapping_flags |=
1379 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1380 for (i = 0; i < vsi->num_queue_pairs; i++)
1381 ctxt->info.queue_mapping[i] =
1382 cpu_to_le16(vsi->base_queue + i);
1383 } else {
1384 ctxt->info.mapping_flags |=
1385 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1386 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1387 }
1388 ctxt->info.valid_sections |= cpu_to_le16(sections);
1389}
1390
1391/**
1392 * i40e_set_rx_mode - NDO callback to set the netdev filters
1393 * @netdev: network interface device structure
1394 **/
1395static void i40e_set_rx_mode(struct net_device *netdev)
1396{
1397 struct i40e_netdev_priv *np = netdev_priv(netdev);
1398 struct i40e_mac_filter *f, *ftmp;
1399 struct i40e_vsi *vsi = np->vsi;
1400 struct netdev_hw_addr *uca;
1401 struct netdev_hw_addr *mca;
1402 struct netdev_hw_addr *ha;
1403
1404 /* add addr if not already in the filter list */
1405 netdev_for_each_uc_addr(uca, netdev) {
1406 if (!i40e_find_mac(vsi, uca->addr, false, true)) {
1407 if (i40e_is_vsi_in_vlan(vsi))
1408 i40e_put_mac_in_vlan(vsi, uca->addr,
1409 false, true);
1410 else
1411 i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
1412 false, true);
1413 }
1414 }
1415
1416 netdev_for_each_mc_addr(mca, netdev) {
1417 if (!i40e_find_mac(vsi, mca->addr, false, true)) {
1418 if (i40e_is_vsi_in_vlan(vsi))
1419 i40e_put_mac_in_vlan(vsi, mca->addr,
1420 false, true);
1421 else
1422 i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
1423 false, true);
1424 }
1425 }
1426
1427 /* remove filter if not in netdev list */
1428 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1429 bool found = false;
1430
1431 if (!f->is_netdev)
1432 continue;
1433
1434 if (is_multicast_ether_addr(f->macaddr)) {
1435 netdev_for_each_mc_addr(mca, netdev) {
1436 if (ether_addr_equal(mca->addr, f->macaddr)) {
1437 found = true;
1438 break;
1439 }
1440 }
1441 } else {
1442 netdev_for_each_uc_addr(uca, netdev) {
1443 if (ether_addr_equal(uca->addr, f->macaddr)) {
1444 found = true;
1445 break;
1446 }
1447 }
1448
1449 for_each_dev_addr(netdev, ha) {
1450 if (ether_addr_equal(ha->addr, f->macaddr)) {
1451 found = true;
1452 break;
1453 }
1454 }
1455 }
1456 if (!found)
1457 i40e_del_filter(
1458 vsi, f->macaddr, I40E_VLAN_ANY, false, true);
1459 }
1460
1461 /* check for other flag changes */
1462 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1463 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1464 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1465 }
1466}
1467
1468/**
1469 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1470 * @vsi: ptr to the VSI
1471 *
1472 * Push any outstanding VSI filter changes through the AdminQ.
1473 *
1474 * Returns 0 or error value
1475 **/
1476int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1477{
1478 struct i40e_mac_filter *f, *ftmp;
1479 bool promisc_forced_on = false;
1480 bool add_happened = false;
1481 int filter_list_len = 0;
1482 u32 changed_flags = 0;
1483 i40e_status aq_ret = 0;
1484 struct i40e_pf *pf;
1485 int num_add = 0;
1486 int num_del = 0;
1487 u16 cmd_flags;
1488
1489 /* empty array typed pointers, kcalloc later */
1490 struct i40e_aqc_add_macvlan_element_data *add_list;
1491 struct i40e_aqc_remove_macvlan_element_data *del_list;
1492
1493 while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1494 usleep_range(1000, 2000);
1495 pf = vsi->back;
1496
1497 if (vsi->netdev) {
1498 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1499 vsi->current_netdev_flags = vsi->netdev->flags;
1500 }
1501
1502 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1503 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1504
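		/* batch the deletes so each AdminQ request fits in one ASQ buffer */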
1505 filter_list_len = pf->hw.aq.asq_buf_size /
1506 sizeof(struct i40e_aqc_remove_macvlan_element_data);
1507 del_list = kcalloc(filter_list_len,
1508 sizeof(struct i40e_aqc_remove_macvlan_element_data),
1509 GFP_KERNEL);
1510 if (!del_list)
1511 return -ENOMEM;
1512
1513 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1514 if (!f->changed)
1515 continue;
1516
1517 if (f->counter != 0)
1518 continue;
1519 f->changed = false;
1520 cmd_flags = 0;
1521
1522 /* add to delete list */
1523 memcpy(del_list[num_del].mac_addr,
1524 f->macaddr, ETH_ALEN);
1525 del_list[num_del].vlan_tag =
1526 cpu_to_le16((u16)(f->vlan ==
1527 I40E_VLAN_ANY ? 0 : f->vlan));
1528
1529 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1530 del_list[num_del].flags = cmd_flags;
1531 num_del++;
1532
1533 /* unlink from filter list */
1534 list_del(&f->list);
1535 kfree(f);
1536
1537 /* flush a full buffer */
1538 if (num_del == filter_list_len) {
1539 aq_ret = i40e_aq_remove_macvlan(&pf->hw,
1540 vsi->seid, del_list, num_del,
1541 NULL);
1542 num_del = 0;
1543 memset(del_list, 0, sizeof(*del_list));
1544
1545 if (aq_ret)
1546 dev_info(&pf->pdev->dev,
1547 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
1548 aq_ret,
1549 pf->hw.aq.asq_last_status);
1550 }
1551 }
1552 if (num_del) {
1553 aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1554 del_list, num_del, NULL);
1555 num_del = 0;
1556
1557 if (aq_ret)
1558 dev_info(&pf->pdev->dev,
1559 "ignoring delete macvlan error, err %d, aq_err %d\n",
1560 aq_ret, pf->hw.aq.asq_last_status);
1561 }
1562
1563 kfree(del_list);
1564 del_list = NULL;
1565
1566 /* do all the adds now */
1567 filter_list_len = pf->hw.aq.asq_buf_size /
1568 sizeof(struct i40e_aqc_add_macvlan_element_data),
1569 add_list = kcalloc(filter_list_len,
1570 sizeof(struct i40e_aqc_add_macvlan_element_data),
1571 GFP_KERNEL);
1572 if (!add_list)
1573 return -ENOMEM;
1574
1575 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1576 if (!f->changed)
1577 continue;
1578
1579 if (f->counter == 0)
1580 continue;
1581 f->changed = false;
1582 add_happened = true;
1583 cmd_flags = 0;
1584
1585 /* add to add array */
1586 memcpy(add_list[num_add].mac_addr,
1587 f->macaddr, ETH_ALEN);
1588 add_list[num_add].vlan_tag =
1589 cpu_to_le16(
1590 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
1591 add_list[num_add].queue_number = 0;
1592
1593 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1594 add_list[num_add].flags = cpu_to_le16(cmd_flags);
1595 num_add++;
1596
1597 /* flush a full buffer */
1598 if (num_add == filter_list_len) {
1599 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1600 add_list, num_add,
1601 NULL);
1602 num_add = 0;
1603
1604 if (aq_ret)
1605 break;
1606 memset(add_list, 0, sizeof(*add_list));
1607 }
1608 }
1609 if (num_add) {
1610 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1611 add_list, num_add, NULL);
1612 num_add = 0;
1613 }
1614 kfree(add_list);
1615 add_list = NULL;
1616
1617 if (add_happened && (!aq_ret)) {
1618 /* do nothing */;
1619 } else if (add_happened && (aq_ret)) {
1620 dev_info(&pf->pdev->dev,
1621 "add filter failed, err %d, aq_err %d\n",
1622 aq_ret, pf->hw.aq.asq_last_status);
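			/* the firmware has run out of filter space; force
			 * promiscuous mode so traffic is not dropped
			 */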
1623 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1624 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1625 &vsi->state)) {
1626 promisc_forced_on = true;
1627 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1628 &vsi->state);
1629 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
1630 }
1631 }
1632 }
1633
1634 /* check for changes in promiscuous modes */
1635 if (changed_flags & IFF_ALLMULTI) {
1636 bool cur_multipromisc;
1637 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1638 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1639 vsi->seid,
1640 cur_multipromisc,
1641 NULL);
1642 if (aq_ret)
1643 dev_info(&pf->pdev->dev,
1644 "set multi promisc failed, err %d, aq_err %d\n",
1645 aq_ret, pf->hw.aq.asq_last_status);
1646 }
1647 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1648 bool cur_promisc;
1649 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1650 test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1651 &vsi->state));
1652 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
1653 vsi->seid,
1654 cur_promisc, NULL);
1655 if (aq_ret)
1656 dev_info(&pf->pdev->dev,
1657 "set uni promisc failed, err %d, aq_err %d\n",
1658 aq_ret, pf->hw.aq.asq_last_status);
1659 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
1660 vsi->seid,
1661 cur_promisc, NULL);
1662 if (aq_ret)
1663 dev_info(&pf->pdev->dev,
1664 "set brdcast promisc failed, err %d, aq_err %d\n",
1665 aq_ret, pf->hw.aq.asq_last_status);
1666 }
1667
1668 clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
1669 return 0;
1670}
1671
1672/**
1673 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
1674 * @pf: board private structure
1675 **/
1676static void i40e_sync_filters_subtask(struct i40e_pf *pf)
1677{
1678 int v;
1679
1680 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
1681 return;
1682 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
1683
1684 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
1685 if (pf->vsi[v] &&
1686 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
1687 i40e_sync_vsi_filters(pf->vsi[v]);
1688 }
1689}
1690
1691/**
1692 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
1693 * @netdev: network interface device structure
1694 * @new_mtu: new value for maximum frame size
1695 *
1696 * Returns 0 on success, negative on failure
1697 **/
1698static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
1699{
1700 struct i40e_netdev_priv *np = netdev_priv(netdev);
1701 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
1702 struct i40e_vsi *vsi = np->vsi;
1703
1704 /* MTU < 68 is an error and causes problems on some kernels */
1705 if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
1706 return -EINVAL;
1707
1708 netdev_info(netdev, "changing MTU from %d to %d\n",
1709 netdev->mtu, new_mtu);
1710 netdev->mtu = new_mtu;
1711 if (netif_running(netdev))
1712 i40e_vsi_reinit_locked(vsi);
1713
1714 return 0;
1715}
1716
1717/**
1718 * i40e_ioctl - Access the hwtstamp interface
1719 * @netdev: network interface device structure
1720 * @ifr: interface request data
1721 * @cmd: ioctl command
1722 **/
1723int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1724{
1725 struct i40e_netdev_priv *np = netdev_priv(netdev);
1726 struct i40e_pf *pf = np->vsi->back;
1727
1728 switch (cmd) {
1729 case SIOCGHWTSTAMP:
1730 return i40e_ptp_get_ts_config(pf, ifr);
1731 case SIOCSHWTSTAMP:
1732 return i40e_ptp_set_ts_config(pf, ifr);
1733 default:
1734 return -EOPNOTSUPP;
1735 }
1736}
1737
1738/**
1739 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
1740 * @vsi: the vsi being adjusted
1741 **/
1742void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
1743{
1744 struct i40e_vsi_context ctxt;
1745 i40e_status ret;
1746
1747 if ((vsi->info.valid_sections &
1748 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1749 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
1750 return; /* already enabled */
1751
1752 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1753 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1754 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1755
1756 ctxt.seid = vsi->seid;
1757 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1758 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1759 if (ret) {
1760 dev_info(&vsi->back->pdev->dev,
1761 "%s: update vsi failed, aq_err=%d\n",
1762 __func__, vsi->back->hw.aq.asq_last_status);
1763 }
1764}
1765
1766/**
1767 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
1768 * @vsi: the vsi being adjusted
1769 **/
1770void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
1771{
1772 struct i40e_vsi_context ctxt;
1773 i40e_status ret;
1774
1775 if ((vsi->info.valid_sections &
1776 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1777 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
1778 I40E_AQ_VSI_PVLAN_EMOD_MASK))
1779 return; /* already disabled */
1780
1781 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1782 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1783 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1784
1785 ctxt.seid = vsi->seid;
1786 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1787 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1788 if (ret) {
1789 dev_info(&vsi->back->pdev->dev,
1790 "%s: update vsi failed, aq_err=%d\n",
1791 __func__, vsi->back->hw.aq.asq_last_status);
1792 }
1793}
1794
1795/**
1796 * i40e_vlan_rx_register - Setup or shutdown vlan offload
1797 * @netdev: network interface to be adjusted
1798 * @features: netdev features to test if VLAN offload is enabled or not
1799 **/
1800static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
1801{
1802 struct i40e_netdev_priv *np = netdev_priv(netdev);
1803 struct i40e_vsi *vsi = np->vsi;
1804
1805 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1806 i40e_vlan_stripping_enable(vsi);
1807 else
1808 i40e_vlan_stripping_disable(vsi);
1809}
1810
1811/**
1812 * i40e_vsi_add_vlan - Add vsi membership for given vlan
1813 * @vsi: the vsi being configured
 1814 * @vid: vlan id to be added (0 = untagged only, -1 = any)
1815 **/
1816int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
1817{
1818 struct i40e_mac_filter *f, *add_f;
1819 bool is_netdev, is_vf;
41c445ff
JB
1820
1821 is_vf = (vsi->type == I40E_VSI_SRIOV);
1822 is_netdev = !!(vsi->netdev);
1823
1824 if (is_netdev) {
1825 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
1826 is_vf, is_netdev);
1827 if (!add_f) {
1828 dev_info(&vsi->back->pdev->dev,
1829 "Could not add vlan filter %d for %pM\n",
1830 vid, vsi->netdev->dev_addr);
1831 return -ENOMEM;
1832 }
1833 }
1834
1835 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1836 add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
1837 if (!add_f) {
1838 dev_info(&vsi->back->pdev->dev,
1839 "Could not add vlan filter %d for %pM\n",
1840 vid, f->macaddr);
1841 return -ENOMEM;
1842 }
1843 }
1844
41c445ff
JB
1845 /* Now if we add a vlan tag, make sure to check if it is the first
 1846 * tag (i.e. a "tag" of -1 already exists) and if so replace the -1 "tag"
 1847 * with 0, so we now accept untagged and specified tagged traffic
 1848 * (and not any tagged and untagged)
1849 */
1850 if (vid > 0) {
1851 if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
1852 I40E_VLAN_ANY,
1853 is_vf, is_netdev)) {
1854 i40e_del_filter(vsi, vsi->netdev->dev_addr,
1855 I40E_VLAN_ANY, is_vf, is_netdev);
1856 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
1857 is_vf, is_netdev);
1858 if (!add_f) {
1859 dev_info(&vsi->back->pdev->dev,
1860 "Could not add filter 0 for %pM\n",
1861 vsi->netdev->dev_addr);
1862 return -ENOMEM;
1863 }
1864 }
8d82a7c5 1865 }
41c445ff 1866
8d82a7c5
GR
1867 /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
1868 if (vid > 0 && !vsi->info.pvid) {
41c445ff
JB
1869 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1870 if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
1871 is_vf, is_netdev)) {
1872 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
1873 is_vf, is_netdev);
1874 add_f = i40e_add_filter(vsi, f->macaddr,
1875 0, is_vf, is_netdev);
1876 if (!add_f) {
1877 dev_info(&vsi->back->pdev->dev,
1878 "Could not add filter 0 for %pM\n",
1879 f->macaddr);
1880 return -ENOMEM;
1881 }
1882 }
1883 }
41c445ff
JB
1884 }
1885
80f6428f
ASJ
1886 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
1887 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
1888 return 0;
1889
1890 return i40e_sync_vsi_filters(vsi);
41c445ff
JB
1891}
1892
1893/**
1894 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
1895 * @vsi: the vsi being configured
 1896 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
078b5876
JB
1897 *
1898 * Return: 0 on success or negative otherwise
41c445ff
JB
1899 **/
1900int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
1901{
1902 struct net_device *netdev = vsi->netdev;
1903 struct i40e_mac_filter *f, *add_f;
1904 bool is_vf, is_netdev;
1905 int filter_count = 0;
41c445ff
JB
1906
1907 is_vf = (vsi->type == I40E_VSI_SRIOV);
1908 is_netdev = !!(netdev);
1909
1910 if (is_netdev)
1911 i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
1912
1913 list_for_each_entry(f, &vsi->mac_filter_list, list)
1914 i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
1915
41c445ff
JB
1916 /* go through all the filters for this VSI and if there is only
1917 * vid == 0 it means there are no other filters, so vid 0 must
1918 * be replaced with -1. This signifies that we should from now
1919 * on accept any traffic (with any tag present, or untagged)
1920 */
1921 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1922 if (is_netdev) {
1923 if (f->vlan &&
1924 ether_addr_equal(netdev->dev_addr, f->macaddr))
1925 filter_count++;
1926 }
1927
1928 if (f->vlan)
1929 filter_count++;
1930 }
1931
1932 if (!filter_count && is_netdev) {
1933 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
1934 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
1935 is_vf, is_netdev);
1936 if (!f) {
1937 dev_info(&vsi->back->pdev->dev,
1938 "Could not add filter %d for %pM\n",
1939 I40E_VLAN_ANY, netdev->dev_addr);
1940 return -ENOMEM;
1941 }
1942 }
1943
1944 if (!filter_count) {
1945 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1946 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
1947 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
1948 is_vf, is_netdev);
1949 if (!add_f) {
1950 dev_info(&vsi->back->pdev->dev,
1951 "Could not add filter %d for %pM\n",
1952 I40E_VLAN_ANY, f->macaddr);
1953 return -ENOMEM;
1954 }
1955 }
1956 }
1957
80f6428f
ASJ
1958 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
1959 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
1960 return 0;
1961
41c445ff
JB
1962 return i40e_sync_vsi_filters(vsi);
1963}
1964
1965/**
1966 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
1967 * @netdev: network interface to be adjusted
1968 * @vid: vlan id to be added
078b5876
JB
1969 *
1970 * net_device_ops implementation for adding vlan ids
41c445ff
JB
1971 **/
1972static int i40e_vlan_rx_add_vid(struct net_device *netdev,
1973 __always_unused __be16 proto, u16 vid)
1974{
1975 struct i40e_netdev_priv *np = netdev_priv(netdev);
1976 struct i40e_vsi *vsi = np->vsi;
078b5876 1977 int ret = 0;
41c445ff
JB
1978
1979 if (vid > 4095)
078b5876
JB
1980 return -EINVAL;
1981
1982 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
41c445ff 1983
6982d429
ASJ
1984 /* If the network stack called us with vid = 0 then
1985 * it is asking to receive priority tagged packets with
1986 * vlan id 0. Our HW receives them by default when configured
1987 * to receive untagged packets so there is no need to add an
1988 * extra filter for vlan 0 tagged packets.
41c445ff 1989 */
6982d429
ASJ
1990 if (vid)
1991 ret = i40e_vsi_add_vlan(vsi, vid);
41c445ff 1992
078b5876
JB
1993 if (!ret && (vid < VLAN_N_VID))
1994 set_bit(vid, vsi->active_vlans);
41c445ff 1995
078b5876 1996 return ret;
41c445ff
JB
1997}
1998
1999/**
2000 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2001 * @netdev: network interface to be adjusted
2002 * @vid: vlan id to be removed
078b5876 2003 *
fdfd943e 2004 * net_device_ops implementation for removing vlan ids
41c445ff
JB
2005 **/
2006static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2007 __always_unused __be16 proto, u16 vid)
2008{
2009 struct i40e_netdev_priv *np = netdev_priv(netdev);
2010 struct i40e_vsi *vsi = np->vsi;
2011
078b5876
JB
2012 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
2013
41c445ff
JB
2014 /* return code is ignored as there is nothing a user
2015 * can do about failure to remove and a log message was
078b5876 2016 * already printed from the other function
41c445ff
JB
2017 */
2018 i40e_vsi_kill_vlan(vsi, vid);
2019
2020 clear_bit(vid, vsi->active_vlans);
078b5876 2021
41c445ff
JB
2022 return 0;
2023}
2024
2025/**
2026 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2027 * @vsi: the vsi being brought back up
2028 **/
2029static void i40e_restore_vlan(struct i40e_vsi *vsi)
2030{
2031 u16 vid;
2032
2033 if (!vsi->netdev)
2034 return;
2035
2036 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2037
2038 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2039 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2040 vid);
2041}
2042
2043/**
2044 * i40e_vsi_add_pvid - Add pvid for the VSI
2045 * @vsi: the vsi being adjusted
2046 * @vid: the vlan id to set as a PVID
2047 **/
dcae29be 2048int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
41c445ff
JB
2049{
2050 struct i40e_vsi_context ctxt;
dcae29be 2051 i40e_status aq_ret;
41c445ff
JB
2052
2053 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2054 vsi->info.pvid = cpu_to_le16(vid);
6c12fcbf
GR
2055 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2056 I40E_AQ_VSI_PVLAN_INSERT_PVID |
b774c7dd 2057 I40E_AQ_VSI_PVLAN_EMOD_STR;
41c445ff
JB
2058
2059 ctxt.seid = vsi->seid;
2060 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
dcae29be
JB
2061 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2062 if (aq_ret) {
41c445ff
JB
2063 dev_info(&vsi->back->pdev->dev,
2064 "%s: update vsi failed, aq_err=%d\n",
2065 __func__, vsi->back->hw.aq.asq_last_status);
dcae29be 2066 return -ENOENT;
41c445ff
JB
2067 }
2068
dcae29be 2069 return 0;
41c445ff
JB
2070}
2071
2072/**
2073 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2074 * @vsi: the vsi being adjusted
2075 *
 2076 * Just disable VLAN stripping to put it back to normal
2077 **/
2078void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2079{
6c12fcbf
GR
2080 i40e_vlan_stripping_disable(vsi);
2081
41c445ff 2082 vsi->info.pvid = 0;
41c445ff
JB
2083}
2084
2085/**
2086 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2087 * @vsi: ptr to the VSI
2088 *
2089 * If this function returns with an error, then it's possible one or
2090 * more of the rings is populated (while the rest are not). It is the
 2091 * caller's duty to clean those orphaned rings.
2092 *
2093 * Return 0 on success, negative on failure
2094 **/
2095static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2096{
2097 int i, err = 0;
2098
2099 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
9f65e15b 2100 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
41c445ff
JB
2101
2102 return err;
2103}
2104
2105/**
2106 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2107 * @vsi: ptr to the VSI
2108 *
2109 * Free VSI's transmit software resources
2110 **/
2111static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2112{
2113 int i;
2114
8e9dca53
GR
2115 if (!vsi->tx_rings)
2116 return;
2117
41c445ff 2118 for (i = 0; i < vsi->num_queue_pairs; i++)
8e9dca53 2119 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
9f65e15b 2120 i40e_free_tx_resources(vsi->tx_rings[i]);
41c445ff
JB
2121}
2122
2123/**
2124 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2125 * @vsi: ptr to the VSI
2126 *
2127 * If this function returns with an error, then it's possible one or
2128 * more of the rings is populated (while the rest are not). It is the
 2129 * caller's duty to clean those orphaned rings.
2130 *
2131 * Return 0 on success, negative on failure
2132 **/
2133static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2134{
2135 int i, err = 0;
2136
2137 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
9f65e15b 2138 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
41c445ff
JB
2139 return err;
2140}
2141
2142/**
2143 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2144 * @vsi: ptr to the VSI
2145 *
2146 * Free all receive software resources
2147 **/
2148static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2149{
2150 int i;
2151
8e9dca53
GR
2152 if (!vsi->rx_rings)
2153 return;
2154
41c445ff 2155 for (i = 0; i < vsi->num_queue_pairs; i++)
8e9dca53 2156 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
9f65e15b 2157 i40e_free_rx_resources(vsi->rx_rings[i]);
41c445ff
JB
2158}
2159
2160/**
 2161 * i40e_configure_tx_ring - Configure a transmit ring context
2162 * @ring: The Tx ring to configure
2163 *
2164 * Configure the Tx descriptor ring in the HMC context.
2165 **/
2166static int i40e_configure_tx_ring(struct i40e_ring *ring)
2167{
2168 struct i40e_vsi *vsi = ring->vsi;
2169 u16 pf_q = vsi->base_queue + ring->queue_index;
2170 struct i40e_hw *hw = &vsi->back->hw;
2171 struct i40e_hmc_obj_txq tx_ctx;
2172 i40e_status err = 0;
2173 u32 qtx_ctl = 0;
2174
2175 /* some ATR related tx ring init */
60ea5f83 2176 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
41c445ff
JB
2177 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2178 ring->atr_count = 0;
2179 } else {
2180 ring->atr_sample_rate = 0;
2181 }
2182
2183 /* initialize XPS */
2184 if (ring->q_vector && ring->netdev &&
4e3b35b0 2185 vsi->tc_config.numtc <= 1 &&
41c445ff
JB
2186 !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2187 netif_set_xps_queue(ring->netdev,
2188 &ring->q_vector->affinity_mask,
2189 ring->queue_index);
2190
2191 /* clear the context structure first */
2192 memset(&tx_ctx, 0, sizeof(tx_ctx));
2193
2194 tx_ctx.new_context = 1;
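	/* the HMC context takes the ring base address in 128-byte units */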
2195 tx_ctx.base = (ring->dma / 128);
2196 tx_ctx.qlen = ring->count;
60ea5f83
JB
2197 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2198 I40E_FLAG_FD_ATR_ENABLED));
beb0dff1 2199 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
1943d8ba
JB
2200 /* FDIR VSI tx ring can still use RS bit and writebacks */
2201 if (vsi->type != I40E_VSI_FDIR)
2202 tx_ctx.head_wb_ena = 1;
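	/* the head write-back location sits just past the end of the descriptor ring */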
2203 tx_ctx.head_wb_addr = ring->dma +
2204 (ring->count * sizeof(struct i40e_tx_desc));
41c445ff
JB
2205
2206 /* As part of VSI creation/update, FW allocates certain
2207 * Tx arbitration queue sets for each TC enabled for
2208 * the VSI. The FW returns the handles to these queue
2209 * sets as part of the response buffer to Add VSI,
2210 * Update VSI, etc. AQ commands. It is expected that
2211 * these queue set handles be associated with the Tx
2212 * queues by the driver as part of the TX queue context
2213 * initialization. This has to be done regardless of
2214 * DCB as by default everything is mapped to TC0.
2215 */
2216 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2217 tx_ctx.rdylist_act = 0;
2218
2219 /* clear the context in the HMC */
2220 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2221 if (err) {
2222 dev_info(&vsi->back->pdev->dev,
2223 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2224 ring->queue_index, pf_q, err);
2225 return -ENOMEM;
2226 }
2227
2228 /* set the context in the HMC */
2229 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2230 if (err) {
2231 dev_info(&vsi->back->pdev->dev,
2232 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2233 ring->queue_index, pf_q, err);
2234 return -ENOMEM;
2235 }
2236
2237 /* Now associate this queue with this PCI function */
9d8bf547
SN
2238 if (vsi->type == I40E_VSI_VMDQ2)
2239 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2240 else
2241 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
13fd9774
SN
2242 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2243 I40E_QTX_CTL_PF_INDX_MASK);
41c445ff
JB
2244 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2245 i40e_flush(hw);
2246
2247 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
2248
 2249 /* cache the tail offset for easier writes later */
2250 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2251
2252 return 0;
2253}
2254
2255/**
2256 * i40e_configure_rx_ring - Configure a receive ring context
2257 * @ring: The Rx ring to configure
2258 *
2259 * Configure the Rx descriptor ring in the HMC context.
2260 **/
2261static int i40e_configure_rx_ring(struct i40e_ring *ring)
2262{
2263 struct i40e_vsi *vsi = ring->vsi;
2264 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2265 u16 pf_q = vsi->base_queue + ring->queue_index;
2266 struct i40e_hw *hw = &vsi->back->hw;
2267 struct i40e_hmc_obj_rxq rx_ctx;
2268 i40e_status err = 0;
2269
2270 ring->state = 0;
2271
2272 /* clear the context structure first */
2273 memset(&rx_ctx, 0, sizeof(rx_ctx));
2274
2275 ring->rx_buf_len = vsi->rx_buf_len;
2276 ring->rx_hdr_len = vsi->rx_hdr_len;
2277
2278 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2279 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
2280
2281 rx_ctx.base = (ring->dma / 128);
2282 rx_ctx.qlen = ring->count;
2283
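	/* dsize 0 selects 16-byte Rx descriptors, dsize 1 selects 32-byte descriptors */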
2284 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
2285 set_ring_16byte_desc_enabled(ring);
2286 rx_ctx.dsize = 0;
2287 } else {
2288 rx_ctx.dsize = 1;
2289 }
2290
2291 rx_ctx.dtype = vsi->dtype;
2292 if (vsi->dtype) {
2293 set_ring_ps_enabled(ring);
2294 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
2295 I40E_RX_SPLIT_IP |
2296 I40E_RX_SPLIT_TCP_UDP |
2297 I40E_RX_SPLIT_SCTP;
2298 } else {
2299 rx_ctx.hsplit_0 = 0;
2300 }
2301
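	/* cap the max receive frame at what a full buffer chain can hold */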
2302 rx_ctx.rxmax = min_t(u16, vsi->max_frame,
2303 (chain_len * ring->rx_buf_len));
2304 rx_ctx.tphrdesc_ena = 1;
2305 rx_ctx.tphwdesc_ena = 1;
2306 rx_ctx.tphdata_ena = 1;
2307 rx_ctx.tphhead_ena = 1;
7134f9ce
JB
2308 if (hw->revision_id == 0)
2309 rx_ctx.lrxqthresh = 0;
2310 else
2311 rx_ctx.lrxqthresh = 2;
41c445ff
JB
2312 rx_ctx.crcstrip = 1;
2313 rx_ctx.l2tsel = 1;
2314 rx_ctx.showiv = 1;
acb3676b
CS
2315 /* set the prefena field to 1 because the manual says to */
2316 rx_ctx.prefena = 1;
41c445ff
JB
2317
2318 /* clear the context in the HMC */
2319 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2320 if (err) {
2321 dev_info(&vsi->back->pdev->dev,
2322 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2323 ring->queue_index, pf_q, err);
2324 return -ENOMEM;
2325 }
2326
2327 /* set the context in the HMC */
2328 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2329 if (err) {
2330 dev_info(&vsi->back->pdev->dev,
2331 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2332 ring->queue_index, pf_q, err);
2333 return -ENOMEM;
2334 }
2335
2336 /* cache tail for quicker writes, and clear the reg before use */
2337 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2338 writel(0, ring->tail);
2339
2340 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
2341
2342 return 0;
2343}
2344
2345/**
2346 * i40e_vsi_configure_tx - Configure the VSI for Tx
2347 * @vsi: VSI structure describing this set of rings and resources
2348 *
2349 * Configure the Tx VSI for operation.
2350 **/
2351static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2352{
2353 int err = 0;
2354 u16 i;
2355
9f65e15b
AD
2356 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2357 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
41c445ff
JB
2358
2359 return err;
2360}
2361
2362/**
2363 * i40e_vsi_configure_rx - Configure the VSI for Rx
2364 * @vsi: the VSI being configured
2365 *
2366 * Configure the Rx VSI for operation.
2367 **/
2368static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2369{
2370 int err = 0;
2371 u16 i;
2372
2373 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2374 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
2375 + ETH_FCS_LEN + VLAN_HLEN;
2376 else
2377 vsi->max_frame = I40E_RXBUFFER_2048;
2378
2379 /* figure out correct receive buffer length */
2380 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
2381 I40E_FLAG_RX_PS_ENABLED)) {
2382 case I40E_FLAG_RX_1BUF_ENABLED:
2383 vsi->rx_hdr_len = 0;
2384 vsi->rx_buf_len = vsi->max_frame;
2385 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2386 break;
2387 case I40E_FLAG_RX_PS_ENABLED:
2388 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2389 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2390 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
2391 break;
2392 default:
2393 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2394 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2395 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
2396 break;
2397 }
2398
2399 /* round up for the chip's needs */
2400 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
2401 (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
2402 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
2403 (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
2404
2405 /* set up individual rings */
2406 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
9f65e15b 2407 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
41c445ff
JB
2408
2409 return err;
2410}
2411
2412/**
2413 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2414 * @vsi: ptr to the VSI
2415 **/
2416static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2417{
e7046ee1 2418 struct i40e_ring *tx_ring, *rx_ring;
41c445ff
JB
2419 u16 qoffset, qcount;
2420 int i, n;
2421
2422 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2423 return;
2424
2425 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2426 if (!(vsi->tc_config.enabled_tc & (1 << n)))
2427 continue;
2428
2429 qoffset = vsi->tc_config.tc_info[n].qoffset;
2430 qcount = vsi->tc_config.tc_info[n].qcount;
2431 for (i = qoffset; i < (qoffset + qcount); i++) {
e7046ee1
AA
2432 rx_ring = vsi->rx_rings[i];
2433 tx_ring = vsi->tx_rings[i];
41c445ff
JB
2434 rx_ring->dcb_tc = n;
2435 tx_ring->dcb_tc = n;
2436 }
2437 }
2438}
2439
2440/**
2441 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2442 * @vsi: ptr to the VSI
2443 **/
2444static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2445{
2446 if (vsi->netdev)
2447 i40e_set_rx_mode(vsi->netdev);
2448}
2449
17a73f6b
JG
2450/**
2451 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
2452 * @vsi: Pointer to the targeted VSI
2453 *
 2454 * This function replays the hlist, in which all the SB Flow Director
 2455 * filters were saved, back onto the hw.
2456 **/
2457static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
2458{
2459 struct i40e_fdir_filter *filter;
2460 struct i40e_pf *pf = vsi->back;
2461 struct hlist_node *node;
2462
55a5e60b
ASJ
2463 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2464 return;
2465
17a73f6b
JG
2466 hlist_for_each_entry_safe(filter, node,
2467 &pf->fdir_filter_list, fdir_node) {
2468 i40e_add_del_fdir(vsi, filter, true);
2469 }
2470}
2471
41c445ff
JB
2472/**
2473 * i40e_vsi_configure - Set up the VSI for action
2474 * @vsi: the VSI being configured
2475 **/
2476static int i40e_vsi_configure(struct i40e_vsi *vsi)
2477{
2478 int err;
2479
2480 i40e_set_vsi_rx_mode(vsi);
2481 i40e_restore_vlan(vsi);
2482 i40e_vsi_config_dcb_rings(vsi);
2483 err = i40e_vsi_configure_tx(vsi);
2484 if (!err)
2485 err = i40e_vsi_configure_rx(vsi);
2486
2487 return err;
2488}
2489
2490/**
2491 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
2492 * @vsi: the VSI being configured
2493 **/
2494static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2495{
2496 struct i40e_pf *pf = vsi->back;
2497 struct i40e_q_vector *q_vector;
2498 struct i40e_hw *hw = &pf->hw;
2499 u16 vector;
2500 int i, q;
2501 u32 val;
2502 u32 qp;
2503
2504 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
2505 * and PFINT_LNKLSTn registers, e.g.:
2506 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
2507 */
2508 qp = vsi->base_queue;
2509 vector = vsi->base_vector;
493fb300
AD
2510 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
2511 q_vector = vsi->q_vectors[i];
41c445ff
JB
2512 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2513 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2514 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
2515 q_vector->rx.itr);
2516 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2517 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2518 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
2519 q_vector->tx.itr);
2520
2521 /* Linked list for the queuepairs assigned to this vector */
2522 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
2523 for (q = 0; q < q_vector->num_ringpairs; q++) {
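			/* chain the causes Rx then Tx for each queue pair: the Rx
			 * cause points at its Tx partner and the Tx cause points
			 * at the next pair's Rx queue
			 */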
2524 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2525 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2526 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2527 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
2528 (I40E_QUEUE_TYPE_TX
2529 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2530
2531 wr32(hw, I40E_QINT_RQCTL(qp), val);
2532
2533 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2534 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2535 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2536 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
2537 (I40E_QUEUE_TYPE_RX
2538 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2539
2540 /* Terminate the linked list */
2541 if (q == (q_vector->num_ringpairs - 1))
2542 val |= (I40E_QUEUE_END_OF_LIST
2543 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2544
2545 wr32(hw, I40E_QINT_TQCTL(qp), val);
2546 qp++;
2547 }
2548 }
2549
2550 i40e_flush(hw);
2551}
2552
2553/**
2554 * i40e_enable_misc_int_causes - enable the non-queue interrupts
2555 * @hw: ptr to the hardware info
2556 **/
2557static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
2558{
2559 u32 val;
2560
2561 /* clear things first */
2562 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
2563 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
2564
2565 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2566 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2567 I40E_PFINT_ICR0_ENA_GRST_MASK |
2568 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2569 I40E_PFINT_ICR0_ENA_GPIO_MASK |
beb0dff1 2570 I40E_PFINT_ICR0_ENA_TIMESYNC_MASK |
41c445ff
JB
2571 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
2572 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2573 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2574
2575 wr32(hw, I40E_PFINT_ICR0_ENA, val);
2576
2577 /* SW_ITR_IDX = 0, but don't change INTENA */
84ed40e7
ASJ
2578 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2579 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
41c445ff
JB
2580
2581 /* OTHER_ITR_IDX = 0 */
2582 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2583}
2584
2585/**
2586 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
2587 * @vsi: the VSI being configured
2588 **/
2589static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2590{
493fb300 2591 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
41c445ff
JB
2592 struct i40e_pf *pf = vsi->back;
2593 struct i40e_hw *hw = &pf->hw;
2594 u32 val;
2595
2596 /* set the ITR configuration */
2597 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2598 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2599 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
2600 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2601 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2602 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
2603
2604 i40e_enable_misc_int_causes(hw);
2605
2606 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2607 wr32(hw, I40E_PFINT_LNKLST0, 0);
2608
f29eaa3d 2609 /* Associate the queue pair to the vector and enable the queue int */
41c445ff
JB
2610 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2611 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2612 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2613
2614 wr32(hw, I40E_QINT_RQCTL(0), val);
2615
2616 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2617 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2618 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2619
2620 wr32(hw, I40E_QINT_TQCTL(0), val);
2621 i40e_flush(hw);
2622}
2623
2ef28cfb
MW
2624/**
2625 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
2626 * @pf: board private structure
2627 **/
2628void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
2629{
2630 struct i40e_hw *hw = &pf->hw;
2631
2632 wr32(hw, I40E_PFINT_DYN_CTL0,
2633 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2634 i40e_flush(hw);
2635}
2636
41c445ff
JB
2637/**
2638 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2639 * @pf: board private structure
2640 **/
116a57d4 2641void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
41c445ff
JB
2642{
2643 struct i40e_hw *hw = &pf->hw;
2644 u32 val;
2645
2646 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2647 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2648 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2649
2650 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2651 i40e_flush(hw);
2652}
2653
2654/**
2655 * i40e_irq_dynamic_enable - Enable default interrupt generation settings
2656 * @vsi: pointer to a vsi
 2657 * @vector: the HW interrupt vector to enable
2658 **/
2659void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2660{
2661 struct i40e_pf *pf = vsi->back;
2662 struct i40e_hw *hw = &pf->hw;
2663 u32 val;
2664
2665 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2666 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2667 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2668 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
1022cb6c 2669 /* skip the flush */
41c445ff
JB
2670}
2671
2672/**
2673 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
2674 * @irq: interrupt number
2675 * @data: pointer to a q_vector
2676 **/
2677static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
2678{
2679 struct i40e_q_vector *q_vector = data;
2680
cd0b6fa6 2681 if (!q_vector->tx.ring && !q_vector->rx.ring)
41c445ff
JB
2682 return IRQ_HANDLED;
2683
2684 napi_schedule(&q_vector->napi);
2685
2686 return IRQ_HANDLED;
2687}
2688
41c445ff
JB
2689/**
2690 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
2691 * @vsi: the VSI being configured
2692 * @basename: name for the vector
2693 *
2694 * Allocates MSI-X vectors and requests interrupts from the kernel.
2695 **/
2696static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
2697{
2698 int q_vectors = vsi->num_q_vectors;
2699 struct i40e_pf *pf = vsi->back;
2700 int base = vsi->base_vector;
2701 int rx_int_idx = 0;
2702 int tx_int_idx = 0;
2703 int vector, err;
2704
2705 for (vector = 0; vector < q_vectors; vector++) {
493fb300 2706 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
41c445ff 2707
cd0b6fa6 2708 if (q_vector->tx.ring && q_vector->rx.ring) {
41c445ff
JB
2709 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2710 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2711 tx_int_idx++;
cd0b6fa6 2712 } else if (q_vector->rx.ring) {
41c445ff
JB
2713 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2714 "%s-%s-%d", basename, "rx", rx_int_idx++);
cd0b6fa6 2715 } else if (q_vector->tx.ring) {
41c445ff
JB
2716 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2717 "%s-%s-%d", basename, "tx", tx_int_idx++);
2718 } else {
2719 /* skip this unused q_vector */
2720 continue;
2721 }
2722 err = request_irq(pf->msix_entries[base + vector].vector,
2723 vsi->irq_handler,
2724 0,
2725 q_vector->name,
2726 q_vector);
2727 if (err) {
2728 dev_info(&pf->pdev->dev,
2729 "%s: request_irq failed, error: %d\n",
2730 __func__, err);
2731 goto free_queue_irqs;
2732 }
2733 /* assign the mask for this irq */
2734 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2735 &q_vector->affinity_mask);
2736 }
2737
2738 return 0;
2739
2740free_queue_irqs:
2741 while (vector) {
2742 vector--;
2743 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2744 NULL);
2745 free_irq(pf->msix_entries[base + vector].vector,
2746 &(vsi->q_vectors[vector]));
2747 }
2748 return err;
2749}
2750
2751/**
2752 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
2753 * @vsi: the VSI being un-configured
2754 **/
2755static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
2756{
2757 struct i40e_pf *pf = vsi->back;
2758 struct i40e_hw *hw = &pf->hw;
2759 int base = vsi->base_vector;
2760 int i;
2761
2762 for (i = 0; i < vsi->num_queue_pairs; i++) {
9f65e15b
AD
2763 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
2764 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
41c445ff
JB
2765 }
2766
2767 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2768 for (i = vsi->base_vector;
2769 i < (vsi->num_q_vectors + vsi->base_vector); i++)
2770 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
2771
2772 i40e_flush(hw);
2773 for (i = 0; i < vsi->num_q_vectors; i++)
2774 synchronize_irq(pf->msix_entries[i + base].vector);
2775 } else {
2776 /* Legacy and MSI mode - this stops all interrupt handling */
2777 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
2778 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
2779 i40e_flush(hw);
2780 synchronize_irq(pf->pdev->irq);
2781 }
2782}
2783
2784/**
2785 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
2786 * @vsi: the VSI being configured
2787 **/
2788static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
2789{
2790 struct i40e_pf *pf = vsi->back;
2791 int i;
2792
2793 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2794 for (i = vsi->base_vector;
2795 i < (vsi->num_q_vectors + vsi->base_vector); i++)
2796 i40e_irq_dynamic_enable(vsi, i);
2797 } else {
2798 i40e_irq_dynamic_enable_icr0(pf);
2799 }
2800
1022cb6c 2801 i40e_flush(&pf->hw);
41c445ff
JB
2802 return 0;
2803}
2804
2805/**
2806 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
2807 * @pf: board private structure
2808 **/
2809static void i40e_stop_misc_vector(struct i40e_pf *pf)
2810{
2811 /* Disable ICR 0 */
2812 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
2813 i40e_flush(&pf->hw);
2814}
2815
2816/**
2817 * i40e_intr - MSI/Legacy and non-queue interrupt handler
2818 * @irq: interrupt number
2819 * @data: pointer to a q_vector
2820 *
2821 * This is the handler used for all MSI/Legacy interrupts, and deals
2822 * with both queue and non-queue interrupts. This is also used in
2823 * MSIX mode to handle the non-queue interrupts.
2824 **/
2825static irqreturn_t i40e_intr(int irq, void *data)
2826{
2827 struct i40e_pf *pf = (struct i40e_pf *)data;
2828 struct i40e_hw *hw = &pf->hw;
5e823066 2829 irqreturn_t ret = IRQ_NONE;
41c445ff
JB
2830 u32 icr0, icr0_remaining;
2831 u32 val, ena_mask;
2832
2833 icr0 = rd32(hw, I40E_PFINT_ICR0);
5e823066 2834 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
41c445ff 2835
116a57d4
SN
2836 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
2837 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
5e823066 2838 goto enable_intr;
41c445ff 2839
cd92e72f
SN
2840 /* if interrupt but no bits showing, must be SWINT */
2841 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
2842 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
2843 pf->sw_int_count++;
2844
41c445ff
JB
2845 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
2846 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
2847
2848 /* temporarily disable queue cause for NAPI processing */
2849 u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
2850 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
2851 wr32(hw, I40E_QINT_RQCTL(0), qval);
2852
2853 qval = rd32(hw, I40E_QINT_TQCTL(0));
2854 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
2855 wr32(hw, I40E_QINT_TQCTL(0), qval);
41c445ff
JB
2856
2857 if (!test_bit(__I40E_DOWN, &pf->state))
493fb300 2858 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
41c445ff
JB
2859 }
2860
2861 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
2862 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2863 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
2864 }
2865
2866 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
2867 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2868 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
2869 }
2870
2871 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
2872 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
2873 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
2874 }
2875
2876 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
2877 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
2878 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
2879 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
2880 val = rd32(hw, I40E_GLGEN_RSTAT);
2881 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
2882 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
4eb3f768 2883 if (val == I40E_RESET_CORER) {
41c445ff 2884 pf->corer_count++;
4eb3f768 2885 } else if (val == I40E_RESET_GLOBR) {
41c445ff 2886 pf->globr_count++;
4eb3f768 2887 } else if (val == I40E_RESET_EMPR) {
41c445ff 2888 pf->empr_count++;
4eb3f768
SN
2889 set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
2890 }
41c445ff
JB
2891 }
2892
9c010ee0
ASJ
2893 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
2894 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
2895 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
2896 }
2897
beb0dff1
JK
2898 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
2899 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
2900
2901 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
cafa1fca 2902 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
beb0dff1 2903 i40e_ptp_tx_hwtstamp(pf);
beb0dff1 2904 }
beb0dff1
JK
2905 }
2906
41c445ff
JB
2907 /* If a critical error is pending we have no choice but to reset the
2908 * device.
2909 * Report and mask out any remaining unexpected interrupts.
2910 */
2911 icr0_remaining = icr0 & ena_mask;
2912 if (icr0_remaining) {
2913 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
2914 icr0_remaining);
9c010ee0 2915 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
41c445ff 2916 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
c0c28975 2917 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
9c010ee0
ASJ
2918 dev_info(&pf->pdev->dev, "device will be reset\n");
2919 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
2920 i40e_service_event_schedule(pf);
41c445ff
JB
2921 }
2922 ena_mask &= ~icr0_remaining;
2923 }
5e823066 2924 ret = IRQ_HANDLED;
41c445ff 2925
5e823066 2926enable_intr:
41c445ff
JB
2927 /* re-enable interrupt causes */
2928 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
41c445ff
JB
2929 if (!test_bit(__I40E_DOWN, &pf->state)) {
2930 i40e_service_event_schedule(pf);
2931 i40e_irq_dynamic_enable_icr0(pf);
2932 }
2933
5e823066 2934 return ret;
41c445ff
JB
2935}
2936
cbf61325
ASJ
2937/**
2938 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
2939 * @tx_ring: tx ring to clean
2940 * @budget: how many cleans we're allowed
2941 *
 2942 * Returns true if there's any budget left (i.e. the clean is finished)
2943 **/
2944static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
2945{
2946 struct i40e_vsi *vsi = tx_ring->vsi;
2947 u16 i = tx_ring->next_to_clean;
2948 struct i40e_tx_buffer *tx_buf;
2949 struct i40e_tx_desc *tx_desc;
2950
2951 tx_buf = &tx_ring->tx_bi[i];
2952 tx_desc = I40E_TX_DESC(tx_ring, i);
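	/* i is tracked as a negative offset from the end of the ring so
	 * that the wrap check below is a simple test for zero
	 */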
2953 i -= tx_ring->count;
2954
2955 do {
2956 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
2957
2958 /* if next_to_watch is not set then there is no work pending */
2959 if (!eop_desc)
2960 break;
2961
2962 /* prevent any other reads prior to eop_desc */
2963 read_barrier_depends();
2964
2965 /* if the descriptor isn't done, no work yet to do */
2966 if (!(eop_desc->cmd_type_offset_bsz &
2967 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
2968 break;
2969
2970 /* clear next_to_watch to prevent false hangs */
2971 tx_buf->next_to_watch = NULL;
2972
2973 /* unmap skb header data */
2974 dma_unmap_single(tx_ring->dev,
2975 dma_unmap_addr(tx_buf, dma),
2976 dma_unmap_len(tx_buf, len),
2977 DMA_TO_DEVICE);
2978
2979 dma_unmap_len_set(tx_buf, len, 0);
2980
2981
2982 /* move to the next desc and buffer to clean */
2983 tx_buf++;
2984 tx_desc++;
2985 i++;
2986 if (unlikely(!i)) {
2987 i -= tx_ring->count;
2988 tx_buf = tx_ring->tx_bi;
2989 tx_desc = I40E_TX_DESC(tx_ring, 0);
2990 }
2991
2992 /* update budget accounting */
2993 budget--;
2994 } while (likely(budget));
2995
2996 i += tx_ring->count;
2997 tx_ring->next_to_clean = i;
2998
2999 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
3000 i40e_irq_dynamic_enable(vsi,
3001 tx_ring->q_vector->v_idx + vsi->base_vector);
3002 }
3003 return budget > 0;
3004}
3005
3006/**
3007 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3008 * @irq: interrupt number
3009 * @data: pointer to a q_vector
3010 **/
3011static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3012{
3013 struct i40e_q_vector *q_vector = data;
3014 struct i40e_vsi *vsi;
3015
3016 if (!q_vector->tx.ring)
3017 return IRQ_HANDLED;
3018
3019 vsi = q_vector->tx.ring->vsi;
3020 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3021
3022 return IRQ_HANDLED;
3023}
3024
41c445ff 3025/**
cd0b6fa6 3026 * map_vector_to_qp - Assigns the queue pair to the vector
41c445ff
JB
3027 * @vsi: the VSI being configured
3028 * @v_idx: vector index
cd0b6fa6 3029 * @qp_idx: queue pair index
41c445ff 3030 **/
cd0b6fa6 3031static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
41c445ff 3032{
493fb300 3033 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
9f65e15b
AD
3034 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3035 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
41c445ff
JB
3036
3037 tx_ring->q_vector = q_vector;
cd0b6fa6
AD
3038 tx_ring->next = q_vector->tx.ring;
3039 q_vector->tx.ring = tx_ring;
41c445ff 3040 q_vector->tx.count++;
cd0b6fa6
AD
3041
3042 rx_ring->q_vector = q_vector;
3043 rx_ring->next = q_vector->rx.ring;
3044 q_vector->rx.ring = rx_ring;
3045 q_vector->rx.count++;
41c445ff
JB
3046}
3047
3048/**
3049 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3050 * @vsi: the VSI being configured
3051 *
3052 * This function maps descriptor rings to the queue-specific vectors
3053 * we were allotted through the MSI-X enabling code. Ideally, we'd have
3054 * one vector per queue pair, but on a constrained vector budget, we
3055 * group the queue pairs as "efficiently" as possible.
3056 **/
3057static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3058{
3059 int qp_remaining = vsi->num_queue_pairs;
3060 int q_vectors = vsi->num_q_vectors;
cd0b6fa6 3061 int num_ringpairs;
41c445ff
JB
3062 int v_start = 0;
3063 int qp_idx = 0;
3064
3065 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3066 * group them so there are multiple queues per vector.
3067 */
3068 for (; v_start < q_vectors && qp_remaining; v_start++) {
cd0b6fa6
AD
3069 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3070
3071 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3072
3073 q_vector->num_ringpairs = num_ringpairs;
3074
3075 q_vector->rx.count = 0;
3076 q_vector->tx.count = 0;
3077 q_vector->rx.ring = NULL;
3078 q_vector->tx.ring = NULL;
3079
3080 while (num_ringpairs--) {
3081 map_vector_to_qp(vsi, v_start, qp_idx);
3082 qp_idx++;
3083 qp_remaining--;
41c445ff
JB
3084 }
3085 }
3086}
3087
3088/**
3089 * i40e_vsi_request_irq - Request IRQ from the OS
3090 * @vsi: the VSI being configured
3091 * @basename: name for the vector
3092 **/
3093static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3094{
3095 struct i40e_pf *pf = vsi->back;
3096 int err;
3097
3098 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3099 err = i40e_vsi_request_irq_msix(vsi, basename);
3100 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3101 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3102 pf->misc_int_name, pf);
3103 else
3104 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3105 pf->misc_int_name, pf);
3106
3107 if (err)
3108 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3109
3110 return err;
3111}
3112
3113#ifdef CONFIG_NET_POLL_CONTROLLER
3114/**
 3115 * i40e_netpoll - A Polling 'interrupt' handler
3116 * @netdev: network interface device structure
3117 *
3118 * This is used by netconsole to send skbs without having to re-enable
3119 * interrupts. It's not called while the normal interrupt routine is executing.
3120 **/
3121static void i40e_netpoll(struct net_device *netdev)
3122{
3123 struct i40e_netdev_priv *np = netdev_priv(netdev);
3124 struct i40e_vsi *vsi = np->vsi;
3125 struct i40e_pf *pf = vsi->back;
3126 int i;
3127
3128 /* if interface is down do nothing */
3129 if (test_bit(__I40E_DOWN, &vsi->state))
3130 return;
3131
3132 pf->flags |= I40E_FLAG_IN_NETPOLL;
3133 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3134 for (i = 0; i < vsi->num_q_vectors; i++)
493fb300 3135 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
41c445ff
JB
3136 } else {
3137 i40e_intr(pf->pdev->irq, netdev);
3138 }
3139 pf->flags &= ~I40E_FLAG_IN_NETPOLL;
3140}
3141#endif
3142
3143/**
3144 * i40e_vsi_control_tx - Start or stop a VSI's rings
3145 * @vsi: the VSI being configured
3146 * @enable: start or stop the rings
3147 **/
3148static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3149{
3150 struct i40e_pf *pf = vsi->back;
3151 struct i40e_hw *hw = &pf->hw;
3152 int i, j, pf_q;
3153 u32 tx_reg;
3154
3155 pf_q = vsi->base_queue;
3156 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
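		/* wait for any previous enable/disable request to finish
		 * (QENA_REQ and QENA_STAT agree) before changing the queue
		 */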
6c5ef620 3157 for (j = 0; j < 50; j++) {
41c445ff 3158 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
6c5ef620
MW
3159 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
3160 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
3161 break;
3162 usleep_range(1000, 2000);
3163 }
fda972f6 3164 /* Skip if the queue is already in the requested state */
7c122007 3165 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
fda972f6 3166 continue;
41c445ff
JB
3167
3168 /* turn on/off the queue */
c5c9eb9e
SN
3169 if (enable) {
3170 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
6c5ef620 3171 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
c5c9eb9e 3172 } else {
41c445ff 3173 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
c5c9eb9e 3174 }
41c445ff
JB
3175
3176 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
3177
3178 /* wait for the change to finish */
3179 for (j = 0; j < 10; j++) {
3180 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
7c122007
CS
3181 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3182 break;
41c445ff
JB
3183
3184 udelay(10);
3185 }
3186 if (j >= 10) {
3187 dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n",
3188 pf_q, (enable ? "en" : "dis"));
3189 return -ETIMEDOUT;
3190 }
3191 }
3192
7134f9ce
JB
3193 if (hw->revision_id == 0)
3194 mdelay(50);
3195
41c445ff
JB
3196 return 0;
3197}
3198
3199/**
3200 * i40e_vsi_control_rx - Start or stop a VSI's rings
3201 * @vsi: the VSI being configured
3202 * @enable: start or stop the rings
3203 **/
3204static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3205{
3206 struct i40e_pf *pf = vsi->back;
3207 struct i40e_hw *hw = &pf->hw;
3208 int i, j, pf_q;
3209 u32 rx_reg;
3210
3211 pf_q = vsi->base_queue;
3212 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
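		/* wait for any previous enable/disable request to finish
		 * (QENA_REQ and QENA_STAT agree) before changing the queue
		 */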
6c5ef620 3213 for (j = 0; j < 50; j++) {
41c445ff 3214 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
6c5ef620
MW
3215 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3216 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3217 break;
3218 usleep_range(1000, 2000);
3219 }
41c445ff 3220
7c122007
CS
3221 /* Skip if the queue is already in the requested state */
3222 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3223 continue;
41c445ff
JB
3224
3225 /* turn on/off the queue */
3226 if (enable)
6c5ef620 3227 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
41c445ff 3228 else
6c5ef620 3229 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
41c445ff
JB
3230 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3231
3232 /* wait for the change to finish */
3233 for (j = 0; j < 10; j++) {
3234 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3235
7c122007
CS
3236 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3237 break;
41c445ff
JB
3238
3239 udelay(10);
3240 }
3241 if (j >= 10) {
3242 dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n",
3243 pf_q, (enable ? "en" : "dis"));
3244 return -ETIMEDOUT;
3245 }
3246 }
3247
3248 return 0;
3249}
3250
3251/**
3252 * i40e_vsi_control_rings - Start or stop a VSI's rings
3253 * @vsi: the VSI being configured
 3254 * @request: start or stop the rings
3255 **/
fc18eaa0 3256int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
41c445ff 3257{
3b867b28 3258 int ret = 0;
41c445ff
JB
3259
3260 /* do rx first for enable and last for disable */
3261 if (request) {
3262 ret = i40e_vsi_control_rx(vsi, request);
3263 if (ret)
3264 return ret;
3265 ret = i40e_vsi_control_tx(vsi, request);
3266 } else {
3b867b28
ASJ
3267 /* Ignore return value, we need to shutdown whatever we can */
3268 i40e_vsi_control_tx(vsi, request);
3269 i40e_vsi_control_rx(vsi, request);
41c445ff
JB
3270 }
3271
3272 return ret;
3273}
3274
3275/**
3276 * i40e_vsi_free_irq - Free the irq association with the OS
3277 * @vsi: the VSI being configured
3278 **/
3279static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3280{
3281 struct i40e_pf *pf = vsi->back;
3282 struct i40e_hw *hw = &pf->hw;
3283 int base = vsi->base_vector;
3284 u32 val, qp;
3285 int i;
3286
3287 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3288 if (!vsi->q_vectors)
3289 return;
3290
3291 for (i = 0; i < vsi->num_q_vectors; i++) {
3292 u16 vector = i + base;
3293
3294 /* free only the irqs that were actually requested */
78681b1f
SN
3295 if (!vsi->q_vectors[i] ||
3296 !vsi->q_vectors[i]->num_ringpairs)
41c445ff
JB
3297 continue;
3298
3299 /* clear the affinity_mask in the IRQ descriptor */
3300 irq_set_affinity_hint(pf->msix_entries[vector].vector,
3301 NULL);
3302 free_irq(pf->msix_entries[vector].vector,
493fb300 3303 vsi->q_vectors[i]);
41c445ff
JB
3304
3305 /* Tear down the interrupt queue link list
3306 *
3307 * We know that they come in pairs and always
3308 * the Rx first, then the Tx. To clear the
3309 * link list, stick the EOL value into the
3310 * next_q field of the registers.
3311 */
3312 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
3313 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3314 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3315 val |= I40E_QUEUE_END_OF_LIST
3316 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3317 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
3318
3319 while (qp != I40E_QUEUE_END_OF_LIST) {
3320 u32 next;
3321
3322 val = rd32(hw, I40E_QINT_RQCTL(qp));
3323
3324 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3325 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3326 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3327 I40E_QINT_RQCTL_INTEVENT_MASK);
3328
3329 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3330 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3331
3332 wr32(hw, I40E_QINT_RQCTL(qp), val);
3333
3334 val = rd32(hw, I40E_QINT_TQCTL(qp));
3335
3336 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
3337 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
3338
3339 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3340 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3341 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3342 I40E_QINT_TQCTL_INTEVENT_MASK);
3343
3344 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3345 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3346
3347 wr32(hw, I40E_QINT_TQCTL(qp), val);
3348 qp = next;
3349 }
3350 }
3351 } else {
3352 free_irq(pf->pdev->irq, pf);
3353
3354 val = rd32(hw, I40E_PFINT_LNKLST0);
3355 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3356 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3357 val |= I40E_QUEUE_END_OF_LIST
3358 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
3359 wr32(hw, I40E_PFINT_LNKLST0, val);
3360
3361 val = rd32(hw, I40E_QINT_RQCTL(qp));
3362 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3363 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3364 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3365 I40E_QINT_RQCTL_INTEVENT_MASK);
3366
3367 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3368 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3369
3370 wr32(hw, I40E_QINT_RQCTL(qp), val);
3371
3372 val = rd32(hw, I40E_QINT_TQCTL(qp));
3373
3374 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3375 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3376 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3377 I40E_QINT_TQCTL_INTEVENT_MASK);
3378
3379 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3380 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3381
3382 wr32(hw, I40E_QINT_TQCTL(qp), val);
3383 }
3384}
3385
493fb300
AD
3386/**
3387 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
3388 * @vsi: the VSI being configured
3389 * @v_idx: Index of vector to be freed
3390 *
3391 * This function frees the memory allocated to the q_vector. In addition if
3392 * NAPI is enabled it will delete any references to the NAPI struct prior
3393 * to freeing the q_vector.
3394 **/
3395static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
3396{
3397 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
cd0b6fa6 3398 struct i40e_ring *ring;
493fb300
AD
3399
3400 if (!q_vector)
3401 return;
3402
3403 /* disassociate q_vector from rings */
cd0b6fa6
AD
3404 i40e_for_each_ring(ring, q_vector->tx)
3405 ring->q_vector = NULL;
3406
3407 i40e_for_each_ring(ring, q_vector->rx)
3408 ring->q_vector = NULL;
493fb300
AD
3409
3410 /* only VSI w/ an associated netdev is set up w/ NAPI */
3411 if (vsi->netdev)
3412 netif_napi_del(&q_vector->napi);
3413
3414 vsi->q_vectors[v_idx] = NULL;
3415
3416 kfree_rcu(q_vector, rcu);
3417}
3418
41c445ff
JB
3419/**
3420 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3421 * @vsi: the VSI being un-configured
3422 *
3423 * This frees the memory allocated to the q_vectors and
3424 * deletes references to the NAPI struct.
3425 **/
3426static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3427{
3428 int v_idx;
3429
493fb300
AD
3430 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
3431 i40e_free_q_vector(vsi, v_idx);
41c445ff
JB
3432}
3433
3434/**
3435 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
3436 * @pf: board private structure
3437 **/
3438static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
3439{
3440 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
3441 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3442 pci_disable_msix(pf->pdev);
3443 kfree(pf->msix_entries);
3444 pf->msix_entries = NULL;
3445 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
3446 pci_disable_msi(pf->pdev);
3447 }
3448 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
3449}
3450
3451/**
3452 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
3453 * @pf: board private structure
3454 *
3455 * We go through and clear interrupt specific resources and reset the structure
3456 * to pre-load conditions
3457 **/
3458static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3459{
3460 int i;
3461
3462 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3463 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
3464 if (pf->vsi[i])
3465 i40e_vsi_free_q_vectors(pf->vsi[i]);
3466 i40e_reset_interrupt_capability(pf);
3467}
3468
3469/**
3470 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3471 * @vsi: the VSI being configured
3472 **/
3473static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3474{
3475 int q_idx;
3476
3477 if (!vsi->netdev)
3478 return;
3479
3480 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
493fb300 3481 napi_enable(&vsi->q_vectors[q_idx]->napi);
41c445ff
JB
3482}
3483
3484/**
3485 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3486 * @vsi: the VSI being configured
3487 **/
3488static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3489{
3490 int q_idx;
3491
3492 if (!vsi->netdev)
3493 return;
3494
3495 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
493fb300 3496 napi_disable(&vsi->q_vectors[q_idx]->napi);
41c445ff
JB
3497}
3498
90ef8d47
SN
3499/**
3500 * i40e_vsi_close - Shut down a VSI
3501 * @vsi: the vsi to be quelled
3502 **/
3503static void i40e_vsi_close(struct i40e_vsi *vsi)
3504{
3505 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
3506 i40e_down(vsi);
3507 i40e_vsi_free_irq(vsi);
3508 i40e_vsi_free_tx_resources(vsi);
3509 i40e_vsi_free_rx_resources(vsi);
3510}
3511
41c445ff
JB
3512/**
3513 * i40e_quiesce_vsi - Pause a given VSI
3514 * @vsi: the VSI being paused
3515 **/
3516static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
3517{
3518 if (test_bit(__I40E_DOWN, &vsi->state))
3519 return;
3520
3521 set_bit(__I40E_NEEDS_RESTART, &vsi->state);
3522 if (vsi->netdev && netif_running(vsi->netdev)) {
3523 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3524 } else {
90ef8d47 3525 i40e_vsi_close(vsi);
41c445ff
JB
3526 }
3527}
3528
3529/**
3530 * i40e_unquiesce_vsi - Resume a given VSI
3531 * @vsi: the VSI being resumed
3532 **/
3533static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
3534{
3535 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
3536 return;
3537
3538 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
3539 if (vsi->netdev && netif_running(vsi->netdev))
3540 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3541 else
8276f757 3542 i40e_vsi_open(vsi); /* this clears the DOWN bit */
41c445ff
JB
3543}
3544
3545/**
3546 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
3547 * @pf: the PF
3548 **/
3549static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
3550{
3551 int v;
3552
3553 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3554 if (pf->vsi[v])
3555 i40e_quiesce_vsi(pf->vsi[v]);
3556 }
3557}
3558
3559/**
3560 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
3561 * @pf: the PF
3562 **/
3563static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3564{
3565 int v;
3566
3567 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3568 if (pf->vsi[v])
3569 i40e_unquiesce_vsi(pf->vsi[v]);
3570 }
3571}
3572
3573/**
3574 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
3575 * @dcbcfg: the corresponding DCBx configuration structure
3576 *
3577 * Return the number of TCs from given DCBx configuration
3578 **/
3579static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3580{
078b5876
JB
3581 u8 num_tc = 0;
3582 int i;
41c445ff
JB
3583
3584 /* Scan the ETS Config Priority Table to find
3585 * traffic class enabled for a given priority
3586 * and use the traffic class index to get the
3587 * number of traffic classes enabled
3588 */
3589 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
3590 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
3591 num_tc = dcbcfg->etscfg.prioritytable[i];
3592 }
3593
3594 /* Traffic class index starts from zero so
3595 * increment to return the actual count
3596 */
078b5876 3597 return num_tc + 1;
41c445ff
JB
3598}
3599
3600/**
3601 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
3602 * @dcbcfg: the corresponding DCBx configuration structure
3603 *
3604 * Query the current DCB configuration and return the number of
3605 * traffic classes enabled from the given DCBX config
3606 **/
3607static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
3608{
3609 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
3610 u8 enabled_tc = 1;
3611 u8 i;
3612
3613 for (i = 0; i < num_tc; i++)
3614 enabled_tc |= 1 << i;
3615
3616 return enabled_tc;
3617}
3618
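/* Illustrative sketch (not part of the driver): a hypothetical helper
 * showing how the two lookups above combine.  With an ETS priority table
 * that references TC0, TC1 and TC2, i40e_dcb_get_num_tc() reports 3 and
 * i40e_dcb_get_enabled_tc() reports the bitmap 0x7 (TC0 | TC1 | TC2).
 * The function name and the pr_info() output exist only for illustration.
 */
static void i40e_dcb_tc_map_example(void)
{
	struct i40e_dcbx_config cfg = {0};

	/* user priorities 2/3 map to TC1, priority 4 maps to TC2 */
	cfg.etscfg.prioritytable[2] = 1;
	cfg.etscfg.prioritytable[3] = 1;
	cfg.etscfg.prioritytable[4] = 2;

	pr_info("num_tc=%d enabled_tc=%#x\n",
		i40e_dcb_get_num_tc(&cfg),
		i40e_dcb_get_enabled_tc(&cfg));
}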
3619/**
3620 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
3621 * @pf: PF being queried
3622 *
3623 * Return number of traffic classes enabled for the given PF
3624 **/
3625static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
3626{
3627 struct i40e_hw *hw = &pf->hw;
3628 u8 i, enabled_tc;
3629 u8 num_tc = 0;
3630 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
3631
3632 /* If DCB is not enabled then always in single TC */
3633 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3634 return 1;
3635
3636	/* MFP mode: return count of enabled TCs for this PF */
3637 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
3638 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3639 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3640 if (enabled_tc & (1 << i))
3641 num_tc++;
3642 }
3643 return num_tc;
3644 }
3645
3646 /* SFP mode will be enabled for all TCs on port */
3647 return i40e_dcb_get_num_tc(dcbcfg);
3648}
3649
3650/**
3651 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
3652 * @pf: PF being queried
3653 *
3654 * Return a bitmap for first enabled traffic class for this PF.
3655 **/
3656static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
3657{
3658 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3659 u8 i = 0;
3660
3661 if (!enabled_tc)
3662 return 0x1; /* TC0 */
3663
3664 /* Find the first enabled TC */
3665 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3666 if (enabled_tc & (1 << i))
3667 break;
3668 }
3669
3670 return 1 << i;
3671}
3672
3673/**
3674 * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
3675 * @pf: PF being queried
3676 *
3677 * Return a bitmap for enabled traffic classes for this PF.
3678 **/
3679static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
3680{
3681 /* If DCB is not enabled for this PF then just return default TC */
3682 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3683 return i40e_pf_get_default_tc(pf);
3684
3685 /* MFP mode will have enabled TCs set by FW */
3686 if (pf->flags & I40E_FLAG_MFP_ENABLED)
3687 return pf->hw.func_caps.enabled_tcmap;
3688
3689 /* SFP mode we want PF to be enabled for all TCs */
3690 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
3691}
3692
3693/**
3694 * i40e_vsi_get_bw_info - Query VSI BW Information
3695 * @vsi: the VSI being queried
3696 *
3697 * Returns 0 on success, negative value on failure
3698 **/
3699static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3700{
3701 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
3702 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
3703 struct i40e_pf *pf = vsi->back;
3704 struct i40e_hw *hw = &pf->hw;
dcae29be 3705 i40e_status aq_ret;
41c445ff 3706 u32 tc_bw_max;
41c445ff
JB
3707 int i;
3708
3709 /* Get the VSI level BW configuration */
dcae29be
JB
3710 aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
3711 if (aq_ret) {
41c445ff
JB
3712 dev_info(&pf->pdev->dev,
3713 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
3714 aq_ret, pf->hw.aq.asq_last_status);
3715 return -EINVAL;
41c445ff
JB
3716 }
3717
3718 /* Get the VSI level BW configuration per TC */
dcae29be 3719 aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
6838b535 3720 NULL);
dcae29be 3721 if (aq_ret) {
41c445ff
JB
3722 dev_info(&pf->pdev->dev,
3723 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
3724 aq_ret, pf->hw.aq.asq_last_status);
3725 return -EINVAL;
41c445ff
JB
3726 }
3727
3728 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
3729 dev_info(&pf->pdev->dev,
3730 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
3731 bw_config.tc_valid_bits,
3732 bw_ets_config.tc_valid_bits);
3733 /* Still continuing */
3734 }
3735
3736 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
3737 vsi->bw_max_quanta = bw_config.max_bw;
3738 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
3739 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
3740 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3741 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
3742 vsi->bw_ets_limit_credits[i] =
3743 le16_to_cpu(bw_ets_config.credits[i]);
3744 /* 3 bits out of 4 for each TC */
3745 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
3746 }
078b5876 3747
dcae29be 3748 return 0;
41c445ff
JB
3749}
3750
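/* Worked example (a sketch, the value is hypothetical): the AQ reply packs
 * one 4-bit field per TC into tc_bw_max, but only 3 of those bits are
 * meaningful, which is why the loop above masks with 0x7.  For
 * tc_bw_max == 0x5321 the max quanta come out as 1, 2, 3 and 5 for
 * TC0..TC3.  The helper name is made up for illustration.
 */
static void i40e_bw_max_quanta_example(void)
{
	u32 tc_bw_max = 0x5321;	/* hypothetical value from the AQ */
	int i;

	for (i = 0; i < 4; i++)
		pr_info("TC%d max quanta %u\n",
			i, (tc_bw_max >> (i * 4)) & 0x7);
}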
3751/**
3752 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
3753 * @vsi: the VSI being configured
3754 * @enabled_tc: TC bitmap
3755 * @bw_credits: BW shared credits per TC
3756 *
3757 * Returns 0 on success, negative value on failure
3758 **/
dcae29be 3759static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
3760 u8 *bw_share)
3761{
3762 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
dcae29be
JB
3763 i40e_status aq_ret;
3764 int i;
41c445ff
JB
3765
3766 bw_data.tc_valid_bits = enabled_tc;
3767 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3768 bw_data.tc_bw_credits[i] = bw_share[i];
3769
dcae29be
JB
3770 aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
3771 NULL);
3772 if (aq_ret) {
41c445ff 3773 dev_info(&vsi->back->pdev->dev,
3774 "AQ command Config VSI BW allocation per TC failed = %d\n",
3775 vsi->back->hw.aq.asq_last_status);
dcae29be 3776 return -EINVAL;
41c445ff
JB
3777 }
3778
3779 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3780 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
3781
dcae29be 3782 return 0;
41c445ff
JB
3783}
3784
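/* Usage sketch (hypothetical caller, not part of the driver): give TC0
 * and TC1 equal relative bandwidth credits on a VSI.  The enabled_tc
 * bitmap 0x3 and the bw_share contents mirror what i40e_vsi_config_tc()
 * below passes in; the helper name is made up for illustration.
 */
static int i40e_vsi_equal_bw_example(struct i40e_vsi *vsi)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {1, 1};	/* TC0 and TC1 */

	return i40e_vsi_configure_bw_alloc(vsi, 0x3, bw_share);
}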
3785/**
3786 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
3787 * @vsi: the VSI being configured
3788 * @enabled_tc: TC map to be enabled
3789 *
3790 **/
3791static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
3792{
3793 struct net_device *netdev = vsi->netdev;
3794 struct i40e_pf *pf = vsi->back;
3795 struct i40e_hw *hw = &pf->hw;
3796 u8 netdev_tc = 0;
3797 int i;
3798 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
3799
3800 if (!netdev)
3801 return;
3802
3803 if (!enabled_tc) {
3804 netdev_reset_tc(netdev);
3805 return;
3806 }
3807
3808 /* Set up actual enabled TCs on the VSI */
3809 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
3810 return;
3811
3812 /* set per TC queues for the VSI */
3813 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3814 /* Only set TC queues for enabled tcs
3815 *
3816 * e.g. For a VSI that has TC0 and TC3 enabled the
3817 * enabled_tc bitmap would be 0x00001001; the driver
3818 * will set the numtc for netdev as 2 that will be
3819 * referenced by the netdev layer as TC 0 and 1.
3820 */
3821 if (vsi->tc_config.enabled_tc & (1 << i))
3822 netdev_set_tc_queue(netdev,
3823 vsi->tc_config.tc_info[i].netdev_tc,
3824 vsi->tc_config.tc_info[i].qcount,
3825 vsi->tc_config.tc_info[i].qoffset);
3826 }
3827
3828 /* Assign UP2TC map for the VSI */
3829 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
3830 /* Get the actual TC# for the UP */
3831 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
3832 /* Get the mapped netdev TC# for the UP */
3833 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
3834 netdev_set_prio_tc_map(netdev, i, netdev_tc);
3835 }
3836}
3837
3838/**
3839 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
3840 * @vsi: the VSI being configured
3841 * @ctxt: the ctxt buffer returned from AQ VSI update param command
3842 **/
3843static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
3844 struct i40e_vsi_context *ctxt)
3845{
3846	/* copy just the sections touched, not the entire info,
3847 * since not all sections are valid as returned by
3848 * update vsi params
3849 */
3850 vsi->info.mapping_flags = ctxt->info.mapping_flags;
3851 memcpy(&vsi->info.queue_mapping,
3852 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
3853 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
3854 sizeof(vsi->info.tc_mapping));
3855}
3856
3857/**
3858 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
3859 * @vsi: VSI to be configured
3860 * @enabled_tc: TC bitmap
3861 *
3862 * This configures a particular VSI for TCs that are mapped to the
3863 * given TC bitmap. It uses default bandwidth share for TCs across
3864 * VSIs to configure TC for a particular VSI.
3865 *
3866 * NOTE:
3867 * It is expected that the VSI queues have been quiesced before calling
3868 * this function.
3869 **/
3870static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
3871{
3872 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
3873 struct i40e_vsi_context ctxt;
3874 int ret = 0;
3875 int i;
3876
3877 /* Check if enabled_tc is same as existing or new TCs */
3878 if (vsi->tc_config.enabled_tc == enabled_tc)
3879 return ret;
3880
3881 /* Enable ETS TCs with equal BW Share for now across all VSIs */
3882 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3883 if (enabled_tc & (1 << i))
3884 bw_share[i] = 1;
3885 }
3886
3887 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
3888 if (ret) {
3889 dev_info(&vsi->back->pdev->dev,
3890 "Failed configuring TC map %d for VSI %d\n",
3891 enabled_tc, vsi->seid);
3892 goto out;
3893 }
3894
3895 /* Update Queue Pairs Mapping for currently enabled UPs */
3896 ctxt.seid = vsi->seid;
3897 ctxt.pf_num = vsi->back->hw.pf_id;
3898 ctxt.vf_num = 0;
3899 ctxt.uplink_seid = vsi->uplink_seid;
3900 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3901 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
3902
3903 /* Update the VSI after updating the VSI queue-mapping information */
3904 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3905 if (ret) {
3906 dev_info(&vsi->back->pdev->dev,
3907 "update vsi failed, aq_err=%d\n",
3908 vsi->back->hw.aq.asq_last_status);
3909 goto out;
3910 }
3911 /* update the local VSI info with updated queue map */
3912 i40e_vsi_update_queue_map(vsi, &ctxt);
3913 vsi->info.valid_sections = 0;
3914
3915 /* Update current VSI BW information */
3916 ret = i40e_vsi_get_bw_info(vsi);
3917 if (ret) {
3918 dev_info(&vsi->back->pdev->dev,
3919 "Failed updating vsi bw info, aq_err=%d\n",
3920 vsi->back->hw.aq.asq_last_status);
3921 goto out;
3922 }
3923
3924 /* Update the netdev TC setup */
3925 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
3926out:
3927 return ret;
3928}
3929
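/* Caller sketch (hypothetical): move a running VSI to two traffic
 * classes.  Quiescing first follows the NOTE above; this is essentially
 * what the ndo_setup_tc handler (i40e_setup_tc() below) does for a
 * user-requested TC count of 2.  The helper name is illustrative only.
 */
static int i40e_vsi_two_tc_example(struct i40e_vsi *vsi)
{
	int ret;

	i40e_quiesce_vsi(vsi);
	ret = i40e_vsi_config_tc(vsi, 0x3);	/* bitmap: TC0 | TC1 */
	i40e_unquiesce_vsi(vsi);

	return ret;
}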
4e3b35b0
NP
3930/**
3931 * i40e_veb_config_tc - Configure TCs for given VEB
3932 * @veb: given VEB
3933 * @enabled_tc: TC bitmap
3934 *
3935 * Configures given TC bitmap for VEB (switching) element
3936 **/
3937int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
3938{
3939 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
3940 struct i40e_pf *pf = veb->pf;
3941 int ret = 0;
3942 int i;
3943
3944 /* No TCs or already enabled TCs just return */
3945 if (!enabled_tc || veb->enabled_tc == enabled_tc)
3946 return ret;
3947
3948 bw_data.tc_valid_bits = enabled_tc;
3949 /* bw_data.absolute_credits is not set (relative) */
3950
3951 /* Enable ETS TCs with equal BW Share for now */
3952 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3953 if (enabled_tc & (1 << i))
3954 bw_data.tc_bw_share_credits[i] = 1;
3955 }
3956
3957 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
3958 &bw_data, NULL);
3959 if (ret) {
3960 dev_info(&pf->pdev->dev,
3961 "veb bw config failed, aq_err=%d\n",
3962 pf->hw.aq.asq_last_status);
3963 goto out;
3964 }
3965
3966 /* Update the BW information */
3967 ret = i40e_veb_get_bw_info(veb);
3968 if (ret) {
3969 dev_info(&pf->pdev->dev,
3970 "Failed getting veb bw config, aq_err=%d\n",
3971 pf->hw.aq.asq_last_status);
3972 }
3973
3974out:
3975 return ret;
3976}
3977
3978#ifdef CONFIG_I40E_DCB
3979/**
3980 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
3981 * @pf: PF struct
3982 *
3983 * Reconfigure VEB/VSIs on a given PF; it is assumed that
3984 * the caller has already quiesced all the VSIs before calling
3985 * this function
3986 **/
3987static void i40e_dcb_reconfigure(struct i40e_pf *pf)
3988{
3989 u8 tc_map = 0;
3990 int ret;
3991 u8 v;
3992
3993 /* Enable the TCs available on PF to all VEBs */
3994 tc_map = i40e_pf_get_tc_map(pf);
3995 for (v = 0; v < I40E_MAX_VEB; v++) {
3996 if (!pf->veb[v])
3997 continue;
3998 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
3999 if (ret) {
4000 dev_info(&pf->pdev->dev,
4001 "Failed configuring TC for VEB seid=%d\n",
4002 pf->veb[v]->seid);
4003			/* Will try to configure as many components as possible */
4004 }
4005 }
4006
4007 /* Update each VSI */
4008 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4009 if (!pf->vsi[v])
4010 continue;
4011
4012 /* - Enable all TCs for the LAN VSI
4013 * - For all others keep them at TC0 for now
4014 */
4015 if (v == pf->lan_vsi)
4016 tc_map = i40e_pf_get_tc_map(pf);
4017 else
4018 tc_map = i40e_pf_get_default_tc(pf);
4019
4020 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
4021 if (ret) {
4022 dev_info(&pf->pdev->dev,
4023 "Failed configuring TC for VSI seid=%d\n",
4024 pf->vsi[v]->seid);
4025			/* Will try to configure as many components as possible */
4026 } else {
0672a091
NP
4027 /* Re-configure VSI vectors based on updated TC map */
4028 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
4e3b35b0
NP
4029 if (pf->vsi[v]->netdev)
4030 i40e_dcbnl_set_all(pf->vsi[v]);
4031 }
4032 }
4033}
4034
4035/**
4036 * i40e_init_pf_dcb - Initialize DCB configuration
4037 * @pf: PF being configured
4038 *
4039 * Query the current DCB configuration and cache it
4040 * in the hardware structure
4041 **/
4042static int i40e_init_pf_dcb(struct i40e_pf *pf)
4043{
4044 struct i40e_hw *hw = &pf->hw;
4045 int err = 0;
4046
4047 if (pf->hw.func_caps.npar_enable)
4048 goto out;
4049
4050 /* Get the initial DCB configuration */
4051 err = i40e_init_dcb(hw);
4052 if (!err) {
4053 /* Device/Function is not DCBX capable */
4054 if ((!hw->func_caps.dcb) ||
4055 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
4056 dev_info(&pf->pdev->dev,
4057 "DCBX offload is not supported or is disabled for this PF.\n");
4058
4059 if (pf->flags & I40E_FLAG_MFP_ENABLED)
4060 goto out;
4061
4062 } else {
4063			/* When status is not DISABLED then DCBX is in FW */
4064 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
4065 DCB_CAP_DCBX_VER_IEEE;
4066 pf->flags |= I40E_FLAG_DCB_ENABLED;
4067 }
014269ff
NP
4068 } else {
4069 dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
4070 pf->hw.aq.asq_last_status);
4e3b35b0
NP
4071 }
4072
4073out:
4074 return err;
4075}
4076#endif /* CONFIG_I40E_DCB */
4077
41c445ff
JB
4078/**
4079 * i40e_up_complete - Finish the last steps of bringing up a connection
4080 * @vsi: the VSI being configured
4081 **/
4082static int i40e_up_complete(struct i40e_vsi *vsi)
4083{
4084 struct i40e_pf *pf = vsi->back;
4085 int err;
4086
4087 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4088 i40e_vsi_configure_msix(vsi);
4089 else
4090 i40e_configure_msi_and_legacy(vsi);
4091
4092 /* start rings */
4093 err = i40e_vsi_control_rings(vsi, true);
4094 if (err)
4095 return err;
4096
4097 clear_bit(__I40E_DOWN, &vsi->state);
4098 i40e_napi_enable_all(vsi);
4099 i40e_vsi_enable_irq(vsi);
4100
4101 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
4102 (vsi->netdev)) {
6d779b41 4103 netdev_info(vsi->netdev, "NIC Link is Up\n");
41c445ff
JB
4104 netif_tx_start_all_queues(vsi->netdev);
4105 netif_carrier_on(vsi->netdev);
6d779b41
AS
4106 } else if (vsi->netdev) {
4107 netdev_info(vsi->netdev, "NIC Link is Down\n");
41c445ff 4108 }
ca64fa4e
ASJ
4109
4110 /* replay FDIR SB filters */
4111 if (vsi->type == I40E_VSI_FDIR)
4112 i40e_fdir_filter_restore(vsi);
41c445ff
JB
4113 i40e_service_event_schedule(pf);
4114
4115 return 0;
4116}
4117
4118/**
4119 * i40e_vsi_reinit_locked - Reset the VSI
4120 * @vsi: the VSI being configured
4121 *
4122 * Rebuild the ring structs after some configuration
4123 * has changed, e.g. MTU size.
4124 **/
4125static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
4126{
4127 struct i40e_pf *pf = vsi->back;
4128
4129 WARN_ON(in_interrupt());
4130 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
4131 usleep_range(1000, 2000);
4132 i40e_down(vsi);
4133
4134 /* Give a VF some time to respond to the reset. The
4135 * two second wait is based upon the watchdog cycle in
4136 * the VF driver.
4137 */
4138 if (vsi->type == I40E_VSI_SRIOV)
4139 msleep(2000);
4140 i40e_up(vsi);
4141 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
4142}
4143
4144/**
4145 * i40e_up - Bring the connection back up after being down
4146 * @vsi: the VSI being configured
4147 **/
4148int i40e_up(struct i40e_vsi *vsi)
4149{
4150 int err;
4151
4152 err = i40e_vsi_configure(vsi);
4153 if (!err)
4154 err = i40e_up_complete(vsi);
4155
4156 return err;
4157}
4158
4159/**
4160 * i40e_down - Shutdown the connection processing
4161 * @vsi: the VSI being stopped
4162 **/
4163void i40e_down(struct i40e_vsi *vsi)
4164{
4165 int i;
4166
4167 /* It is assumed that the caller of this function
4168 * sets the vsi->state __I40E_DOWN bit.
4169 */
4170 if (vsi->netdev) {
4171 netif_carrier_off(vsi->netdev);
4172 netif_tx_disable(vsi->netdev);
4173 }
4174 i40e_vsi_disable_irq(vsi);
4175 i40e_vsi_control_rings(vsi, false);
4176 i40e_napi_disable_all(vsi);
4177
4178 for (i = 0; i < vsi->num_queue_pairs; i++) {
9f65e15b
AD
4179 i40e_clean_tx_ring(vsi->tx_rings[i]);
4180 i40e_clean_rx_ring(vsi->rx_rings[i]);
41c445ff
JB
4181 }
4182}
4183
4184/**
4185 * i40e_setup_tc - configure multiple traffic classes
4186 * @netdev: net device to configure
4187 * @tc: number of traffic classes to enable
4188 **/
4189static int i40e_setup_tc(struct net_device *netdev, u8 tc)
4190{
4191 struct i40e_netdev_priv *np = netdev_priv(netdev);
4192 struct i40e_vsi *vsi = np->vsi;
4193 struct i40e_pf *pf = vsi->back;
4194 u8 enabled_tc = 0;
4195 int ret = -EINVAL;
4196 int i;
4197
4198 /* Check if DCB enabled to continue */
4199 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
4200 netdev_info(netdev, "DCB is not enabled for adapter\n");
4201 goto exit;
4202 }
4203
4204 /* Check if MFP enabled */
4205 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4206 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
4207 goto exit;
4208 }
4209
4210 /* Check whether tc count is within enabled limit */
4211 if (tc > i40e_pf_get_num_tc(pf)) {
4212 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
4213 goto exit;
4214 }
4215
4216 /* Generate TC map for number of tc requested */
4217 for (i = 0; i < tc; i++)
4218 enabled_tc |= (1 << i);
4219
4220 /* Requesting same TC configuration as already enabled */
4221 if (enabled_tc == vsi->tc_config.enabled_tc)
4222 return 0;
4223
4224 /* Quiesce VSI queues */
4225 i40e_quiesce_vsi(vsi);
4226
4227 /* Configure VSI for enabled TCs */
4228 ret = i40e_vsi_config_tc(vsi, enabled_tc);
4229 if (ret) {
4230 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
4231 vsi->seid);
4232 goto exit;
4233 }
4234
4235 /* Unquiesce VSI */
4236 i40e_unquiesce_vsi(vsi);
4237
4238exit:
4239 return ret;
4240}
4241
4242/**
4243 * i40e_open - Called when a network interface is made active
4244 * @netdev: network interface device structure
4245 *
4246 * The open entry point is called when a network interface is made
4247 * active by the system (IFF_UP). At this point all resources needed
4248 * for transmit and receive operations are allocated, the interrupt
4249 * handler is registered with the OS, the netdev watchdog subtask is
4250 * enabled, and the stack is notified that the interface is ready.
4251 *
4252 * Returns 0 on success, negative value on failure
4253 **/
4254static int i40e_open(struct net_device *netdev)
4255{
4256 struct i40e_netdev_priv *np = netdev_priv(netdev);
4257 struct i40e_vsi *vsi = np->vsi;
4258 struct i40e_pf *pf = vsi->back;
41c445ff
JB
4259 int err;
4260
4eb3f768
SN
4261 /* disallow open during test or if eeprom is broken */
4262 if (test_bit(__I40E_TESTING, &pf->state) ||
4263 test_bit(__I40E_BAD_EEPROM, &pf->state))
41c445ff
JB
4264 return -EBUSY;
4265
4266 netif_carrier_off(netdev);
4267
6c167f58
EK
4268 err = i40e_vsi_open(vsi);
4269 if (err)
4270 return err;
4271
059dab69
JB
4272 /* configure global TSO hardware offload settings */
4273 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
4274 TCP_FLAG_FIN) >> 16);
4275 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
4276 TCP_FLAG_FIN |
4277 TCP_FLAG_CWR) >> 16);
4278 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
4279
6c167f58
EK
4280#ifdef CONFIG_I40E_VXLAN
4281 vxlan_get_rx_port(netdev);
4282#endif
4283
4284 return 0;
4285}
4286
4287/**
4288 * i40e_vsi_open - Bring up a VSI
4289 * @vsi: the VSI to open
4290 *
4291 * Finish initialization of the VSI.
4292 *
4293 * Returns 0 on success, negative value on failure
4294 **/
4295int i40e_vsi_open(struct i40e_vsi *vsi)
4296{
4297 struct i40e_pf *pf = vsi->back;
4298 char int_name[IFNAMSIZ];
4299 int err;
4300
41c445ff
JB
4301 /* allocate descriptors */
4302 err = i40e_vsi_setup_tx_resources(vsi);
4303 if (err)
4304 goto err_setup_tx;
4305 err = i40e_vsi_setup_rx_resources(vsi);
4306 if (err)
4307 goto err_setup_rx;
4308
4309 err = i40e_vsi_configure(vsi);
4310 if (err)
4311 goto err_setup_rx;
4312
c22e3c6c
SN
4313 if (vsi->netdev) {
4314 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
4315 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
4316 err = i40e_vsi_request_irq(vsi, int_name);
4317 if (err)
4318 goto err_setup_rx;
4319
4320 /* Notify the stack of the actual queue counts. */
4321 err = netif_set_real_num_tx_queues(vsi->netdev,
4322 vsi->num_queue_pairs);
4323 if (err)
4324 goto err_set_queues;
4325
4326 err = netif_set_real_num_rx_queues(vsi->netdev,
4327 vsi->num_queue_pairs);
4328 if (err)
4329 goto err_set_queues;
8a9eb7d3
SN
4330
4331 } else if (vsi->type == I40E_VSI_FDIR) {
4332 snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
4333 dev_driver_string(&pf->pdev->dev));
4334 err = i40e_vsi_request_irq(vsi, int_name);
c22e3c6c 4335 } else {
ce9ccb17 4336 err = -EINVAL;
6c167f58
EK
4337 goto err_setup_rx;
4338 }
25946ddb 4339
41c445ff
JB
4340 err = i40e_up_complete(vsi);
4341 if (err)
4342 goto err_up_complete;
4343
41c445ff
JB
4344 return 0;
4345
4346err_up_complete:
4347 i40e_down(vsi);
25946ddb 4348err_set_queues:
41c445ff
JB
4349 i40e_vsi_free_irq(vsi);
4350err_setup_rx:
4351 i40e_vsi_free_rx_resources(vsi);
4352err_setup_tx:
4353 i40e_vsi_free_tx_resources(vsi);
4354 if (vsi == pf->vsi[pf->lan_vsi])
4355 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
4356
4357 return err;
4358}
4359
17a73f6b
JG
4360/**
4361 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
4362 * @pf: Pointer to pf
4363 *
4364 * This function destroys the hlist where all the Flow Director
4365 * filters were saved.
4366 **/
4367static void i40e_fdir_filter_exit(struct i40e_pf *pf)
4368{
4369 struct i40e_fdir_filter *filter;
4370 struct hlist_node *node2;
4371
4372 hlist_for_each_entry_safe(filter, node2,
4373 &pf->fdir_filter_list, fdir_node) {
4374 hlist_del(&filter->fdir_node);
4375 kfree(filter);
4376 }
4377 pf->fdir_pf_active_filters = 0;
4378}
4379
41c445ff
JB
4380/**
4381 * i40e_close - Disables a network interface
4382 * @netdev: network interface device structure
4383 *
4384 * The close entry point is called when an interface is de-activated
4385 * by the OS. The hardware is still under the driver's control, but
4386 * this netdev interface is disabled.
4387 *
4388 * Returns 0, this is not allowed to fail
4389 **/
4390static int i40e_close(struct net_device *netdev)
4391{
4392 struct i40e_netdev_priv *np = netdev_priv(netdev);
4393 struct i40e_vsi *vsi = np->vsi;
4394
90ef8d47 4395 i40e_vsi_close(vsi);
41c445ff
JB
4396
4397 return 0;
4398}
4399
4400/**
4401 * i40e_do_reset - Start a PF or Core Reset sequence
4402 * @pf: board private structure
4403 * @reset_flags: which reset is requested
4404 *
4405 * The essential difference in resets is that the PF Reset
4406 * doesn't clear the packet buffers, doesn't reset the PE
4407 * firmware, and doesn't bother the other PFs on the chip.
4408 **/
4409void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
4410{
4411 u32 val;
4412
4413 WARN_ON(in_interrupt());
4414
4415 /* do the biggest reset indicated */
4416 if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
4417
4418 /* Request a Global Reset
4419 *
4420 * This will start the chip's countdown to the actual full
4421 * chip reset event, and a warning interrupt to be sent
4422 * to all PFs, including the requestor. Our handler
4423 * for the warning interrupt will deal with the shutdown
4424 * and recovery of the switch setup.
4425 */
69bfb110 4426 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
41c445ff
JB
4427 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4428 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
4429 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
4430
4431 } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
4432
4433 /* Request a Core Reset
4434 *
4435 * Same as Global Reset, except does *not* include the MAC/PHY
4436 */
69bfb110 4437 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
41c445ff
JB
4438 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4439 val |= I40E_GLGEN_RTRIG_CORER_MASK;
4440 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
4441 i40e_flush(&pf->hw);
4442
7823fe34
SN
4443 } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {
4444
4445 /* Request a Firmware Reset
4446 *
4447 * Same as Global reset, plus restarting the
4448 * embedded firmware engine.
4449 */
4450 /* enable EMP Reset */
4451 val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
4452 val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
4453 wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);
4454
4455 /* force the reset */
4456 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4457 val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
4458 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
4459 i40e_flush(&pf->hw);
4460
41c445ff
JB
4461 } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
4462
4463 /* Request a PF Reset
4464 *
4465 * Resets only the PF-specific registers
4466 *
4467 * This goes directly to the tear-down and rebuild of
4468 * the switch, since we need to do all the recovery as
4469 * for the Core Reset.
4470 */
69bfb110 4471 dev_dbg(&pf->pdev->dev, "PFR requested\n");
41c445ff
JB
4472 i40e_handle_reset_warning(pf);
4473
4474 } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
4475 int v;
4476
4477 /* Find the VSI(s) that requested a re-init */
4478 dev_info(&pf->pdev->dev,
4479 "VSI reinit requested\n");
4480 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4481 struct i40e_vsi *vsi = pf->vsi[v];
4482 if (vsi != NULL &&
4483 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
4484 i40e_vsi_reinit_locked(pf->vsi[v]);
4485 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
4486 }
4487 }
4488
4489 /* no further action needed, so return now */
4490 return;
4491 } else {
4492 dev_info(&pf->pdev->dev,
4493 "bad reset request 0x%08x\n", reset_flags);
4494 return;
4495 }
4496}
4497
4e3b35b0
NP
4498#ifdef CONFIG_I40E_DCB
4499/**
4500 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
4501 * @pf: board private structure
4502 * @old_cfg: current DCB config
4503 * @new_cfg: new DCB config
4504 **/
4505bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
4506 struct i40e_dcbx_config *old_cfg,
4507 struct i40e_dcbx_config *new_cfg)
4508{
4509 bool need_reconfig = false;
4510
4511 /* Check if ETS configuration has changed */
4512 if (memcmp(&new_cfg->etscfg,
4513 &old_cfg->etscfg,
4514 sizeof(new_cfg->etscfg))) {
4515 /* If Priority Table has changed reconfig is needed */
4516 if (memcmp(&new_cfg->etscfg.prioritytable,
4517 &old_cfg->etscfg.prioritytable,
4518 sizeof(new_cfg->etscfg.prioritytable))) {
4519 need_reconfig = true;
69bfb110 4520 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
4e3b35b0
NP
4521 }
4522
4523 if (memcmp(&new_cfg->etscfg.tcbwtable,
4524 &old_cfg->etscfg.tcbwtable,
4525 sizeof(new_cfg->etscfg.tcbwtable)))
69bfb110 4526 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
4e3b35b0
NP
4527
4528 if (memcmp(&new_cfg->etscfg.tsatable,
4529 &old_cfg->etscfg.tsatable,
4530 sizeof(new_cfg->etscfg.tsatable)))
69bfb110 4531 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
4e3b35b0
NP
4532 }
4533
4534 /* Check if PFC configuration has changed */
4535 if (memcmp(&new_cfg->pfc,
4536 &old_cfg->pfc,
4537 sizeof(new_cfg->pfc))) {
4538 need_reconfig = true;
69bfb110 4539 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
4e3b35b0
NP
4540 }
4541
4542 /* Check if APP Table has changed */
4543 if (memcmp(&new_cfg->app,
4544 &old_cfg->app,
3d9667a9 4545 sizeof(new_cfg->app))) {
4e3b35b0 4546 need_reconfig = true;
69bfb110 4547 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
3d9667a9 4548 }
4e3b35b0
NP
4549
4550 return need_reconfig;
4551}
4552
4553/**
4554 * i40e_handle_lldp_event - Handle LLDP Change MIB event
4555 * @pf: board private structure
4556 * @e: event info posted on ARQ
4557 **/
4558static int i40e_handle_lldp_event(struct i40e_pf *pf,
4559 struct i40e_arq_event_info *e)
4560{
4561 struct i40e_aqc_lldp_get_mib *mib =
4562 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
4563 struct i40e_hw *hw = &pf->hw;
4564 struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
4565 struct i40e_dcbx_config tmp_dcbx_cfg;
4566 bool need_reconfig = false;
4567 int ret = 0;
4568 u8 type;
4569
4570 /* Ignore if event is not for Nearest Bridge */
4571 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
4572 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
4573 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
4574 return ret;
4575
4576 /* Check MIB Type and return if event for Remote MIB update */
4577 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
4578 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
4579 /* Update the remote cached instance and return */
4580 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
4581 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
4582 &hw->remote_dcbx_config);
4583 goto exit;
4584 }
4585
4586 /* Convert/store the DCBX data from LLDPDU temporarily */
4587 memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
4588 ret = i40e_lldp_to_dcb_config(e->msg_buf, &tmp_dcbx_cfg);
4589 if (ret) {
4590 /* Error in LLDPDU parsing return */
4591 dev_info(&pf->pdev->dev, "Failed parsing LLDPDU from event buffer\n");
4592 goto exit;
4593 }
4594
4595 /* No change detected in DCBX configs */
4596 if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
69bfb110 4597 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
4e3b35b0
NP
4598 goto exit;
4599 }
4600
4601 need_reconfig = i40e_dcb_need_reconfig(pf, dcbx_cfg, &tmp_dcbx_cfg);
4602
4603 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg);
4604
4605 /* Overwrite the new configuration */
4606 *dcbx_cfg = tmp_dcbx_cfg;
4607
4608 if (!need_reconfig)
4609 goto exit;
4610
4611	/* Reconfiguration needed, quiesce all VSIs */
4612 i40e_pf_quiesce_all_vsi(pf);
4613
4614 /* Changes in configuration update VEB/VSI */
4615 i40e_dcb_reconfigure(pf);
4616
4617 i40e_pf_unquiesce_all_vsi(pf);
4618exit:
4619 return ret;
4620}
4621#endif /* CONFIG_I40E_DCB */
4622
23326186
ASJ
4623/**
4624 * i40e_do_reset_safe - Protected reset path for userland calls.
4625 * @pf: board private structure
4626 * @reset_flags: which reset is requested
4627 *
4628 **/
4629void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
4630{
4631 rtnl_lock();
4632 i40e_do_reset(pf, reset_flags);
4633 rtnl_unlock();
4634}
4635
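/* Usage sketch (hypothetical caller): request a PF reset from process
 * context.  i40e_do_reset_safe() takes and drops the rtnl lock itself,
 * so the caller must not already hold it.  The wrapper name is made up
 * for illustration.
 */
static void i40e_request_pf_reset_example(struct i40e_pf *pf)
{
	i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
}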
41c445ff
JB
4636/**
4637 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
4638 * @pf: board private structure
4639 * @e: event info posted on ARQ
4640 *
4641 * Handler for LAN Queue Overflow Event generated by the firmware for PF
4642 * and VF queues
4643 **/
4644static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
4645 struct i40e_arq_event_info *e)
4646{
4647 struct i40e_aqc_lan_overflow *data =
4648 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
4649 u32 queue = le32_to_cpu(data->prtdcb_rupto);
4650 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
4651 struct i40e_hw *hw = &pf->hw;
4652 struct i40e_vf *vf;
4653 u16 vf_id;
4654
69bfb110
JB
4655 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
4656 queue, qtx_ctl);
41c445ff
JB
4657
4658 /* Queue belongs to VF, find the VF and issue VF reset */
4659 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
4660 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
4661 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
4662 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
4663 vf_id -= hw->func_caps.vf_base_id;
4664 vf = &pf->vf[vf_id];
4665 i40e_vc_notify_vf_reset(vf);
4666 /* Allow VF to process pending reset notification */
4667 msleep(20);
4668 i40e_reset_vf(vf, false);
4669 }
4670}
4671
4672/**
4673 * i40e_service_event_complete - Finish up the service event
4674 * @pf: board private structure
4675 **/
4676static void i40e_service_event_complete(struct i40e_pf *pf)
4677{
4678 BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
4679
4680	/* flush memory to make sure state is correct before next watchdog */
4681 smp_mb__before_clear_bit();
4682 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
4683}
4684
55a5e60b
ASJ
4685/**
4686 * i40e_get_current_fd_count - Get the count of FD filters programmed in the HW
4687 * @pf: board private structure
4688 **/
4689int i40e_get_current_fd_count(struct i40e_pf *pf)
4690{
4691 int val, fcnt_prog;
4692 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
4693 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
4694 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
4695 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
4696 return fcnt_prog;
4697}
4698
4699/**
4700 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
4701 * @pf: board private structure
4702 **/
4703void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
4704{
4705 u32 fcnt_prog, fcnt_avail;
4706
4707	/* Check if FD SB or ATR was auto-disabled and if there is enough room
4708 * to re-enable
4709 */
4710 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
4711 (pf->flags & I40E_FLAG_FD_SB_ENABLED))
4712 return;
4713 fcnt_prog = i40e_get_current_fd_count(pf);
89132783 4714 fcnt_avail = i40e_get_fd_cnt_all(pf);
55a5e60b
ASJ
4715 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
4716 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
4717 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
4718 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
4719 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
4720 }
4721 }
4722 /* Wait for some more space to be available to turn on ATR */
4723 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
4724 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
4725 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
4726 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
4727 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
4728 }
4729 }
4730}
4731
41c445ff
JB
4732/**
4733 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
4734 * @pf: board private structure
4735 **/
4736static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
4737{
4738 if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
4739 return;
4740
41c445ff
JB
4741 /* if interface is down do nothing */
4742 if (test_bit(__I40E_DOWN, &pf->state))
4743 return;
55a5e60b
ASJ
4744 i40e_fdir_check_and_reenable(pf);
4745
4746 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
4747 (pf->flags & I40E_FLAG_FD_SB_ENABLED))
4748 pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
41c445ff
JB
4749}
4750
4751/**
4752 * i40e_vsi_link_event - notify VSI of a link event
4753 * @vsi: vsi to be notified
4754 * @link_up: link up or down
4755 **/
4756static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
4757{
4758 if (!vsi)
4759 return;
4760
4761 switch (vsi->type) {
4762 case I40E_VSI_MAIN:
4763 if (!vsi->netdev || !vsi->netdev_registered)
4764 break;
4765
4766 if (link_up) {
4767 netif_carrier_on(vsi->netdev);
4768 netif_tx_wake_all_queues(vsi->netdev);
4769 } else {
4770 netif_carrier_off(vsi->netdev);
4771 netif_tx_stop_all_queues(vsi->netdev);
4772 }
4773 break;
4774
4775 case I40E_VSI_SRIOV:
4776 break;
4777
4778 case I40E_VSI_VMDQ2:
4779 case I40E_VSI_CTRL:
4780 case I40E_VSI_MIRROR:
4781 default:
4782 /* there is no notification for other VSIs */
4783 break;
4784 }
4785}
4786
4787/**
4788 * i40e_veb_link_event - notify elements on the veb of a link event
4789 * @veb: veb to be notified
4790 * @link_up: link up or down
4791 **/
4792static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
4793{
4794 struct i40e_pf *pf;
4795 int i;
4796
4797 if (!veb || !veb->pf)
4798 return;
4799 pf = veb->pf;
4800
4801 /* depth first... */
4802 for (i = 0; i < I40E_MAX_VEB; i++)
4803 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
4804 i40e_veb_link_event(pf->veb[i], link_up);
4805
4806 /* ... now the local VSIs */
4807 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4808 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
4809 i40e_vsi_link_event(pf->vsi[i], link_up);
4810}
4811
4812/**
4813 * i40e_link_event - Update netif_carrier status
4814 * @pf: board private structure
4815 **/
4816static void i40e_link_event(struct i40e_pf *pf)
4817{
4818 bool new_link, old_link;
4819
4820 new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP);
4821 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
4822
4823 if (new_link == old_link)
4824 return;
4825
6d779b41
AS
4826 if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
4827 netdev_info(pf->vsi[pf->lan_vsi]->netdev,
4828 "NIC Link is %s\n", (new_link ? "Up" : "Down"));
41c445ff
JB
4829
4830 /* Notify the base of the switch tree connected to
4831 * the link. Floating VEBs are not notified.
4832 */
4833 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
4834 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
4835 else
4836 i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link);
4837
4838 if (pf->vf)
4839 i40e_vc_notify_link_state(pf);
beb0dff1
JK
4840
4841 if (pf->flags & I40E_FLAG_PTP)
4842 i40e_ptp_set_increment(pf);
41c445ff
JB
4843}
4844
4845/**
4846 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
4847 * @pf: board private structure
4848 *
4849 * Set the per-queue flags to request a check for stuck queues in the irq
4850 * clean functions, then force interrupts to be sure the irq clean is called.
4851 **/
4852static void i40e_check_hang_subtask(struct i40e_pf *pf)
4853{
4854 int i, v;
4855
4856 /* If we're down or resetting, just bail */
4857 if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
4858 return;
4859
4860 /* for each VSI/netdev
4861 * for each Tx queue
4862 * set the check flag
4863 * for each q_vector
4864 * force an interrupt
4865 */
4866 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4867 struct i40e_vsi *vsi = pf->vsi[v];
4868 int armed = 0;
4869
4870 if (!pf->vsi[v] ||
4871 test_bit(__I40E_DOWN, &vsi->state) ||
4872 (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
4873 continue;
4874
4875 for (i = 0; i < vsi->num_queue_pairs; i++) {
9f65e15b 4876 set_check_for_tx_hang(vsi->tx_rings[i]);
41c445ff 4877 if (test_bit(__I40E_HANG_CHECK_ARMED,
9f65e15b 4878 &vsi->tx_rings[i]->state))
41c445ff
JB
4879 armed++;
4880 }
4881
4882 if (armed) {
4883 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
4884 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
4885 (I40E_PFINT_DYN_CTL0_INTENA_MASK |
4886 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
4887 } else {
4888 u16 vec = vsi->base_vector - 1;
4889 u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
4890 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
4891 for (i = 0; i < vsi->num_q_vectors; i++, vec++)
4892 wr32(&vsi->back->hw,
4893 I40E_PFINT_DYN_CTLN(vec), val);
4894 }
4895 i40e_flush(&vsi->back->hw);
4896 }
4897 }
4898}
4899
4900/**
4901 * i40e_watchdog_subtask - Check and bring link up
4902 * @pf: board private structure
4903 **/
4904static void i40e_watchdog_subtask(struct i40e_pf *pf)
4905{
4906 int i;
4907
4908 /* if interface is down do nothing */
4909 if (test_bit(__I40E_DOWN, &pf->state) ||
4910 test_bit(__I40E_CONFIG_BUSY, &pf->state))
4911 return;
4912
4913 /* Update the stats for active netdevs so the network stack
4914 * can look at updated numbers whenever it cares to
4915 */
4916 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4917 if (pf->vsi[i] && pf->vsi[i]->netdev)
4918 i40e_update_stats(pf->vsi[i]);
4919
4920 /* Update the stats for the active switching components */
4921 for (i = 0; i < I40E_MAX_VEB; i++)
4922 if (pf->veb[i])
4923 i40e_update_veb_stats(pf->veb[i]);
beb0dff1
JK
4924
4925 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
41c445ff
JB
4926}
4927
4928/**
4929 * i40e_reset_subtask - Set up for resetting the device and driver
4930 * @pf: board private structure
4931 **/
4932static void i40e_reset_subtask(struct i40e_pf *pf)
4933{
4934 u32 reset_flags = 0;
4935
23326186 4936 rtnl_lock();
41c445ff
JB
4937 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
4938 reset_flags |= (1 << __I40E_REINIT_REQUESTED);
4939 clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
4940 }
4941 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
4942 reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
4943 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4944 }
4945 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
4946 reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
4947 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
4948 }
4949 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
4950 reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
4951 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
4952 }
4953
4954 /* If there's a recovery already waiting, it takes
4955	 * precedence over starting a new reset sequence.
4956 */
4957 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
4958 i40e_handle_reset_warning(pf);
23326186 4959 goto unlock;
41c445ff
JB
4960 }
4961
4962 /* If we're already down or resetting, just bail */
4963 if (reset_flags &&
4964 !test_bit(__I40E_DOWN, &pf->state) &&
4965 !test_bit(__I40E_CONFIG_BUSY, &pf->state))
4966 i40e_do_reset(pf, reset_flags);
23326186
ASJ
4967
4968unlock:
4969 rtnl_unlock();
41c445ff
JB
4970}
4971
4972/**
4973 * i40e_handle_link_event - Handle link event
4974 * @pf: board private structure
4975 * @e: event info posted on ARQ
4976 **/
4977static void i40e_handle_link_event(struct i40e_pf *pf,
4978 struct i40e_arq_event_info *e)
4979{
4980 struct i40e_hw *hw = &pf->hw;
4981 struct i40e_aqc_get_link_status *status =
4982 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
4983 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
4984
4985 /* save off old link status information */
4986 memcpy(&pf->hw.phy.link_info_old, hw_link_info,
4987 sizeof(pf->hw.phy.link_info_old));
4988
4989 /* update link status */
4990 hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
4991 hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
4992 hw_link_info->link_info = status->link_info;
4993 hw_link_info->an_info = status->an_info;
4994 hw_link_info->ext_info = status->ext_info;
4995 hw_link_info->lse_enable =
4996 le16_to_cpu(status->command_flags) &
4997 I40E_AQ_LSE_ENABLE;
4998
4999 /* process the event */
5000 i40e_link_event(pf);
5001
5002 /* Do a new status request to re-enable LSE reporting
5003 * and load new status information into the hw struct,
5004 * then see if the status changed while processing the
5005 * initial event.
5006 */
5007 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
5008 i40e_link_event(pf);
5009}
5010
5011/**
5012 * i40e_clean_adminq_subtask - Clean the AdminQ rings
5013 * @pf: board private structure
5014 **/
5015static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
5016{
5017 struct i40e_arq_event_info event;
5018 struct i40e_hw *hw = &pf->hw;
5019 u16 pending, i = 0;
5020 i40e_status ret;
5021 u16 opcode;
5022 u32 val;
5023
5024 if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
5025 return;
5026
3197ce22 5027 event.msg_size = I40E_MAX_AQ_BUF_SIZE;
41c445ff
JB
5028 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
5029 if (!event.msg_buf)
5030 return;
5031
5032 do {
2f019123 5033 event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
41c445ff
JB
5034 ret = i40e_clean_arq_element(hw, &event, &pending);
5035 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
5036 dev_info(&pf->pdev->dev, "No ARQ event found\n");
5037 break;
5038 } else if (ret) {
5039 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
5040 break;
5041 }
5042
5043 opcode = le16_to_cpu(event.desc.opcode);
5044 switch (opcode) {
5045
5046 case i40e_aqc_opc_get_link_status:
5047 i40e_handle_link_event(pf, &event);
5048 break;
5049 case i40e_aqc_opc_send_msg_to_pf:
5050 ret = i40e_vc_process_vf_msg(pf,
5051 le16_to_cpu(event.desc.retval),
5052 le32_to_cpu(event.desc.cookie_high),
5053 le32_to_cpu(event.desc.cookie_low),
5054 event.msg_buf,
5055 event.msg_size);
5056 break;
5057 case i40e_aqc_opc_lldp_update_mib:
69bfb110 5058 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
4e3b35b0
NP
5059#ifdef CONFIG_I40E_DCB
5060 rtnl_lock();
5061 ret = i40e_handle_lldp_event(pf, &event);
5062 rtnl_unlock();
5063#endif /* CONFIG_I40E_DCB */
41c445ff
JB
5064 break;
5065 case i40e_aqc_opc_event_lan_overflow:
69bfb110 5066 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
41c445ff
JB
5067 i40e_handle_lan_overflow_event(pf, &event);
5068 break;
0467bc91
SN
5069 case i40e_aqc_opc_send_msg_to_peer:
5070 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
5071 break;
41c445ff
JB
5072 default:
5073 dev_info(&pf->pdev->dev,
0467bc91
SN
5074 "ARQ Error: Unknown event 0x%04x received\n",
5075 opcode);
41c445ff
JB
5076 break;
5077 }
5078 } while (pending && (i++ < pf->adminq_work_limit));
5079
5080 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
5081 /* re-enable Admin queue interrupt cause */
5082 val = rd32(hw, I40E_PFINT_ICR0_ENA);
5083 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
5084 wr32(hw, I40E_PFINT_ICR0_ENA, val);
5085 i40e_flush(hw);
5086
5087 kfree(event.msg_buf);
5088}
5089
4eb3f768
SN
5090/**
5091 * i40e_verify_eeprom - make sure eeprom is good to use
5092 * @pf: board private structure
5093 **/
5094static void i40e_verify_eeprom(struct i40e_pf *pf)
5095{
5096 int err;
5097
5098 err = i40e_diag_eeprom_test(&pf->hw);
5099 if (err) {
5100 /* retry in case of garbage read */
5101 err = i40e_diag_eeprom_test(&pf->hw);
5102 if (err) {
5103 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
5104 err);
5105 set_bit(__I40E_BAD_EEPROM, &pf->state);
5106 }
5107 }
5108
5109 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
5110 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
5111 clear_bit(__I40E_BAD_EEPROM, &pf->state);
5112 }
5113}
5114
41c445ff
JB
5115/**
5116 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
5117 * @veb: pointer to the VEB instance
5118 *
5119 * This is a recursive function that first builds the attached VSIs then
5120 * recurses in to build the next layer of VEB. We track the connections
5121 * through our own index numbers because the SEIDs from the HW could
5122 * change across the reset.
5123 **/
5124static int i40e_reconstitute_veb(struct i40e_veb *veb)
5125{
5126 struct i40e_vsi *ctl_vsi = NULL;
5127 struct i40e_pf *pf = veb->pf;
5128 int v, veb_idx;
5129 int ret;
5130
5131 /* build VSI that owns this VEB, temporarily attached to base VEB */
5132 for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
5133 if (pf->vsi[v] &&
5134 pf->vsi[v]->veb_idx == veb->idx &&
5135 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
5136 ctl_vsi = pf->vsi[v];
5137 break;
5138 }
5139 }
5140 if (!ctl_vsi) {
5141 dev_info(&pf->pdev->dev,
5142 "missing owner VSI for veb_idx %d\n", veb->idx);
5143 ret = -ENOENT;
5144 goto end_reconstitute;
5145 }
5146 if (ctl_vsi != pf->vsi[pf->lan_vsi])
5147 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
5148 ret = i40e_add_vsi(ctl_vsi);
5149 if (ret) {
5150 dev_info(&pf->pdev->dev,
5151 "rebuild of owner VSI failed: %d\n", ret);
5152 goto end_reconstitute;
5153 }
5154 i40e_vsi_reset_stats(ctl_vsi);
5155
5156 /* create the VEB in the switch and move the VSI onto the VEB */
5157 ret = i40e_add_veb(veb, ctl_vsi);
5158 if (ret)
5159 goto end_reconstitute;
5160
5161 /* create the remaining VSIs attached to this VEB */
5162 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
5163 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
5164 continue;
5165
5166 if (pf->vsi[v]->veb_idx == veb->idx) {
5167 struct i40e_vsi *vsi = pf->vsi[v];
5168 vsi->uplink_seid = veb->seid;
5169 ret = i40e_add_vsi(vsi);
5170 if (ret) {
5171 dev_info(&pf->pdev->dev,
5172 "rebuild of vsi_idx %d failed: %d\n",
5173 v, ret);
5174 goto end_reconstitute;
5175 }
5176 i40e_vsi_reset_stats(vsi);
5177 }
5178 }
5179
5180 /* create any VEBs attached to this VEB - RECURSION */
5181 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
5182 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
5183 pf->veb[veb_idx]->uplink_seid = veb->seid;
5184 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
5185 if (ret)
5186 break;
5187 }
5188 }
5189
5190end_reconstitute:
5191 return ret;
5192}
5193
5194/**
5195 * i40e_get_capabilities - get info about the HW
5196 * @pf: the PF struct
5197 **/
5198static int i40e_get_capabilities(struct i40e_pf *pf)
5199{
5200 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
5201 u16 data_size;
5202 int buf_len;
5203 int err;
5204
5205 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
5206 do {
5207 cap_buf = kzalloc(buf_len, GFP_KERNEL);
5208 if (!cap_buf)
5209 return -ENOMEM;
5210
5211 /* this loads the data into the hw struct for us */
5212 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
5213 &data_size,
5214 i40e_aqc_opc_list_func_capabilities,
5215 NULL);
5216 /* data loaded, buffer no longer needed */
5217 kfree(cap_buf);
5218
5219 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
5220 /* retry with a larger buffer */
5221 buf_len = data_size;
5222 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
5223 dev_info(&pf->pdev->dev,
5224 "capability discovery failed: aq=%d\n",
5225 pf->hw.aq.asq_last_status);
5226 return -ENODEV;
5227 }
5228 } while (err);
5229
ac71b7ba
ASJ
5230 if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
5231 (pf->hw.aq.fw_maj_ver < 2)) {
5232 pf->hw.func_caps.num_msix_vectors++;
5233 pf->hw.func_caps.num_msix_vectors_vf++;
5234 }
5235
41c445ff
JB
5236 if (pf->hw.debug_mask & I40E_DEBUG_USER)
5237 dev_info(&pf->pdev->dev,
5238 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
5239 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
5240 pf->hw.func_caps.num_msix_vectors,
5241 pf->hw.func_caps.num_msix_vectors_vf,
5242 pf->hw.func_caps.fd_filters_guaranteed,
5243 pf->hw.func_caps.fd_filters_best_effort,
5244 pf->hw.func_caps.num_tx_qp,
5245 pf->hw.func_caps.num_vsis);
5246
7134f9ce
JB
5247#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
5248 + pf->hw.func_caps.num_vfs)
5249 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
5250 dev_info(&pf->pdev->dev,
5251 "got num_vsis %d, setting num_vsis to %d\n",
5252 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
5253 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
5254 }
5255
41c445ff
JB
5256 return 0;
5257}
5258
cbf61325
ASJ
5259static int i40e_vsi_clear(struct i40e_vsi *vsi);
5260
41c445ff 5261/**
cbf61325 5262 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
41c445ff
JB
5263 * @pf: board private structure
5264 **/
cbf61325 5265static void i40e_fdir_sb_setup(struct i40e_pf *pf)
41c445ff
JB
5266{
5267 struct i40e_vsi *vsi;
8a9eb7d3 5268 int i;
41c445ff 5269
cbf61325 5270 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
41c445ff
JB
5271 return;
5272
cbf61325 5273 /* find existing VSI and see if it needs configuring */
41c445ff 5274 vsi = NULL;
cbf61325
ASJ
5275 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
5276 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
41c445ff 5277 vsi = pf->vsi[i];
cbf61325
ASJ
5278 break;
5279 }
5280 }
5281
5282 /* create a new VSI if none exists */
41c445ff 5283 if (!vsi) {
cbf61325
ASJ
5284 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
5285 pf->vsi[pf->lan_vsi]->seid, 0);
41c445ff
JB
5286 if (!vsi) {
5287 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
8a9eb7d3
SN
5288 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
5289 return;
41c445ff 5290 }
41c445ff 5291 }
cbf61325 5292
8a9eb7d3 5293 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
41c445ff
JB
5294}
5295
5296/**
5297 * i40e_fdir_teardown - release the Flow Director resources
5298 * @pf: board private structure
5299 **/
5300static void i40e_fdir_teardown(struct i40e_pf *pf)
5301{
5302 int i;
5303
17a73f6b 5304 i40e_fdir_filter_exit(pf);
41c445ff
JB
5305 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
5306 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
5307 i40e_vsi_release(pf->vsi[i]);
5308 break;
5309 }
5310 }
5311}
5312
5313/**
f650a38b 5314 * i40e_prep_for_reset - prep for the core to reset
41c445ff
JB
5315 * @pf: board private structure
5316 *
f650a38b
ASJ
5317 * Close up the VFs and other things in prep for a PF reset.
5318 **/
5319static int i40e_prep_for_reset(struct i40e_pf *pf)
41c445ff 5320{
41c445ff
JB
5321 struct i40e_hw *hw = &pf->hw;
5322 i40e_status ret;
5323 u32 v;
5324
5325 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
5326 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
f650a38b 5327 return 0;
41c445ff 5328
69bfb110 5329 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
41c445ff 5330
37f0be6d
ASJ
5331 if (i40e_check_asq_alive(hw))
5332 i40e_vc_notify_reset(pf);
41c445ff
JB
5333
5334 /* quiesce the VSIs and their queues that are not already DOWN */
5335 i40e_pf_quiesce_all_vsi(pf);
5336
5337 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
5338 if (pf->vsi[v])
5339 pf->vsi[v]->seid = 0;
5340 }
5341
5342 i40e_shutdown_adminq(&pf->hw);
5343
f650a38b
ASJ
5344 /* call shutdown HMC */
5345 ret = i40e_shutdown_lan_hmc(hw);
5346 if (ret) {
5347 dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
5348 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
5349 }
5350 return ret;
5351}
5352
5353/**
4dda12e6 5354 * i40e_reset_and_rebuild - reset and rebuild using a saved config
f650a38b 5355 * @pf: board private structure
bc7d338f 5356 * @reinit: if the Main VSI needs to be re-initialized.
f650a38b 5357 **/
bc7d338f 5358static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
f650a38b
ASJ
5359{
5360 struct i40e_driver_version dv;
5361 struct i40e_hw *hw = &pf->hw;
5362 i40e_status ret;
5363 u32 v;
5364
41c445ff
JB
5365 /* Now we wait for GRST to settle out.
5366 * We don't have to delete the VEBs or VSIs from the hw switch
5367 * because the reset will make them disappear.
5368 */
5369 ret = i40e_pf_reset(hw);
b5565400 5370 if (ret) {
41c445ff 5371 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
b5565400
AA
5372 goto end_core_reset;
5373 }
41c445ff
JB
5374 pf->pfr_count++;
5375
5376 if (test_bit(__I40E_DOWN, &pf->state))
5377 goto end_core_reset;
69bfb110 5378 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
41c445ff
JB
5379
5380 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
5381 ret = i40e_init_adminq(&pf->hw);
5382 if (ret) {
5383 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
5384 goto end_core_reset;
5385 }
5386
4eb3f768
SN
5387 /* re-verify the eeprom if we just had an EMP reset */
5388 if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) {
5389 clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
5390 i40e_verify_eeprom(pf);
5391 }
5392
41c445ff
JB
5393 ret = i40e_get_capabilities(pf);
5394 if (ret) {
5395 dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
5396 ret);
5397 goto end_core_reset;
5398 }
5399
41c445ff
JB
5400 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
5401 hw->func_caps.num_rx_qp,
5402 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
5403 if (ret) {
5404 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
5405 goto end_core_reset;
5406 }
5407 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
5408 if (ret) {
5409 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
5410 goto end_core_reset;
5411 }
5412
4e3b35b0
NP
5413#ifdef CONFIG_I40E_DCB
5414 ret = i40e_init_pf_dcb(pf);
5415 if (ret) {
5416 dev_info(&pf->pdev->dev, "init_pf_dcb failed: %d\n", ret);
5417 goto end_core_reset;
5418 }
5419#endif /* CONFIG_I40E_DCB */
5420
41c445ff 5421 /* do basic switch setup */
bc7d338f 5422 ret = i40e_setup_pf_switch(pf, reinit);
41c445ff
JB
5423 if (ret)
5424 goto end_core_reset;
5425
5426 /* Rebuild the VSIs and VEBs that existed before reset.
5427 * They are still in our local switch element arrays, so only
5428 * need to rebuild the switch model in the HW.
5429 *
5430 * If there were VEBs but the reconstitution failed, we'll try
5431 * to recover minimal use by getting the basic PF VSI working.
5432 */
5433 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
69bfb110 5434 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
41c445ff
JB
5435 /* find the one VEB connected to the MAC, and find orphans */
5436 for (v = 0; v < I40E_MAX_VEB; v++) {
5437 if (!pf->veb[v])
5438 continue;
5439
5440 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
5441 pf->veb[v]->uplink_seid == 0) {
5442 ret = i40e_reconstitute_veb(pf->veb[v]);
5443
5444 if (!ret)
5445 continue;
5446
5447 /* If Main VEB failed, we're in deep doodoo,
5448 * so give up rebuilding the switch and set up
5449 * for minimal rebuild of PF VSI.
5450 * If orphan failed, we'll report the error
5451 * but try to keep going.
5452 */
5453 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
5454 dev_info(&pf->pdev->dev,
5455 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
5456 ret);
5457 pf->vsi[pf->lan_vsi]->uplink_seid
5458 = pf->mac_seid;
5459 break;
5460 } else if (pf->veb[v]->uplink_seid == 0) {
5461 dev_info(&pf->pdev->dev,
5462 "rebuild of orphan VEB failed: %d\n",
5463 ret);
5464 }
5465 }
5466 }
5467 }
5468
5469 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
5470 dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
5471 /* no VEB, so rebuild only the Main VSI */
5472 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
5473 if (ret) {
5474 dev_info(&pf->pdev->dev,
5475 "rebuild of Main VSI failed: %d\n", ret);
5476 goto end_core_reset;
5477 }
5478 }
5479
5480 /* reinit the misc interrupt */
5481 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5482 ret = i40e_setup_misc_vector(pf);
5483
5484 /* restart the VSIs that were rebuilt and running before the reset */
5485 i40e_pf_unquiesce_all_vsi(pf);
5486
69f64b2b
MW
5487 if (pf->num_alloc_vfs) {
5488 for (v = 0; v < pf->num_alloc_vfs; v++)
5489 i40e_reset_vf(&pf->vf[v], true);
5490 }
5491
41c445ff
JB
5492 /* tell the firmware that we're starting */
5493 dv.major_version = DRV_VERSION_MAJOR;
5494 dv.minor_version = DRV_VERSION_MINOR;
5495 dv.build_version = DRV_VERSION_BUILD;
5496 dv.subbuild_version = 0;
5497 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
5498
69bfb110 5499 dev_info(&pf->pdev->dev, "reset complete\n");
41c445ff
JB
5500
5501end_core_reset:
5502 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
5503}
5504
f650a38b
ASJ
5505/**
5506 * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
5507 * @pf: board private structure
5508 *
5509 * Close up the VFs and other things in prep for a Core Reset,
5510 * then get ready to rebuild the world.
5511 **/
5512static void i40e_handle_reset_warning(struct i40e_pf *pf)
5513{
5514 i40e_status ret;
5515
5516 ret = i40e_prep_for_reset(pf);
5517 if (!ret)
bc7d338f 5518 i40e_reset_and_rebuild(pf, false);
f650a38b
ASJ
5519}
5520
41c445ff
JB
5521/**
5522 * i40e_handle_mdd_event
5523 * @pf: pointer to the pf structure
5524 *
5525 * Called from the MDD irq handler to identify possibly malicious vfs
5526 **/
5527static void i40e_handle_mdd_event(struct i40e_pf *pf)
5528{
5529 struct i40e_hw *hw = &pf->hw;
5530 bool mdd_detected = false;
5531 struct i40e_vf *vf;
5532 u32 reg;
5533 int i;
5534
5535 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
5536 return;
5537
5538 /* find what triggered the MDD event */
5539 reg = rd32(hw, I40E_GL_MDET_TX);
5540 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
5541 u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK)
5542 >> I40E_GL_MDET_TX_FUNCTION_SHIFT;
5543 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK)
5544 >> I40E_GL_MDET_TX_EVENT_SHIFT;
5545 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
5546 >> I40E_GL_MDET_TX_QUEUE_SHIFT;
5547 dev_info(&pf->pdev->dev,
f29eaa3d 5548 "Malicious Driver Detection event 0x%02x on TX queue %d of function 0x%02x\n",
41c445ff
JB
5549 event, queue, func);
5550 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
5551 mdd_detected = true;
5552 }
5553 reg = rd32(hw, I40E_GL_MDET_RX);
5554 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
5555 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK)
5556 >> I40E_GL_MDET_RX_FUNCTION_SHIFT;
5557 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK)
5558 >> I40E_GL_MDET_RX_EVENT_SHIFT;
5559 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
5560 >> I40E_GL_MDET_RX_QUEUE_SHIFT;
5561 dev_info(&pf->pdev->dev,
f29eaa3d 5562 "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
41c445ff
JB
5563 event, queue, func);
5564 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
5565 mdd_detected = true;
5566 }
5567
5568 /* see if one of the VFs needs its hand slapped */
5569 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
5570 vf = &(pf->vf[i]);
5571 reg = rd32(hw, I40E_VP_MDET_TX(i));
5572 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
5573 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
5574 vf->num_mdd_events++;
5575 dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i);
5576 }
5577
5578 reg = rd32(hw, I40E_VP_MDET_RX(i));
5579 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
5580 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
5581 vf->num_mdd_events++;
5582 dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i);
5583 }
5584
5585 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
5586 dev_info(&pf->pdev->dev,
5587 "Too many MDD events on VF %d, disabled\n", i);
5588 dev_info(&pf->pdev->dev,
5589 "Use PF Control I/F to re-enable the VF\n");
5590 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
5591 }
5592 }
5593
5594 /* re-enable mdd interrupt cause */
5595 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
5596 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
5597 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
5598 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
5599 i40e_flush(hw);
5600}
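/* Illustrative sketch, not part of the driver: the handler above extracts the
 * function, event and queue fields from one 32-bit MDET register with the
 * usual mask-then-shift idiom.  A minimal standalone version, with made-up
 * field positions (the EXAMPLE_* names are hypothetical), looks like this:
 */
#if 0
#include <stdint.h>

#define EXAMPLE_FIELD_SHIFT 4
#define EXAMPLE_FIELD_MASK  (0xFFu << EXAMPLE_FIELD_SHIFT)

static inline uint8_t example_get_field(uint32_t reg)
{
	/* mask first to isolate the bits, then shift them down to bit 0 */
	return (uint8_t)((reg & EXAMPLE_FIELD_MASK) >> EXAMPLE_FIELD_SHIFT);
}
#endif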
5601
a1c9a9d9
JK
5602#ifdef CONFIG_I40E_VXLAN
5603/**
5604 * i40e_sync_vxlan_filters_subtask - Sync pending VXLAN UDP ports with HW
5605 * @pf: board private structure
5606 **/
5607static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
5608{
a1c9a9d9
JK
5609 struct i40e_hw *hw = &pf->hw;
5610 i40e_status ret;
5611 u8 filter_index;
5612 __be16 port;
5613 int i;
5614
5615 if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
5616 return;
5617
5618 pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
5619
5620 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
5621 if (pf->pending_vxlan_bitmap & (1 << i)) {
5622 pf->pending_vxlan_bitmap &= ~(1 << i);
5623 port = pf->vxlan_ports[i];
5624 ret = port ?
5625 i40e_aq_add_udp_tunnel(hw, ntohs(port),
a1c9a9d9
JK
5626 I40E_AQC_TUNNEL_TYPE_VXLAN,
5627 &filter_index, NULL)
5628 : i40e_aq_del_udp_tunnel(hw, i, NULL);
5629
5630 if (ret) {
5631 dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
5632 port ? "adding" : "deleting",
5633 ntohs(port), port ? i : i);
5634
5635 pf->vxlan_ports[i] = 0;
5636 } else {
5637 dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
5638 port ? "Added" : "Deleted",
5639 ntohs(port), port ? i : filter_index);
5640 }
5641 }
5642 }
5643}
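/* Illustrative sketch, not part of the driver: the subtask above drains a
 * "pending" bitmap that i40e_add_vxlan_port()/i40e_del_vxlan_port() set bits
 * in, issuing one AdminQ add or delete per pending slot.  Stripped of the
 * AdminQ calls, the drain pattern (the example_* names are hypothetical) is:
 */
#if 0
#include <stdint.h>

static void example_drain_pending(uint32_t *pending, uint16_t *ports,
				  int nports,
				  void (*add)(uint16_t port, int idx),
				  void (*del)(int idx))
{
	int i;

	for (i = 0; i < nports; i++) {
		if (!(*pending & (1u << i)))
			continue;
		*pending &= ~(1u << i);		/* clear before acting on it */
		if (ports[i])
			add(ports[i], i);	/* non-zero port: add tunnel */
		else
			del(i);			/* zero port: delete filter */
	}
}
#endif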
5644
5645#endif
41c445ff
JB
5646/**
5647 * i40e_service_task - Run the driver's async subtasks
5648 * @work: pointer to work_struct containing our data
5649 **/
5650static void i40e_service_task(struct work_struct *work)
5651{
5652 struct i40e_pf *pf = container_of(work,
5653 struct i40e_pf,
5654 service_task);
5655 unsigned long start_time = jiffies;
5656
5657 i40e_reset_subtask(pf);
5658 i40e_handle_mdd_event(pf);
5659 i40e_vc_process_vflr_event(pf);
5660 i40e_watchdog_subtask(pf);
5661 i40e_fdir_reinit_subtask(pf);
5662 i40e_check_hang_subtask(pf);
5663 i40e_sync_filters_subtask(pf);
a1c9a9d9
JK
5664#ifdef CONFIG_I40E_VXLAN
5665 i40e_sync_vxlan_filters_subtask(pf);
5666#endif
41c445ff
JB
5667 i40e_clean_adminq_subtask(pf);
5668
5669 i40e_service_event_complete(pf);
5670
5671 /* If the tasks have taken longer than one timer cycle or there
5672 * is more work to be done, reschedule the service task now
5673 * rather than wait for the timer to tick again.
5674 */
5675 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
5676 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
5677 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
5678 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
5679 i40e_service_event_schedule(pf);
5680}
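/* Illustrative sketch, not part of the driver: the reschedule check at the end
 * of the service task relies on time_after(), a wrap-safe jiffies comparison.
 * Its effect can be reproduced with a signed difference (the example_* name is
 * hypothetical):
 */
#if 0
static int example_took_too_long(unsigned long start, unsigned long now,
				 unsigned long period)
{
	/* same as time_after(now, start + period): the unsigned subtraction
	 * wraps correctly and the signed compare gives the ordering
	 */
	return (long)(now - (start + period)) > 0;
}
#endif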
5681
5682/**
5683 * i40e_service_timer - timer callback
5684 * @data: pointer to PF struct
5685 **/
5686static void i40e_service_timer(unsigned long data)
5687{
5688 struct i40e_pf *pf = (struct i40e_pf *)data;
5689
5690 mod_timer(&pf->service_timer,
5691 round_jiffies(jiffies + pf->service_timer_period));
5692 i40e_service_event_schedule(pf);
5693}
5694
5695/**
5696 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
5697 * @vsi: the VSI being configured
5698 **/
5699static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
5700{
5701 struct i40e_pf *pf = vsi->back;
5702
5703 switch (vsi->type) {
5704 case I40E_VSI_MAIN:
5705 vsi->alloc_queue_pairs = pf->num_lan_qps;
5706 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
5707 I40E_REQ_DESCRIPTOR_MULTIPLE);
5708 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5709 vsi->num_q_vectors = pf->num_lan_msix;
5710 else
5711 vsi->num_q_vectors = 1;
5712
5713 break;
5714
5715 case I40E_VSI_FDIR:
5716 vsi->alloc_queue_pairs = 1;
5717 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
5718 I40E_REQ_DESCRIPTOR_MULTIPLE);
5719 vsi->num_q_vectors = 1;
5720 break;
5721
5722 case I40E_VSI_VMDQ2:
5723 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
5724 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
5725 I40E_REQ_DESCRIPTOR_MULTIPLE);
5726 vsi->num_q_vectors = pf->num_vmdq_msix;
5727 break;
5728
5729 case I40E_VSI_SRIOV:
5730 vsi->alloc_queue_pairs = pf->num_vf_qps;
5731 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
5732 I40E_REQ_DESCRIPTOR_MULTIPLE);
5733 break;
5734
5735 default:
5736 WARN_ON(1);
5737 return -ENODATA;
5738 }
5739
5740 return 0;
5741}
5742
f650a38b
ASJ
5743/**
5744 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
5745 * @vsi: pointer to the VSI
bc7d338f 5746 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
f650a38b
ASJ
5747 *
5748 * On error: returns error code (negative)
5749 * On success: returns 0
5750 **/
bc7d338f 5751static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
f650a38b
ASJ
5752{
5753 int size;
5754 int ret = 0;
5755
ac6c5e3d 5756 /* allocate memory for both Tx and Rx ring pointers */
f650a38b
ASJ
5757 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
5758 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
5759 if (!vsi->tx_rings)
5760 return -ENOMEM;
f650a38b
ASJ
5761 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
5762
bc7d338f
ASJ
5763 if (alloc_qvectors) {
5764 /* allocate memory for q_vector pointers */
5765 size = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
5766 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
5767 if (!vsi->q_vectors) {
5768 ret = -ENOMEM;
5769 goto err_vectors;
5770 }
f650a38b
ASJ
5771 }
5772 return ret;
5773
5774err_vectors:
5775 kfree(vsi->tx_rings);
5776 return ret;
5777}
5778
41c445ff
JB
5779/**
5780 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
5781 * @pf: board private structure
5782 * @type: type of VSI
5783 *
5784 * On error: returns error code (negative)
5785 * On success: returns vsi index in PF (positive)
5786 **/
5787static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
5788{
5789 int ret = -ENODEV;
5790 struct i40e_vsi *vsi;
5791 int vsi_idx;
5792 int i;
5793
5794 /* Need to protect the allocation of the VSIs at the PF level */
5795 mutex_lock(&pf->switch_mutex);
5796
5797 /* VSI list may be fragmented if VSI creation/destruction has
5798 * been happening. We can afford to do a quick scan to look
5799 * for any free VSIs in the list.
5800 *
5801 * find next empty vsi slot, looping back around if necessary
5802 */
5803 i = pf->next_vsi;
5804 while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
5805 i++;
5806 if (i >= pf->hw.func_caps.num_vsis) {
5807 i = 0;
5808 while (i < pf->next_vsi && pf->vsi[i])
5809 i++;
5810 }
5811
5812 if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
5813 vsi_idx = i; /* Found one! */
5814 } else {
5815 ret = -ENODEV;
493fb300 5816 goto unlock_pf; /* out of VSI slots! */
41c445ff
JB
5817 }
5818 pf->next_vsi = ++i;
5819
5820 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
5821 if (!vsi) {
5822 ret = -ENOMEM;
493fb300 5823 goto unlock_pf;
41c445ff
JB
5824 }
5825 vsi->type = type;
5826 vsi->back = pf;
5827 set_bit(__I40E_DOWN, &vsi->state);
5828 vsi->flags = 0;
5829 vsi->idx = vsi_idx;
5830 vsi->rx_itr_setting = pf->rx_itr_default;
5831 vsi->tx_itr_setting = pf->tx_itr_default;
5832 vsi->netdev_registered = false;
5833 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
5834 INIT_LIST_HEAD(&vsi->mac_filter_list);
5835
9f65e15b
AD
5836 ret = i40e_set_num_rings_in_vsi(vsi);
5837 if (ret)
5838 goto err_rings;
5839
bc7d338f 5840 ret = i40e_vsi_alloc_arrays(vsi, true);
f650a38b 5841 if (ret)
9f65e15b 5842 goto err_rings;
493fb300 5843
41c445ff
JB
5844 /* Setup default MSIX irq handler for VSI */
5845 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
5846
5847 pf->vsi[vsi_idx] = vsi;
5848 ret = vsi_idx;
493fb300
AD
5849 goto unlock_pf;
5850
9f65e15b 5851err_rings:
493fb300
AD
5852 pf->next_vsi = i - 1;
5853 kfree(vsi);
5854unlock_pf:
41c445ff
JB
5855 mutex_unlock(&pf->switch_mutex);
5856 return ret;
5857}
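/* Illustrative sketch, not part of the driver: the free-slot search above is a
 * wrap-around linear scan that starts at the saved hint (pf->next_vsi) and, if
 * it runs off the end of the array, restarts from index 0 up to the hint.  In
 * isolation (the example_* name is hypothetical; hint is assumed < nslots):
 */
#if 0
static int example_find_free_slot(void **slots, int nslots, int hint)
{
	int i = hint;

	while (i < nslots && slots[i])
		i++;
	if (i >= nslots) {		/* wrap around and rescan the front */
		i = 0;
		while (i < hint && slots[i])
			i++;
	}
	return (i < nslots && !slots[i]) ? i : -1;	/* -1: nothing free */
}
#endif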
5858
f650a38b
ASJ
5859/**
5860 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
5861 * @vsi: pointer to the VSI
bc7d338f 5862 * @free_qvectors: a bool to specify if q_vectors need to be freed.
f650a38b
ASJ
5863 *
5864 * Frees the combined Tx/Rx ring pointer array and, when requested,
5865 * the q_vector pointer array.  Nothing is returned.
5866 **/
bc7d338f 5867static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
f650a38b
ASJ
5868{
5869 /* free the ring and vector containers */
bc7d338f
ASJ
5870 if (free_qvectors) {
5871 kfree(vsi->q_vectors);
5872 vsi->q_vectors = NULL;
5873 }
f650a38b
ASJ
5874 kfree(vsi->tx_rings);
5875 vsi->tx_rings = NULL;
5876 vsi->rx_rings = NULL;
5877}
5878
41c445ff
JB
5879/**
5880 * i40e_vsi_clear - Deallocate the VSI provided
5881 * @vsi: the VSI being un-configured
5882 **/
5883static int i40e_vsi_clear(struct i40e_vsi *vsi)
5884{
5885 struct i40e_pf *pf;
5886
5887 if (!vsi)
5888 return 0;
5889
5890 if (!vsi->back)
5891 goto free_vsi;
5892 pf = vsi->back;
5893
5894 mutex_lock(&pf->switch_mutex);
5895 if (!pf->vsi[vsi->idx]) {
5896 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
5897 vsi->idx, vsi->idx, vsi, vsi->type);
5898 goto unlock_vsi;
5899 }
5900
5901 if (pf->vsi[vsi->idx] != vsi) {
5902 dev_err(&pf->pdev->dev,
5903 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
5904 pf->vsi[vsi->idx]->idx,
5905 pf->vsi[vsi->idx],
5906 pf->vsi[vsi->idx]->type,
5907 vsi->idx, vsi, vsi->type);
5908 goto unlock_vsi;
5909 }
5910
5911 /* updates the pf for this cleared vsi */
5912 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
5913 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
5914
bc7d338f 5915 i40e_vsi_free_arrays(vsi, true);
493fb300 5916
41c445ff
JB
5917 pf->vsi[vsi->idx] = NULL;
5918 if (vsi->idx < pf->next_vsi)
5919 pf->next_vsi = vsi->idx;
5920
5921unlock_vsi:
5922 mutex_unlock(&pf->switch_mutex);
5923free_vsi:
5924 kfree(vsi);
5925
5926 return 0;
5927}
5928
9f65e15b
AD
5929/**
5930 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
5931 * @vsi: the VSI being cleaned
5932 **/
be1d5eea 5933static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
9f65e15b
AD
5934{
5935 int i;
5936
8e9dca53 5937 if (vsi->tx_rings && vsi->tx_rings[0]) {
d7397644 5938 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
00403f04
MW
5939 kfree_rcu(vsi->tx_rings[i], rcu);
5940 vsi->tx_rings[i] = NULL;
5941 vsi->rx_rings[i] = NULL;
5942 }
be1d5eea 5943 }
9f65e15b
AD
5944}
5945
41c445ff
JB
5946/**
5947 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
5948 * @vsi: the VSI being configured
5949 **/
5950static int i40e_alloc_rings(struct i40e_vsi *vsi)
5951{
e7046ee1 5952 struct i40e_ring *tx_ring, *rx_ring;
41c445ff 5953 struct i40e_pf *pf = vsi->back;
41c445ff
JB
5954 int i;
5955
41c445ff 5956 /* Set basic values in the rings to be used later during open() */
d7397644 5957 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
ac6c5e3d 5958 /* allocate space for both Tx and Rx in one shot */
9f65e15b
AD
5959 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
5960 if (!tx_ring)
5961 goto err_out;
41c445ff
JB
5962
5963 tx_ring->queue_index = i;
5964 tx_ring->reg_idx = vsi->base_queue + i;
5965 tx_ring->ring_active = false;
5966 tx_ring->vsi = vsi;
5967 tx_ring->netdev = vsi->netdev;
5968 tx_ring->dev = &pf->pdev->dev;
5969 tx_ring->count = vsi->num_desc;
5970 tx_ring->size = 0;
5971 tx_ring->dcb_tc = 0;
9f65e15b 5972 vsi->tx_rings[i] = tx_ring;
41c445ff 5973
9f65e15b 5974 rx_ring = &tx_ring[1];
41c445ff
JB
5975 rx_ring->queue_index = i;
5976 rx_ring->reg_idx = vsi->base_queue + i;
5977 rx_ring->ring_active = false;
5978 rx_ring->vsi = vsi;
5979 rx_ring->netdev = vsi->netdev;
5980 rx_ring->dev = &pf->pdev->dev;
5981 rx_ring->count = vsi->num_desc;
5982 rx_ring->size = 0;
5983 rx_ring->dcb_tc = 0;
5984 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
5985 set_ring_16byte_desc_enabled(rx_ring);
5986 else
5987 clear_ring_16byte_desc_enabled(rx_ring);
9f65e15b 5988 vsi->rx_rings[i] = rx_ring;
41c445ff
JB
5989 }
5990
5991 return 0;
9f65e15b
AD
5992
5993err_out:
5994 i40e_vsi_clear_rings(vsi);
5995 return -ENOMEM;
41c445ff
JB
5996}
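/* Illustrative sketch, not part of the driver: i40e_alloc_rings() above makes
 * a single allocation that holds both rings of a queue pair and points the Rx
 * ring at the second element ("rx_ring = &tx_ring[1]").  The idiom in
 * isolation, with a hypothetical ring type and names:
 */
#if 0
#include <stdlib.h>

struct example_ring {
	int queue_index;
};

static int example_alloc_ring_pair(struct example_ring **tx,
				   struct example_ring **rx)
{
	/* one zeroed allocation sized for two rings: [0] is Tx, [1] is Rx */
	struct example_ring *pair = calloc(2, sizeof(*pair));

	if (!pair)
		return -1;
	*tx = &pair[0];
	*rx = &pair[1];
	return 0;
}
#endif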
5997
5998/**
5999 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
6000 * @pf: board private structure
6001 * @vectors: the number of MSI-X vectors to request
6002 *
6003 * Returns the number of vectors reserved, or error
6004 **/
6005static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
6006{
7b37f376
AG
6007 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
6008 I40E_MIN_MSIX, vectors);
6009 if (vectors < 0) {
41c445ff 6010 dev_info(&pf->pdev->dev,
7b37f376 6011 "MSI-X vector reservation failed: %d\n", vectors);
41c445ff
JB
6012 vectors = 0;
6013 }
6014
7b37f376
AG
6015 pf->num_msix_entries = vectors;
6016
41c445ff
JB
6017 return vectors;
6018}
6019
6020/**
6021 * i40e_init_msix - Setup the MSIX capability
6022 * @pf: board private structure
6023 *
6024 * Work with the OS to set up the MSIX vectors needed.
6025 *
6026 * Returns 0 on success, negative on failure
6027 **/
6028static int i40e_init_msix(struct i40e_pf *pf)
6029{
6030 i40e_status err = 0;
6031 struct i40e_hw *hw = &pf->hw;
6032 int v_budget, i;
6033 int vec;
6034
6035 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
6036 return -ENODEV;
6037
6038 /* The number of vectors we'll request will be made up of:
6039 * - Add 1 for "other" cause for Admin Queue events, etc.
6040 * - The number of LAN queue pairs
f8ff1464
ASJ
6041 * - Queues being used for RSS.
6042 * We don't need as many as max_rss_size vectors;
6043 * we use rss_size instead in the calculation since that
6044 * is governed by the number of CPUs in the system.
6045 * - assumes symmetric Tx/Rx pairing
41c445ff
JB
6046 * - The number of VMDq pairs
6047 * Once we count this up, try the request.
6048 *
6049 * If we can't get what we want, we'll simplify to nearly nothing
6050 * and try again. If that still fails, we punt.
6051 */
f8ff1464 6052 pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
41c445ff
JB
6053 pf->num_vmdq_msix = pf->num_vmdq_qps;
6054 v_budget = 1 + pf->num_lan_msix;
6055 v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
60ea5f83 6056 if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
41c445ff
JB
6057 v_budget++;
6058
6059 /* Scale down if necessary, and the rings will share vectors */
6060 v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);
6061
6062 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
6063 GFP_KERNEL);
6064 if (!pf->msix_entries)
6065 return -ENOMEM;
6066
6067 for (i = 0; i < v_budget; i++)
6068 pf->msix_entries[i].entry = i;
6069 vec = i40e_reserve_msix_vectors(pf, v_budget);
6070 if (vec < I40E_MIN_MSIX) {
6071 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
6072 kfree(pf->msix_entries);
6073 pf->msix_entries = NULL;
6074 return -ENODEV;
6075
6076 } else if (vec == I40E_MIN_MSIX) {
6077 /* Adjust for minimal MSIX use */
77fa28be 6078 dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n");
41c445ff
JB
6079 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
6080 pf->num_vmdq_vsis = 0;
6081 pf->num_vmdq_qps = 0;
6082 pf->num_vmdq_msix = 0;
6083 pf->num_lan_qps = 1;
6084 pf->num_lan_msix = 1;
6085
6086 } else if (vec != v_budget) {
6087 /* Scale vector usage down */
6088 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
6089 vec--; /* reserve the misc vector */
6090
6091 /* partition out the remaining vectors */
6092 switch (vec) {
6093 case 2:
6094 pf->num_vmdq_vsis = 1;
6095 pf->num_lan_msix = 1;
6096 break;
6097 case 3:
6098 pf->num_vmdq_vsis = 1;
6099 pf->num_lan_msix = 2;
6100 break;
6101 default:
6102 pf->num_lan_msix = min_t(int, (vec / 2),
6103 pf->num_lan_qps);
6104 pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
6105 I40E_DEFAULT_NUM_VMDQ_VSI);
6106 break;
6107 }
6108 }
6109
6110 return err;
6111}
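/* Illustrative sketch, not part of the driver: the vector budget computed at
 * the top of i40e_init_msix() is "1 misc vector + LAN vectors + VMDq VSIs *
 * vectors per VMDq VSI (+1 when sideband Flow Director is on)", capped by the
 * MSI-X count reported in the function capabilities.  As plain arithmetic
 * (the example_* name is hypothetical):
 */
#if 0
static int example_msix_budget(int lan_msix, int vmdq_vsis, int vmdq_msix,
			       int fd_sb_enabled, int hw_max_vectors)
{
	int budget = 1 + lan_msix + vmdq_vsis * vmdq_msix;

	if (fd_sb_enabled)
		budget++;
	return budget < hw_max_vectors ? budget : hw_max_vectors;
}
#endif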
6112
493fb300 6113/**
90e04070 6114 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
493fb300
AD
6115 * @vsi: the VSI being configured
6116 * @v_idx: index of the vector in the vsi struct
6117 *
6118 * We allocate one q_vector. If allocation fails we return -ENOMEM.
6119 **/
90e04070 6120static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
493fb300
AD
6121{
6122 struct i40e_q_vector *q_vector;
6123
6124 /* allocate q_vector */
6125 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
6126 if (!q_vector)
6127 return -ENOMEM;
6128
6129 q_vector->vsi = vsi;
6130 q_vector->v_idx = v_idx;
6131 cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
6132 if (vsi->netdev)
6133 netif_napi_add(vsi->netdev, &q_vector->napi,
6134 i40e_napi_poll, vsi->work_limit);
6135
cd0b6fa6
AD
6136 q_vector->rx.latency_range = I40E_LOW_LATENCY;
6137 q_vector->tx.latency_range = I40E_LOW_LATENCY;
6138
493fb300
AD
6139 /* tie q_vector and vsi together */
6140 vsi->q_vectors[v_idx] = q_vector;
6141
6142 return 0;
6143}
6144
41c445ff 6145/**
90e04070 6146 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
41c445ff
JB
6147 * @vsi: the VSI being configured
6148 *
6149 * We allocate one q_vector per queue interrupt. If allocation fails we
6150 * return -ENOMEM.
6151 **/
90e04070 6152static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
41c445ff
JB
6153{
6154 struct i40e_pf *pf = vsi->back;
6155 int v_idx, num_q_vectors;
493fb300 6156 int err;
41c445ff
JB
6157
6158 /* if not MSIX, give the one vector only to the LAN VSI */
6159 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6160 num_q_vectors = vsi->num_q_vectors;
6161 else if (vsi == pf->vsi[pf->lan_vsi])
6162 num_q_vectors = 1;
6163 else
6164 return -EINVAL;
6165
41c445ff 6166 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
90e04070 6167 err = i40e_vsi_alloc_q_vector(vsi, v_idx);
493fb300
AD
6168 if (err)
6169 goto err_out;
41c445ff
JB
6170 }
6171
6172 return 0;
493fb300
AD
6173
6174err_out:
6175 while (v_idx--)
6176 i40e_free_q_vector(vsi, v_idx);
6177
6178 return err;
41c445ff
JB
6179}
6180
6181/**
6182 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
6183 * @pf: board private structure to initialize
6184 **/
6185static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
6186{
6187 int err = 0;
6188
6189 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
6190 err = i40e_init_msix(pf);
6191 if (err) {
60ea5f83
JB
6192 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
6193 I40E_FLAG_RSS_ENABLED |
6194 I40E_FLAG_DCB_ENABLED |
6195 I40E_FLAG_SRIOV_ENABLED |
6196 I40E_FLAG_FD_SB_ENABLED |
6197 I40E_FLAG_FD_ATR_ENABLED |
6198 I40E_FLAG_VMDQ_ENABLED);
41c445ff
JB
6199
6200 /* rework the queue expectations without MSIX */
6201 i40e_determine_queue_usage(pf);
6202 }
6203 }
6204
6205 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
6206 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
77fa28be 6207 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
41c445ff
JB
6208 err = pci_enable_msi(pf->pdev);
6209 if (err) {
958a3e3b 6210 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
41c445ff
JB
6211 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
6212 }
6213 }
6214
958a3e3b 6215 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
77fa28be 6216 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
958a3e3b 6217
41c445ff
JB
6218 /* track first vector for misc interrupts */
6219 err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
6220}
6221
6222/**
6223 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
6224 * @pf: board private structure
6225 *
6226 * This sets up the handler for MSIX 0, which is used to manage the
6227 * non-queue interrupts, e.g. AdminQ and errors. This is not used
6228 * when in MSI or Legacy interrupt mode.
6229 **/
6230static int i40e_setup_misc_vector(struct i40e_pf *pf)
6231{
6232 struct i40e_hw *hw = &pf->hw;
6233 int err = 0;
6234
6235 /* Only request the irq if this is the first time through, and
6236 * not when we're rebuilding after a Reset
6237 */
6238 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
6239 err = request_irq(pf->msix_entries[0].vector,
6240 i40e_intr, 0, pf->misc_int_name, pf);
6241 if (err) {
6242 dev_info(&pf->pdev->dev,
77fa28be
CS
6243 "request_irq for %s failed: %d\n",
6244 pf->misc_int_name, err);
41c445ff
JB
6245 return -EFAULT;
6246 }
6247 }
6248
6249 i40e_enable_misc_int_causes(hw);
6250
6251 /* associate no queues to the misc vector */
6252 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
6253 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
6254
6255 i40e_flush(hw);
6256
6257 i40e_irq_dynamic_enable_icr0(pf);
6258
6259 return err;
6260}
6261
6262/**
6263 * i40e_config_rss - Prepare for RSS if used
6264 * @pf: board private structure
6265 **/
6266static int i40e_config_rss(struct i40e_pf *pf)
6267{
41c445ff
JB
6268 /* Set of random keys generated using kernel random number generator */
6269 static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
6270 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
6271 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
6272 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
4617e8c0
ASJ
6273 struct i40e_hw *hw = &pf->hw;
6274 u32 lut = 0;
6275 int i, j;
6276 u64 hena;
41c445ff
JB
6277
6278 /* Fill out hash function seed */
6279 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
6280 wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
6281
6282 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
6283 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
6284 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
12dc4fe3 6285 hena |= I40E_DEFAULT_RSS_HENA;
41c445ff
JB
6286 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
6287 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
6288
6289 /* Populate the LUT with max no. of queues in round robin fashion */
6290 for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
6291
6292 /* The assumption is that lan qp count will be the highest
6293 * qp count for any PF VSI that needs RSS.
6294 * If multiple VSIs need RSS support, all the qp counts
6295 * for those VSIs should be a power of 2 for RSS to work.
6296 * If LAN VSI is the only consumer for RSS then this requirement
6297 * is not necessary.
6298 */
6299 if (j == pf->rss_size)
6300 j = 0;
6301 /* lut = 4-byte sliding window of 4 lut entries */
6302 lut = (lut << 8) | (j &
6303 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
6304 /* On i = 3, we have 4 entries in lut; write to the register */
6305 if ((i & 3) == 3)
6306 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
6307 }
6308 i40e_flush(hw);
6309
6310 return 0;
6311}
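/* Illustrative sketch, not part of the driver: the loop above walks the RSS
 * LUT round-robin over rss_size queues and packs four 8-bit entries into each
 * 32-bit HLUT register, writing on every fourth entry.  In isolation, with a
 * hypothetical write_reg() callback:
 */
#if 0
#include <stdint.h>

static void example_fill_lut(uint32_t table_size, uint32_t rss_size,
			     void (*write_reg)(uint32_t reg_idx, uint32_t val))
{
	uint32_t lut = 0;
	uint32_t i, j;

	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == rss_size)		/* wrap the queue index */
			j = 0;
		lut = (lut << 8) | (j & 0xFF);	/* slide in the newest entry */
		if ((i & 3) == 3)		/* 4 entries per register */
			write_reg(i >> 2, lut);
	}
}
#endif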
6312
f8ff1464
ASJ
6313/**
6314 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
6315 * @pf: board private structure
6316 * @queue_count: the requested queue count for rss.
6317 *
6318 * returns 0 if rss is not enabled, if enabled returns the final rss queue
6319 * count which may be different from the requested queue count.
6320 **/
6321int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
6322{
6323 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
6324 return 0;
6325
6326 queue_count = min_t(int, queue_count, pf->rss_size_max);
6327 queue_count = rounddown_pow_of_two(queue_count);
6328
6329 if (queue_count != pf->rss_size) {
f8ff1464
ASJ
6330 i40e_prep_for_reset(pf);
6331
f8ff1464
ASJ
6332 pf->rss_size = queue_count;
6333
6334 i40e_reset_and_rebuild(pf, true);
6335 i40e_config_rss(pf);
6336 }
6337 dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size);
6338 return pf->rss_size;
6339}
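/* Illustrative sketch, not part of the driver: the clamp above is "no more
 * than rss_size_max, then round down to a power of two"; the kernel's
 * rounddown_pow_of_two() does the second step.  A portable equivalent for
 * n >= 1 (the example_* names are hypothetical):
 */
#if 0
static unsigned int example_rounddown_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while ((p << 1) && (p << 1) <= n)	/* stop before overflowing p */
		p <<= 1;
	return p;
}

static unsigned int example_clamp_rss_queues(unsigned int requested,
					     unsigned int max_queues)
{
	if (requested > max_queues)
		requested = max_queues;
	return example_rounddown_pow_of_two(requested);
}
#endif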
6340
41c445ff
JB
6341/**
6342 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
6343 * @pf: board private structure to initialize
6344 *
6345 * i40e_sw_init initializes the Adapter private data structure.
6346 * Fields are initialized based on PCI device information and
6347 * OS network device settings (MTU size).
6348 **/
6349static int i40e_sw_init(struct i40e_pf *pf)
6350{
6351 int err = 0;
6352 int size;
6353
6354 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
6355 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
2759997b 6356 pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
41c445ff
JB
6357 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
6358 if (I40E_DEBUG_USER & debug)
6359 pf->hw.debug_mask = debug;
6360 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
6361 I40E_DEFAULT_MSG_ENABLE);
6362 }
6363
6364 /* Set default capability flags */
6365 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
6366 I40E_FLAG_MSI_ENABLED |
6367 I40E_FLAG_MSIX_ENABLED |
41c445ff
JB
6368 I40E_FLAG_RX_1BUF_ENABLED;
6369
ca99eb99
MW
6370 /* Set default ITR */
6371 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
6372 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
6373
7134f9ce
JB
6374 /* Depending on PF configurations, it is possible that the RSS
6375 * maximum might end up larger than the available queues
6376 */
41c445ff 6377 pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
7134f9ce
JB
6378 pf->rss_size_max = min_t(int, pf->rss_size_max,
6379 pf->hw.func_caps.num_tx_qp);
41c445ff
JB
6380 if (pf->hw.func_caps.rss) {
6381 pf->flags |= I40E_FLAG_RSS_ENABLED;
bf051a3b 6382 pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
cbf61325 6383 pf->rss_size = rounddown_pow_of_two(pf->rss_size);
41c445ff
JB
6384 } else {
6385 pf->rss_size = 1;
6386 }
6387
2050bc65
CS
6388 /* MFP mode enabled */
6389 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
6390 pf->flags |= I40E_FLAG_MFP_ENABLED;
6391 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
6392 }
6393
cbf61325
ASJ
6394 /* FW/NVM is not yet fixed in this regard */
6395 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
6396 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
6397 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
6398 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
cbf61325 6399 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
60ea5f83 6400 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
cbf61325
ASJ
6401 } else {
6402 dev_info(&pf->pdev->dev,
0b67584f 6403 "Flow Director Sideband mode Disabled in MFP mode\n");
41c445ff 6404 }
cbf61325
ASJ
6405 pf->fdir_pf_filter_count =
6406 pf->hw.func_caps.fd_filters_guaranteed;
6407 pf->hw.fdir_shared_filter_count =
6408 pf->hw.func_caps.fd_filters_best_effort;
41c445ff
JB
6409 }
6410
6411 if (pf->hw.func_caps.vmdq) {
6412 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
6413 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
6414 pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
6415 }
6416
41c445ff
JB
6417#ifdef CONFIG_PCI_IOV
6418 if (pf->hw.func_caps.num_vfs) {
6419 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
6420 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
6421 pf->num_req_vfs = min_t(int,
6422 pf->hw.func_caps.num_vfs,
6423 I40E_MAX_VF_COUNT);
6424 }
6425#endif /* CONFIG_PCI_IOV */
6426 pf->eeprom_version = 0xDEAD;
6427 pf->lan_veb = I40E_NO_VEB;
6428 pf->lan_vsi = I40E_NO_VSI;
6429
6430 /* set up queue assignment tracking */
6431 size = sizeof(struct i40e_lump_tracking)
6432 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
6433 pf->qp_pile = kzalloc(size, GFP_KERNEL);
6434 if (!pf->qp_pile) {
6435 err = -ENOMEM;
6436 goto sw_init_done;
6437 }
6438 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
6439 pf->qp_pile->search_hint = 0;
6440
6441 /* set up vector assignment tracking */
6442 size = sizeof(struct i40e_lump_tracking)
6443 + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
6444 pf->irq_pile = kzalloc(size, GFP_KERNEL);
6445 if (!pf->irq_pile) {
6446 kfree(pf->qp_pile);
6447 err = -ENOMEM;
6448 goto sw_init_done;
6449 }
6450 pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
6451 pf->irq_pile->search_hint = 0;
6452
6453 mutex_init(&pf->switch_mutex);
6454
6455sw_init_done:
6456 return err;
6457}
6458
7c3c288b
ASJ
6459/**
6460 * i40e_set_ntuple - set the ntuple feature flag and take action
6461 * @pf: board private structure to initialize
6462 * @features: the feature set that the stack is suggesting
6463 *
6464 * returns a bool to indicate if reset needs to happen
6465 **/
6466bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
6467{
6468 bool need_reset = false;
6469
6470 /* Check if Flow Director n-tuple support was enabled or disabled. If
6471 * the state changed, we need to reset.
6472 */
6473 if (features & NETIF_F_NTUPLE) {
6474 /* Enable filters and mark for reset */
6475 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6476 need_reset = true;
6477 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
6478 } else {
6479 /* turn off filters, mark for reset and clear SW filter list */
6480 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
6481 need_reset = true;
6482 i40e_fdir_filter_exit(pf);
6483 }
6484 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6485 /* if ATR was disabled it can be re-enabled. */
6486 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
6487 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
6488 }
6489 return need_reset;
6490}
6491
41c445ff
JB
6492/**
6493 * i40e_set_features - set the netdev feature flags
6494 * @netdev: ptr to the netdev being adjusted
6495 * @features: the feature set that the stack is suggesting
6496 **/
6497static int i40e_set_features(struct net_device *netdev,
6498 netdev_features_t features)
6499{
6500 struct i40e_netdev_priv *np = netdev_priv(netdev);
6501 struct i40e_vsi *vsi = np->vsi;
7c3c288b
ASJ
6502 struct i40e_pf *pf = vsi->back;
6503 bool need_reset;
41c445ff
JB
6504
6505 if (features & NETIF_F_HW_VLAN_CTAG_RX)
6506 i40e_vlan_stripping_enable(vsi);
6507 else
6508 i40e_vlan_stripping_disable(vsi);
6509
7c3c288b
ASJ
6510 need_reset = i40e_set_ntuple(pf, features);
6511
6512 if (need_reset)
6513 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
6514
41c445ff
JB
6515 return 0;
6516}
6517
a1c9a9d9
JK
6518#ifdef CONFIG_I40E_VXLAN
6519/**
6520 * i40e_get_vxlan_port_idx - Look up a UDP port that may already be offloaded for Rx
6521 * @pf: board private structure
6522 * @port: The UDP port to look up
6523 *
6524 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
6525 **/
6526static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
6527{
6528 u8 i;
6529
6530 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
6531 if (pf->vxlan_ports[i] == port)
6532 return i;
6533 }
6534
6535 return i;
6536}
6537
6538/**
6539 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
6540 * @netdev: This physical port's netdev
6541 * @sa_family: Socket Family that VXLAN is notifying us about
6542 * @port: New UDP port number that VXLAN started listening to
6543 **/
6544static void i40e_add_vxlan_port(struct net_device *netdev,
6545 sa_family_t sa_family, __be16 port)
6546{
6547 struct i40e_netdev_priv *np = netdev_priv(netdev);
6548 struct i40e_vsi *vsi = np->vsi;
6549 struct i40e_pf *pf = vsi->back;
6550 u8 next_idx;
6551 u8 idx;
6552
6553 if (sa_family == AF_INET6)
6554 return;
6555
6556 idx = i40e_get_vxlan_port_idx(pf, port);
6557
6558 /* Check if port already exists */
6559 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
6560 netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
6561 return;
6562 }
6563
6564 /* Now check if there is space to add the new port */
6565 next_idx = i40e_get_vxlan_port_idx(pf, 0);
6566
6567 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
6568 netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
6569 ntohs(port));
6570 return;
6571 }
6572
6573 /* New port: add it and mark its index in the bitmap */
6574 pf->vxlan_ports[next_idx] = port;
6575 pf->pending_vxlan_bitmap |= (1 << next_idx);
6576
6577 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
6578}
6579
6580/**
6581 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
6582 * @netdev: This physical port's netdev
6583 * @sa_family: Socket Family that VXLAN is notifying us about
6584 * @port: UDP port number that VXLAN stopped listening to
6585 **/
6586static void i40e_del_vxlan_port(struct net_device *netdev,
6587 sa_family_t sa_family, __be16 port)
6588{
6589 struct i40e_netdev_priv *np = netdev_priv(netdev);
6590 struct i40e_vsi *vsi = np->vsi;
6591 struct i40e_pf *pf = vsi->back;
6592 u8 idx;
6593
6594 if (sa_family == AF_INET6)
6595 return;
6596
6597 idx = i40e_get_vxlan_port_idx(pf, port);
6598
6599 /* Check if port already exists */
6600 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
6601 /* if port exists, set it to 0 (mark for deletion)
6602 * and make it pending
6603 */
6604 pf->vxlan_ports[idx] = 0;
6605
6606 pf->pending_vxlan_bitmap |= (1 << idx);
6607
6608 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
6609 } else {
6610 netdev_warn(netdev, "Port %d was not found, not deleting\n",
6611 ntohs(port));
6612 }
6613}
6614
6615#endif
4ba0dea5
GR
6616#ifdef HAVE_FDB_OPS
6617#ifdef USE_CONST_DEV_UC_CHAR
6618static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
6619 struct net_device *dev,
6620 const unsigned char *addr,
6621 u16 flags)
6622#else
6623static int i40e_ndo_fdb_add(struct ndmsg *ndm,
6624 struct net_device *dev,
6625 unsigned char *addr,
6626 u16 flags)
6627#endif
6628{
6629 struct i40e_netdev_priv *np = netdev_priv(dev);
6630 struct i40e_pf *pf = np->vsi->back;
6631 int err = 0;
6632
6633 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
6634 return -EOPNOTSUPP;
6635
6636 /* Hardware does not support aging addresses so if a
6637 * ndm_state is given only allow permanent addresses
6638 */
6639 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
6640 netdev_info(dev, "FDB only supports static addresses\n");
6641 return -EINVAL;
6642 }
6643
6644 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
6645 err = dev_uc_add_excl(dev, addr);
6646 else if (is_multicast_ether_addr(addr))
6647 err = dev_mc_add_excl(dev, addr);
6648 else
6649 err = -EINVAL;
6650
6651 /* Only return duplicate errors if NLM_F_EXCL is set */
6652 if (err == -EEXIST && !(flags & NLM_F_EXCL))
6653 err = 0;
6654
6655 return err;
6656}
6657
6658#ifndef USE_DEFAULT_FDB_DEL_DUMP
6659#ifdef USE_CONST_DEV_UC_CHAR
6660static int i40e_ndo_fdb_del(struct ndmsg *ndm,
6661 struct net_device *dev,
6662 const unsigned char *addr)
6663#else
6664static int i40e_ndo_fdb_del(struct ndmsg *ndm,
6665 struct net_device *dev,
6666 unsigned char *addr)
6667#endif
6668{
6669 struct i40e_netdev_priv *np = netdev_priv(dev);
6670 struct i40e_pf *pf = np->vsi->back;
6671 int err = -EOPNOTSUPP;
6672
6673 if (ndm->ndm_state & NUD_PERMANENT) {
6674 netdev_info(dev, "FDB only supports static addresses\n");
6675 return -EINVAL;
6676 }
6677
6678 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
6679 if (is_unicast_ether_addr(addr))
6680 err = dev_uc_del(dev, addr);
6681 else if (is_multicast_ether_addr(addr))
6682 err = dev_mc_del(dev, addr);
6683 else
6684 err = -EINVAL;
6685 }
6686
6687 return err;
6688}
6689
6690static int i40e_ndo_fdb_dump(struct sk_buff *skb,
6691 struct netlink_callback *cb,
6692 struct net_device *dev,
6693 int idx)
6694{
6695 struct i40e_netdev_priv *np = netdev_priv(dev);
6696 struct i40e_pf *pf = np->vsi->back;
6697
6698 if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
6699 idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
6700
6701 return idx;
6702}
6703
6704#endif /* USE_DEFAULT_FDB_DEL_DUMP */
6705#endif /* HAVE_FDB_OPS */
41c445ff
JB
6706static const struct net_device_ops i40e_netdev_ops = {
6707 .ndo_open = i40e_open,
6708 .ndo_stop = i40e_close,
6709 .ndo_start_xmit = i40e_lan_xmit_frame,
6710 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
6711 .ndo_set_rx_mode = i40e_set_rx_mode,
6712 .ndo_validate_addr = eth_validate_addr,
6713 .ndo_set_mac_address = i40e_set_mac,
6714 .ndo_change_mtu = i40e_change_mtu,
beb0dff1 6715 .ndo_do_ioctl = i40e_ioctl,
41c445ff
JB
6716 .ndo_tx_timeout = i40e_tx_timeout,
6717 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
6718 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
6719#ifdef CONFIG_NET_POLL_CONTROLLER
6720 .ndo_poll_controller = i40e_netpoll,
6721#endif
6722 .ndo_setup_tc = i40e_setup_tc,
6723 .ndo_set_features = i40e_set_features,
6724 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
6725 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
ed616689 6726 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
41c445ff 6727 .ndo_get_vf_config = i40e_ndo_get_vf_config,
588aefa0 6728 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
a1c9a9d9
JK
6729#ifdef CONFIG_I40E_VXLAN
6730 .ndo_add_vxlan_port = i40e_add_vxlan_port,
6731 .ndo_del_vxlan_port = i40e_del_vxlan_port,
6732#endif
4ba0dea5
GR
6733#ifdef HAVE_FDB_OPS
6734 .ndo_fdb_add = i40e_ndo_fdb_add,
6735#ifndef USE_DEFAULT_FDB_DEL_DUMP
6736 .ndo_fdb_del = i40e_ndo_fdb_del,
6737 .ndo_fdb_dump = i40e_ndo_fdb_dump,
6738#endif
6739#endif
41c445ff
JB
6740};
6741
6742/**
6743 * i40e_config_netdev - Setup the netdev flags
6744 * @vsi: the VSI being configured
6745 *
6746 * Returns 0 on success, negative value on failure
6747 **/
6748static int i40e_config_netdev(struct i40e_vsi *vsi)
6749{
1a10370a 6750 u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
41c445ff
JB
6751 struct i40e_pf *pf = vsi->back;
6752 struct i40e_hw *hw = &pf->hw;
6753 struct i40e_netdev_priv *np;
6754 struct net_device *netdev;
6755 u8 mac_addr[ETH_ALEN];
6756 int etherdev_size;
6757
6758 etherdev_size = sizeof(struct i40e_netdev_priv);
f8ff1464 6759 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
41c445ff
JB
6760 if (!netdev)
6761 return -ENOMEM;
6762
6763 vsi->netdev = netdev;
6764 np = netdev_priv(netdev);
6765 np->vsi = vsi;
6766
d70e941b 6767 netdev->hw_enc_features |= NETIF_F_IP_CSUM |
41c445ff 6768 NETIF_F_GSO_UDP_TUNNEL |
d70e941b 6769 NETIF_F_TSO;
41c445ff
JB
6770
6771 netdev->features = NETIF_F_SG |
6772 NETIF_F_IP_CSUM |
6773 NETIF_F_SCTP_CSUM |
6774 NETIF_F_HIGHDMA |
6775 NETIF_F_GSO_UDP_TUNNEL |
6776 NETIF_F_HW_VLAN_CTAG_TX |
6777 NETIF_F_HW_VLAN_CTAG_RX |
6778 NETIF_F_HW_VLAN_CTAG_FILTER |
6779 NETIF_F_IPV6_CSUM |
6780 NETIF_F_TSO |
059dab69 6781 NETIF_F_TSO_ECN |
41c445ff
JB
6782 NETIF_F_TSO6 |
6783 NETIF_F_RXCSUM |
6784 NETIF_F_RXHASH |
6785 0;
6786
2e86a0b6
ASJ
6787 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
6788 netdev->features |= NETIF_F_NTUPLE;
6789
41c445ff
JB
6790 /* copy netdev features into list of user selectable features */
6791 netdev->hw_features |= netdev->features;
6792
6793 if (vsi->type == I40E_VSI_MAIN) {
6794 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
6795 memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
6796 } else {
6797 /* relate the VSI_VMDQ name to the VSI_MAIN name */
6798 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
6799 pf->vsi[pf->lan_vsi]->netdev->name);
6800 random_ether_addr(mac_addr);
6801 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
6802 }
1a10370a 6803 i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
41c445ff
JB
6804
6805 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
6806 memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
6807 /* vlan gets same features (except vlan offload)
6808 * after any tweaks for specific VSI types
6809 */
6810 netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
6811 NETIF_F_HW_VLAN_CTAG_RX |
6812 NETIF_F_HW_VLAN_CTAG_FILTER);
6813 netdev->priv_flags |= IFF_UNICAST_FLT;
6814 netdev->priv_flags |= IFF_SUPP_NOFCS;
6815 /* Setup netdev TC information */
6816 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
6817
6818 netdev->netdev_ops = &i40e_netdev_ops;
6819 netdev->watchdog_timeo = 5 * HZ;
6820 i40e_set_ethtool_ops(netdev);
6821
6822 return 0;
6823}
6824
6825/**
6826 * i40e_vsi_delete - Delete a VSI from the switch
6827 * @vsi: the VSI being removed
6828 *
6829 * Sends the AdminQ delete-element command for this VSI; nothing is returned.
6830 **/
6831static void i40e_vsi_delete(struct i40e_vsi *vsi)
6832{
6833 /* removing the default VSI is not allowed */
6834 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
6835 return;
6836
41c445ff 6837 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
41c445ff
JB
6838}
6839
6840/**
6841 * i40e_add_vsi - Add a VSI to the switch
6842 * @vsi: the VSI being configured
6843 *
6844 * This initializes a VSI context depending on the VSI type to be added and
6845 * passes it down to the add_vsi aq command.
6846 **/
6847static int i40e_add_vsi(struct i40e_vsi *vsi)
6848{
6849 int ret = -ENODEV;
6850 struct i40e_mac_filter *f, *ftmp;
6851 struct i40e_pf *pf = vsi->back;
6852 struct i40e_hw *hw = &pf->hw;
6853 struct i40e_vsi_context ctxt;
6854 u8 enabled_tc = 0x1; /* TC0 enabled */
6855 int f_count = 0;
6856
6857 memset(&ctxt, 0, sizeof(ctxt));
6858 switch (vsi->type) {
6859 case I40E_VSI_MAIN:
6860 /* The PF's main VSI is already setup as part of the
6861 * device initialization, so we'll not bother with
6862 * the add_vsi call, but we will retrieve the current
6863 * VSI context.
6864 */
6865 ctxt.seid = pf->main_vsi_seid;
6866 ctxt.pf_num = pf->hw.pf_id;
6867 ctxt.vf_num = 0;
6868 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6869 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6870 if (ret) {
6871 dev_info(&pf->pdev->dev,
6872 "couldn't get pf vsi config, err %d, aq_err %d\n",
6873 ret, pf->hw.aq.asq_last_status);
6874 return -ENOENT;
6875 }
6876 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
6877 vsi->info.valid_sections = 0;
6878
6879 vsi->seid = ctxt.seid;
6880 vsi->id = ctxt.vsi_number;
6881
6882 enabled_tc = i40e_pf_get_tc_map(pf);
6883
6884 /* MFP mode setup queue map and update VSI */
6885 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
6886 memset(&ctxt, 0, sizeof(ctxt));
6887 ctxt.seid = pf->main_vsi_seid;
6888 ctxt.pf_num = pf->hw.pf_id;
6889 ctxt.vf_num = 0;
6890 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
6891 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
6892 if (ret) {
6893 dev_info(&pf->pdev->dev,
6894 "update vsi failed, aq_err=%d\n",
6895 pf->hw.aq.asq_last_status);
6896 ret = -ENOENT;
6897 goto err;
6898 }
6899 /* update the local VSI info queue map */
6900 i40e_vsi_update_queue_map(vsi, &ctxt);
6901 vsi->info.valid_sections = 0;
6902 } else {
6903 /* Default/Main VSI is only enabled for TC0
6904 * reconfigure it to enable all TCs that are
6905 * available on the port in SFP mode.
6906 */
6907 ret = i40e_vsi_config_tc(vsi, enabled_tc);
6908 if (ret) {
6909 dev_info(&pf->pdev->dev,
6910 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
6911 enabled_tc, ret,
6912 pf->hw.aq.asq_last_status);
6913 ret = -ENOENT;
6914 }
6915 }
6916 break;
6917
6918 case I40E_VSI_FDIR:
cbf61325
ASJ
6919 ctxt.pf_num = hw->pf_id;
6920 ctxt.vf_num = 0;
6921 ctxt.uplink_seid = vsi->uplink_seid;
6922 ctxt.connection_type = 0x1; /* regular data port */
6923 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
41c445ff 6924 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
41c445ff
JB
6925 break;
6926
6927 case I40E_VSI_VMDQ2:
6928 ctxt.pf_num = hw->pf_id;
6929 ctxt.vf_num = 0;
6930 ctxt.uplink_seid = vsi->uplink_seid;
6931 ctxt.connection_type = 0x1; /* regular data port */
6932 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
6933
6934 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6935
6936 /* This VSI is connected to VEB so the switch_id
6937 * should be set to zero by default.
6938 */
6939 ctxt.info.switch_id = 0;
6940 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
6941 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6942
6943 /* Setup the VSI tx/rx queue map for TC0 only for now */
6944 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
6945 break;
6946
6947 case I40E_VSI_SRIOV:
6948 ctxt.pf_num = hw->pf_id;
6949 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
6950 ctxt.uplink_seid = vsi->uplink_seid;
6951 ctxt.connection_type = 0x1; /* regular data port */
6952 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
6953
6954 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6955
6956 /* This VSI is connected to VEB so the switch_id
6957 * should be set to zero by default.
6958 */
6959 ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6960
6961 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
6962 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
6963 /* Setup the VSI tx/rx queue map for TC0 only for now */
6964 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
6965 break;
6966
6967 default:
6968 return -ENODEV;
6969 }
6970
6971 if (vsi->type != I40E_VSI_MAIN) {
6972 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
6973 if (ret) {
6974 dev_info(&vsi->back->pdev->dev,
6975 "add vsi failed, aq_err=%d\n",
6976 vsi->back->hw.aq.asq_last_status);
6977 ret = -ENOENT;
6978 goto err;
6979 }
6980 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
6981 vsi->info.valid_sections = 0;
6982 vsi->seid = ctxt.seid;
6983 vsi->id = ctxt.vsi_number;
6984 }
6985
6986 /* If macvlan filters already exist, force them to get loaded */
6987 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
6988 f->changed = true;
6989 f_count++;
6990 }
6991 if (f_count) {
6992 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
6993 pf->flags |= I40E_FLAG_FILTER_SYNC;
6994 }
6995
6996 /* Update VSI BW information */
6997 ret = i40e_vsi_get_bw_info(vsi);
6998 if (ret) {
6999 dev_info(&pf->pdev->dev,
7000 "couldn't get vsi bw info, err %d, aq_err %d\n",
7001 ret, pf->hw.aq.asq_last_status);
7002 /* VSI is already added so not tearing that up */
7003 ret = 0;
7004 }
7005
7006err:
7007 return ret;
7008}
7009
7010/**
7011 * i40e_vsi_release - Delete a VSI and free its resources
7012 * @vsi: the VSI being removed
7013 *
7014 * Returns 0 on success or < 0 on error
7015 **/
7016int i40e_vsi_release(struct i40e_vsi *vsi)
7017{
7018 struct i40e_mac_filter *f, *ftmp;
7019 struct i40e_veb *veb = NULL;
7020 struct i40e_pf *pf;
7021 u16 uplink_seid;
7022 int i, n;
7023
7024 pf = vsi->back;
7025
7026 /* release of a VEB-owner or last VSI is not allowed */
7027 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
7028 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
7029 vsi->seid, vsi->uplink_seid);
7030 return -ENODEV;
7031 }
7032 if (vsi == pf->vsi[pf->lan_vsi] &&
7033 !test_bit(__I40E_DOWN, &pf->state)) {
7034 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
7035 return -ENODEV;
7036 }
7037
7038 uplink_seid = vsi->uplink_seid;
7039 if (vsi->type != I40E_VSI_SRIOV) {
7040 if (vsi->netdev_registered) {
7041 vsi->netdev_registered = false;
7042 if (vsi->netdev) {
7043 /* results in a call to i40e_close() */
7044 unregister_netdev(vsi->netdev);
41c445ff
JB
7045 }
7046 } else {
90ef8d47 7047 i40e_vsi_close(vsi);
41c445ff
JB
7048 }
7049 i40e_vsi_disable_irq(vsi);
7050 }
7051
7052 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
7053 i40e_del_filter(vsi, f->macaddr, f->vlan,
7054 f->is_vf, f->is_netdev);
7055 i40e_sync_vsi_filters(vsi);
7056
7057 i40e_vsi_delete(vsi);
7058 i40e_vsi_free_q_vectors(vsi);
a4866597
SN
7059 if (vsi->netdev) {
7060 free_netdev(vsi->netdev);
7061 vsi->netdev = NULL;
7062 }
41c445ff
JB
7063 i40e_vsi_clear_rings(vsi);
7064 i40e_vsi_clear(vsi);
7065
7066 /* If this was the last thing on the VEB, except for the
7067 * controlling VSI, remove the VEB, which puts the controlling
7068 * VSI onto the next level down in the switch.
7069 *
7070 * Well, okay, there's one more exception here: don't remove
7071 * the orphan VEBs yet. We'll wait for an explicit remove request
7072 * from up the network stack.
7073 */
7074 for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
7075 if (pf->vsi[i] &&
7076 pf->vsi[i]->uplink_seid == uplink_seid &&
7077 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
7078 n++; /* count the VSIs */
7079 }
7080 }
7081 for (i = 0; i < I40E_MAX_VEB; i++) {
7082 if (!pf->veb[i])
7083 continue;
7084 if (pf->veb[i]->uplink_seid == uplink_seid)
7085 n++; /* count the VEBs */
7086 if (pf->veb[i]->seid == uplink_seid)
7087 veb = pf->veb[i];
7088 }
7089 if (n == 0 && veb && veb->uplink_seid != 0)
7090 i40e_veb_release(veb);
7091
7092 return 0;
7093}
7094
7095/**
7096 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
7097 * @vsi: ptr to the VSI
7098 *
7099 * This should only be called after i40e_vsi_mem_alloc() which allocates the
7100 * corresponding SW VSI structure and initializes num_queue_pairs for the
7101 * newly allocated VSI.
7102 *
7103 * Returns 0 on success or negative on failure
7104 **/
7105static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
7106{
7107 int ret = -ENOENT;
7108 struct i40e_pf *pf = vsi->back;
7109
493fb300 7110 if (vsi->q_vectors[0]) {
41c445ff
JB
7111 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
7112 vsi->seid);
7113 return -EEXIST;
7114 }
7115
7116 if (vsi->base_vector) {
f29eaa3d 7117 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
41c445ff
JB
7118 vsi->seid, vsi->base_vector);
7119 return -EEXIST;
7120 }
7121
90e04070 7122 ret = i40e_vsi_alloc_q_vectors(vsi);
41c445ff
JB
7123 if (ret) {
7124 dev_info(&pf->pdev->dev,
7125 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
7126 vsi->num_q_vectors, vsi->seid, ret);
7127 vsi->num_q_vectors = 0;
7128 goto vector_setup_out;
7129 }
7130
958a3e3b
SN
7131 if (vsi->num_q_vectors)
7132 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
7133 vsi->num_q_vectors, vsi->idx);
41c445ff
JB
7134 if (vsi->base_vector < 0) {
7135 dev_info(&pf->pdev->dev,
f29eaa3d 7136 "failed to get queue tracking for VSI %d, err=%d\n",
41c445ff
JB
7137 vsi->seid, vsi->base_vector);
7138 i40e_vsi_free_q_vectors(vsi);
7139 ret = -ENOENT;
7140 goto vector_setup_out;
7141 }
7142
7143vector_setup_out:
7144 return ret;
7145}
7146
bc7d338f
ASJ
7147/**
7148 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
7149 * @vsi: pointer to the vsi.
7150 *
7151 * This re-allocates a vsi's queue resources.
7152 *
7153 * Returns pointer to the successfully allocated and configured VSI sw struct
7154 * on success, otherwise returns NULL on failure.
7155 **/
7156static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
7157{
7158 struct i40e_pf *pf = vsi->back;
7159 u8 enabled_tc;
7160 int ret;
7161
7162 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
7163 i40e_vsi_clear_rings(vsi);
7164
7165 i40e_vsi_free_arrays(vsi, false);
7166 i40e_set_num_rings_in_vsi(vsi);
7167 ret = i40e_vsi_alloc_arrays(vsi, false);
7168 if (ret)
7169 goto err_vsi;
7170
7171 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
7172 if (ret < 0) {
7173 dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
7174 vsi->seid, ret);
7175 goto err_vsi;
7176 }
7177 vsi->base_queue = ret;
7178
7179 /* Update the FW view of the VSI. Force a reset of TC and queue
7180 * layout configurations.
7181 */
7182 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
7183 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
7184 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
7185 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
7186
7187 /* assign it some queues */
7188 ret = i40e_alloc_rings(vsi);
7189 if (ret)
7190 goto err_rings;
7191
7192 /* map all of the rings to the q_vectors */
7193 i40e_vsi_map_rings_to_vectors(vsi);
7194 return vsi;
7195
7196err_rings:
7197 i40e_vsi_free_q_vectors(vsi);
7198 if (vsi->netdev_registered) {
7199 vsi->netdev_registered = false;
7200 unregister_netdev(vsi->netdev);
7201 free_netdev(vsi->netdev);
7202 vsi->netdev = NULL;
7203 }
7204 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
7205err_vsi:
7206 i40e_vsi_clear(vsi);
7207 return NULL;
7208}
7209
41c445ff
JB
7210/**
7211 * i40e_vsi_setup - Set up a VSI by a given type
7212 * @pf: board private structure
7213 * @type: VSI type
7214 * @uplink_seid: the switch element to link to
7215 * @param1: usage depends upon VSI type. For VF types, indicates VF id
7216 *
 7217 * This allocates the sw VSI structure and its queue resources, then adds a VSI
7218 * to the identified VEB.
7219 *
 7220 * Returns pointer to the successfully allocated and configured VSI sw struct on
7221 * success, otherwise returns NULL on failure.
7222 **/
7223struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
7224 u16 uplink_seid, u32 param1)
7225{
7226 struct i40e_vsi *vsi = NULL;
7227 struct i40e_veb *veb = NULL;
7228 int ret, i;
7229 int v_idx;
7230
7231 /* The requested uplink_seid must be either
7232 * - the PF's port seid
7233 * no VEB is needed because this is the PF
7234 * or this is a Flow Director special case VSI
7235 * - seid of an existing VEB
7236 * - seid of a VSI that owns an existing VEB
7237 * - seid of a VSI that doesn't own a VEB
7238 * a new VEB is created and the VSI becomes the owner
7239 * - seid of the PF VSI, which is what creates the first VEB
7240 * this is a special case of the previous
7241 *
7242 * Find which uplink_seid we were given and create a new VEB if needed
7243 */
7244 for (i = 0; i < I40E_MAX_VEB; i++) {
7245 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
7246 veb = pf->veb[i];
7247 break;
7248 }
7249 }
7250
7251 if (!veb && uplink_seid != pf->mac_seid) {
7252
7253 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
7254 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
7255 vsi = pf->vsi[i];
7256 break;
7257 }
7258 }
7259 if (!vsi) {
7260 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
7261 uplink_seid);
7262 return NULL;
7263 }
7264
7265 if (vsi->uplink_seid == pf->mac_seid)
7266 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
7267 vsi->tc_config.enabled_tc);
7268 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
7269 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
7270 vsi->tc_config.enabled_tc);
7271
7272 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
7273 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
7274 veb = pf->veb[i];
7275 }
7276 if (!veb) {
7277 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
7278 return NULL;
7279 }
7280
7281 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
7282 uplink_seid = veb->seid;
7283 }
7284
7285 /* get vsi sw struct */
7286 v_idx = i40e_vsi_mem_alloc(pf, type);
7287 if (v_idx < 0)
7288 goto err_alloc;
7289 vsi = pf->vsi[v_idx];
cbf61325
ASJ
7290 if (!vsi)
7291 goto err_alloc;
41c445ff
JB
7292 vsi->type = type;
7293 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
7294
7295 if (type == I40E_VSI_MAIN)
7296 pf->lan_vsi = v_idx;
7297 else if (type == I40E_VSI_SRIOV)
7298 vsi->vf_id = param1;
7299 /* assign it some queues */
cbf61325
ASJ
7300 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
7301 vsi->idx);
41c445ff
JB
7302 if (ret < 0) {
7303 dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
7304 vsi->seid, ret);
7305 goto err_vsi;
7306 }
7307 vsi->base_queue = ret;
7308
7309 /* get a VSI from the hardware */
7310 vsi->uplink_seid = uplink_seid;
7311 ret = i40e_add_vsi(vsi);
7312 if (ret)
7313 goto err_vsi;
7314
7315 switch (vsi->type) {
7316 /* setup the netdev if needed */
7317 case I40E_VSI_MAIN:
7318 case I40E_VSI_VMDQ2:
7319 ret = i40e_config_netdev(vsi);
7320 if (ret)
7321 goto err_netdev;
7322 ret = register_netdev(vsi->netdev);
7323 if (ret)
7324 goto err_netdev;
7325 vsi->netdev_registered = true;
7326 netif_carrier_off(vsi->netdev);
4e3b35b0
NP
7327#ifdef CONFIG_I40E_DCB
7328 /* Setup DCB netlink interface */
7329 i40e_dcbnl_setup(vsi);
7330#endif /* CONFIG_I40E_DCB */
41c445ff
JB
7331 /* fall through */
7332
7333 case I40E_VSI_FDIR:
7334 /* set up vectors and rings if needed */
7335 ret = i40e_vsi_setup_vectors(vsi);
7336 if (ret)
7337 goto err_msix;
7338
7339 ret = i40e_alloc_rings(vsi);
7340 if (ret)
7341 goto err_rings;
7342
7343 /* map all of the rings to the q_vectors */
7344 i40e_vsi_map_rings_to_vectors(vsi);
7345
7346 i40e_vsi_reset_stats(vsi);
7347 break;
7348
7349 default:
7350 /* no netdev or rings for the other VSI types */
7351 break;
7352 }
7353
7354 return vsi;
7355
7356err_rings:
7357 i40e_vsi_free_q_vectors(vsi);
7358err_msix:
7359 if (vsi->netdev_registered) {
7360 vsi->netdev_registered = false;
7361 unregister_netdev(vsi->netdev);
7362 free_netdev(vsi->netdev);
7363 vsi->netdev = NULL;
7364 }
7365err_netdev:
7366 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
7367err_vsi:
7368 i40e_vsi_clear(vsi);
7369err_alloc:
7370 return NULL;
7371}
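/* Hedged usage sketch (illustration only): creating child VSIs under the
 * main LAN VSI.  Passing the main VSI's seid as uplink_seid makes
 * i40e_vsi_setup() create or reuse a VEB owned by that VSI, per the
 * comment at the top of the function.  The example_* helper is
 * hypothetical; the calls mirror how the driver sets up FDIR and VMDq
 * VSIs elsewhere.
 */
#if 0
static void example_add_child_vsis(struct i40e_pf *pf)
{
	struct i40e_vsi *main_vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi *vsi;

	/* special-purpose VSI used by the Flow Director sideband filters */
	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, main_vsi->seid, 0);
	if (!vsi)
		dev_info(&pf->pdev->dev, "FDIR VSI setup failed\n");

	/* a VMDq2 VSI; param1 is unused for this type */
	vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, main_vsi->seid, 0);
	if (!vsi)
		dev_info(&pf->pdev->dev, "VMDq VSI setup failed\n");
}
#endif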
7372
7373/**
7374 * i40e_veb_get_bw_info - Query VEB BW information
7375 * @veb: the veb to query
7376 *
7377 * Query the Tx scheduler BW configuration data for given VEB
7378 **/
7379static int i40e_veb_get_bw_info(struct i40e_veb *veb)
7380{
7381 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
7382 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
7383 struct i40e_pf *pf = veb->pf;
7384 struct i40e_hw *hw = &pf->hw;
7385 u32 tc_bw_max;
7386 int ret = 0;
7387 int i;
7388
7389 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
7390 &bw_data, NULL);
7391 if (ret) {
7392 dev_info(&pf->pdev->dev,
7393 "query veb bw config failed, aq_err=%d\n",
7394 hw->aq.asq_last_status);
7395 goto out;
7396 }
7397
7398 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
7399 &ets_data, NULL);
7400 if (ret) {
7401 dev_info(&pf->pdev->dev,
7402 "query veb bw ets config failed, aq_err=%d\n",
7403 hw->aq.asq_last_status);
7404 goto out;
7405 }
7406
7407 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
7408 veb->bw_max_quanta = ets_data.tc_bw_max;
7409 veb->is_abs_credits = bw_data.absolute_credits_enable;
7410 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
7411 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
7412 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
7413 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
7414 veb->bw_tc_limit_credits[i] =
7415 le16_to_cpu(bw_data.tc_bw_limits[i]);
7416 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
7417 }
7418
7419out:
7420 return ret;
7421}
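/* Worked example (illustration only) of the tc_bw_max unpacking above:
 * the two little-endian 16-bit words from the AQ response form one
 * 32-bit value in which each traffic class owns a 4-bit nibble, and only
 * the low 3 bits of that nibble carry the max credit quanta.  The
 * example_* helper is hypothetical.
 */
#if 0
static u8 example_tc_max_quanta(__le16 lo, __le16 hi, int tc)
{
	u32 tc_bw_max = le16_to_cpu(lo) | (le16_to_cpu(hi) << 16);

	return (tc_bw_max >> (tc * 4)) & 0x7;	/* e.g. tc=2 reads bits 10:8 */
}
#endif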
7422
7423/**
7424 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
7425 * @pf: board private structure
7426 *
7427 * On error: returns error code (negative)
 7428 * On success: returns VEB index in PF (positive)
7429 **/
7430static int i40e_veb_mem_alloc(struct i40e_pf *pf)
7431{
7432 int ret = -ENOENT;
7433 struct i40e_veb *veb;
7434 int i;
7435
7436 /* Need to protect the allocation of switch elements at the PF level */
7437 mutex_lock(&pf->switch_mutex);
7438
7439 /* VEB list may be fragmented if VEB creation/destruction has
7440 * been happening. We can afford to do a quick scan to look
7441 * for any free slots in the list.
7442 *
7443 * find next empty veb slot, looping back around if necessary
7444 */
7445 i = 0;
7446 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
7447 i++;
7448 if (i >= I40E_MAX_VEB) {
7449 ret = -ENOMEM;
7450 goto err_alloc_veb; /* out of VEB slots! */
7451 }
7452
7453 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
7454 if (!veb) {
7455 ret = -ENOMEM;
7456 goto err_alloc_veb;
7457 }
7458 veb->pf = pf;
7459 veb->idx = i;
7460 veb->enabled_tc = 1;
7461
7462 pf->veb[i] = veb;
7463 ret = i;
7464err_alloc_veb:
7465 mutex_unlock(&pf->switch_mutex);
7466 return ret;
7467}
7468
7469/**
7470 * i40e_switch_branch_release - Delete a branch of the switch tree
7471 * @branch: where to start deleting
7472 *
7473 * This uses recursion to find the tips of the branch to be
 7474 * removed, deleting them until we get back to and can delete this VEB.
7475 **/
7476static void i40e_switch_branch_release(struct i40e_veb *branch)
7477{
7478 struct i40e_pf *pf = branch->pf;
7479 u16 branch_seid = branch->seid;
7480 u16 veb_idx = branch->idx;
7481 int i;
7482
7483 /* release any VEBs on this VEB - RECURSION */
7484 for (i = 0; i < I40E_MAX_VEB; i++) {
7485 if (!pf->veb[i])
7486 continue;
7487 if (pf->veb[i]->uplink_seid == branch->seid)
7488 i40e_switch_branch_release(pf->veb[i]);
7489 }
7490
7491 /* Release the VSIs on this VEB, but not the owner VSI.
7492 *
7493 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
7494 * the VEB itself, so don't use (*branch) after this loop.
7495 */
7496 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
7497 if (!pf->vsi[i])
7498 continue;
7499 if (pf->vsi[i]->uplink_seid == branch_seid &&
7500 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
7501 i40e_vsi_release(pf->vsi[i]);
7502 }
7503 }
7504
7505 /* There's one corner case where the VEB might not have been
7506 * removed, so double check it here and remove it if needed.
7507 * This case happens if the veb was created from the debugfs
7508 * commands and no VSIs were added to it.
7509 */
7510 if (pf->veb[veb_idx])
7511 i40e_veb_release(pf->veb[veb_idx]);
7512}
7513
7514/**
7515 * i40e_veb_clear - remove veb struct
7516 * @veb: the veb to remove
7517 **/
7518static void i40e_veb_clear(struct i40e_veb *veb)
7519{
7520 if (!veb)
7521 return;
7522
7523 if (veb->pf) {
7524 struct i40e_pf *pf = veb->pf;
7525
7526 mutex_lock(&pf->switch_mutex);
7527 if (pf->veb[veb->idx] == veb)
7528 pf->veb[veb->idx] = NULL;
7529 mutex_unlock(&pf->switch_mutex);
7530 }
7531
7532 kfree(veb);
7533}
7534
7535/**
7536 * i40e_veb_release - Delete a VEB and free its resources
7537 * @veb: the VEB being removed
7538 **/
7539void i40e_veb_release(struct i40e_veb *veb)
7540{
7541 struct i40e_vsi *vsi = NULL;
7542 struct i40e_pf *pf;
7543 int i, n = 0;
7544
7545 pf = veb->pf;
7546
7547 /* find the remaining VSI and check for extras */
7548 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
7549 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
7550 n++;
7551 vsi = pf->vsi[i];
7552 }
7553 }
7554 if (n != 1) {
7555 dev_info(&pf->pdev->dev,
7556 "can't remove VEB %d with %d VSIs left\n",
7557 veb->seid, n);
7558 return;
7559 }
7560
7561 /* move the remaining VSI to uplink veb */
7562 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
7563 if (veb->uplink_seid) {
7564 vsi->uplink_seid = veb->uplink_seid;
7565 if (veb->uplink_seid == pf->mac_seid)
7566 vsi->veb_idx = I40E_NO_VEB;
7567 else
7568 vsi->veb_idx = veb->veb_idx;
7569 } else {
7570 /* floating VEB */
7571 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
7572 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
7573 }
7574
7575 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
7576 i40e_veb_clear(veb);
41c445ff
JB
7577}
7578
7579/**
7580 * i40e_add_veb - create the VEB in the switch
7581 * @veb: the VEB to be instantiated
7582 * @vsi: the controlling VSI
7583 **/
7584static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
7585{
56747264 7586 bool is_default = false;
e1c51b95 7587 bool is_cloud = false;
41c445ff
JB
7588 int ret;
7589
7590 /* get a VEB from the hardware */
7591 ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
e1c51b95
KS
7592 veb->enabled_tc, is_default,
7593 is_cloud, &veb->seid, NULL);
41c445ff
JB
7594 if (ret) {
7595 dev_info(&veb->pf->pdev->dev,
7596 "couldn't add VEB, err %d, aq_err %d\n",
7597 ret, veb->pf->hw.aq.asq_last_status);
7598 return -EPERM;
7599 }
7600
7601 /* get statistics counter */
7602 ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
7603 &veb->stats_idx, NULL, NULL, NULL);
7604 if (ret) {
7605 dev_info(&veb->pf->pdev->dev,
7606 "couldn't get VEB statistics idx, err %d, aq_err %d\n",
7607 ret, veb->pf->hw.aq.asq_last_status);
7608 return -EPERM;
7609 }
7610 ret = i40e_veb_get_bw_info(veb);
7611 if (ret) {
7612 dev_info(&veb->pf->pdev->dev,
7613 "couldn't get VEB bw info, err %d, aq_err %d\n",
7614 ret, veb->pf->hw.aq.asq_last_status);
7615 i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
7616 return -ENOENT;
7617 }
7618
7619 vsi->uplink_seid = veb->seid;
7620 vsi->veb_idx = veb->idx;
7621 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
7622
7623 return 0;
7624}
7625
7626/**
7627 * i40e_veb_setup - Set up a VEB
7628 * @pf: board private structure
7629 * @flags: VEB setup flags
7630 * @uplink_seid: the switch element to link to
7631 * @vsi_seid: the initial VSI seid
7632 * @enabled_tc: Enabled TC bit-map
7633 *
 7634 * This allocates the sw VEB structure and links it into the switch.
7635 * It is possible and legal for this to be a duplicate of an already
7636 * existing VEB. It is also possible for both uplink and vsi seids
7637 * to be zero, in order to create a floating VEB.
7638 *
7639 * Returns pointer to the successfully allocated VEB sw struct on
7640 * success, otherwise returns NULL on failure.
7641 **/
7642struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
7643 u16 uplink_seid, u16 vsi_seid,
7644 u8 enabled_tc)
7645{
7646 struct i40e_veb *veb, *uplink_veb = NULL;
7647 int vsi_idx, veb_idx;
7648 int ret;
7649
7650 /* if one seid is 0, the other must be 0 to create a floating relay */
7651 if ((uplink_seid == 0 || vsi_seid == 0) &&
7652 (uplink_seid + vsi_seid != 0)) {
7653 dev_info(&pf->pdev->dev,
7654 "one, not both seid's are 0: uplink=%d vsi=%d\n",
7655 uplink_seid, vsi_seid);
7656 return NULL;
7657 }
7658
7659 /* make sure there is such a vsi and uplink */
7660 for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
7661 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
7662 break;
7663 if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
7664 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
7665 vsi_seid);
7666 return NULL;
7667 }
7668
7669 if (uplink_seid && uplink_seid != pf->mac_seid) {
7670 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
7671 if (pf->veb[veb_idx] &&
7672 pf->veb[veb_idx]->seid == uplink_seid) {
7673 uplink_veb = pf->veb[veb_idx];
7674 break;
7675 }
7676 }
7677 if (!uplink_veb) {
7678 dev_info(&pf->pdev->dev,
7679 "uplink seid %d not found\n", uplink_seid);
7680 return NULL;
7681 }
7682 }
7683
7684 /* get veb sw struct */
7685 veb_idx = i40e_veb_mem_alloc(pf);
7686 if (veb_idx < 0)
7687 goto err_alloc;
7688 veb = pf->veb[veb_idx];
7689 veb->flags = flags;
7690 veb->uplink_seid = uplink_seid;
7691 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
7692 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
7693
7694 /* create the VEB in the switch */
7695 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
7696 if (ret)
7697 goto err_veb;
7698
7699 return veb;
7700
7701err_veb:
7702 i40e_veb_clear(veb);
7703err_alloc:
7704 return NULL;
7705}
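/* Hedged sketch (illustration only): hanging a new VEB off the port (MAC)
 * uplink for an existing VSI, mirroring the call i40e_vsi_setup() makes
 * when the uplink VSI is still attached directly to the MAC.  A zero
 * flags value and the VSI's enabled TC map are passed through; the
 * example_* wrapper is hypothetical.
 */
#if 0
static struct i40e_veb *example_veb_for_vsi(struct i40e_pf *pf,
					    struct i40e_vsi *vsi)
{
	return i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
			      vsi->tc_config.enabled_tc);
}
#endif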
7706
7707/**
7708 * i40e_setup_pf_switch_element - set pf vars based on switch type
7709 * @pf: board private structure
7710 * @ele: element we are building info from
7711 * @num_reported: total number of elements
7712 * @printconfig: should we print the contents
7713 *
 7714 * Helper function to extract a few useful SEID values.
7715 **/
7716static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
7717 struct i40e_aqc_switch_config_element_resp *ele,
7718 u16 num_reported, bool printconfig)
7719{
7720 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
7721 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
7722 u8 element_type = ele->element_type;
7723 u16 seid = le16_to_cpu(ele->seid);
7724
7725 if (printconfig)
7726 dev_info(&pf->pdev->dev,
7727 "type=%d seid=%d uplink=%d downlink=%d\n",
7728 element_type, seid, uplink_seid, downlink_seid);
7729
7730 switch (element_type) {
7731 case I40E_SWITCH_ELEMENT_TYPE_MAC:
7732 pf->mac_seid = seid;
7733 break;
7734 case I40E_SWITCH_ELEMENT_TYPE_VEB:
7735 /* Main VEB? */
7736 if (uplink_seid != pf->mac_seid)
7737 break;
7738 if (pf->lan_veb == I40E_NO_VEB) {
7739 int v;
7740
7741 /* find existing or else empty VEB */
7742 for (v = 0; v < I40E_MAX_VEB; v++) {
7743 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
7744 pf->lan_veb = v;
7745 break;
7746 }
7747 }
7748 if (pf->lan_veb == I40E_NO_VEB) {
7749 v = i40e_veb_mem_alloc(pf);
7750 if (v < 0)
7751 break;
7752 pf->lan_veb = v;
7753 }
7754 }
7755
7756 pf->veb[pf->lan_veb]->seid = seid;
7757 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
7758 pf->veb[pf->lan_veb]->pf = pf;
7759 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
7760 break;
7761 case I40E_SWITCH_ELEMENT_TYPE_VSI:
7762 if (num_reported != 1)
7763 break;
7764 /* This is immediately after a reset so we can assume this is
7765 * the PF's VSI
7766 */
7767 pf->mac_seid = uplink_seid;
7768 pf->pf_seid = downlink_seid;
7769 pf->main_vsi_seid = seid;
7770 if (printconfig)
7771 dev_info(&pf->pdev->dev,
7772 "pf_seid=%d main_vsi_seid=%d\n",
7773 pf->pf_seid, pf->main_vsi_seid);
7774 break;
7775 case I40E_SWITCH_ELEMENT_TYPE_PF:
7776 case I40E_SWITCH_ELEMENT_TYPE_VF:
7777 case I40E_SWITCH_ELEMENT_TYPE_EMP:
7778 case I40E_SWITCH_ELEMENT_TYPE_BMC:
7779 case I40E_SWITCH_ELEMENT_TYPE_PE:
7780 case I40E_SWITCH_ELEMENT_TYPE_PA:
7781 /* ignore these for now */
7782 break;
7783 default:
7784 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
7785 element_type, seid);
7786 break;
7787 }
7788}
7789
7790/**
7791 * i40e_fetch_switch_configuration - Get switch config from firmware
7792 * @pf: board private structure
7793 * @printconfig: should we print the contents
7794 *
7795 * Get the current switch configuration from the device and
7796 * extract a few useful SEID values.
7797 **/
7798int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
7799{
7800 struct i40e_aqc_get_switch_config_resp *sw_config;
7801 u16 next_seid = 0;
7802 int ret = 0;
7803 u8 *aq_buf;
7804 int i;
7805
7806 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
7807 if (!aq_buf)
7808 return -ENOMEM;
7809
7810 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
7811 do {
7812 u16 num_reported, num_total;
7813
7814 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
7815 I40E_AQ_LARGE_BUF,
7816 &next_seid, NULL);
7817 if (ret) {
7818 dev_info(&pf->pdev->dev,
7819 "get switch config failed %d aq_err=%x\n",
7820 ret, pf->hw.aq.asq_last_status);
7821 kfree(aq_buf);
7822 return -ENOENT;
7823 }
7824
7825 num_reported = le16_to_cpu(sw_config->header.num_reported);
7826 num_total = le16_to_cpu(sw_config->header.num_total);
7827
7828 if (printconfig)
7829 dev_info(&pf->pdev->dev,
7830 "header: %d reported %d total\n",
7831 num_reported, num_total);
7832
7833 if (num_reported) {
7834 int sz = sizeof(*sw_config) * num_reported;
7835
7836 kfree(pf->sw_config);
7837 pf->sw_config = kzalloc(sz, GFP_KERNEL);
7838 if (pf->sw_config)
7839 memcpy(pf->sw_config, sw_config, sz);
7840 }
7841
7842 for (i = 0; i < num_reported; i++) {
7843 struct i40e_aqc_switch_config_element_resp *ele =
7844 &sw_config->element[i];
7845
7846 i40e_setup_pf_switch_element(pf, ele, num_reported,
7847 printconfig);
7848 }
7849 } while (next_seid != 0);
7850
7851 kfree(aq_buf);
7852 return ret;
7853}
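/* Usage note (illustration only): i40e_setup_pf_switch() below calls this
 * with printconfig=false to quietly refresh the cached SEIDs; passing true
 * additionally logs every reported switch element, which is useful when
 * inspecting the switch topology.  The example_* helper is hypothetical.
 */
#if 0
static void example_dump_switch_config(struct i40e_pf *pf)
{
	if (i40e_fetch_switch_configuration(pf, true))
		dev_info(&pf->pdev->dev, "switch config dump failed\n");
}
#endif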
7854
7855/**
7856 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
7857 * @pf: board private structure
bc7d338f 7858 * @reinit: if the Main VSI needs to be re-initialized.
41c445ff
JB
7859 *
7860 * Returns 0 on success, negative value on failure
7861 **/
bc7d338f 7862static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
41c445ff 7863{
895106a5 7864 u32 rxfc = 0, txfc = 0, rxfc_reg;
41c445ff
JB
7865 int ret;
7866
7867 /* find out what's out there already */
7868 ret = i40e_fetch_switch_configuration(pf, false);
7869 if (ret) {
7870 dev_info(&pf->pdev->dev,
7871 "couldn't fetch switch config, err %d, aq_err %d\n",
7872 ret, pf->hw.aq.asq_last_status);
7873 return ret;
7874 }
7875 i40e_pf_reset_stats(pf);
7876
41c445ff 7877 /* first time setup */
bc7d338f 7878 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
41c445ff
JB
7879 struct i40e_vsi *vsi = NULL;
7880 u16 uplink_seid;
7881
7882 /* Set up the PF VSI associated with the PF's main VSI
7883 * that is already in the HW switch
7884 */
7885 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
7886 uplink_seid = pf->veb[pf->lan_veb]->seid;
7887 else
7888 uplink_seid = pf->mac_seid;
bc7d338f
ASJ
7889 if (pf->lan_vsi == I40E_NO_VSI)
7890 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
7891 else if (reinit)
7892 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
41c445ff
JB
7893 if (!vsi) {
7894 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
7895 i40e_fdir_teardown(pf);
7896 return -EAGAIN;
7897 }
41c445ff
JB
7898 } else {
7899 /* force a reset of TC and queue layout configurations */
7900 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
7901 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
7902 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
7903 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
7904 }
7905 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
7906
cbf61325
ASJ
7907 i40e_fdir_sb_setup(pf);
7908
41c445ff
JB
7909 /* Setup static PF queue filter control settings */
7910 ret = i40e_setup_pf_filter_control(pf);
7911 if (ret) {
7912 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
7913 ret);
7914 /* Failure here should not stop continuing other steps */
7915 }
7916
7917 /* enable RSS in the HW, even for only one queue, as the stack can use
7918 * the hash
7919 */
7920 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
7921 i40e_config_rss(pf);
7922
7923 /* fill in link information and enable LSE reporting */
7924 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
7925 i40e_link_event(pf);
7926
d52c20b7 7927 /* Initialize user-specific link properties */
41c445ff
JB
7928 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
7929 I40E_AQ_AN_COMPLETED) ? true : false);
d52c20b7
JB
7930 /* requested_mode is set in probe or by ethtool */
7931 if (!pf->fc_autoneg_status)
7932 goto no_autoneg;
7933
7934 if ((pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) &&
7935 (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX))
41c445ff
JB
7936 pf->hw.fc.current_mode = I40E_FC_FULL;
7937 else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
7938 pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
7939 else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
7940 pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
7941 else
d52c20b7
JB
7942 pf->hw.fc.current_mode = I40E_FC_NONE;
7943
7944 /* sync the flow control settings with the auto-neg values */
7945 switch (pf->hw.fc.current_mode) {
7946 case I40E_FC_FULL:
7947 txfc = 1;
7948 rxfc = 1;
7949 break;
7950 case I40E_FC_TX_PAUSE:
7951 txfc = 1;
7952 rxfc = 0;
7953 break;
7954 case I40E_FC_RX_PAUSE:
7955 txfc = 0;
7956 rxfc = 1;
7957 break;
7958 case I40E_FC_NONE:
7959 case I40E_FC_DEFAULT:
7960 txfc = 0;
7961 rxfc = 0;
7962 break;
7963 case I40E_FC_PFC:
7964 /* TBD */
7965 break;
7966 /* no default case, we have to handle all possibilities here */
7967 }
7968
7969 wr32(&pf->hw, I40E_PRTDCB_FCCFG, txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
7970
7971 rxfc_reg = rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
7972 ~I40E_PRTDCB_MFLCN_RFCE_MASK;
7973 rxfc_reg |= (rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT);
7974
7975 wr32(&pf->hw, I40E_PRTDCB_MFLCN, rxfc_reg);
41c445ff 7976
d52c20b7
JB
7977 goto fc_complete;
7978
7979no_autoneg:
7980 /* disable L2 flow control, user can turn it on if they wish */
7981 wr32(&pf->hw, I40E_PRTDCB_FCCFG, 0);
7982 wr32(&pf->hw, I40E_PRTDCB_MFLCN, rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
7983 ~I40E_PRTDCB_MFLCN_RFCE_MASK);
7984
7985fc_complete:
beb0dff1
JK
7986 i40e_ptp_init(pf);
7987
41c445ff
JB
7988 return ret;
7989}
7990
41c445ff
JB
7991/**
7992 * i40e_determine_queue_usage - Work out queue distribution
7993 * @pf: board private structure
7994 **/
7995static void i40e_determine_queue_usage(struct i40e_pf *pf)
7996{
41c445ff
JB
7997 int queues_left;
7998
7999 pf->num_lan_qps = 0;
41c445ff
JB
8000
8001 /* Find the max queues to be put into basic use. We'll always be
8002 * using TC0, whether or not DCB is running, and TC0 will get the
8003 * big RSS set.
8004 */
8005 queues_left = pf->hw.func_caps.num_tx_qp;
8006
cbf61325
ASJ
8007 if ((queues_left == 1) ||
8008 !(pf->flags & I40E_FLAG_MSIX_ENABLED) ||
8009 !(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED |
8010 I40E_FLAG_DCB_ENABLED))) {
41c445ff
JB
8011 /* one qp for PF, no queues for anything else */
8012 queues_left = 0;
8013 pf->rss_size = pf->num_lan_qps = 1;
8014
8015 /* make sure all the fancies are disabled */
60ea5f83
JB
8016 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
8017 I40E_FLAG_FD_SB_ENABLED |
8018 I40E_FLAG_FD_ATR_ENABLED |
8019 I40E_FLAG_DCB_ENABLED |
8020 I40E_FLAG_SRIOV_ENABLED |
8021 I40E_FLAG_VMDQ_ENABLED);
41c445ff 8022 } else {
cbf61325
ASJ
8023 /* Not enough queues for all TCs */
8024 if ((pf->flags & I40E_FLAG_DCB_ENABLED) &&
8025 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
8026 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8027 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
8028 }
8029 pf->num_lan_qps = pf->rss_size_max;
8030 queues_left -= pf->num_lan_qps;
8031 }
8032
8033 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8034 if (queues_left > 1) {
8035 queues_left -= 1; /* save 1 queue for FD */
8036 } else {
8037 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8038 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
8039 }
41c445ff
JB
8040 }
8041
8042 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
8043 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
cbf61325
ASJ
8044 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
8045 (queues_left / pf->num_vf_qps));
41c445ff
JB
8046 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
8047 }
8048
8049 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
8050 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
8051 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
8052 (queues_left / pf->num_vmdq_qps));
8053 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
8054 }
8055
f8ff1464 8056 pf->queues_left = queues_left;
41c445ff
JB
8057}
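/* Worked example (illustration only, all numbers hypothetical): with 64 Tx
 * queue pairs, MSI-X plus RSS and FD sideband enabled, rss_size_max = 16,
 * 2 requested VFs at 4 QPs each and 8 VMDq VSIs at 1 QP each, the budget
 * above resolves as sketched below.
 */
#if 0
static int example_queue_budget(void)
{
	int queues_left = 64;		/* func_caps.num_tx_qp */

	queues_left -= 16;		/* LAN/RSS queues on TC0 */
	queues_left -= 1;		/* one queue reserved for FD sideband */
	queues_left -= 2 * 4;		/* num_req_vfs * num_vf_qps */
	queues_left -= 8 * 1;		/* num_vmdq_vsis * num_vmdq_qps */

	return queues_left;		/* 31 ends up in pf->queues_left */
}
#endif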
8058
8059/**
8060 * i40e_setup_pf_filter_control - Setup PF static filter control
8061 * @pf: PF to be setup
8062 *
8063 * i40e_setup_pf_filter_control sets up a pf's initial filter control
 8064 * settings. If PE/FCoE are enabled then it will also set the per-PF
 8065 * based filter sizes required for them. It also enables Flow Director,
8066 * ethertype and macvlan type filter settings for the pf.
8067 *
8068 * Returns 0 on success, negative on failure
8069 **/
8070static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
8071{
8072 struct i40e_filter_control_settings *settings = &pf->filter_settings;
8073
8074 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
8075
8076 /* Flow Director is enabled */
60ea5f83 8077 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
41c445ff
JB
8078 settings->enable_fdir = true;
8079
8080 /* Ethtype and MACVLAN filters enabled for PF */
8081 settings->enable_ethtype = true;
8082 settings->enable_macvlan = true;
8083
8084 if (i40e_set_filter_control(&pf->hw, settings))
8085 return -ENOENT;
8086
8087 return 0;
8088}
8089
0c22b3dd
JB
8090#define INFO_STRING_LEN 255
8091static void i40e_print_features(struct i40e_pf *pf)
8092{
8093 struct i40e_hw *hw = &pf->hw;
8094 char *buf, *string;
8095
8096 string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
8097 if (!string) {
8098 dev_err(&pf->pdev->dev, "Features string allocation failed\n");
8099 return;
8100 }
8101
8102 buf = string;
8103
8104 buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
8105#ifdef CONFIG_PCI_IOV
8106 buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
8107#endif
8108 buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis,
8109 pf->vsi[pf->lan_vsi]->num_queue_pairs);
8110
8111 if (pf->flags & I40E_FLAG_RSS_ENABLED)
8112 buf += sprintf(buf, "RSS ");
8113 buf += sprintf(buf, "FDir ");
8114 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
8115 buf += sprintf(buf, "ATR ");
8116 if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
8117 buf += sprintf(buf, "NTUPLE ");
8118 if (pf->flags & I40E_FLAG_DCB_ENABLED)
8119 buf += sprintf(buf, "DCB ");
8120 if (pf->flags & I40E_FLAG_PTP)
8121 buf += sprintf(buf, "PTP ");
8122
8123 BUG_ON(buf > (string + INFO_STRING_LEN));
8124 dev_info(&pf->pdev->dev, "%s\n", string);
8125 kfree(string);
8126}
8127
41c445ff
JB
8128/**
8129 * i40e_probe - Device initialization routine
8130 * @pdev: PCI device information struct
8131 * @ent: entry in i40e_pci_tbl
8132 *
8133 * i40e_probe initializes a pf identified by a pci_dev structure.
8134 * The OS initialization, configuring of the pf private structure,
8135 * and a hardware reset occur.
8136 *
8137 * Returns 0 on success, negative on failure
8138 **/
8139static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8140{
8141 struct i40e_driver_version dv;
8142 struct i40e_pf *pf;
8143 struct i40e_hw *hw;
93cd765b 8144 static u16 pfs_found;
d4dfb81a 8145 u16 link_status;
41c445ff
JB
8146 int err = 0;
8147 u32 len;
8a9eb7d3 8148 u32 i;
41c445ff
JB
8149
8150 err = pci_enable_device_mem(pdev);
8151 if (err)
8152 return err;
8153
8154 /* set up for high or low dma */
6494294f 8155 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6494294f 8156 if (err) {
e3e3bfdd
JS
8157 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8158 if (err) {
8159 dev_err(&pdev->dev,
8160 "DMA configuration failed: 0x%x\n", err);
8161 goto err_dma;
8162 }
41c445ff
JB
8163 }
8164
8165 /* set up pci connections */
8166 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
8167 IORESOURCE_MEM), i40e_driver_name);
8168 if (err) {
8169 dev_info(&pdev->dev,
8170 "pci_request_selected_regions failed %d\n", err);
8171 goto err_pci_reg;
8172 }
8173
8174 pci_enable_pcie_error_reporting(pdev);
8175 pci_set_master(pdev);
8176
8177 /* Now that we have a PCI connection, we need to do the
8178 * low level device setup. This is primarily setting up
8179 * the Admin Queue structures and then querying for the
8180 * device's current profile information.
8181 */
8182 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
8183 if (!pf) {
8184 err = -ENOMEM;
8185 goto err_pf_alloc;
8186 }
8187 pf->next_vsi = 0;
8188 pf->pdev = pdev;
8189 set_bit(__I40E_DOWN, &pf->state);
8190
8191 hw = &pf->hw;
8192 hw->back = pf;
8193 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
8194 pci_resource_len(pdev, 0));
8195 if (!hw->hw_addr) {
8196 err = -EIO;
8197 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
8198 (unsigned int)pci_resource_start(pdev, 0),
8199 (unsigned int)pci_resource_len(pdev, 0), err);
8200 goto err_ioremap;
8201 }
8202 hw->vendor_id = pdev->vendor;
8203 hw->device_id = pdev->device;
8204 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
8205 hw->subsystem_vendor_id = pdev->subsystem_vendor;
8206 hw->subsystem_device_id = pdev->subsystem_device;
8207 hw->bus.device = PCI_SLOT(pdev->devfn);
8208 hw->bus.func = PCI_FUNC(pdev->devfn);
93cd765b 8209 pf->instance = pfs_found;
41c445ff 8210
7134f9ce
JB
8211 /* do a special CORER for clearing PXE mode once at init */
8212 if (hw->revision_id == 0 &&
8213 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
8214 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
8215 i40e_flush(hw);
8216 msleep(200);
8217 pf->corer_count++;
8218
8219 i40e_clear_pxe_mode(hw);
8220 }
8221
41c445ff
JB
8222 /* Reset here to make sure all is clean and to define PF 'n' */
8223 err = i40e_pf_reset(hw);
8224 if (err) {
8225 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
8226 goto err_pf_reset;
8227 }
8228 pf->pfr_count++;
8229
8230 hw->aq.num_arq_entries = I40E_AQ_LEN;
8231 hw->aq.num_asq_entries = I40E_AQ_LEN;
8232 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
8233 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
8234 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
8235 snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1,
8236 "%s-pf%d:misc",
8237 dev_driver_string(&pf->pdev->dev), pf->hw.pf_id);
8238
8239 err = i40e_init_shared_code(hw);
8240 if (err) {
8241 dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
8242 goto err_pf_reset;
8243 }
8244
d52c20b7
JB
8245 /* set up a default setting for link flow control */
8246 pf->hw.fc.requested_mode = I40E_FC_NONE;
8247
41c445ff
JB
8248 err = i40e_init_adminq(hw);
8249 dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
8250 if (err) {
8251 dev_info(&pdev->dev,
8252 "init_adminq failed: %d expecting API %02x.%02x\n",
8253 err,
8254 I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
8255 goto err_pf_reset;
8256 }
8257
4eb3f768
SN
8258 i40e_verify_eeprom(pf);
8259
6ff4ef86 8260 i40e_clear_pxe_mode(hw);
41c445ff
JB
8261 err = i40e_get_capabilities(pf);
8262 if (err)
8263 goto err_adminq_setup;
8264
8265 err = i40e_sw_init(pf);
8266 if (err) {
8267 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
8268 goto err_sw_init;
8269 }
8270
8271 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
8272 hw->func_caps.num_rx_qp,
8273 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
8274 if (err) {
8275 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
8276 goto err_init_lan_hmc;
8277 }
8278
8279 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
8280 if (err) {
8281 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
8282 err = -ENOENT;
8283 goto err_configure_lan_hmc;
8284 }
8285
8286 i40e_get_mac_addr(hw, hw->mac.addr);
f62b5060 8287 if (!is_valid_ether_addr(hw->mac.addr)) {
41c445ff
JB
8288 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
8289 err = -EIO;
8290 goto err_mac_addr;
8291 }
8292 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
8293 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
8294
8295 pci_set_drvdata(pdev, pf);
8296 pci_save_state(pdev);
4e3b35b0
NP
8297#ifdef CONFIG_I40E_DCB
8298 err = i40e_init_pf_dcb(pf);
8299 if (err) {
8300 dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
8301 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
014269ff 8302 /* Continue without DCB enabled */
4e3b35b0
NP
8303 }
8304#endif /* CONFIG_I40E_DCB */
41c445ff
JB
8305
8306 /* set up periodic task facility */
8307 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
8308 pf->service_timer_period = HZ;
8309
8310 INIT_WORK(&pf->service_task, i40e_service_task);
8311 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
8312 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
8313 pf->link_check_timeout = jiffies;
8314
8e2773ae
SN
8315 /* WoL defaults to disabled */
8316 pf->wol_en = false;
8317 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
8318
41c445ff
JB
8319 /* set up the main switch operations */
8320 i40e_determine_queue_usage(pf);
8321 i40e_init_interrupt_scheme(pf);
8322
8323 /* Set up the *vsi struct based on the number of VSIs in the HW,
8324 * and set up our local tracking of the MAIN PF vsi.
8325 */
8326 len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
8327 pf->vsi = kzalloc(len, GFP_KERNEL);
ed87ac09
WY
8328 if (!pf->vsi) {
8329 err = -ENOMEM;
41c445ff 8330 goto err_switch_setup;
ed87ac09 8331 }
41c445ff 8332
bc7d338f 8333 err = i40e_setup_pf_switch(pf, false);
41c445ff
JB
8334 if (err) {
8335 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
8336 goto err_vsis;
8337 }
8a9eb7d3
SN
8338 /* if FDIR VSI was set up, start it now */
8339 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
8340 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
8341 i40e_vsi_open(pf->vsi[i]);
8342 break;
8343 }
8344 }
41c445ff
JB
8345
8346 /* The main driver is (mostly) up and happy. We need to set this state
8347 * before setting up the misc vector or we get a race and the vector
8348 * ends up disabled forever.
8349 */
8350 clear_bit(__I40E_DOWN, &pf->state);
8351
8352 /* In case of MSIX we are going to setup the misc vector right here
8353 * to handle admin queue events etc. In case of legacy and MSI
8354 * the misc functionality and queue processing is combined in
8355 * the same vector and that gets setup at open.
8356 */
8357 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
8358 err = i40e_setup_misc_vector(pf);
8359 if (err) {
8360 dev_info(&pdev->dev,
8361 "setup of misc vector failed: %d\n", err);
8362 goto err_vsis;
8363 }
8364 }
8365
df805f62 8366#ifdef CONFIG_PCI_IOV
41c445ff
JB
8367 /* prep for VF support */
8368 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
4eb3f768
SN
8369 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
8370 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
41c445ff
JB
8371 u32 val;
8372
8373 /* disable link interrupts for VFs */
8374 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
8375 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
8376 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
8377 i40e_flush(hw);
4aeec010
MW
8378
8379 if (pci_num_vf(pdev)) {
8380 dev_info(&pdev->dev,
8381 "Active VFs found, allocating resources.\n");
8382 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
8383 if (err)
8384 dev_info(&pdev->dev,
8385 "Error %d allocating resources for existing VFs\n",
8386 err);
8387 }
41c445ff 8388 }
df805f62 8389#endif /* CONFIG_PCI_IOV */
41c445ff 8390
93cd765b
ASJ
8391 pfs_found++;
8392
41c445ff
JB
8393 i40e_dbg_pf_init(pf);
8394
8395 /* tell the firmware that we're starting */
8396 dv.major_version = DRV_VERSION_MAJOR;
8397 dv.minor_version = DRV_VERSION_MINOR;
8398 dv.build_version = DRV_VERSION_BUILD;
8399 dv.subbuild_version = 0;
d2466013 8400 strncpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
41c445ff
JB
8401 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
8402
8403 /* since everything's happy, start the service_task timer */
8404 mod_timer(&pf->service_timer,
8405 round_jiffies(jiffies + pf->service_timer_period));
8406
d4dfb81a
CS
8407 /* Get the negotiated link width and speed from PCI config space */
8408 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
8409
8410 i40e_set_pci_config_data(hw, link_status);
8411
69bfb110 8412 dev_info(&pdev->dev, "PCI-Express: %s %s\n",
d4dfb81a
CS
8413 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
8414 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
8415 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
8416 "Unknown"),
8417 (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
8418 hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
8419 hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
8420 hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
8421 "Unknown"));
8422
8423 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
8424 hw->bus.speed < i40e_bus_speed_8000) {
8425 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
8426 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
8427 }
8428
0c22b3dd
JB
8429 /* print a string summarizing features */
8430 i40e_print_features(pf);
8431
41c445ff
JB
8432 return 0;
8433
8434 /* Unwind what we've done if something failed in the setup */
8435err_vsis:
8436 set_bit(__I40E_DOWN, &pf->state);
41c445ff
JB
8437 i40e_clear_interrupt_scheme(pf);
8438 kfree(pf->vsi);
04b03013
SN
8439err_switch_setup:
8440 i40e_reset_interrupt_capability(pf);
41c445ff
JB
8441 del_timer_sync(&pf->service_timer);
8442err_mac_addr:
8443err_configure_lan_hmc:
8444 (void)i40e_shutdown_lan_hmc(hw);
8445err_init_lan_hmc:
8446 kfree(pf->qp_pile);
8447 kfree(pf->irq_pile);
8448err_sw_init:
8449err_adminq_setup:
8450 (void)i40e_shutdown_adminq(hw);
8451err_pf_reset:
8452 iounmap(hw->hw_addr);
8453err_ioremap:
8454 kfree(pf);
8455err_pf_alloc:
8456 pci_disable_pcie_error_reporting(pdev);
8457 pci_release_selected_regions(pdev,
8458 pci_select_bars(pdev, IORESOURCE_MEM));
8459err_pci_reg:
8460err_dma:
8461 pci_disable_device(pdev);
8462 return err;
8463}
8464
8465/**
8466 * i40e_remove - Device removal routine
8467 * @pdev: PCI device information struct
8468 *
8469 * i40e_remove is called by the PCI subsystem to alert the driver
 8470 * that it should release a PCI device. This could be caused by a
8471 * Hot-Plug event, or because the driver is going to be removed from
8472 * memory.
8473 **/
8474static void i40e_remove(struct pci_dev *pdev)
8475{
8476 struct i40e_pf *pf = pci_get_drvdata(pdev);
8477 i40e_status ret_code;
8478 u32 reg;
8479 int i;
8480
8481 i40e_dbg_pf_exit(pf);
8482
beb0dff1
JK
8483 i40e_ptp_stop(pf);
8484
41c445ff
JB
8485 /* no more scheduling of any task */
8486 set_bit(__I40E_DOWN, &pf->state);
8487 del_timer_sync(&pf->service_timer);
8488 cancel_work_sync(&pf->service_task);
8489
eb2d80bc
MW
8490 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
8491 i40e_free_vfs(pf);
8492 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
8493 }
8494
41c445ff
JB
8495 i40e_fdir_teardown(pf);
8496
8497 /* If there is a switch structure or any orphans, remove them.
8498 * This will leave only the PF's VSI remaining.
8499 */
8500 for (i = 0; i < I40E_MAX_VEB; i++) {
8501 if (!pf->veb[i])
8502 continue;
8503
8504 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
8505 pf->veb[i]->uplink_seid == 0)
8506 i40e_switch_branch_release(pf->veb[i]);
8507 }
8508
8509 /* Now we can shutdown the PF's VSI, just before we kill
8510 * adminq and hmc.
8511 */
8512 if (pf->vsi[pf->lan_vsi])
8513 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
8514
8515 i40e_stop_misc_vector(pf);
8516 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
8517 synchronize_irq(pf->msix_entries[0].vector);
8518 free_irq(pf->msix_entries[0].vector, pf);
8519 }
8520
8521 /* shutdown and destroy the HMC */
8522 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
8523 if (ret_code)
8524 dev_warn(&pdev->dev,
8525 "Failed to destroy the HMC resources: %d\n", ret_code);
8526
8527 /* shutdown the adminq */
41c445ff
JB
8528 ret_code = i40e_shutdown_adminq(&pf->hw);
8529 if (ret_code)
8530 dev_warn(&pdev->dev,
8531 "Failed to destroy the Admin Queue resources: %d\n",
8532 ret_code);
8533
8534 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
8535 i40e_clear_interrupt_scheme(pf);
8536 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
8537 if (pf->vsi[i]) {
8538 i40e_vsi_clear_rings(pf->vsi[i]);
8539 i40e_vsi_clear(pf->vsi[i]);
8540 pf->vsi[i] = NULL;
8541 }
8542 }
8543
8544 for (i = 0; i < I40E_MAX_VEB; i++) {
8545 kfree(pf->veb[i]);
8546 pf->veb[i] = NULL;
8547 }
8548
8549 kfree(pf->qp_pile);
8550 kfree(pf->irq_pile);
8551 kfree(pf->sw_config);
8552 kfree(pf->vsi);
8553
8554 /* force a PF reset to clean anything leftover */
8555 reg = rd32(&pf->hw, I40E_PFGEN_CTRL);
8556 wr32(&pf->hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
8557 i40e_flush(&pf->hw);
8558
8559 iounmap(pf->hw.hw_addr);
8560 kfree(pf);
8561 pci_release_selected_regions(pdev,
8562 pci_select_bars(pdev, IORESOURCE_MEM));
8563
8564 pci_disable_pcie_error_reporting(pdev);
8565 pci_disable_device(pdev);
8566}
8567
8568/**
8569 * i40e_pci_error_detected - warning that something funky happened in PCI land
8570 * @pdev: PCI device information struct
8571 *
8572 * Called to warn that something happened and the error handling steps
 8573 * are in progress. Allows the driver to quiesce things and be ready for
8574 * remediation.
8575 **/
8576static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
8577 enum pci_channel_state error)
8578{
8579 struct i40e_pf *pf = pci_get_drvdata(pdev);
8580
8581 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
8582
8583 /* shutdown all operations */
9007bccd
SN
8584 if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
8585 rtnl_lock();
8586 i40e_prep_for_reset(pf);
8587 rtnl_unlock();
8588 }
41c445ff
JB
8589
8590 /* Request a slot reset */
8591 return PCI_ERS_RESULT_NEED_RESET;
8592}
8593
8594/**
8595 * i40e_pci_error_slot_reset - a PCI slot reset just happened
8596 * @pdev: PCI device information struct
8597 *
8598 * Called to find if the driver can work with the device now that
8599 * the pci slot has been reset. If a basic connection seems good
8600 * (registers are readable and have sane content) then return a
8601 * happy little PCI_ERS_RESULT_xxx.
8602 **/
8603static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
8604{
8605 struct i40e_pf *pf = pci_get_drvdata(pdev);
8606 pci_ers_result_t result;
8607 int err;
8608 u32 reg;
8609
8610 dev_info(&pdev->dev, "%s\n", __func__);
8611 if (pci_enable_device_mem(pdev)) {
8612 dev_info(&pdev->dev,
8613 "Cannot re-enable PCI device after reset.\n");
8614 result = PCI_ERS_RESULT_DISCONNECT;
8615 } else {
8616 pci_set_master(pdev);
8617 pci_restore_state(pdev);
8618 pci_save_state(pdev);
8619 pci_wake_from_d3(pdev, false);
8620
8621 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8622 if (reg == 0)
8623 result = PCI_ERS_RESULT_RECOVERED;
8624 else
8625 result = PCI_ERS_RESULT_DISCONNECT;
8626 }
8627
8628 err = pci_cleanup_aer_uncorrect_error_status(pdev);
8629 if (err) {
8630 dev_info(&pdev->dev,
8631 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8632 err);
8633 /* non-fatal, continue */
8634 }
8635
8636 return result;
8637}
8638
8639/**
8640 * i40e_pci_error_resume - restart operations after PCI error recovery
8641 * @pdev: PCI device information struct
8642 *
8643 * Called to allow the driver to bring things back up after PCI error
8644 * and/or reset recovery has finished.
8645 **/
8646static void i40e_pci_error_resume(struct pci_dev *pdev)
8647{
8648 struct i40e_pf *pf = pci_get_drvdata(pdev);
8649
8650 dev_info(&pdev->dev, "%s\n", __func__);
9007bccd
SN
8651 if (test_bit(__I40E_SUSPENDED, &pf->state))
8652 return;
8653
8654 rtnl_lock();
41c445ff 8655 i40e_handle_reset_warning(pf);
9007bccd
SN
 8656	rtnl_unlock();
8657}
8658
8659/**
8660 * i40e_shutdown - PCI callback for shutting down
8661 * @pdev: PCI device information struct
8662 **/
8663static void i40e_shutdown(struct pci_dev *pdev)
8664{
8665 struct i40e_pf *pf = pci_get_drvdata(pdev);
8e2773ae 8666 struct i40e_hw *hw = &pf->hw;
9007bccd
SN
8667
8668 set_bit(__I40E_SUSPENDED, &pf->state);
8669 set_bit(__I40E_DOWN, &pf->state);
8670 rtnl_lock();
8671 i40e_prep_for_reset(pf);
8672 rtnl_unlock();
8673
8e2773ae
SN
8674 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
8675 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
8676
9007bccd 8677 if (system_state == SYSTEM_POWER_OFF) {
8e2773ae 8678 pci_wake_from_d3(pdev, pf->wol_en);
9007bccd
SN
8679 pci_set_power_state(pdev, PCI_D3hot);
8680 }
8681}
8682
8683#ifdef CONFIG_PM
8684/**
8685 * i40e_suspend - PCI callback for moving to D3
8686 * @pdev: PCI device information struct
8687 **/
8688static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
8689{
8690 struct i40e_pf *pf = pci_get_drvdata(pdev);
8e2773ae 8691 struct i40e_hw *hw = &pf->hw;
9007bccd
SN
8692
8693 set_bit(__I40E_SUSPENDED, &pf->state);
8694 set_bit(__I40E_DOWN, &pf->state);
8695 rtnl_lock();
8696 i40e_prep_for_reset(pf);
8697 rtnl_unlock();
8698
8e2773ae
SN
8699 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
8700 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
8701
8702 pci_wake_from_d3(pdev, pf->wol_en);
9007bccd
SN
8703 pci_set_power_state(pdev, PCI_D3hot);
8704
8705 return 0;
41c445ff
JB
8706}
8707
9007bccd
SN
8708/**
8709 * i40e_resume - PCI callback for waking up from D3
8710 * @pdev: PCI device information struct
8711 **/
8712static int i40e_resume(struct pci_dev *pdev)
8713{
8714 struct i40e_pf *pf = pci_get_drvdata(pdev);
8715 u32 err;
8716
8717 pci_set_power_state(pdev, PCI_D0);
8718 pci_restore_state(pdev);
 8719	/* pci_restore_state() clears dev->state_saved, so
8720 * call pci_save_state() again to restore it.
8721 */
8722 pci_save_state(pdev);
8723
8724 err = pci_enable_device_mem(pdev);
8725 if (err) {
8726 dev_err(&pdev->dev,
8727 "%s: Cannot enable PCI device from suspend\n",
8728 __func__);
8729 return err;
8730 }
8731 pci_set_master(pdev);
8732
8733 /* no wakeup events while running */
8734 pci_wake_from_d3(pdev, false);
8735
8736 /* handling the reset will rebuild the device state */
8737 if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
8738 clear_bit(__I40E_DOWN, &pf->state);
8739 rtnl_lock();
8740 i40e_reset_and_rebuild(pf, false);
8741 rtnl_unlock();
8742 }
8743
8744 return 0;
8745}
8746
8747#endif
41c445ff
JB
8748static const struct pci_error_handlers i40e_err_handler = {
8749 .error_detected = i40e_pci_error_detected,
8750 .slot_reset = i40e_pci_error_slot_reset,
8751 .resume = i40e_pci_error_resume,
8752};
8753
8754static struct pci_driver i40e_driver = {
8755 .name = i40e_driver_name,
8756 .id_table = i40e_pci_tbl,
8757 .probe = i40e_probe,
8758 .remove = i40e_remove,
9007bccd
SN
8759#ifdef CONFIG_PM
8760 .suspend = i40e_suspend,
8761 .resume = i40e_resume,
8762#endif
8763 .shutdown = i40e_shutdown,
41c445ff
JB
8764 .err_handler = &i40e_err_handler,
8765 .sriov_configure = i40e_pci_sriov_configure,
8766};
8767
8768/**
8769 * i40e_init_module - Driver registration routine
8770 *
8771 * i40e_init_module is the first routine called when the driver is
8772 * loaded. All it does is register with the PCI subsystem.
8773 **/
8774static int __init i40e_init_module(void)
8775{
8776 pr_info("%s: %s - version %s\n", i40e_driver_name,
8777 i40e_driver_string, i40e_driver_version_str);
8778 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
8779 i40e_dbg_init();
8780 return pci_register_driver(&i40e_driver);
8781}
8782module_init(i40e_init_module);
8783
8784/**
8785 * i40e_exit_module - Driver exit cleanup routine
8786 *
8787 * i40e_exit_module is called just before the driver is removed
8788 * from memory.
8789 **/
8790static void __exit i40e_exit_module(void)
8791{
8792 pci_unregister_driver(&i40e_driver);
8793 i40e_dbg_exit();
8794}
8795module_exit(i40e_exit_module);