/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

/* Local includes */
#include "i40e.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
		"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
#define DRV_VERSION_BUILD 11
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	__stringify(DRV_VERSION_MINOR) "." \
	__stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 Intel Corporation.";

/* forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
        {PCI_VDEVICE(INTEL, I40E_SFP_XL710_DEVICE_ID), 0},
        {PCI_VDEVICE(INTEL, I40E_SFP_X710_DEVICE_ID), 0},
        {PCI_VDEVICE(INTEL, I40E_QEMU_DEVICE_ID), 0},
        {PCI_VDEVICE(INTEL, I40E_KX_A_DEVICE_ID), 0},
        {PCI_VDEVICE(INTEL, I40E_KX_B_DEVICE_ID), 0},
        {PCI_VDEVICE(INTEL, I40E_KX_C_DEVICE_ID), 0},
        {PCI_VDEVICE(INTEL, I40E_KX_D_DEVICE_ID), 0},
        {PCI_VDEVICE(INTEL, I40E_QSFP_A_DEVICE_ID), 0},
        {PCI_VDEVICE(INTEL, I40E_QSFP_B_DEVICE_ID), 0},
        {PCI_VDEVICE(INTEL, I40E_QSFP_C_DEVICE_ID), 0},
        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
                            u64 size, u32 alignment)
{
        struct i40e_pf *pf = (struct i40e_pf *)hw->back;

        mem->size = ALIGN(size, alignment);
        mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
                                      &mem->pa, GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;

        return 0;
}

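/* For example: ALIGN() rounds the requested size up to the next multiple
 * of the alignment, so a request of size = 1000 with alignment = 128 is
 * padded to mem->size = 1024 before the coherent mapping is made
 * (hypothetical values, for illustration only).
 */
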
/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
        struct i40e_pf *pf = (struct i40e_pf *)hw->back;

        dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
        mem->va = NULL;
        mem->pa = 0;
        mem->size = 0;

        return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
                             u32 size)
{
        mem->size = size;
        mem->va = kzalloc(size, GFP_KERNEL);

        if (!mem->va)
                return -ENOMEM;

        return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
        /* it's ok to kfree a NULL pointer */
        kfree(mem->va);
        mem->va = NULL;
        mem->size = 0;

        return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
                         u16 needed, u16 id)
{
        int ret = -ENOMEM;
        int i, j;

        if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
                dev_info(&pf->pdev->dev,
                         "param err: pile=%p needed=%d id=0x%04x\n",
                         pile, needed, id);
                return -EINVAL;
        }

        /* start the linear search with an imperfect hint */
        i = pile->search_hint;
        while (i < pile->num_entries) {
                /* skip already allocated entries */
                if (pile->list[i] & I40E_PILE_VALID_BIT) {
                        i++;
                        continue;
                }

                /* do we have enough in this lump? */
                for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
                        if (pile->list[i+j] & I40E_PILE_VALID_BIT)
                                break;
                }

                if (j == needed) {
                        /* there was enough, so assign it to the requestor */
                        for (j = 0; j < needed; j++)
                                pile->list[i+j] = id | I40E_PILE_VALID_BIT;
                        ret = i;
                        pile->search_hint = i + j;
                        break;
                } else {
                        /* not enough, so skip over it and continue looking */
                        i += j;
                }
        }

        return ret;
}

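/* Worked example with hypothetical values: needed = 4, search_hint = 0,
 * and a 16-entry pile whose slots 0-1 already carry I40E_PILE_VALID_BIT.
 * The walk skips slots 0-1, finds four consecutive free slots starting
 * at index 2, stamps slots 2-5 with (id | I40E_PILE_VALID_BIT), advances
 * search_hint to 6, and returns the base index 2.
 */
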
/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
        int valid_id = (id | I40E_PILE_VALID_BIT);
        int count = 0;
        int i;

        if (!pile || index >= pile->num_entries)
                return -EINVAL;

        for (i = index;
             i < pile->num_entries && pile->list[i] == valid_id;
             i++) {
                pile->list[i] = 0;
                count++;
        }

        if (count && index < pile->search_hint)
                pile->search_hint = index;

        return count;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
static void i40e_service_event_schedule(struct i40e_pf *pf)
{
        if (!test_bit(__I40E_DOWN, &pf->state) &&
            !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
            !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
                schedule_work(&pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;

        pf->tx_timeout_count++;

        if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
                pf->tx_timeout_recovery_level = 0;
        pf->tx_timeout_last_recovery = jiffies;
        netdev_info(netdev, "tx_timeout recovery level %d\n",
                    pf->tx_timeout_recovery_level);

        switch (pf->tx_timeout_recovery_level) {
        case 0:
                /* disable and re-enable queues for the VSI */
                if (in_interrupt()) {
                        set_bit(__I40E_REINIT_REQUESTED, &pf->state);
                        set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
                } else {
                        i40e_vsi_reinit_locked(vsi);
                }
                break;
        case 1:
                set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
                break;
        case 2:
                set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
                break;
        case 3:
                set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
                break;
        default:
                netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
                i40e_down(vsi);
                break;
        }
        i40e_service_event_schedule(pf);
        pf->tx_timeout_recovery_level++;
}

/**
 * i40e_release_rx_desc - Store the new tail value
 * @rx_ring: ring to bump
 * @val: new tail index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
        rx_ring->next_to_use = val;

        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64).
         */
        wmb();
        writel(val, rx_ring->tail);
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
        return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
                                             struct net_device *netdev,
                                             struct rtnl_link_stats64 *stats)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
        int i;

        rcu_read_lock();
        for (i = 0; i < vsi->num_queue_pairs; i++) {
                struct i40e_ring *tx_ring, *rx_ring;
                u64 bytes, packets;
                unsigned int start;

                tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
                if (!tx_ring)
                        continue;

                do {
                        start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
                        packets = tx_ring->stats.packets;
                        bytes = tx_ring->stats.bytes;
                } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));

                stats->tx_packets += packets;
                stats->tx_bytes += bytes;
                rx_ring = &tx_ring[1];

                do {
                        start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
                        packets = rx_ring->stats.packets;
                        bytes = rx_ring->stats.bytes;
                } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));

                stats->rx_packets += packets;
                stats->rx_bytes += bytes;
        }
        rcu_read_unlock();

        /* following stats updated by i40e_watchdog_subtask() */
        stats->multicast = vsi_stats->multicast;
        stats->tx_errors = vsi_stats->tx_errors;
        stats->tx_dropped = vsi_stats->tx_dropped;
        stats->rx_errors = vsi_stats->rx_errors;
        stats->rx_crc_errors = vsi_stats->rx_crc_errors;
        stats->rx_length_errors = vsi_stats->rx_length_errors;

        return stats;
}

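/* Note on the fetch loops above: u64_stats_fetch_begin_bh() and
 * u64_stats_fetch_retry_bh() bracket a seqcount read section. On 32-bit
 * kernels a 64-bit counter update is not atomic, so the read is retried
 * whenever the ring's writer bumped the sequence mid-read; on 64-bit
 * kernels the pair compiles away to almost nothing.
 */
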
/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
        struct rtnl_link_stats64 *ns;
        int i;

        if (!vsi)
                return;

        ns = i40e_get_vsi_stats_struct(vsi);
        memset(ns, 0, sizeof(*ns));
        memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
        memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
        memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
        if (vsi->rx_rings)
                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        memset(&vsi->rx_rings[i]->stats, 0,
                               sizeof(vsi->rx_rings[i]->stats));
                        memset(&vsi->rx_rings[i]->rx_stats, 0,
                               sizeof(vsi->rx_rings[i]->rx_stats));
                        memset(&vsi->tx_rings[i]->stats, 0,
                               sizeof(vsi->tx_rings[i]->stats));
                        memset(&vsi->tx_rings[i]->tx_stats, 0,
                               sizeof(vsi->tx_rings[i]->tx_stats));
                }
        vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given pf
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
        memset(&pf->stats, 0, sizeof(pf->stats));
        memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
        pf->stat_offsets_loaded = false;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
                               bool offset_loaded, u64 *offset, u64 *stat)
{
        u64 new_data;

        if (hw->device_id == I40E_QEMU_DEVICE_ID) {
                new_data = rd32(hw, loreg);
                new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
        } else {
                new_data = rd64(hw, loreg);
        }
        if (!offset_loaded)
                *offset = new_data;
        if (likely(new_data >= *offset))
                *stat = new_data - *offset;
        else
                *stat = (new_data + ((u64)1 << 48)) - *offset;
        *stat &= 0xFFFFFFFFFFFFULL;
}

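/* Worked rollover example with hypothetical values: if the saved offset
 * is 0xFFFFFFFFFFF0 and the 48-bit counter has since wrapped to 0x10,
 * then new_data < *offset and
 *   *stat = (0x10 + (1ULL << 48)) - 0xFFFFFFFFFFF0 = 0x20,
 * so the 32 units counted across the wrap are preserved.
 */
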
/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
                               bool offset_loaded, u64 *offset, u64 *stat)
{
        u32 new_data;

        new_data = rd32(hw, reg);
        if (!offset_loaded)
                *offset = new_data;
        if (likely(new_data >= *offset))
                *stat = (u32)(new_data - *offset);
        else
                *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
        int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_eth_stats *oes;
        struct i40e_eth_stats *es;     /* device's eth stats */

        es = &vsi->eth_stats;
        oes = &vsi->eth_stats_offsets;

        /* Gather up the stats that the hw collects */
        i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_errors, &es->tx_errors);
        i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_discards, &es->rx_discards);

        i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
                           I40E_GLV_GORCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_bytes, &es->rx_bytes);
        i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
                           I40E_GLV_UPRCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_unicast, &es->rx_unicast);
        i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
                           I40E_GLV_MPRCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_multicast, &es->rx_multicast);
        i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
                           I40E_GLV_BPRCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_broadcast, &es->rx_broadcast);

        i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
                           I40E_GLV_GOTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_bytes, &es->tx_bytes);
        i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
                           I40E_GLV_UPTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_unicast, &es->tx_unicast);
        i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
                           I40E_GLV_MPTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_multicast, &es->tx_multicast);
        i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
                           I40E_GLV_BPTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_broadcast, &es->tx_broadcast);
        vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
        struct i40e_pf *pf = veb->pf;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_eth_stats *oes;
        struct i40e_eth_stats *es;     /* device's eth stats */
        int idx = 0;

        idx = veb->stats_idx;
        es = &veb->stats;
        oes = &veb->stats_offsets;

        /* Gather up the stats that the hw collects */
        i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_discards, &es->tx_discards);
        i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
                           veb->stat_offsets_loaded,
                           &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

        i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->rx_bytes, &es->rx_bytes);
        i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->rx_unicast, &es->rx_unicast);
        i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->rx_multicast, &es->rx_multicast);
        i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->rx_broadcast, &es->rx_broadcast);

        i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_bytes, &es->tx_bytes);
        i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_unicast, &es->tx_unicast);
        i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_multicast, &es->tx_multicast);
        i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_broadcast, &es->tx_broadcast);
        veb->stat_offsets_loaded = true;
}

/**
 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
 **/
static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
{
        struct i40e_hw_port_stats *osd = &pf->stats_offsets;
        struct i40e_hw_port_stats *nsd = &pf->stats;
        struct i40e_hw *hw = &pf->hw;
        u64 xoff = 0;
        u16 i, v;

        if ((hw->fc.current_mode != I40E_FC_FULL) &&
            (hw->fc.current_mode != I40E_FC_RX_PAUSE))
                return;

        xoff = nsd->link_xoff_rx;
        i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xoff_rx, &nsd->link_xoff_rx);

        /* No new LFC xoff rx */
        if (!(nsd->link_xoff_rx - xoff))
                return;

        /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
        for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
                struct i40e_vsi *vsi = pf->vsi[v];

                if (!vsi)
                        continue;

                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        struct i40e_ring *ring = vsi->tx_rings[i];

                        clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
                }
        }
}

/**
 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
 **/
static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
{
        struct i40e_hw_port_stats *osd = &pf->stats_offsets;
        struct i40e_hw_port_stats *nsd = &pf->stats;
        bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
        struct i40e_dcbx_config *dcb_cfg;
        struct i40e_hw *hw = &pf->hw;
        u16 i, v;
        u8 tc;

        dcb_cfg = &hw->local_dcbx_config;

        /* See if DCB enabled with PFC TC */
        if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
            !(dcb_cfg->pfc.pfcenable)) {
                i40e_update_link_xoff_rx(pf);
                return;
        }

        for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
                u64 prio_xoff = nsd->priority_xoff_rx[i];

                i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xoff_rx[i],
                                   &nsd->priority_xoff_rx[i]);

                /* No new PFC xoff rx */
                if (!(nsd->priority_xoff_rx[i] - prio_xoff))
                        continue;
                /* Get the TC for given priority */
                tc = dcb_cfg->etscfg.prioritytable[i];
                xoff[tc] = true;
        }

        /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
        for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
                struct i40e_vsi *vsi = pf->vsi[v];

                if (!vsi)
                        continue;

                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        struct i40e_ring *ring = vsi->tx_rings[i];

                        tc = ring->dcb_tc;
                        if (xoff[tc])
                                clear_bit(__I40E_HANG_CHECK_ARMED,
                                          &ring->state);
                }
        }
}

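/* For example, with hypothetical values: if the DCB config maps user
 * priority 3 to TC 1 (dcb_cfg->etscfg.prioritytable[3] == 1) and a new
 * XOFF frame arrived on priority 3, xoff[1] is set and every Tx ring
 * whose dcb_tc == 1 gets __I40E_HANG_CHECK_ARMED cleared, so a
 * PAUSE-induced stall on that TC is not misreported as a Tx hang.
 */
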
/**
 * i40e_update_stats - Update the board statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it all out here in a central place.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        struct rtnl_link_stats64 *ons;
        struct rtnl_link_stats64 *ns;   /* netdev stats */
        struct i40e_eth_stats *oes;
        struct i40e_eth_stats *es;     /* device's eth stats */
        u32 tx_restart, tx_busy;
        u32 rx_page, rx_buf;
        u64 rx_p, rx_b;
        u64 tx_p, tx_b;
        int i;
        u16 q;

        if (test_bit(__I40E_DOWN, &vsi->state) ||
            test_bit(__I40E_CONFIG_BUSY, &pf->state))
                return;

        ns = i40e_get_vsi_stats_struct(vsi);
        ons = &vsi->net_stats_offsets;
        es = &vsi->eth_stats;
        oes = &vsi->eth_stats_offsets;

        /* Gather up the netdev and vsi stats that the driver collects
         * on the fly during packet processing
         */
        rx_b = rx_p = 0;
        tx_b = tx_p = 0;
        tx_restart = tx_busy = 0;
        rx_page = 0;
        rx_buf = 0;
        rcu_read_lock();
        for (q = 0; q < vsi->num_queue_pairs; q++) {
                struct i40e_ring *p;
                u64 bytes, packets;
                unsigned int start;

                /* locate Tx ring */
                p = ACCESS_ONCE(vsi->tx_rings[q]);

                do {
                        start = u64_stats_fetch_begin_bh(&p->syncp);
                        packets = p->stats.packets;
                        bytes = p->stats.bytes;
                } while (u64_stats_fetch_retry_bh(&p->syncp, start));
                tx_b += bytes;
                tx_p += packets;
                tx_restart += p->tx_stats.restart_queue;
                tx_busy += p->tx_stats.tx_busy;

                /* Rx queue is part of the same block as Tx queue */
                p = &p[1];
                do {
                        start = u64_stats_fetch_begin_bh(&p->syncp);
                        packets = p->stats.packets;
                        bytes = p->stats.bytes;
                } while (u64_stats_fetch_retry_bh(&p->syncp, start));
                rx_b += bytes;
                rx_p += packets;
                rx_buf += p->rx_stats.alloc_rx_buff_failed;
                rx_page += p->rx_stats.alloc_rx_page_failed;
        }
        rcu_read_unlock();
        vsi->tx_restart = tx_restart;
        vsi->tx_busy = tx_busy;
        vsi->rx_page_failed = rx_page;
        vsi->rx_buf_failed = rx_buf;

        ns->rx_packets = rx_p;
        ns->rx_bytes = rx_b;
        ns->tx_packets = tx_p;
        ns->tx_bytes = tx_b;

        i40e_update_eth_stats(vsi);
        /* update netdev stats from eth stats */
        ons->rx_errors = oes->rx_errors;
        ns->rx_errors = es->rx_errors;
        ons->tx_errors = oes->tx_errors;
        ns->tx_errors = es->tx_errors;
        ons->multicast = oes->rx_multicast;
        ns->multicast = es->rx_multicast;
        ons->tx_dropped = oes->tx_discards;
        ns->tx_dropped = es->tx_discards;

        /* Get the port data only if this is the main PF VSI */
        if (vsi == pf->vsi[pf->lan_vsi]) {
                struct i40e_hw_port_stats *nsd = &pf->stats;
                struct i40e_hw_port_stats *osd = &pf->stats_offsets;

                i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
                                   I40E_GLPRT_GORCL(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
                i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
                                   I40E_GLPRT_GOTCL(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
                i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->eth.rx_discards,
                                   &nsd->eth.rx_discards);
                i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->eth.tx_discards,
                                   &nsd->eth.tx_discards);
                i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
                                   I40E_GLPRT_MPRCL(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->eth.rx_multicast,
                                   &nsd->eth.rx_multicast);

                i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->tx_dropped_link_down,
                                   &nsd->tx_dropped_link_down);

                i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->crc_errors, &nsd->crc_errors);
                ns->rx_crc_errors = nsd->crc_errors;

                i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->illegal_bytes, &nsd->illegal_bytes);
                ns->rx_errors = nsd->crc_errors + nsd->illegal_bytes;

                i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->mac_local_faults,
                                   &nsd->mac_local_faults);
                i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->mac_remote_faults,
                                   &nsd->mac_remote_faults);

                i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->rx_length_errors,
                                   &nsd->rx_length_errors);
                ns->rx_length_errors = nsd->rx_length_errors;

                i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->link_xon_rx, &nsd->link_xon_rx);
                i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->link_xon_tx, &nsd->link_xon_tx);
                i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
                i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->link_xoff_tx, &nsd->link_xoff_tx);

                for (i = 0; i < 8; i++) {
                        i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
                                           pf->stat_offsets_loaded,
                                           &osd->priority_xon_rx[i],
                                           &nsd->priority_xon_rx[i]);
                        i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
                                           pf->stat_offsets_loaded,
                                           &osd->priority_xon_tx[i],
                                           &nsd->priority_xon_tx[i]);
                        i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
                                           pf->stat_offsets_loaded,
                                           &osd->priority_xoff_tx[i],
                                           &nsd->priority_xoff_tx[i]);
                        i40e_stat_update32(hw,
                                           I40E_GLPRT_RXON2OFFCNT(hw->port, i),
                                           pf->stat_offsets_loaded,
                                           &osd->priority_xon_2_xoff[i],
                                           &nsd->priority_xon_2_xoff[i]);
                }

                i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
                                   I40E_GLPRT_PRC64L(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->rx_size_64, &nsd->rx_size_64);
                i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
                                   I40E_GLPRT_PRC127L(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->rx_size_127, &nsd->rx_size_127);
                i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
                                   I40E_GLPRT_PRC255L(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->rx_size_255, &nsd->rx_size_255);
                i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
                                   I40E_GLPRT_PRC511L(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->rx_size_511, &nsd->rx_size_511);
                i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
                                   I40E_GLPRT_PRC1023L(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->rx_size_1023, &nsd->rx_size_1023);
                i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
                                   I40E_GLPRT_PRC1522L(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->rx_size_1522, &nsd->rx_size_1522);
                i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
                                   I40E_GLPRT_PRC9522L(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->rx_size_big, &nsd->rx_size_big);

                i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
                                   I40E_GLPRT_PTC64L(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->tx_size_64, &nsd->tx_size_64);
                i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
                                   I40E_GLPRT_PTC127L(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->tx_size_127, &nsd->tx_size_127);
                i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
                                   I40E_GLPRT_PTC255L(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->tx_size_255, &nsd->tx_size_255);
                i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
                                   I40E_GLPRT_PTC511L(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->tx_size_511, &nsd->tx_size_511);
                i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
                                   I40E_GLPRT_PTC1023L(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->tx_size_1023, &nsd->tx_size_1023);
                i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
                                   I40E_GLPRT_PTC1522L(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->tx_size_1522, &nsd->tx_size_1522);
                i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
                                   I40E_GLPRT_PTC9522L(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->tx_size_big, &nsd->tx_size_big);

                i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->rx_undersize, &nsd->rx_undersize);
                i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->rx_fragments, &nsd->rx_fragments);
                i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->rx_oversize, &nsd->rx_oversize);
                i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->rx_jabber, &nsd->rx_jabber);
        }

        pf->stat_offsets_loaded = true;
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
                                                u8 *macaddr, s16 vlan,
                                                bool is_vf, bool is_netdev)
{
        struct i40e_mac_filter *f;

        if (!vsi || !macaddr)
                return NULL;

        list_for_each_entry(f, &vsi->mac_filter_list, list) {
                if ((ether_addr_equal(macaddr, f->macaddr)) &&
                    (vlan == f->vlan) &&
                    (!is_vf || f->is_vf) &&
                    (!is_netdev || f->is_netdev))
                        return f;
        }
        return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
                                      bool is_vf, bool is_netdev)
{
        struct i40e_mac_filter *f;

        if (!vsi || !macaddr)
                return NULL;

        list_for_each_entry(f, &vsi->mac_filter_list, list) {
                if ((ether_addr_equal(macaddr, f->macaddr)) &&
                    (!is_vf || f->is_vf) &&
                    (!is_netdev || f->is_netdev))
                        return f;
        }
        return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
        struct i40e_mac_filter *f;

        /* VLAN mode is off only when every filter carries the -1 wildcard,
         * so we have to walk the whole list to be sure
         */
        list_for_each_entry(f, &vsi->mac_filter_list, list) {
                if (f->vlan >= 0)
                        return true;
        }

        return false;
}

/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a vf
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
                                             bool is_vf, bool is_netdev)
{
        struct i40e_mac_filter *f;

        list_for_each_entry(f, &vsi->mac_filter_list, list) {
                if (!i40e_find_filter(vsi, macaddr, f->vlan,
                                      is_vf, is_netdev)) {
                        if (!i40e_add_filter(vsi, macaddr, f->vlan,
                                             is_vf, is_netdev))
                                return NULL;
                }
        }

        return list_first_entry_or_null(&vsi->mac_filter_list,
                                        struct i40e_mac_filter, list);
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
                                        u8 *macaddr, s16 vlan,
                                        bool is_vf, bool is_netdev)
{
        struct i40e_mac_filter *f;

        if (!vsi || !macaddr)
                return NULL;

        f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
        if (!f) {
                f = kzalloc(sizeof(*f), GFP_ATOMIC);
                if (!f)
                        goto add_filter_out;

                memcpy(f->macaddr, macaddr, ETH_ALEN);
                f->vlan = vlan;
                f->changed = true;

                INIT_LIST_HEAD(&f->list);
                list_add(&f->list, &vsi->mac_filter_list);
        }

        /* increment counter and add a new flag if needed */
        if (is_vf) {
                if (!f->is_vf) {
                        f->is_vf = true;
                        f->counter++;
                }
        } else if (is_netdev) {
                if (!f->is_netdev) {
                        f->is_netdev = true;
                        f->counter++;
                }
        } else {
                f->counter++;
        }

        /* changed tells sync_filters_subtask to
         * push the filter down to the firmware
         */
        if (f->changed) {
                vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
                vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
        }

add_filter_out:
        return f;
}

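/* Reference-count example with a hypothetical sequence: adding the same
 * MAC/VLAN pair once with is_netdev and once with is_vf reuses a single
 * filter object, leaving counter == 2 with both flags set. A later
 * i40e_del_filter() for just the netdev user drops counter to 1 and the
 * filter stays programmed for the VF user.
 */
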
/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
                     u8 *macaddr, s16 vlan,
                     bool is_vf, bool is_netdev)
{
        struct i40e_mac_filter *f;

        if (!vsi || !macaddr)
                return;

        f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
        if (!f || f->counter == 0)
                return;

        if (is_vf) {
                if (f->is_vf) {
                        f->is_vf = false;
                        f->counter--;
                }
        } else if (is_netdev) {
                if (f->is_netdev) {
                        f->is_netdev = false;
                        f->counter--;
                }
        } else {
                /* make sure we don't remove a filter in use by vf or netdev */
                int min_f = 0;

                min_f += (f->is_vf ? 1 : 0);
                min_f += (f->is_netdev ? 1 : 0);

                if (f->counter > min_f)
                        f->counter--;
        }

        /* counter == 0 tells sync_filters_subtask to
         * remove the filter from the firmware's list
         */
        if (f->counter == 0) {
                f->changed = true;
                vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
                vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
        }
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct sockaddr *addr = p;
        struct i40e_mac_filter *f;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);

        if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
                return 0;

        if (vsi->type == I40E_VSI_MAIN) {
                i40e_status ret;

                ret = i40e_aq_mac_address_write(&vsi->back->hw,
                                                I40E_AQC_WRITE_TYPE_LAA_ONLY,
                                                addr->sa_data, NULL);
                if (ret) {
                        netdev_info(netdev,
                                    "Addr change for Main VSI failed: %d\n",
                                    ret);
                        return -EADDRNOTAVAIL;
                }

                memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
        }

        /* In order to be sure to not drop any packets, add the new address
         * then delete the old one.
         */
        f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
        if (!f)
                return -ENOMEM;

        i40e_sync_vsi_filters(vsi);
        i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
        i40e_sync_vsi_filters(vsi);

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return 0;
}

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                                     struct i40e_vsi_context *ctxt,
                                     u8 enabled_tc,
                                     bool is_add)
{
        struct i40e_pf *pf = vsi->back;
        u16 sections = 0;
        u8 netdev_tc = 0;
        u16 numtc = 0;
        u16 qcount;
        u8 offset;
        u16 qmap;
        int i;

        sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
        offset = 0;

        if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
                /* Find numtc from enabled TC bitmap */
                for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                        if (enabled_tc & (1 << i)) /* TC is enabled */
                                numtc++;
                }
                if (!numtc) {
                        dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
                        numtc = 1;
                }
        } else {
                /* At least TC0 is enabled in case of non-DCB case */
                numtc = 1;
        }

        vsi->tc_config.numtc = numtc;
        vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;

        /* Setup queue offset/count for all TCs for given VSI */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                /* See if the given TC is enabled for the given VSI */
                if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
                        int pow, num_qps;

                        vsi->tc_config.tc_info[i].qoffset = offset;
                        switch (vsi->type) {
                        case I40E_VSI_MAIN:
                                if (i == 0)
                                        qcount = pf->rss_size;
                                else
                                        qcount = pf->num_tc_qps;
                                vsi->tc_config.tc_info[i].qcount = qcount;
                                break;
                        case I40E_VSI_FDIR:
                        case I40E_VSI_SRIOV:
                        case I40E_VSI_VMDQ2:
                        default:
                                qcount = vsi->alloc_queue_pairs;
                                vsi->tc_config.tc_info[i].qcount = qcount;
                                WARN_ON(i != 0);
                                break;
                        }

                        /* find the power-of-2 of the number of queue pairs */
                        num_qps = vsi->tc_config.tc_info[i].qcount;
                        pow = 0;
                        while (num_qps &&
                               ((1 << pow) < vsi->tc_config.tc_info[i].qcount)) {
                                pow++;
                                num_qps >>= 1;
                        }

                        vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
                        qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
                               (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

                        offset += vsi->tc_config.tc_info[i].qcount;
                } else {
                        /* TC is not enabled so set the offset to
                         * default queue and allocate one queue
                         * for the given TC.
                         */
                        vsi->tc_config.tc_info[i].qoffset = 0;
                        vsi->tc_config.tc_info[i].qcount = 1;
                        vsi->tc_config.tc_info[i].netdev_tc = 0;

                        qmap = 0;
                }
                ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
        }

        /* Set actual Tx/Rx queue pairs */
        vsi->num_queue_pairs = offset;

        /* Scheduler section valid can only be set for ADD VSI */
        if (is_add) {
                sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

                ctxt->info.up_enable_bits = enabled_tc;
        }
        if (vsi->type == I40E_VSI_SRIOV) {
                ctxt->info.mapping_flags |=
                                cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
                for (i = 0; i < vsi->num_queue_pairs; i++)
                        ctxt->info.queue_mapping[i] =
                                        cpu_to_le16(vsi->base_queue + i);
        } else {
                ctxt->info.mapping_flags |=
                                cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
                ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
        }
        ctxt->info.valid_sections |= cpu_to_le16(sections);
}

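/* Worked qmap example with hypothetical values: a TC with offset = 8 and
 * qcount = 6 yields pow = 3, the smallest power-of-two exponent with
 * (1 << pow) >= 6, so
 *   qmap = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *          (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 * describes a 2^3 = 8 queue region starting at queue 8 for that TC.
 */
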
/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40e_set_rx_mode(struct net_device *netdev)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_mac_filter *f, *ftmp;
        struct i40e_vsi *vsi = np->vsi;
        struct netdev_hw_addr *uca;
        struct netdev_hw_addr *mca;
        struct netdev_hw_addr *ha;

        /* add addr if not already in the filter list */
        netdev_for_each_uc_addr(uca, netdev) {
                if (!i40e_find_mac(vsi, uca->addr, false, true)) {
                        if (i40e_is_vsi_in_vlan(vsi))
                                i40e_put_mac_in_vlan(vsi, uca->addr,
                                                     false, true);
                        else
                                i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
                                                false, true);
                }
        }

        netdev_for_each_mc_addr(mca, netdev) {
                if (!i40e_find_mac(vsi, mca->addr, false, true)) {
                        if (i40e_is_vsi_in_vlan(vsi))
                                i40e_put_mac_in_vlan(vsi, mca->addr,
                                                     false, true);
                        else
                                i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
                                                false, true);
                }
        }

        /* remove filter if not in netdev list */
        list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
                bool found = false;

                if (!f->is_netdev)
                        continue;

                if (is_multicast_ether_addr(f->macaddr)) {
                        netdev_for_each_mc_addr(mca, netdev) {
                                if (ether_addr_equal(mca->addr, f->macaddr)) {
                                        found = true;
                                        break;
                                }
                        }
                } else {
                        netdev_for_each_uc_addr(uca, netdev) {
                                if (ether_addr_equal(uca->addr, f->macaddr)) {
                                        found = true;
                                        break;
                                }
                        }

                        for_each_dev_addr(netdev, ha) {
                                if (ether_addr_equal(ha->addr, f->macaddr)) {
                                        found = true;
                                        break;
                                }
                        }
                }
                if (!found)
                        i40e_del_filter(vsi, f->macaddr,
                                        I40E_VLAN_ANY, false, true);
        }

        /* check for other flag changes */
        if (vsi->current_netdev_flags != vsi->netdev->flags) {
                vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
                vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
        }
}

/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
        struct i40e_mac_filter *f, *ftmp;
        bool promisc_forced_on = false;
        bool add_happened = false;
        int filter_list_len = 0;
        u32 changed_flags = 0;
        i40e_status aq_ret = 0;
        struct i40e_pf *pf;
        int num_add = 0;
        int num_del = 0;
        u16 cmd_flags;

        /* empty array typed pointers, kcalloc later */
        struct i40e_aqc_add_macvlan_element_data *add_list;
        struct i40e_aqc_remove_macvlan_element_data *del_list;

        while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
                usleep_range(1000, 2000);
        pf = vsi->back;

        if (vsi->netdev) {
                changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
                vsi->current_netdev_flags = vsi->netdev->flags;
        }

        if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
                vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

                filter_list_len = pf->hw.aq.asq_buf_size /
                            sizeof(struct i40e_aqc_remove_macvlan_element_data);
                del_list = kcalloc(filter_list_len,
                            sizeof(struct i40e_aqc_remove_macvlan_element_data),
                            GFP_KERNEL);
                if (!del_list)
                        return -ENOMEM;

                list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
                        if (!f->changed)
                                continue;

                        if (f->counter != 0)
                                continue;
                        f->changed = false;
                        cmd_flags = 0;

                        /* add to delete list */
                        memcpy(del_list[num_del].mac_addr,
                               f->macaddr, ETH_ALEN);
                        del_list[num_del].vlan_tag =
                                cpu_to_le16((u16)(f->vlan ==
                                            I40E_VLAN_ANY ? 0 : f->vlan));

                        /* vlan0 as wild card to allow packets from all vlans */
                        if (f->vlan == I40E_VLAN_ANY ||
                            (vsi->netdev && !(vsi->netdev->features &
                                              NETIF_F_HW_VLAN_CTAG_FILTER)))
                                cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
                        cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
                        del_list[num_del].flags = cmd_flags;
                        num_del++;

                        /* unlink from filter list */
                        list_del(&f->list);
                        kfree(f);

                        /* flush a full buffer */
                        if (num_del == filter_list_len) {
                                aq_ret = i40e_aq_remove_macvlan(&pf->hw,
                                            vsi->seid, del_list, num_del,
                                            NULL);
                                num_del = 0;
                                memset(del_list, 0, sizeof(*del_list));

                                if (aq_ret)
                                        dev_info(&pf->pdev->dev,
                                                 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
                                                 aq_ret,
                                                 pf->hw.aq.asq_last_status);
                        }
                }
                if (num_del) {
                        aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
                                                    del_list, num_del, NULL);
                        num_del = 0;

                        if (aq_ret)
                                dev_info(&pf->pdev->dev,
                                         "ignoring delete macvlan error, err %d, aq_err %d\n",
                                         aq_ret, pf->hw.aq.asq_last_status);
                }

                kfree(del_list);
                del_list = NULL;

                /* do all the adds now */
                filter_list_len = pf->hw.aq.asq_buf_size /
                               sizeof(struct i40e_aqc_add_macvlan_element_data);
                add_list = kcalloc(filter_list_len,
                               sizeof(struct i40e_aqc_add_macvlan_element_data),
                               GFP_KERNEL);
                if (!add_list)
                        return -ENOMEM;

                list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
                        if (!f->changed)
                                continue;

                        if (f->counter == 0)
                                continue;
                        f->changed = false;
                        add_happened = true;
                        cmd_flags = 0;

                        /* add to add array */
                        memcpy(add_list[num_add].mac_addr,
                               f->macaddr, ETH_ALEN);
                        add_list[num_add].vlan_tag =
                                cpu_to_le16(
                                 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
                        add_list[num_add].queue_number = 0;

                        cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;

                        /* vlan0 as wild card to allow packets from all vlans */
                        if (f->vlan == I40E_VLAN_ANY || (vsi->netdev &&
                            !(vsi->netdev->features &
                              NETIF_F_HW_VLAN_CTAG_FILTER)))
                                cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
                        add_list[num_add].flags = cpu_to_le16(cmd_flags);
                        num_add++;

                        /* flush a full buffer */
                        if (num_add == filter_list_len) {
                                aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
                                                             add_list, num_add,
                                                             NULL);
                                num_add = 0;

                                if (aq_ret)
                                        break;
                                memset(add_list, 0, sizeof(*add_list));
                        }
                }
                if (num_add) {
                        aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
                                                     add_list, num_add, NULL);
                        num_add = 0;
                }
                kfree(add_list);
                add_list = NULL;

                if (add_happened && (!aq_ret)) {
                        /* do nothing */;
                } else if (add_happened && (aq_ret)) {
                        dev_info(&pf->pdev->dev,
                                 "add filter failed, err %d, aq_err %d\n",
                                 aq_ret, pf->hw.aq.asq_last_status);
                        if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
                            !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
                                      &vsi->state)) {
                                promisc_forced_on = true;
                                set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
                                        &vsi->state);
                                dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
                        }
                }
        }

        /* check for changes in promiscuous modes */
        if (changed_flags & IFF_ALLMULTI) {
                bool cur_multipromisc;

                cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
                aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
                                                               vsi->seid,
                                                               cur_multipromisc,
                                                               NULL);
                if (aq_ret)
                        dev_info(&pf->pdev->dev,
                                 "set multi promisc failed, err %d, aq_err %d\n",
                                 aq_ret, pf->hw.aq.asq_last_status);
        }
        if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
                bool cur_promisc;

                cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
                               test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
                                        &vsi->state));
                aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
                                                             vsi->seid,
                                                             cur_promisc, NULL);
                if (aq_ret)
                        dev_info(&pf->pdev->dev,
                                 "set uni promisc failed, err %d, aq_err %d\n",
                                 aq_ret, pf->hw.aq.asq_last_status);
        }

        clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
        return 0;
}

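/* Batch-size illustration (hypothetical numbers): with a 4096-byte
 * AdminQ buffer and a 16-byte remove-macvlan element, filter_list_len
 * would be 4096 / 16 = 256, so deletions are flushed to firmware in
 * batches of at most 256 entries; additions are batched the same way
 * against sizeof(struct i40e_aqc_add_macvlan_element_data).
 */
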
/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
        int v;

        if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
                return;
        pf->flags &= ~I40E_FLAG_FILTER_SYNC;

        for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
                if (pf->vsi[v] &&
                    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
                        i40e_sync_vsi_filters(pf->vsi[v]);
        }
}

/**
 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
        struct i40e_vsi *vsi = np->vsi;

        /* MTU < 68 is an error and causes problems on some kernels */
        if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
                return -EINVAL;

        netdev_info(netdev, "changing MTU from %d to %d\n",
                    netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        if (netif_running(netdev))
                i40e_vsi_reinit_locked(vsi);

        return 0;
}

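/* For example: new_mtu = 1500 gives max_frame = 1500 + ETH_HLEN (14) +
 * ETH_FCS_LEN (4) = 1518 bytes on the wire, which must not exceed
 * I40E_MAX_RXBUFFER for the change to be accepted.
 */
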
/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
{
        struct i40e_vsi_context ctxt;
        i40e_status ret;

        if ((vsi->info.valid_sections &
             cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
            ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
                return;  /* already enabled */

        vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
        vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
                                    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;

        ctxt.seid = vsi->seid;
        memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
        ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
                         "%s: update vsi failed, aq_err=%d\n",
                         __func__, vsi->back->hw.aq.asq_last_status);
        }
}

/**
 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
        struct i40e_vsi_context ctxt;
        i40e_status ret;

        if ((vsi->info.valid_sections &
             cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
            ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
             I40E_AQ_VSI_PVLAN_EMOD_MASK))
                return;  /* already disabled */

        vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
        vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
                                    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

        ctxt.seid = vsi->seid;
        memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
        ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
                         "%s: update vsi failed, aq_err=%d\n",
                         __func__, vsi->back->hw.aq.asq_last_status);
        }
}

/**
 * i40e_vlan_rx_register - Setup or shutdown vlan offload
 * @netdev: network interface to be adjusted
 * @features: netdev features to test if VLAN offload is enabled or not
 **/
static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;

        if (features & NETIF_F_HW_VLAN_CTAG_RX)
                i40e_vlan_stripping_enable(vsi);
        else
                i40e_vlan_stripping_disable(vsi);
}

/**
 * i40e_vsi_add_vlan - Add vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only , -1 = any)
 **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
        struct i40e_mac_filter *f, *add_f;
        bool is_netdev, is_vf;
        int ret;

        is_vf = (vsi->type == I40E_VSI_SRIOV);
        is_netdev = !!(vsi->netdev);

        if (is_netdev) {
                add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
                                        is_vf, is_netdev);
                if (!add_f) {
                        dev_info(&vsi->back->pdev->dev,
                                 "Could not add vlan filter %d for %pM\n",
                                 vid, vsi->netdev->dev_addr);
                        return -ENOMEM;
                }
        }

        list_for_each_entry(f, &vsi->mac_filter_list, list) {
                add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
                if (!add_f) {
                        dev_info(&vsi->back->pdev->dev,
                                 "Could not add vlan filter %d for %pM\n",
                                 vid, f->macaddr);
                        return -ENOMEM;
                }
        }

        ret = i40e_sync_vsi_filters(vsi);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
                         "Could not sync filters for vid %d\n", vid);
                return ret;
        }

        /* Now if we add a vlan tag, make sure to check if it is the first
         * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
         * with 0, so we now accept untagged and specified tagged traffic
         * (and not all tagged and untagged)
         */
        if (vid > 0) {
                if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
                                                  I40E_VLAN_ANY,
                                                  is_vf, is_netdev)) {
                        i40e_del_filter(vsi, vsi->netdev->dev_addr,
                                        I40E_VLAN_ANY, is_vf, is_netdev);
                        add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
                                                is_vf, is_netdev);
                        if (!add_f) {
                                dev_info(&vsi->back->pdev->dev,
                                         "Could not add filter 0 for %pM\n",
                                         vsi->netdev->dev_addr);
                                return -ENOMEM;
                        }
                }

                list_for_each_entry(f, &vsi->mac_filter_list, list) {
                        if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
                                             is_vf, is_netdev)) {
                                i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
                                                is_vf, is_netdev);
                                add_f = i40e_add_filter(vsi, f->macaddr,
                                                        0, is_vf, is_netdev);
                                if (!add_f) {
                                        dev_info(&vsi->back->pdev->dev,
                                                 "Could not add filter 0 for %pM\n",
                                                 f->macaddr);
                                        return -ENOMEM;
                                }
                        }
                }
                ret = i40e_sync_vsi_filters(vsi);
        }

        return ret;
}

1846/**
1847 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
1848 * @vsi: the vsi being configured
1849 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
1850 *
1851 * Return: 0 on success or negative otherwise
1852 **/
1853int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
1854{
1855 struct net_device *netdev = vsi->netdev;
1856 struct i40e_mac_filter *f, *add_f;
1857 bool is_vf, is_netdev;
1858 int filter_count = 0;
1859 int ret;
1860
1861 is_vf = (vsi->type == I40E_VSI_SRIOV);
1862 is_netdev = !!(netdev);
1863
1864 if (is_netdev)
1865 i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
1866
1867 list_for_each_entry(f, &vsi->mac_filter_list, list)
1868 i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
1869
1870 ret = i40e_sync_vsi_filters(vsi);
1871 if (ret) {
1872 dev_info(&vsi->back->pdev->dev, "Could not sync filters\n");
1873 return ret;
1874 }
1875
1876 /* go through all the filters for this VSI and if there is only
1877 * vid == 0 it means there are no other filters, so vid 0 must
1878 * be replaced with -1. This signifies that we should from now
1879 * on accept any traffic (with any tag present, or untagged)
1880 */
1881 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1882 if (is_netdev) {
1883 if (f->vlan &&
1884 ether_addr_equal(netdev->dev_addr, f->macaddr))
1885 filter_count++;
1886 }
1887
1888 if (f->vlan)
1889 filter_count++;
1890 }
1891
1892 if (!filter_count && is_netdev) {
1893 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
1894 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
1895 is_vf, is_netdev);
1896 if (!f) {
1897 dev_info(&vsi->back->pdev->dev,
1898 "Could not add filter %d for %pM\n",
1899 I40E_VLAN_ANY, netdev->dev_addr);
1900 return -ENOMEM;
1901 }
1902 }
1903
1904 if (!filter_count) {
1905 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1906 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
1907 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
1908 is_vf, is_netdev);
1909 if (!add_f) {
1910 dev_info(&vsi->back->pdev->dev,
1911 "Could not add filter %d for %pM\n",
1912 I40E_VLAN_ANY, f->macaddr);
1913 return -ENOMEM;
1914 }
1915 }
1916 }
1917
1918 return i40e_sync_vsi_filters(vsi);
1919}
1920
1921/**
1922 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
1923 * @netdev: network interface to be adjusted
1924 * @vid: vlan id to be added
1925 *
1926 * net_device_ops implementation for adding vlan ids
1927 **/
1928static int i40e_vlan_rx_add_vid(struct net_device *netdev,
1929 __always_unused __be16 proto, u16 vid)
1930{
1931 struct i40e_netdev_priv *np = netdev_priv(netdev);
1932 struct i40e_vsi *vsi = np->vsi;
1933 int ret = 0;
1934
1935 if (vid > 4095)
1936 return -EINVAL;
1937
1938 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
1939
1940 /* If the network stack called us with vid = 0, we should
1941 * indicate to i40e_vsi_add_vlan() that we want to receive
1942 * any traffic (i.e. with any vlan tag, or untagged)
1943 */
1944 ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
1945
1946 if (!ret && (vid < VLAN_N_VID))
1947 set_bit(vid, vsi->active_vlans);
1948
1949 return ret;
1950}
1951
1952/**
1953 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
1954 * @netdev: network interface to be adjusted
1955 * @vid: vlan id to be removed
1956 *
1957 * net_device_ops implementation for removing vlan ids
1958 **/
1959static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
1960 __always_unused __be16 proto, u16 vid)
1961{
1962 struct i40e_netdev_priv *np = netdev_priv(netdev);
1963 struct i40e_vsi *vsi = np->vsi;
1964
1965 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
1966
1967 /* return code is ignored as there is nothing a user
1968 * can do about failure to remove and a log message was
1969 * already printed from the other function
1970 */
1971 i40e_vsi_kill_vlan(vsi, vid);
1972
1973 clear_bit(vid, vsi->active_vlans);
1974
1975 return 0;
1976}
1977
1978/**
1979 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
1980 * @vsi: the vsi being brought back up
1981 **/
1982static void i40e_restore_vlan(struct i40e_vsi *vsi)
1983{
1984 u16 vid;
1985
1986 if (!vsi->netdev)
1987 return;
1988
1989 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
1990
1991 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
1992 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
1993 vid);
1994}
1995
1996/**
1997 * i40e_vsi_add_pvid - Add pvid for the VSI
1998 * @vsi: the vsi being adjusted
1999 * @vid: the vlan id to set as a PVID
2000 **/
2001 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2002{
2003 struct i40e_vsi_context ctxt;
2004 i40e_status aq_ret;
2005
2006 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2007 vsi->info.pvid = cpu_to_le16(vid);
2008 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
2009 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
2010
2011 ctxt.seid = vsi->seid;
2012 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2013 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2014 if (aq_ret) {
2015 dev_info(&vsi->back->pdev->dev,
2016 "%s: update vsi failed, aq_err=%d\n",
2017 __func__, vsi->back->hw.aq.asq_last_status);
2018 return -ENOENT;
2019 }
2020
2021 return 0;
2022}
2023
2024/**
2025 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2026 * @vsi: the vsi being adjusted
2027 *
2028 * Just use the vlan_rx_register() service to put it back to normal
2029 **/
2030void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2031{
2032 vsi->info.pvid = 0;
2033 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2034}
2035
2036/**
2037 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2038 * @vsi: ptr to the VSI
2039 *
2040 * If this function returns with an error, then it's possible one or
2041 * more of the rings is populated (while the rest are not). It is the
2042 * caller's duty to clean those orphaned rings.
2043 *
2044 * Return 0 on success, negative on failure
2045 **/
2046static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2047{
2048 int i, err = 0;
2049
2050 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2051 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2052
2053 return err;
2054}
2055
2056/**
2057 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2058 * @vsi: ptr to the VSI
2059 *
2060 * Free VSI's transmit software resources
2061 **/
2062static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2063{
2064 int i;
2065
2066 for (i = 0; i < vsi->num_queue_pairs; i++)
2067 if (vsi->tx_rings[i]->desc)
2068 i40e_free_tx_resources(vsi->tx_rings[i]);
2069}
2070
2071/**
2072 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2073 * @vsi: ptr to the VSI
2074 *
2075 * If this function returns with an error, then it's possible one or
2076 * more of the rings is populated (while the rest are not). It is the
2077 * caller's duty to clean those orphaned rings.
2078 *
2079 * Return 0 on success, negative on failure
2080 **/
2081static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2082{
2083 int i, err = 0;
2084
2085 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2086 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2087 return err;
2088}
2089
2090/**
2091 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2092 * @vsi: ptr to the VSI
2093 *
2094 * Free all receive software resources
2095 **/
2096static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2097{
2098 int i;
2099
2100 for (i = 0; i < vsi->num_queue_pairs; i++)
2101 if (vsi->rx_rings[i]->desc)
2102 i40e_free_rx_resources(vsi->rx_rings[i]);
2103}
2104
2105/**
2106 * i40e_configure_tx_ring - Configure a transmit ring context and related state
2107 * @ring: The Tx ring to configure
2108 *
2109 * Configure the Tx descriptor ring in the HMC context.
2110 **/
2111static int i40e_configure_tx_ring(struct i40e_ring *ring)
2112{
2113 struct i40e_vsi *vsi = ring->vsi;
2114 u16 pf_q = vsi->base_queue + ring->queue_index;
2115 struct i40e_hw *hw = &vsi->back->hw;
2116 struct i40e_hmc_obj_txq tx_ctx;
2117 i40e_status err = 0;
2118 u32 qtx_ctl = 0;
2119
2120 /* some ATR related tx ring init */
2121 if (vsi->back->flags & I40E_FLAG_FDIR_ATR_ENABLED) {
2122 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2123 ring->atr_count = 0;
2124 } else {
2125 ring->atr_sample_rate = 0;
2126 }
2127
2128 /* initialize XPS */
2129 if (ring->q_vector && ring->netdev &&
2130 !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2131 netif_set_xps_queue(ring->netdev,
2132 &ring->q_vector->affinity_mask,
2133 ring->queue_index);
2134
2135 /* clear the context structure first */
2136 memset(&tx_ctx, 0, sizeof(tx_ctx));
2137
2138 tx_ctx.new_context = 1;
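 /* the HMC stores the descriptor ring base address in 128-byte
  * units, hence the divide below
  */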
2139 tx_ctx.base = (ring->dma / 128);
2140 tx_ctx.qlen = ring->count;
2141 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED |
2142 I40E_FLAG_FDIR_ATR_ENABLED));
2143
2144 /* As part of VSI creation/update, FW allocates certain
2145 * Tx arbitration queue sets for each TC enabled for
2146 * the VSI. The FW returns the handles to these queue
2147 * sets as part of the response buffer to Add VSI,
2148 * Update VSI, etc. AQ commands. It is expected that
2149 * these queue set handles be associated with the Tx
2150 * queues by the driver as part of the TX queue context
2151 * initialization. This has to be done regardless of
2152 * DCB as by default everything is mapped to TC0.
2153 */
2154 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2155 tx_ctx.rdylist_act = 0;
2156
2157 /* clear the context in the HMC */
2158 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2159 if (err) {
2160 dev_info(&vsi->back->pdev->dev,
2161 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2162 ring->queue_index, pf_q, err);
2163 return -ENOMEM;
2164 }
2165
2166 /* set the context in the HMC */
2167 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2168 if (err) {
2169 dev_info(&vsi->back->pdev->dev,
2170 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2171 ring->queue_index, pf_q, err);
2172 return -ENOMEM;
2173 }
2174
2175 /* Now associate this queue with this PCI function */
2176 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2177 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2178 I40E_QTX_CTL_PF_INDX_MASK);
2179 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2180 i40e_flush(hw);
2181
2182 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
2183
2184 /* cache tail off for easier writes later */
2185 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2186
2187 return 0;
2188}
2189
2190/**
2191 * i40e_configure_rx_ring - Configure a receive ring context
2192 * @ring: The Rx ring to configure
2193 *
2194 * Configure the Rx descriptor ring in the HMC context.
2195 **/
2196static int i40e_configure_rx_ring(struct i40e_ring *ring)
2197{
2198 struct i40e_vsi *vsi = ring->vsi;
2199 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2200 u16 pf_q = vsi->base_queue + ring->queue_index;
2201 struct i40e_hw *hw = &vsi->back->hw;
2202 struct i40e_hmc_obj_rxq rx_ctx;
2203 i40e_status err = 0;
2204
2205 ring->state = 0;
2206
2207 /* clear the context structure first */
2208 memset(&rx_ctx, 0, sizeof(rx_ctx));
2209
2210 ring->rx_buf_len = vsi->rx_buf_len;
2211 ring->rx_hdr_len = vsi->rx_hdr_len;
2212
2213 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2214 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
2215
2216 rx_ctx.base = (ring->dma / 128);
2217 rx_ctx.qlen = ring->count;
2218
2219 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
2220 set_ring_16byte_desc_enabled(ring);
2221 rx_ctx.dsize = 0;
2222 } else {
2223 rx_ctx.dsize = 1;
2224 }
2225
2226 rx_ctx.dtype = vsi->dtype;
2227 if (vsi->dtype) {
2228 set_ring_ps_enabled(ring);
2229 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
2230 I40E_RX_SPLIT_IP |
2231 I40E_RX_SPLIT_TCP_UDP |
2232 I40E_RX_SPLIT_SCTP;
2233 } else {
2234 rx_ctx.hsplit_0 = 0;
2235 }
2236
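 /* cap the receive size at the configured max frame or at what the
  * buffer chain can actually hold, whichever is smaller
  */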
2237 rx_ctx.rxmax = min_t(u16, vsi->max_frame,
2238 (chain_len * ring->rx_buf_len));
2239 rx_ctx.tphrdesc_ena = 1;
2240 rx_ctx.tphwdesc_ena = 1;
2241 rx_ctx.tphdata_ena = 1;
2242 rx_ctx.tphhead_ena = 1;
2243 rx_ctx.lrxqthresh = 2;
2244 rx_ctx.crcstrip = 1;
2245 rx_ctx.l2tsel = 1;
2246 rx_ctx.showiv = 1;
2247
2248 /* clear the context in the HMC */
2249 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2250 if (err) {
2251 dev_info(&vsi->back->pdev->dev,
2252 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2253 ring->queue_index, pf_q, err);
2254 return -ENOMEM;
2255 }
2256
2257 /* set the context in the HMC */
2258 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2259 if (err) {
2260 dev_info(&vsi->back->pdev->dev,
2261 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2262 ring->queue_index, pf_q, err);
2263 return -ENOMEM;
2264 }
2265
2266 /* cache tail for quicker writes, and clear the reg before use */
2267 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2268 writel(0, ring->tail);
2269
2270 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
2271
2272 return 0;
2273}
2274
2275/**
2276 * i40e_vsi_configure_tx - Configure the VSI for Tx
2277 * @vsi: VSI structure describing this set of rings and resources
2278 *
2279 * Configure the Tx VSI for operation.
2280 **/
2281static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2282{
2283 int err = 0;
2284 u16 i;
2285
2286 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2287 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2288
2289 return err;
2290}
2291
2292/**
2293 * i40e_vsi_configure_rx - Configure the VSI for Rx
2294 * @vsi: the VSI being configured
2295 *
2296 * Configure the Rx VSI for operation.
2297 **/
2298static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2299{
2300 int err = 0;
2301 u16 i;
2302
2303 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2304 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
2305 + ETH_FCS_LEN + VLAN_HLEN;
2306 else
2307 vsi->max_frame = I40E_RXBUFFER_2048;
2308
2309 /* figure out correct receive buffer length */
2310 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
2311 I40E_FLAG_RX_PS_ENABLED)) {
2312 case I40E_FLAG_RX_1BUF_ENABLED:
2313 vsi->rx_hdr_len = 0;
2314 vsi->rx_buf_len = vsi->max_frame;
2315 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2316 break;
2317 case I40E_FLAG_RX_PS_ENABLED:
2318 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2319 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2320 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
2321 break;
2322 default:
2323 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2324 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2325 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
2326 break;
2327 }
2328
2329 /* round up for the chip's needs */
2330 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
2331 (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
2332 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
2333 (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
2334
2335 /* set up individual rings */
2336 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2337 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
2338
2339 return err;
2340}
2341
2342/**
2343 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2344 * @vsi: ptr to the VSI
2345 **/
2346static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2347{
2348 u16 qoffset, qcount;
2349 int i, n;
2350
2351 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2352 return;
2353
2354 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2355 if (!(vsi->tc_config.enabled_tc & (1 << n)))
2356 continue;
2357
2358 qoffset = vsi->tc_config.tc_info[n].qoffset;
2359 qcount = vsi->tc_config.tc_info[n].qcount;
2360 for (i = qoffset; i < (qoffset + qcount); i++) {
2361 struct i40e_ring *rx_ring = vsi->rx_rings[i];
2362 struct i40e_ring *tx_ring = vsi->tx_rings[i];
2363 rx_ring->dcb_tc = n;
2364 tx_ring->dcb_tc = n;
2365 }
2366 }
2367}
2368
2369/**
2370 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2371 * @vsi: ptr to the VSI
2372 **/
2373static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2374{
2375 if (vsi->netdev)
2376 i40e_set_rx_mode(vsi->netdev);
2377}
2378
2379/**
2380 * i40e_vsi_configure - Set up the VSI for action
2381 * @vsi: the VSI being configured
2382 **/
2383static int i40e_vsi_configure(struct i40e_vsi *vsi)
2384{
2385 int err;
2386
2387 i40e_set_vsi_rx_mode(vsi);
2388 i40e_restore_vlan(vsi);
2389 i40e_vsi_config_dcb_rings(vsi);
2390 err = i40e_vsi_configure_tx(vsi);
2391 if (!err)
2392 err = i40e_vsi_configure_rx(vsi);
2393
2394 return err;
2395}
2396
2397/**
2398 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
2399 * @vsi: the VSI being configured
2400 **/
2401static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2402{
2403 struct i40e_pf *pf = vsi->back;
2404 struct i40e_q_vector *q_vector;
2405 struct i40e_hw *hw = &pf->hw;
2406 u16 vector;
2407 int i, q;
2408 u32 val;
2409 u32 qp;
2410
2411 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
2412 * and PFINT_LNKLSTn registers, e.g.:
2413 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
2414 */
2415 qp = vsi->base_queue;
2416 vector = vsi->base_vector;
2417 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
2418 q_vector = vsi->q_vectors[i];
2419 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2420 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2421 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
2422 q_vector->rx.itr);
2423 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2424 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2425 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
2426 q_vector->tx.itr);
2427
2428 /* Linked list for the queuepairs assigned to this vector */
2429 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
2430 for (q = 0; q < q_vector->num_ringpairs; q++) {
2431 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2432 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2433 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2434 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
2435 (I40E_QUEUE_TYPE_TX
2436 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2437
2438 wr32(hw, I40E_QINT_RQCTL(qp), val);
2439
2440 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2441 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2442 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2443 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
2444 (I40E_QUEUE_TYPE_RX
2445 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2446
2447 /* Terminate the linked list */
2448 if (q == (q_vector->num_ringpairs - 1))
2449 val |= (I40E_QUEUE_END_OF_LIST
2450 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2451
2452 wr32(hw, I40E_QINT_TQCTL(qp), val);
2453 qp++;
2454 }
2455 }
2456
2457 i40e_flush(hw);
2458}
2459
2460/**
2461 * i40e_enable_misc_int_causes - enable the non-queue interrupts
2462 * @hw: ptr to the hardware info
2463 **/
2464static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
2465{
2466 u32 val;
2467
2468 /* clear things first */
2469 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
2470 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
2471
2472 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2473 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2474 I40E_PFINT_ICR0_ENA_GRST_MASK |
2475 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2476 I40E_PFINT_ICR0_ENA_GPIO_MASK |
2477 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
2478 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
2479 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2480 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2481
2482 wr32(hw, I40E_PFINT_ICR0_ENA, val);
2483
2484 /* SW_ITR_IDX = 0, but don't change INTENA */
2485 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2486 I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2487
2488 /* OTHER_ITR_IDX = 0 */
2489 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2490}
2491
2492/**
2493 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
2494 * @vsi: the VSI being configured
2495 **/
2496static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2497{
2498 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
2499 struct i40e_pf *pf = vsi->back;
2500 struct i40e_hw *hw = &pf->hw;
2501 u32 val;
2502
2503 /* set the ITR configuration */
2504 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2505 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2506 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
2507 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2508 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2509 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
2510
2511 i40e_enable_misc_int_causes(hw);
2512
2513 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2514 wr32(hw, I40E_PFINT_LNKLST0, 0);
2515
2516 /* Associate the queue pair to the vector and enable the q int */
2517 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2518 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2519 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2520
2521 wr32(hw, I40E_QINT_RQCTL(0), val);
2522
2523 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2524 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2525 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2526
2527 wr32(hw, I40E_QINT_TQCTL(0), val);
2528 i40e_flush(hw);
2529}
2530
2531/**
2532 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2533 * @pf: board private structure
2534 **/
2535 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
2536{
2537 struct i40e_hw *hw = &pf->hw;
2538 u32 val;
2539
2540 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2541 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2542 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2543
2544 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2545 i40e_flush(hw);
2546}
2547
2548/**
2549 * i40e_irq_dynamic_enable - Enable default interrupt generation settings
2550 * @vsi: pointer to a vsi
2551 * @vector: enable a particular Hw Interrupt vector
2552 **/
2553void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2554{
2555 struct i40e_pf *pf = vsi->back;
2556 struct i40e_hw *hw = &pf->hw;
2557 u32 val;
2558
2559 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2560 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2561 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2562 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2563 /* skip the flush */
2564}
2565
2566/**
2567 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
2568 * @irq: interrupt number
2569 * @data: pointer to a q_vector
2570 **/
2571static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
2572{
2573 struct i40e_q_vector *q_vector = data;
2574
2575 if (!q_vector->tx.ring && !q_vector->rx.ring)
2576 return IRQ_HANDLED;
2577
2578 napi_schedule(&q_vector->napi);
2579
2580 return IRQ_HANDLED;
2581}
2582
2583/**
2584 * i40e_fdir_clean_rings - Interrupt Handler for FDIR rings
2585 * @irq: interrupt number
2586 * @data: pointer to a q_vector
2587 **/
2588static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
2589{
2590 struct i40e_q_vector *q_vector = data;
2591
2592 if (!q_vector->tx.ring && !q_vector->rx.ring)
2593 return IRQ_HANDLED;
2594
2595 pr_info("fdir ring cleaning needed\n");
2596
2597 return IRQ_HANDLED;
2598}
2599
2600/**
2601 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
2602 * @vsi: the VSI being configured
2603 * @basename: name for the vector
2604 *
2605 * Allocates MSI-X vectors and requests interrupts from the kernel.
2606 **/
2607static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
2608{
2609 int q_vectors = vsi->num_q_vectors;
2610 struct i40e_pf *pf = vsi->back;
2611 int base = vsi->base_vector;
2612 int rx_int_idx = 0;
2613 int tx_int_idx = 0;
2614 int vector, err;
2615
2616 for (vector = 0; vector < q_vectors; vector++) {
2617 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
2618
2619 if (q_vector->tx.ring && q_vector->rx.ring) {
2620 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2621 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2622 tx_int_idx++;
2623 } else if (q_vector->rx.ring) {
2624 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2625 "%s-%s-%d", basename, "rx", rx_int_idx++);
2626 } else if (q_vector->tx.ring) {
2627 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2628 "%s-%s-%d", basename, "tx", tx_int_idx++);
2629 } else {
2630 /* skip this unused q_vector */
2631 continue;
2632 }
2633 err = request_irq(pf->msix_entries[base + vector].vector,
2634 vsi->irq_handler,
2635 0,
2636 q_vector->name,
2637 q_vector);
2638 if (err) {
2639 dev_info(&pf->pdev->dev,
2640 "%s: request_irq failed, error: %d\n",
2641 __func__, err);
2642 goto free_queue_irqs;
2643 }
2644 /* assign the mask for this irq */
2645 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2646 &q_vector->affinity_mask);
2647 }
2648
2649 return 0;
2650
2651free_queue_irqs:
2652 while (vector) {
2653 vector--;
2654 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2655 NULL);
2656 free_irq(pf->msix_entries[base + vector].vector,
2657 &(vsi->q_vectors[vector]));
2658 }
2659 return err;
2660}
2661
2662/**
2663 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
2664 * @vsi: the VSI being un-configured
2665 **/
2666static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
2667{
2668 struct i40e_pf *pf = vsi->back;
2669 struct i40e_hw *hw = &pf->hw;
2670 int base = vsi->base_vector;
2671 int i;
2672
2673 for (i = 0; i < vsi->num_queue_pairs; i++) {
2674 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
2675 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
2676 }
2677
2678 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
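 /* queue vectors start at MSI-X vector 1 (vector 0 handles the
  * "other causes"), so the DYN_CTLN register index is i - 1
  */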
2679 for (i = vsi->base_vector;
2680 i < (vsi->num_q_vectors + vsi->base_vector); i++)
2681 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
2682
2683 i40e_flush(hw);
2684 for (i = 0; i < vsi->num_q_vectors; i++)
2685 synchronize_irq(pf->msix_entries[i + base].vector);
2686 } else {
2687 /* Legacy and MSI mode - this stops all interrupt handling */
2688 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
2689 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
2690 i40e_flush(hw);
2691 synchronize_irq(pf->pdev->irq);
2692 }
2693}
2694
2695/**
2696 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
2697 * @vsi: the VSI being configured
2698 **/
2699static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
2700{
2701 struct i40e_pf *pf = vsi->back;
2702 int i;
2703
2704 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2705 for (i = vsi->base_vector;
2706 i < (vsi->num_q_vectors + vsi->base_vector); i++)
2707 i40e_irq_dynamic_enable(vsi, i);
2708 } else {
2709 i40e_irq_dynamic_enable_icr0(pf);
2710 }
2711
2712 i40e_flush(&pf->hw);
2713 return 0;
2714}
2715
2716/**
2717 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
2718 * @pf: board private structure
2719 **/
2720static void i40e_stop_misc_vector(struct i40e_pf *pf)
2721{
2722 /* Disable ICR 0 */
2723 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
2724 i40e_flush(&pf->hw);
2725}
2726
2727/**
2728 * i40e_intr - MSI/Legacy and non-queue interrupt handler
2729 * @irq: interrupt number
2730 * @data: pointer to a q_vector
2731 *
2732 * This is the handler used for all MSI/Legacy interrupts, and deals
2733 * with both queue and non-queue interrupts. This is also used in
2734 * MSIX mode to handle the non-queue interrupts.
2735 **/
2736static irqreturn_t i40e_intr(int irq, void *data)
2737{
2738 struct i40e_pf *pf = (struct i40e_pf *)data;
2739 struct i40e_hw *hw = &pf->hw;
2740 u32 icr0, icr0_remaining;
2741 u32 val, ena_mask;
2742
2743 icr0 = rd32(hw, I40E_PFINT_ICR0);
2744
2745 val = rd32(hw, I40E_PFINT_DYN_CTL0);
2746 val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
2747 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2748
2749 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
2750 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
2751 return IRQ_NONE;
2752
2753 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
2754
2755 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
2756 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
2757
2758 /* temporarily disable queue cause for NAPI processing */
2759 u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
2760 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
2761 wr32(hw, I40E_QINT_RQCTL(0), qval);
2762
2763 qval = rd32(hw, I40E_QINT_TQCTL(0));
2764 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
2765 wr32(hw, I40E_QINT_TQCTL(0), qval);
2766
2767 if (!test_bit(__I40E_DOWN, &pf->state))
2768 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
2769 }
2770
2771 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
2772 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2773 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
2774 }
2775
2776 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
2777 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2778 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
2779 }
2780
2781 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
2782 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
2783 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
2784 }
2785
2786 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
2787 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
2788 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
2789 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
2790 val = rd32(hw, I40E_GLGEN_RSTAT);
2791 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
2792 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
2793 if (val & I40E_RESET_CORER)
2794 pf->corer_count++;
2795 else if (val & I40E_RESET_GLOBR)
2796 pf->globr_count++;
2797 else if (val & I40E_RESET_EMPR)
2798 pf->empr_count++;
2799 }
2800
2801 /* If a critical error is pending we have no choice but to reset the
2802 * device.
2803 * Report and mask out any remaining unexpected interrupts.
2804 */
2805 icr0_remaining = icr0 & ena_mask;
2806 if (icr0_remaining) {
2807 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
2808 icr0_remaining);
2809 if ((icr0_remaining & I40E_PFINT_ICR0_HMC_ERR_MASK) ||
2810 (icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
2811 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
2812 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
2813 (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
2814 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
2815 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
2816 } else {
2817 dev_info(&pf->pdev->dev, "device will be reset\n");
2818 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
2819 i40e_service_event_schedule(pf);
2820 }
2821 }
2822 ena_mask &= ~icr0_remaining;
2823 }
2824
2825 /* re-enable interrupt causes */
2826 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
2827 if (!test_bit(__I40E_DOWN, &pf->state)) {
2828 i40e_service_event_schedule(pf);
2829 i40e_irq_dynamic_enable_icr0(pf);
2830 }
2831
2832 return IRQ_HANDLED;
2833}
2834
2835/**
2836 * map_vector_to_qp - Assigns the queue pair to the vector
2837 * @vsi: the VSI being configured
2838 * @v_idx: vector index
2839 * @qp_idx: queue pair index
2840 **/
2841 static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
2842 {
2843 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
2844 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
2845 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
2846
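 /* push each ring onto the head of the vector's singly linked ring
  * list; i40e_for_each_ring() walks these lists later
  */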
2847 tx_ring->q_vector = q_vector;
2848 tx_ring->next = q_vector->tx.ring;
2849 q_vector->tx.ring = tx_ring;
2850 q_vector->tx.count++;
2851
2852 rx_ring->q_vector = q_vector;
2853 rx_ring->next = q_vector->rx.ring;
2854 q_vector->rx.ring = rx_ring;
2855 q_vector->rx.count++;
2856}
2857
2858/**
2859 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
2860 * @vsi: the VSI being configured
2861 *
2862 * This function maps descriptor rings to the queue-specific vectors
2863 * we were allotted through the MSI-X enabling code. Ideally, we'd have
2864 * one vector per queue pair, but on a constrained vector budget, we
2865 * group the queue pairs as "efficiently" as possible.
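 * For example, 8 queue pairs on 3 vectors come out as 3, 3 and 2
 * ringpairs per vector via the DIV_ROUND_UP() split below.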
2866 **/
2867static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
2868{
2869 int qp_remaining = vsi->num_queue_pairs;
2870 int q_vectors = vsi->num_q_vectors;
2871 int num_ringpairs;
2872 int v_start = 0;
2873 int qp_idx = 0;
2874
2875 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
2876 * group them so there are multiple queues per vector.
2877 */
2878 for (; v_start < q_vectors && qp_remaining; v_start++) {
2879 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
2880
2881 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
2882
2883 q_vector->num_ringpairs = num_ringpairs;
2884
2885 q_vector->rx.count = 0;
2886 q_vector->tx.count = 0;
2887 q_vector->rx.ring = NULL;
2888 q_vector->tx.ring = NULL;
2889
2890 while (num_ringpairs--) {
2891 map_vector_to_qp(vsi, v_start, qp_idx);
2892 qp_idx++;
2893 qp_remaining--;
2894 }
2895 }
2896}
2897
2898/**
2899 * i40e_vsi_request_irq - Request IRQ from the OS
2900 * @vsi: the VSI being configured
2901 * @basename: name for the vector
2902 **/
2903static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
2904{
2905 struct i40e_pf *pf = vsi->back;
2906 int err;
2907
2908 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
2909 err = i40e_vsi_request_irq_msix(vsi, basename);
2910 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
2911 err = request_irq(pf->pdev->irq, i40e_intr, 0,
2912 pf->misc_int_name, pf);
2913 else
2914 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
2915 pf->misc_int_name, pf);
2916
2917 if (err)
2918 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
2919
2920 return err;
2921}
2922
2923#ifdef CONFIG_NET_POLL_CONTROLLER
2924/**
2925 * i40e_netpoll - A Polling 'interrupt' handler
2926 * @netdev: network interface device structure
2927 *
2928 * This is used by netconsole to send skbs without having to re-enable
2929 * interrupts. It's not called while the normal interrupt routine is executing.
2930 **/
2931static void i40e_netpoll(struct net_device *netdev)
2932{
2933 struct i40e_netdev_priv *np = netdev_priv(netdev);
2934 struct i40e_vsi *vsi = np->vsi;
2935 struct i40e_pf *pf = vsi->back;
2936 int i;
2937
2938 /* if interface is down do nothing */
2939 if (test_bit(__I40E_DOWN, &vsi->state))
2940 return;
2941
2942 pf->flags |= I40E_FLAG_IN_NETPOLL;
2943 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2944 for (i = 0; i < vsi->num_q_vectors; i++)
2945 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
2946 } else {
2947 i40e_intr(pf->pdev->irq, netdev);
2948 }
2949 pf->flags &= ~I40E_FLAG_IN_NETPOLL;
2950}
2951#endif
2952
2953/**
2954 * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
2955 * @vsi: the VSI being configured
2956 * @enable: start or stop the rings
2957 **/
2958static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
2959{
2960 struct i40e_pf *pf = vsi->back;
2961 struct i40e_hw *hw = &pf->hw;
2962 int i, j, pf_q;
2963 u32 tx_reg;
2964
2965 pf_q = vsi->base_queue;
2966 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
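 /* wait for any in-flight enable/disable request to settle: loop
  * until the QENA_REQ and QENA_STAT bits agree or the retry budget
  * runs out
  */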
2967 j = 1000;
2968 do {
2969 usleep_range(1000, 2000);
2970 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
2971 } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
2972 ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
2973
2974 if (enable) {
2975 /* is STAT set ? */
2976 if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
2977 dev_info(&pf->pdev->dev,
2978 "Tx %d already enabled\n", i);
2979 continue;
2980 }
2981 } else {
2982 /* is !STAT set ? */
2983 if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
2984 dev_info(&pf->pdev->dev,
2985 "Tx %d already disabled\n", i);
2986 continue;
2987 }
2988 }
2989
2990 /* turn on/off the queue */
2991 if (enable)
2992 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
2993 I40E_QTX_ENA_QENA_STAT_MASK;
2994 else
2995 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2996
2997 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
2998
2999 /* wait for the change to finish */
3000 for (j = 0; j < 10; j++) {
3001 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3002 if (enable) {
3003 if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3004 break;
3005 } else {
3006 if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3007 break;
3008 }
3009
3010 udelay(10);
3011 }
3012 if (j >= 10) {
3013 dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n",
3014 pf_q, (enable ? "en" : "dis"));
3015 return -ETIMEDOUT;
3016 }
3017 }
3018
3019 return 0;
3020}
3021
3022/**
3023 * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
3024 * @vsi: the VSI being configured
3025 * @enable: start or stop the rings
3026 **/
3027static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3028{
3029 struct i40e_pf *pf = vsi->back;
3030 struct i40e_hw *hw = &pf->hw;
3031 int i, j, pf_q;
3032 u32 rx_reg;
3033
3034 pf_q = vsi->base_queue;
3035 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
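 /* as on the Tx side, wait until QENA_REQ and QENA_STAT agree
  * before touching the queue
  */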
3036 j = 1000;
3037 do {
3038 usleep_range(1000, 2000);
3039 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3040 } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT)
3041 ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1);
3042
3043 if (enable) {
3044 /* is STAT set ? */
3045 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3046 continue;
3047 } else {
3048 /* is !STAT set ? */
3049 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3050 continue;
3051 }
3052
3053 /* turn on/off the queue */
3054 if (enable)
3055 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3056 I40E_QRX_ENA_QENA_STAT_MASK;
3057 else
3058 rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK |
3059 I40E_QRX_ENA_QENA_STAT_MASK);
3060 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3061
3062 /* wait for the change to finish */
3063 for (j = 0; j < 10; j++) {
3064 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3065
3066 if (enable) {
3067 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3068 break;
3069 } else {
3070 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3071 break;
3072 }
3073
3074 udelay(10);
3075 }
3076 if (j >= 10) {
3077 dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n",
3078 pf_q, (enable ? "en" : "dis"));
3079 return -ETIMEDOUT;
3080 }
3081 }
3082
3083 return 0;
3084}
3085
3086/**
3087 * i40e_vsi_control_rings - Start or stop a VSI's rings
3088 * @vsi: the VSI being configured
3089 * @request: start or stop the rings
3090 **/
3091static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3092{
3093 int ret;
3094
3095 /* do rx first for enable and last for disable */
3096 if (request) {
3097 ret = i40e_vsi_control_rx(vsi, request);
3098 if (ret)
3099 return ret;
3100 ret = i40e_vsi_control_tx(vsi, request);
3101 } else {
3102 ret = i40e_vsi_control_tx(vsi, request);
3103 if (ret)
3104 return ret;
3105 ret = i40e_vsi_control_rx(vsi, request);
3106 }
3107
3108 return ret;
3109}
3110
3111/**
3112 * i40e_vsi_free_irq - Free the irq association with the OS
3113 * @vsi: the VSI being configured
3114 **/
3115static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3116{
3117 struct i40e_pf *pf = vsi->back;
3118 struct i40e_hw *hw = &pf->hw;
3119 int base = vsi->base_vector;
3120 u32 val, qp;
3121 int i;
3122
3123 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3124 if (!vsi->q_vectors)
3125 return;
3126
3127 for (i = 0; i < vsi->num_q_vectors; i++) {
3128 u16 vector = i + base;
3129
3130 /* free only the irqs that were actually requested */
3131 if (vsi->q_vectors[i]->num_ringpairs == 0)
3132 continue;
3133
3134 /* clear the affinity_mask in the IRQ descriptor */
3135 irq_set_affinity_hint(pf->msix_entries[vector].vector,
3136 NULL);
3137 free_irq(pf->msix_entries[vector].vector,
3138 vsi->q_vectors[i]);
3139
3140 /* Tear down the interrupt queue link list
3141 *
3142 * We know that they come in pairs and always
3143 * the Rx first, then the Tx. To clear the
3144 * link list, stick the EOL value into the
3145 * next_q field of the registers.
3146 */
3147 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
3148 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3149 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3150 val |= I40E_QUEUE_END_OF_LIST
3151 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3152 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
3153
3154 while (qp != I40E_QUEUE_END_OF_LIST) {
3155 u32 next;
3156
3157 val = rd32(hw, I40E_QINT_RQCTL(qp));
3158
3159 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3160 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3161 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3162 I40E_QINT_RQCTL_INTEVENT_MASK);
3163
3164 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3165 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3166
3167 wr32(hw, I40E_QINT_RQCTL(qp), val);
3168
3169 val = rd32(hw, I40E_QINT_TQCTL(qp));
3170
3171 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
3172 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
3173
3174 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3175 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3176 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3177 I40E_QINT_TQCTL_INTEVENT_MASK);
3178
3179 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3180 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3181
3182 wr32(hw, I40E_QINT_TQCTL(qp), val);
3183 qp = next;
3184 }
3185 }
3186 } else {
3187 free_irq(pf->pdev->irq, pf);
3188
3189 val = rd32(hw, I40E_PFINT_LNKLST0);
3190 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3191 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3192 val |= I40E_QUEUE_END_OF_LIST
3193 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
3194 wr32(hw, I40E_PFINT_LNKLST0, val);
3195
3196 val = rd32(hw, I40E_QINT_RQCTL(qp));
3197 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3198 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3199 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3200 I40E_QINT_RQCTL_INTEVENT_MASK);
3201
3202 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3203 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3204
3205 wr32(hw, I40E_QINT_RQCTL(qp), val);
3206
3207 val = rd32(hw, I40E_QINT_TQCTL(qp));
3208
3209 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3210 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3211 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3212 I40E_QINT_TQCTL_INTEVENT_MASK);
3213
3214 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3215 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3216
3217 wr32(hw, I40E_QINT_TQCTL(qp), val);
3218 }
3219}
3220
3221/**
3222 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
3223 * @vsi: the VSI being configured
3224 * @v_idx: Index of vector to be freed
3225 *
3226 * This function frees the memory allocated to the q_vector. In addition if
3227 * NAPI is enabled it will delete any references to the NAPI struct prior
3228 * to freeing the q_vector.
3229 **/
3230static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
3231{
3232 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3233 struct i40e_ring *ring;
3234
3235 if (!q_vector)
3236 return;
3237
3238 /* disassociate q_vector from rings */
3239 i40e_for_each_ring(ring, q_vector->tx)
3240 ring->q_vector = NULL;
3241
3242 i40e_for_each_ring(ring, q_vector->rx)
3243 ring->q_vector = NULL;
3244
3245 /* only VSI w/ an associated netdev is set up w/ NAPI */
3246 if (vsi->netdev)
3247 netif_napi_del(&q_vector->napi);
3248
3249 vsi->q_vectors[v_idx] = NULL;
3250
3251 kfree_rcu(q_vector, rcu);
3252}
3253
3254/**
3255 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3256 * @vsi: the VSI being un-configured
3257 *
3258 * This frees the memory allocated to the q_vectors and
3259 * deletes references to the NAPI struct.
3260 **/
3261static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3262{
3263 int v_idx;
3264
3265 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
3266 i40e_free_q_vector(vsi, v_idx);
3267}
3268
3269/**
3270 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
3271 * @pf: board private structure
3272 **/
3273static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
3274{
3275 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
3276 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3277 pci_disable_msix(pf->pdev);
3278 kfree(pf->msix_entries);
3279 pf->msix_entries = NULL;
3280 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
3281 pci_disable_msi(pf->pdev);
3282 }
3283 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
3284}
3285
3286/**
3287 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
3288 * @pf: board private structure
3289 *
3290 * We go through and clear interrupt specific resources and reset the structure
3291 * to pre-load conditions
3292 **/
3293static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3294{
3295 int i;
3296
3297 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3298 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
3299 if (pf->vsi[i])
3300 i40e_vsi_free_q_vectors(pf->vsi[i]);
3301 i40e_reset_interrupt_capability(pf);
3302}
3303
3304/**
3305 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3306 * @vsi: the VSI being configured
3307 **/
3308static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3309{
3310 int q_idx;
3311
3312 if (!vsi->netdev)
3313 return;
3314
3315 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3316 napi_enable(&vsi->q_vectors[q_idx]->napi);
3317}
3318
3319/**
3320 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3321 * @vsi: the VSI being configured
3322 **/
3323static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3324{
3325 int q_idx;
3326
3327 if (!vsi->netdev)
3328 return;
3329
3330 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3331 napi_disable(&vsi->q_vectors[q_idx]->napi);
3332}
3333
3334/**
3335 * i40e_quiesce_vsi - Pause a given VSI
3336 * @vsi: the VSI being paused
3337 **/
3338static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
3339{
3340 if (test_bit(__I40E_DOWN, &vsi->state))
3341 return;
3342
3343 set_bit(__I40E_NEEDS_RESTART, &vsi->state);
3344 if (vsi->netdev && netif_running(vsi->netdev)) {
3345 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3346 } else {
3347 set_bit(__I40E_DOWN, &vsi->state);
3348 i40e_down(vsi);
3349 }
3350}
3351
3352/**
3353 * i40e_unquiesce_vsi - Resume a given VSI
3354 * @vsi: the VSI being resumed
3355 **/
3356static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
3357{
3358 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
3359 return;
3360
3361 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
3362 if (vsi->netdev && netif_running(vsi->netdev))
3363 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3364 else
3365 i40e_up(vsi); /* this clears the DOWN bit */
3366}
3367
3368/**
3369 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
3370 * @pf: the PF
3371 **/
3372static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
3373{
3374 int v;
3375
3376 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3377 if (pf->vsi[v])
3378 i40e_quiesce_vsi(pf->vsi[v]);
3379 }
3380}
3381
3382/**
3383 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
3384 * @pf: the PF
3385 **/
3386static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3387{
3388 int v;
3389
3390 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3391 if (pf->vsi[v])
3392 i40e_unquiesce_vsi(pf->vsi[v]);
3393 }
3394}
3395
3396/**
3397 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
3398 * @dcbcfg: the corresponding DCBx configuration structure
3399 *
3400 * Return the number of TCs from given DCBx configuration
3401 **/
3402static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3403{
3404 u8 num_tc = 0;
3405 int i;
3406
3407 /* Scan the ETS Config Priority Table to find
3408 * traffic class enabled for a given priority
3409 * and use the traffic class index to get the
3410 * number of traffic classes enabled
3411 */
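 /* e.g. a prioritytable of {0,0,1,1,2,2,0,0} has highest TC index 2,
  * so this function returns 3
  */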
3412 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
3413 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
3414 num_tc = dcbcfg->etscfg.prioritytable[i];
3415 }
3416
3417 /* Traffic class index starts from zero so
3418 * increment to return the actual count
3419 */
3420 return num_tc + 1;
3421}
3422
3423/**
3424 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
3425 * @dcbcfg: the corresponding DCBx configuration structure
3426 *
3427 * Query the current DCB configuration and return the number of
3428 * traffic classes enabled from the given DCBX config
3429 **/
3430static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
3431{
3432 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
3433 u8 enabled_tc = 1;
3434 u8 i;
3435
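 /* e.g. num_tc = 3 yields the contiguous bitmap 0x7 (TC0..TC2) */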
3436 for (i = 0; i < num_tc; i++)
3437 enabled_tc |= 1 << i;
3438
3439 return enabled_tc;
3440}
3441
3442/**
3443 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
3444 * @pf: PF being queried
3445 *
3446 * Return number of traffic classes enabled for the given PF
3447 **/
3448static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
3449{
3450 struct i40e_hw *hw = &pf->hw;
3451 u8 i, enabled_tc;
3452 u8 num_tc = 0;
3453 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
3454
3455 /* If DCB is not enabled then always in single TC */
3456 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3457 return 1;
3458
3459 /* MFP mode return count of enabled TCs for this PF */
3460 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
3461 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3462 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3463 if (enabled_tc & (1 << i))
3464 num_tc++;
3465 }
3466 return num_tc;
3467 }
3468
3469 /* SFP mode will be enabled for all TCs on port */
3470 return i40e_dcb_get_num_tc(dcbcfg);
3471}
3472
3473/**
3474 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
3475 * @pf: PF being queried
3476 *
3477 * Return a bitmap for first enabled traffic class for this PF.
3478 **/
3479static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
3480{
3481 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3482 u8 i = 0;
3483
3484 if (!enabled_tc)
3485 return 0x1; /* TC0 */
3486
3487 /* Find the first enabled TC */
3488 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3489 if (enabled_tc & (1 << i))
3490 break;
3491 }
3492
3493 return 1 << i;
3494}
3495
3496/**
3497 * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
3498 * @pf: PF being queried
3499 *
3500 * Return a bitmap for enabled traffic classes for this PF.
3501 **/
3502static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
3503{
3504 /* If DCB is not enabled for this PF then just return default TC */
3505 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3506 return i40e_pf_get_default_tc(pf);
3507
3508 /* MFP mode will have enabled TCs set by FW */
3509 if (pf->flags & I40E_FLAG_MFP_ENABLED)
3510 return pf->hw.func_caps.enabled_tcmap;
3511
3512 /* SFP mode we want PF to be enabled for all TCs */
3513 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
3514}
3515
3516/**
3517 * i40e_vsi_get_bw_info - Query VSI BW Information
3518 * @vsi: the VSI being queried
3519 *
3520 * Returns 0 on success, negative value on failure
3521 **/
3522static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3523{
3524 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
3525 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
3526 struct i40e_pf *pf = vsi->back;
3527 struct i40e_hw *hw = &pf->hw;
3528 i40e_status aq_ret;
3529 u32 tc_bw_max;
3530 int i;
3531
3532 /* Get the VSI level BW configuration */
3533 aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
3534 if (aq_ret) {
41c445ff
JB
3535 dev_info(&pf->pdev->dev,
3536 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
3537 aq_ret, pf->hw.aq.asq_last_status);
3538 return -EINVAL;
3539 }
3540
3541 /* Get the VSI level BW configuration per TC */
3542 aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
3543 NULL);
3544 if (aq_ret) {
41c445ff
JB
3545 dev_info(&pf->pdev->dev,
3546 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
3547 aq_ret, pf->hw.aq.asq_last_status);
3548 return -EINVAL;
3549 }
3550
3551 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
3552 dev_info(&pf->pdev->dev,
3553 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
3554 bw_config.tc_valid_bits,
3555 bw_ets_config.tc_valid_bits);
3556 /* Still continuing */
3557 }
3558
3559 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
3560 vsi->bw_max_quanta = bw_config.max_bw;
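 /* the per-TC max quanta arrive packed 4 bits per TC across two
  * le16 words; splice them into one 32-bit value before unpacking
  */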
3561 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
3562 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
3563 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3564 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
3565 vsi->bw_ets_limit_credits[i] =
3566 le16_to_cpu(bw_ets_config.credits[i]);
3567 /* 3 bits out of 4 for each TC */
3568 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
3569 }
3570
3571 return 0;
3572}
3573
3574/**
3575 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
3576 * @vsi: the VSI being configured
3577 * @enabled_tc: TC bitmap
3578 * @bw_credits: BW shared credits per TC
3579 *
3580 * Returns 0 on success, negative value on failure
3581 **/
3582 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
3583 u8 *bw_share)
3584{
3585 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
3586 i40e_status aq_ret;
3587 int i;
3588
3589 bw_data.tc_valid_bits = enabled_tc;
3590 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3591 bw_data.tc_bw_credits[i] = bw_share[i];
3592
3593 aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
3594 NULL);
3595 if (aq_ret) {
3596 dev_info(&vsi->back->pdev->dev,
3597 "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
3598 __func__, vsi->back->hw.aq.asq_last_status);
3599 return -EINVAL;
3600 }
3601
3602 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3603 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
3604
3605 return 0;
3606}
3607
3608/**
3609 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
3610 * @vsi: the VSI being configured
3611 * @enabled_tc: TC map to be enabled
3612 *
3613 **/
3614static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
3615{
3616 struct net_device *netdev = vsi->netdev;
3617 struct i40e_pf *pf = vsi->back;
3618 struct i40e_hw *hw = &pf->hw;
3619 u8 netdev_tc = 0;
3620 int i;
3621 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
3622
3623 if (!netdev)
3624 return;
3625
3626 if (!enabled_tc) {
3627 netdev_reset_tc(netdev);
3628 return;
3629 }
3630
3631 /* Set up actual enabled TCs on the VSI */
3632 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
3633 return;
3634
3635 /* set per TC queues for the VSI */
3636 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3637 /* Only set TC queues for enabled tcs
3638 *
3639 * e.g. For a VSI that has TC0 and TC3 enabled the
3640 * enabled_tc bitmap would be 0x9 (binary 1001); the driver
3641 * will set the numtc for netdev as 2 that will be
3642 * referenced by the netdev layer as TC 0 and 1.
3643 */
3644 if (vsi->tc_config.enabled_tc & (1 << i))
3645 netdev_set_tc_queue(netdev,
3646 vsi->tc_config.tc_info[i].netdev_tc,
3647 vsi->tc_config.tc_info[i].qcount,
3648 vsi->tc_config.tc_info[i].qoffset);
3649 }
3650
3651 /* Assign UP2TC map for the VSI */
3652 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
3653 /* Get the actual TC# for the UP */
3654 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
3655 /* Get the mapped netdev TC# for the UP */
3656 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
3657 netdev_set_prio_tc_map(netdev, i, netdev_tc);
3658 }
3659}
3660
3661/**
3662 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
3663 * @vsi: the VSI being configured
3664 * @ctxt: the ctxt buffer returned from AQ VSI update param command
3665 **/
3666static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
3667 struct i40e_vsi_context *ctxt)
3668{
3669 /* copy just the sections touched not the entire info
3670 * since not all sections are valid as returned by
3671 * update vsi params
3672 */
3673 vsi->info.mapping_flags = ctxt->info.mapping_flags;
3674 memcpy(&vsi->info.queue_mapping,
3675 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
3676 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
3677 sizeof(vsi->info.tc_mapping));
3678}
3679
3680/**
3681 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
3682 * @vsi: VSI to be configured
3683 * @enabled_tc: TC bitmap
3684 *
3685 * This configures a particular VSI for the TCs that are mapped to
3686 * the given TC bitmap, using the default bandwidth share for TCs
3687 * across VSIs.
3688 *
3689 * NOTE:
3690 * It is expected that the VSI queues have been quiesced before calling
3691 * this function.
3692 **/
3693static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
3694{
3695 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
3696 struct i40e_vsi_context ctxt;
3697 int ret = 0;
3698 int i;
3699
3700 /* Check if enabled_tc is same as existing or new TCs */
3701 if (vsi->tc_config.enabled_tc == enabled_tc)
3702 return ret;
3703
3704 /* Enable ETS TCs with equal BW Share for now across all VSIs */
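/* e.g. enabled_tc = 0x5 (TC0 and TC2) gives bw_share = {1, 0, 1, 0, 0, 0, 0, 0} */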
3705 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3706 if (enabled_tc & (1 << i))
3707 bw_share[i] = 1;
3708 }
3709
3710 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
3711 if (ret) {
3712 dev_info(&vsi->back->pdev->dev,
3713 "Failed configuring TC map %d for VSI %d\n",
3714 enabled_tc, vsi->seid);
3715 goto out;
3716 }
3717
3718 /* Update Queue Pairs Mapping for currently enabled UPs */
3719 ctxt.seid = vsi->seid;
3720 ctxt.pf_num = vsi->back->hw.pf_id;
3721 ctxt.vf_num = 0;
3722 ctxt.uplink_seid = vsi->uplink_seid;
3723 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3724 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
3725
3726 /* Update the VSI after updating the VSI queue-mapping information */
3727 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3728 if (ret) {
3729 dev_info(&vsi->back->pdev->dev,
3730 "update vsi failed, aq_err=%d\n",
3731 vsi->back->hw.aq.asq_last_status);
3732 goto out;
3733 }
3734 /* update the local VSI info with updated queue map */
3735 i40e_vsi_update_queue_map(vsi, &ctxt);
3736 vsi->info.valid_sections = 0;
3737
3738 /* Update current VSI BW information */
3739 ret = i40e_vsi_get_bw_info(vsi);
3740 if (ret) {
3741 dev_info(&vsi->back->pdev->dev,
3742 "Failed updating vsi bw info, aq_err=%d\n",
3743 vsi->back->hw.aq.asq_last_status);
3744 goto out;
3745 }
3746
3747 /* Update the netdev TC setup */
3748 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
3749out:
3750 return ret;
3751}
3752
3753/**
3754 * i40e_up_complete - Finish the last steps of bringing up a connection
3755 * @vsi: the VSI being configured
3756 **/
3757static int i40e_up_complete(struct i40e_vsi *vsi)
3758{
3759 struct i40e_pf *pf = vsi->back;
3760 int err;
3761
3762 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3763 i40e_vsi_configure_msix(vsi);
3764 else
3765 i40e_configure_msi_and_legacy(vsi);
3766
3767 /* start rings */
3768 err = i40e_vsi_control_rings(vsi, true);
3769 if (err)
3770 return err;
3771
3772 clear_bit(__I40E_DOWN, &vsi->state);
3773 i40e_napi_enable_all(vsi);
3774 i40e_vsi_enable_irq(vsi);
3775
3776 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
3777 (vsi->netdev)) {
3778 netdev_info(vsi->netdev, "NIC Link is Up\n");
3779 netif_tx_start_all_queues(vsi->netdev);
3780 netif_carrier_on(vsi->netdev);
3781 } else if (vsi->netdev) {
3782 netdev_info(vsi->netdev, "NIC Link is Down\n");
3783 }
3784 i40e_service_event_schedule(pf);
3785
3786 return 0;
3787}
3788
3789/**
3790 * i40e_vsi_reinit_locked - Reset the VSI
3791 * @vsi: the VSI being configured
3792 *
3793 * Rebuild the ring structs after some configuration
3794 * has changed, e.g. MTU size.
3795 **/
3796static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
3797{
3798 struct i40e_pf *pf = vsi->back;
3799
3800 WARN_ON(in_interrupt());
3801 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
3802 usleep_range(1000, 2000);
3803 i40e_down(vsi);
3804
3805 /* Give a VF some time to respond to the reset. The
3806 * two second wait is based upon the watchdog cycle in
3807 * the VF driver.
3808 */
3809 if (vsi->type == I40E_VSI_SRIOV)
3810 msleep(2000);
3811 i40e_up(vsi);
3812 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
3813}
3814
3815/**
3816 * i40e_up - Bring the connection back up after being down
3817 * @vsi: the VSI being configured
3818 **/
3819int i40e_up(struct i40e_vsi *vsi)
3820{
3821 int err;
3822
3823 err = i40e_vsi_configure(vsi);
3824 if (!err)
3825 err = i40e_up_complete(vsi);
3826
3827 return err;
3828}
3829
3830/**
3831 * i40e_down - Shutdown the connection processing
3832 * @vsi: the VSI being stopped
3833 **/
3834void i40e_down(struct i40e_vsi *vsi)
3835{
3836 int i;
3837
3838 /* It is assumed that the caller of this function
3839 * sets the vsi->state __I40E_DOWN bit.
3840 */
3841 if (vsi->netdev) {
3842 netif_carrier_off(vsi->netdev);
3843 netif_tx_disable(vsi->netdev);
3844 }
3845 i40e_vsi_disable_irq(vsi);
3846 i40e_vsi_control_rings(vsi, false);
3847 i40e_napi_disable_all(vsi);
3848
3849 for (i = 0; i < vsi->num_queue_pairs; i++) {
3850 i40e_clean_tx_ring(vsi->tx_rings[i]);
3851 i40e_clean_rx_ring(vsi->rx_rings[i]);
3852 }
3853}
3854
3855/**
3856 * i40e_setup_tc - configure multiple traffic classes
3857 * @netdev: net device to configure
3858 * @tc: number of traffic classes to enable
3859 **/
3860static int i40e_setup_tc(struct net_device *netdev, u8 tc)
3861{
3862 struct i40e_netdev_priv *np = netdev_priv(netdev);
3863 struct i40e_vsi *vsi = np->vsi;
3864 struct i40e_pf *pf = vsi->back;
3865 u8 enabled_tc = 0;
3866 int ret = -EINVAL;
3867 int i;
3868
3869 /* Check if DCB enabled to continue */
3870 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
3871 netdev_info(netdev, "DCB is not enabled for adapter\n");
3872 goto exit;
3873 }
3874
3875 /* Check if MFP enabled */
3876 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
3877 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
3878 goto exit;
3879 }
3880
3881 /* Check whether tc count is within enabled limit */
3882 if (tc > i40e_pf_get_num_tc(pf)) {
3883 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
3884 goto exit;
3885 }
3886
3887 /* Generate TC map for number of tc requested */
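/* e.g. tc = 3 yields enabled_tc = 0x7 (TC0, TC1 and TC2) */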
3888 for (i = 0; i < tc; i++)
3889 enabled_tc |= (1 << i);
3890
3891 /* Requesting same TC configuration as already enabled */
3892 if (enabled_tc == vsi->tc_config.enabled_tc)
3893 return 0;
3894
3895 /* Quiesce VSI queues */
3896 i40e_quiesce_vsi(vsi);
3897
3898 /* Configure VSI for enabled TCs */
3899 ret = i40e_vsi_config_tc(vsi, enabled_tc);
3900 if (ret) {
3901 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
3902 vsi->seid);
3903 goto exit;
3904 }
3905
3906 /* Unquiesce VSI */
3907 i40e_unquiesce_vsi(vsi);
3908
3909exit:
3910 return ret;
3911}
3912
3913/**
3914 * i40e_open - Called when a network interface is made active
3915 * @netdev: network interface device structure
3916 *
3917 * The open entry point is called when a network interface is made
3918 * active by the system (IFF_UP). At this point all resources needed
3919 * for transmit and receive operations are allocated, the interrupt
3920 * handler is registered with the OS, the netdev watchdog subtask is
3921 * enabled, and the stack is notified that the interface is ready.
3922 *
3923 * Returns 0 on success, negative value on failure
3924 **/
3925static int i40e_open(struct net_device *netdev)
3926{
3927 struct i40e_netdev_priv *np = netdev_priv(netdev);
3928 struct i40e_vsi *vsi = np->vsi;
3929 struct i40e_pf *pf = vsi->back;
3930 char int_name[IFNAMSIZ];
3931 int err;
3932
3933 /* disallow open during test */
3934 if (test_bit(__I40E_TESTING, &pf->state))
3935 return -EBUSY;
3936
3937 netif_carrier_off(netdev);
3938
3939 /* allocate descriptors */
3940 err = i40e_vsi_setup_tx_resources(vsi);
3941 if (err)
3942 goto err_setup_tx;
3943 err = i40e_vsi_setup_rx_resources(vsi);
3944 if (err)
3945 goto err_setup_rx;
3946
3947 err = i40e_vsi_configure(vsi);
3948 if (err)
3949 goto err_setup_rx;
3950
3951 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
3952 dev_driver_string(&pf->pdev->dev), netdev->name);
3953 err = i40e_vsi_request_irq(vsi, int_name);
3954 if (err)
3955 goto err_setup_rx;
3956
3957 err = i40e_up_complete(vsi);
3958 if (err)
3959 goto err_up_complete;
3960
3961 if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
3962 err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL);
3963 if (err)
3964 netdev_info(netdev,
3965 "couldn't set broadcast err %d aq_err %d\n",
3966 err, pf->hw.aq.asq_last_status);
3967 }
3968
3969 return 0;
3970
3971err_up_complete:
3972 i40e_down(vsi);
3973 i40e_vsi_free_irq(vsi);
3974err_setup_rx:
3975 i40e_vsi_free_rx_resources(vsi);
3976err_setup_tx:
3977 i40e_vsi_free_tx_resources(vsi);
3978 if (vsi == pf->vsi[pf->lan_vsi])
3979 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
3980
3981 return err;
3982}
3983
3984/**
3985 * i40e_close - Disables a network interface
3986 * @netdev: network interface device structure
3987 *
3988 * The close entry point is called when an interface is de-activated
3989 * by the OS. The hardware is still under the driver's control, but
3990 * this netdev interface is disabled.
3991 *
3992 * Returns 0, this is not allowed to fail
3993 **/
3994static int i40e_close(struct net_device *netdev)
3995{
3996 struct i40e_netdev_priv *np = netdev_priv(netdev);
3997 struct i40e_vsi *vsi = np->vsi;
3998
3999 if (test_and_set_bit(__I40E_DOWN, &vsi->state))
4000 return 0;
4001
4002 i40e_down(vsi);
4003 i40e_vsi_free_irq(vsi);
4004
4005 i40e_vsi_free_tx_resources(vsi);
4006 i40e_vsi_free_rx_resources(vsi);
4007
4008 return 0;
4009}
4010
4011/**
4012 * i40e_do_reset - Start a PF or Core Reset sequence
4013 * @pf: board private structure
4014 * @reset_flags: which reset is requested
4015 *
4016 * The essential difference in resets is that the PF Reset
4017 * doesn't clear the packet buffers, doesn't reset the PE
4018 * firmware, and doesn't bother the other PFs on the chip.
4019 **/
4020void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
4021{
4022 u32 val;
4023
4024 WARN_ON(in_interrupt());
4025
4026 /* do the biggest reset indicated */
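/* precedence here: Global Reset > Core Reset > PF Reset > VSI reinit */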
4027 if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
4028
4029 /* Request a Global Reset
4030 *
4031 * This will start the chip's countdown to the actual full
4032 * chip reset event, and a warning interrupt to be sent
4033 * to all PFs, including the requestor. Our handler
4034 * for the warning interrupt will deal with the shutdown
4035 * and recovery of the switch setup.
4036 */
4037 dev_info(&pf->pdev->dev, "GlobalR requested\n");
4038 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4039 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
4040 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
4041
4042 } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
4043
4044 /* Request a Core Reset
4045 *
4046 * Same as Global Reset, except does *not* include the MAC/PHY
4047 */
4048 dev_info(&pf->pdev->dev, "CoreR requested\n");
4049 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4050 val |= I40E_GLGEN_RTRIG_CORER_MASK;
4051 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
4052 i40e_flush(&pf->hw);
4053
4054 } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
4055
4056 /* Request a PF Reset
4057 *
4058 * Resets only the PF-specific registers
4059 *
4060 * This goes directly to the tear-down and rebuild of
4061 * the switch, since we need to do all the recovery as
4062 * for the Core Reset.
4063 */
4064 dev_info(&pf->pdev->dev, "PFR requested\n");
4065 i40e_handle_reset_warning(pf);
4066
4067 } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
4068 int v;
4069
4070 /* Find the VSI(s) that requested a re-init */
4071 dev_info(&pf->pdev->dev,
4072 "VSI reinit requested\n");
4073 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4074 struct i40e_vsi *vsi = pf->vsi[v];
4075 if (vsi != NULL &&
4076 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
4077 i40e_vsi_reinit_locked(pf->vsi[v]);
4078 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
4079 }
4080 }
4081
4082 /* no further action needed, so return now */
4083 return;
4084 } else {
4085 dev_info(&pf->pdev->dev,
4086 "bad reset request 0x%08x\n", reset_flags);
4087 return;
4088 }
4089}
4090
4091/**
4092 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
4093 * @pf: board private structure
4094 * @e: event info posted on ARQ
4095 *
4096 * Handler for LAN Queue Overflow Event generated by the firmware for PF
4097 * and VF queues
4098 **/
4099static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
4100 struct i40e_arq_event_info *e)
4101{
4102 struct i40e_aqc_lan_overflow *data =
4103 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
4104 u32 queue = le32_to_cpu(data->prtdcb_rupto);
4105 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
4106 struct i40e_hw *hw = &pf->hw;
4107 struct i40e_vf *vf;
4108 u16 vf_id;
4109
4110 dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n",
4111 __func__, queue, qtx_ctl);
4112
4113 /* Queue belongs to VF, find the VF and issue VF reset */
4114 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
4115 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
4116 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
4117 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
4118 vf_id -= hw->func_caps.vf_base_id;
4119 vf = &pf->vf[vf_id];
4120 i40e_vc_notify_vf_reset(vf);
4121 /* Allow VF to process pending reset notification */
4122 msleep(20);
4123 i40e_reset_vf(vf, false);
4124 }
4125}
4126
4127/**
4128 * i40e_service_event_complete - Finish up the service event
4129 * @pf: board private structure
4130 **/
4131static void i40e_service_event_complete(struct i40e_pf *pf)
4132{
4133 BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
4134
4135 /* flush memory to make sure state is correct before next watchdog */
4136 smp_mb__before_clear_bit();
4137 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
4138}
4139
4140/**
4141 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
4142 * @pf: board private structure
4143 **/
4144static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
4145{
4146 if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
4147 return;
4148
4149 pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
4150
4151 /* if interface is down do nothing */
4152 if (test_bit(__I40E_DOWN, &pf->state))
4153 return;
4154}
4155
4156/**
4157 * i40e_vsi_link_event - notify VSI of a link event
4158 * @vsi: vsi to be notified
4159 * @link_up: link up or down
4160 **/
4161static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
4162{
4163 if (!vsi)
4164 return;
4165
4166 switch (vsi->type) {
4167 case I40E_VSI_MAIN:
4168 if (!vsi->netdev || !vsi->netdev_registered)
4169 break;
4170
4171 if (link_up) {
4172 netif_carrier_on(vsi->netdev);
4173 netif_tx_wake_all_queues(vsi->netdev);
4174 } else {
4175 netif_carrier_off(vsi->netdev);
4176 netif_tx_stop_all_queues(vsi->netdev);
4177 }
4178 break;
4179
4180 case I40E_VSI_SRIOV:
4181 break;
4182
4183 case I40E_VSI_VMDQ2:
4184 case I40E_VSI_CTRL:
4185 case I40E_VSI_MIRROR:
4186 default:
4187 /* there is no notification for other VSIs */
4188 break;
4189 }
4190}
4191
4192/**
4193 * i40e_veb_link_event - notify elements on the veb of a link event
4194 * @veb: veb to be notified
4195 * @link_up: link up or down
4196 **/
4197static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
4198{
4199 struct i40e_pf *pf;
4200 int i;
4201
4202 if (!veb || !veb->pf)
4203 return;
4204 pf = veb->pf;
4205
4206 /* depth first... */
4207 for (i = 0; i < I40E_MAX_VEB; i++)
4208 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
4209 i40e_veb_link_event(pf->veb[i], link_up);
4210
4211 /* ... now the local VSIs */
4212 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4213 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
4214 i40e_vsi_link_event(pf->vsi[i], link_up);
4215}
4216
4217/**
4218 * i40e_link_event - Update netif_carrier status
4219 * @pf: board private structure
4220 **/
4221static void i40e_link_event(struct i40e_pf *pf)
4222{
4223 bool new_link, old_link;
4224
4225 new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP);
4226 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
4227
4228 if (new_link == old_link)
4229 return;
4230
4231 if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
4232 netdev_info(pf->vsi[pf->lan_vsi]->netdev,
4233 "NIC Link is %s\n", (new_link ? "Up" : "Down"));
4234
4235 /* Notify the base of the switch tree connected to
4236 * the link. Floating VEBs are not notified.
4237 */
4238 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
4239 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
4240 else
4241 i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link);
4242
4243 if (pf->vf)
4244 i40e_vc_notify_link_state(pf);
4245}
4246
4247/**
4248 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
4249 * @pf: board private structure
4250 *
4251 * Set the per-queue flags to request a check for stuck queues in the irq
4252 * clean functions, then force interrupts to be sure the irq clean is called.
4253 **/
4254static void i40e_check_hang_subtask(struct i40e_pf *pf)
4255{
4256 int i, v;
4257
4258 /* If we're down or resetting, just bail */
4259 if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
4260 return;
4261
4262 /* for each VSI/netdev
4263 * for each Tx queue
4264 * set the check flag
4265 * for each q_vector
4266 * force an interrupt
4267 */
4268 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4269 struct i40e_vsi *vsi = pf->vsi[v];
4270 int armed = 0;
4271
4272 if (!pf->vsi[v] ||
4273 test_bit(__I40E_DOWN, &vsi->state) ||
4274 (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
4275 continue;
4276
4277 for (i = 0; i < vsi->num_queue_pairs; i++) {
4278 set_check_for_tx_hang(vsi->tx_rings[i]);
4279 if (test_bit(__I40E_HANG_CHECK_ARMED,
4280 &vsi->tx_rings[i]->state))
4281 armed++;
4282 }
4283
4284 if (armed) {
4285 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
4286 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
4287 (I40E_PFINT_DYN_CTL0_INTENA_MASK |
4288 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
4289 } else {
4290 u16 vec = vsi->base_vector - 1;
4291 u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
4292 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
4293 for (i = 0; i < vsi->num_q_vectors; i++, vec++)
4294 wr32(&vsi->back->hw,
4295 I40E_PFINT_DYN_CTLN(vec), val);
4296 }
4297 i40e_flush(&vsi->back->hw);
4298 }
4299 }
4300}
4301
4302/**
4303 * i40e_watchdog_subtask - Check and bring link up
4304 * @pf: board private structure
4305 **/
4306static void i40e_watchdog_subtask(struct i40e_pf *pf)
4307{
4308 int i;
4309
4310 /* if interface is down do nothing */
4311 if (test_bit(__I40E_DOWN, &pf->state) ||
4312 test_bit(__I40E_CONFIG_BUSY, &pf->state))
4313 return;
4314
4315 /* Update the stats for active netdevs so the network stack
4316 * can look at updated numbers whenever it cares to
4317 */
4318 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4319 if (pf->vsi[i] && pf->vsi[i]->netdev)
4320 i40e_update_stats(pf->vsi[i]);
4321
4322 /* Update the stats for the active switching components */
4323 for (i = 0; i < I40E_MAX_VEB; i++)
4324 if (pf->veb[i])
4325 i40e_update_veb_stats(pf->veb[i]);
4326}
4327
4328/**
4329 * i40e_reset_subtask - Set up for resetting the device and driver
4330 * @pf: board private structure
4331 **/
4332static void i40e_reset_subtask(struct i40e_pf *pf)
4333{
4334 u32 reset_flags = 0;
4335
4336 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
4337 reset_flags |= (1 << __I40E_REINIT_REQUESTED);
4338 clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
4339 }
4340 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
4341 reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
4342 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4343 }
4344 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
4345 reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
4346 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
4347 }
4348 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
4349 reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
4350 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
4351 }
4352
4353 /* If there's a recovery already waiting, it takes
4354 * precedence over starting a new reset sequence.
4355 */
4356 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
4357 i40e_handle_reset_warning(pf);
4358 return;
4359 }
4360
4361 /* If we're already down or resetting, just bail */
4362 if (reset_flags &&
4363 !test_bit(__I40E_DOWN, &pf->state) &&
4364 !test_bit(__I40E_CONFIG_BUSY, &pf->state))
4365 i40e_do_reset(pf, reset_flags);
4366}
4367
4368/**
4369 * i40e_handle_link_event - Handle link event
4370 * @pf: board private structure
4371 * @e: event info posted on ARQ
4372 **/
4373static void i40e_handle_link_event(struct i40e_pf *pf,
4374 struct i40e_arq_event_info *e)
4375{
4376 struct i40e_hw *hw = &pf->hw;
4377 struct i40e_aqc_get_link_status *status =
4378 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
4379 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
4380
4381 /* save off old link status information */
4382 memcpy(&pf->hw.phy.link_info_old, hw_link_info,
4383 sizeof(pf->hw.phy.link_info_old));
4384
4385 /* update link status */
4386 hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
4387 hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
4388 hw_link_info->link_info = status->link_info;
4389 hw_link_info->an_info = status->an_info;
4390 hw_link_info->ext_info = status->ext_info;
4391 hw_link_info->lse_enable =
4392 le16_to_cpu(status->command_flags) &
4393 I40E_AQ_LSE_ENABLE;
4394
4395 /* process the event */
4396 i40e_link_event(pf);
4397
4398 /* Do a new status request to re-enable LSE reporting
4399 * and load new status information into the hw struct,
4400 * then see if the status changed while processing the
4401 * initial event.
4402 */
4403 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
4404 i40e_link_event(pf);
4405}
4406
4407/**
4408 * i40e_clean_adminq_subtask - Clean the AdminQ rings
4409 * @pf: board private structure
4410 **/
4411static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
4412{
4413 struct i40e_arq_event_info event;
4414 struct i40e_hw *hw = &pf->hw;
4415 u16 pending, i = 0;
4416 i40e_status ret;
4417 u16 opcode;
4418 u32 val;
4419
4420 if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
4421 return;
4422
4423 event.msg_size = I40E_MAX_AQ_BUF_SIZE;
4424 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
4425 if (!event.msg_buf)
4426 return;
4427
4428 do {
4429 ret = i40e_clean_arq_element(hw, &event, &pending);
4430 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
4431 dev_info(&pf->pdev->dev, "No ARQ event found\n");
4432 break;
4433 } else if (ret) {
4434 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
4435 break;
4436 }
4437
4438 opcode = le16_to_cpu(event.desc.opcode);
4439 switch (opcode) {
4440
4441 case i40e_aqc_opc_get_link_status:
4442 i40e_handle_link_event(pf, &event);
4443 break;
4444 case i40e_aqc_opc_send_msg_to_pf:
4445 ret = i40e_vc_process_vf_msg(pf,
4446 le16_to_cpu(event.desc.retval),
4447 le32_to_cpu(event.desc.cookie_high),
4448 le32_to_cpu(event.desc.cookie_low),
4449 event.msg_buf,
4450 event.msg_size);
4451 break;
4452 case i40e_aqc_opc_lldp_update_mib:
4453 dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
4454 break;
4455 case i40e_aqc_opc_event_lan_overflow:
4456 dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
4457 i40e_handle_lan_overflow_event(pf, &event);
4458 break;
4459 default:
4460 dev_info(&pf->pdev->dev,
4461 "ARQ Error: Unknown event %d received\n",
4462 opcode);
4463 break;
4464 }
4465 } while (pending && (i++ < pf->adminq_work_limit));
4466
4467 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
4468 /* re-enable Admin queue interrupt cause */
4469 val = rd32(hw, I40E_PFINT_ICR0_ENA);
4470 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4471 wr32(hw, I40E_PFINT_ICR0_ENA, val);
4472 i40e_flush(hw);
4473
4474 kfree(event.msg_buf);
4475}
4476
4477/**
4478 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
4479 * @veb: pointer to the VEB instance
4480 *
4481 * This is a recursive function that first builds the attached VSIs then
4482 * recurses to build the next layer of VEBs. We track the connections
4483 * through our own index numbers because the SEIDs from the HW could
4484 * change across the reset.
4485 **/
4486static int i40e_reconstitute_veb(struct i40e_veb *veb)
4487{
4488 struct i40e_vsi *ctl_vsi = NULL;
4489 struct i40e_pf *pf = veb->pf;
4490 int v, veb_idx;
4491 int ret;
4492
4493 /* build VSI that owns this VEB, temporarily attached to base VEB */
4494 for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
4495 if (pf->vsi[v] &&
4496 pf->vsi[v]->veb_idx == veb->idx &&
4497 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
4498 ctl_vsi = pf->vsi[v];
4499 break;
4500 }
4501 }
4502 if (!ctl_vsi) {
4503 dev_info(&pf->pdev->dev,
4504 "missing owner VSI for veb_idx %d\n", veb->idx);
4505 ret = -ENOENT;
4506 goto end_reconstitute;
4507 }
4508 if (ctl_vsi != pf->vsi[pf->lan_vsi])
4509 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
4510 ret = i40e_add_vsi(ctl_vsi);
4511 if (ret) {
4512 dev_info(&pf->pdev->dev,
4513 "rebuild of owner VSI failed: %d\n", ret);
4514 goto end_reconstitute;
4515 }
4516 i40e_vsi_reset_stats(ctl_vsi);
4517
4518 /* create the VEB in the switch and move the VSI onto the VEB */
4519 ret = i40e_add_veb(veb, ctl_vsi);
4520 if (ret)
4521 goto end_reconstitute;
4522
4523 /* create the remaining VSIs attached to this VEB */
4524 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4525 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
4526 continue;
4527
4528 if (pf->vsi[v]->veb_idx == veb->idx) {
4529 struct i40e_vsi *vsi = pf->vsi[v];
4530 vsi->uplink_seid = veb->seid;
4531 ret = i40e_add_vsi(vsi);
4532 if (ret) {
4533 dev_info(&pf->pdev->dev,
4534 "rebuild of vsi_idx %d failed: %d\n",
4535 v, ret);
4536 goto end_reconstitute;
4537 }
4538 i40e_vsi_reset_stats(vsi);
4539 }
4540 }
4541
4542 /* create any VEBs attached to this VEB - RECURSION */
4543 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
4544 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
4545 pf->veb[veb_idx]->uplink_seid = veb->seid;
4546 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
4547 if (ret)
4548 break;
4549 }
4550 }
4551
4552end_reconstitute:
4553 return ret;
4554}
4555
4556/**
4557 * i40e_get_capabilities - get info about the HW
4558 * @pf: the PF struct
4559 **/
4560static int i40e_get_capabilities(struct i40e_pf *pf)
4561{
4562 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
4563 u16 data_size;
4564 int buf_len;
4565 int err;
4566
4567 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
4568 do {
4569 cap_buf = kzalloc(buf_len, GFP_KERNEL);
4570 if (!cap_buf)
4571 return -ENOMEM;
4572
4573 /* this loads the data into the hw struct for us */
4574 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
4575 &data_size,
4576 i40e_aqc_opc_list_func_capabilities,
4577 NULL);
4578 /* data loaded, buffer no longer needed */
4579 kfree(cap_buf);
4580
4581 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
4582 /* retry with a larger buffer */
4583 buf_len = data_size;
4584 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
4585 dev_info(&pf->pdev->dev,
4586 "capability discovery failed: aq=%d\n",
4587 pf->hw.aq.asq_last_status);
4588 return -ENODEV;
4589 }
4590 } while (err);
4591
4592 if (pf->hw.debug_mask & I40E_DEBUG_USER)
4593 dev_info(&pf->pdev->dev,
4594 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
4595 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
4596 pf->hw.func_caps.num_msix_vectors,
4597 pf->hw.func_caps.num_msix_vectors_vf,
4598 pf->hw.func_caps.fd_filters_guaranteed,
4599 pf->hw.func_caps.fd_filters_best_effort,
4600 pf->hw.func_caps.num_tx_qp,
4601 pf->hw.func_caps.num_vsis);
4602
4603 return 0;
4604}
4605
4606/**
4607 * i40e_fdir_setup - initialize the Flow Director resources
4608 * @pf: board private structure
4609 **/
4610static void i40e_fdir_setup(struct i40e_pf *pf)
4611{
4612 struct i40e_vsi *vsi;
4613 bool new_vsi = false;
4614 int err, i;
4615
4616 if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED |
4617 I40E_FLAG_FDIR_ATR_ENABLED)))
4618 return;
4619
4620 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
4621
4622 /* find existing or make new FDIR VSI */
4623 vsi = NULL;
4624 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4625 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
4626 vsi = pf->vsi[i];
4627 if (!vsi) {
4628 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->mac_seid, 0);
4629 if (!vsi) {
4630 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
4631 pf->flags &= ~I40E_FLAG_FDIR_ENABLED;
4632 return;
4633 }
4634 new_vsi = true;
4635 }
4636 WARN_ON(vsi->base_queue != I40E_FDIR_RING);
4637 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_rings);
4638
4639 err = i40e_vsi_setup_tx_resources(vsi);
4640 if (!err)
4641 err = i40e_vsi_setup_rx_resources(vsi);
4642 if (!err)
4643 err = i40e_vsi_configure(vsi);
4644 if (!err && new_vsi) {
4645 char int_name[IFNAMSIZ + 9];
4646 snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
4647 dev_driver_string(&pf->pdev->dev));
4648 err = i40e_vsi_request_irq(vsi, int_name);
4649 }
4650 if (!err)
4651 err = i40e_up_complete(vsi);
4652
4653 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4654}
4655
4656/**
4657 * i40e_fdir_teardown - release the Flow Director resources
4658 * @pf: board private structure
4659 **/
4660static void i40e_fdir_teardown(struct i40e_pf *pf)
4661{
4662 int i;
4663
4664 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
4665 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
4666 i40e_vsi_release(pf->vsi[i]);
4667 break;
4668 }
4669 }
4670}
4671
4672/**
4673 * i40e_handle_reset_warning - prep for the core to reset
4674 * @pf: board private structure
4675 *
4676 * Close up the VFs and other things in prep for a Core Reset,
4677 * then get ready to rebuild the world.
4678 **/
4679static void i40e_handle_reset_warning(struct i40e_pf *pf)
4680{
4681 struct i40e_driver_version dv;
4682 struct i40e_hw *hw = &pf->hw;
4683 i40e_status ret;
4684 u32 v;
4685
4686 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
4687 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
4688 return;
4689
4690 dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
4691
4692 i40e_vc_notify_reset(pf);
4693
4694 /* quiesce the VSIs and their queues that are not already DOWN */
4695 i40e_pf_quiesce_all_vsi(pf);
4696
4697 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4698 if (pf->vsi[v])
4699 pf->vsi[v]->seid = 0;
4700 }
4701
4702 i40e_shutdown_adminq(&pf->hw);
4703
4704 /* Now we wait for GRST to settle out.
4705 * We don't have to delete the VEBs or VSIs from the hw switch
4706 * because the reset will make them disappear.
4707 */
4708 ret = i40e_pf_reset(hw);
4709 if (ret)
4710 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
4711 pf->pfr_count++;
4712
4713 if (test_bit(__I40E_DOWN, &pf->state))
4714 goto end_core_reset;
4715 dev_info(&pf->pdev->dev, "Rebuilding internal switch\n");
4716
4717 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
4718 ret = i40e_init_adminq(&pf->hw);
4719 if (ret) {
4720 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
4721 goto end_core_reset;
4722 }
4723
4724 ret = i40e_get_capabilities(pf);
4725 if (ret) {
4726 dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
4727 ret);
4728 goto end_core_reset;
4729 }
4730
4731 /* call shutdown HMC */
4732 ret = i40e_shutdown_lan_hmc(hw);
4733 if (ret) {
4734 dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
4735 goto end_core_reset;
4736 }
4737
4738 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
4739 hw->func_caps.num_rx_qp,
4740 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
4741 if (ret) {
4742 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
4743 goto end_core_reset;
4744 }
4745 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
4746 if (ret) {
4747 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
4748 goto end_core_reset;
4749 }
4750
4751 /* do basic switch setup */
4752 ret = i40e_setup_pf_switch(pf);
4753 if (ret)
4754 goto end_core_reset;
4755
4756 /* Rebuild the VSIs and VEBs that existed before reset.
4757 * They are still in our local switch element arrays, so only
4758 * need to rebuild the switch model in the HW.
4759 *
4760 * If there were VEBs but the reconstitution failed, we'll try
4761 * to recover minimal use by getting the basic PF VSI working.
4762 */
4763 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
4764 dev_info(&pf->pdev->dev, "attempting to rebuild switch\n");
4765 /* find the one VEB connected to the MAC, and find orphans */
4766 for (v = 0; v < I40E_MAX_VEB; v++) {
4767 if (!pf->veb[v])
4768 continue;
4769
4770 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
4771 pf->veb[v]->uplink_seid == 0) {
4772 ret = i40e_reconstitute_veb(pf->veb[v]);
4773
4774 if (!ret)
4775 continue;
4776
4777 /* If Main VEB failed, we're in deep doodoo,
4778 * so give up rebuilding the switch and set up
4779 * for minimal rebuild of PF VSI.
4780 * If orphan failed, we'll report the error
4781 * but try to keep going.
4782 */
4783 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
4784 dev_info(&pf->pdev->dev,
4785 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
4786 ret);
4787 pf->vsi[pf->lan_vsi]->uplink_seid
4788 = pf->mac_seid;
4789 break;
4790 } else if (pf->veb[v]->uplink_seid == 0) {
4791 dev_info(&pf->pdev->dev,
4792 "rebuild of orphan VEB failed: %d\n",
4793 ret);
4794 }
4795 }
4796 }
4797 }
4798
4799 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
4800 dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
4801 /* no VEB, so rebuild only the Main VSI */
4802 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
4803 if (ret) {
4804 dev_info(&pf->pdev->dev,
4805 "rebuild of Main VSI failed: %d\n", ret);
4806 goto end_core_reset;
4807 }
4808 }
4809
4810 /* reinit the misc interrupt */
4811 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4812 ret = i40e_setup_misc_vector(pf);
4813
4814 /* restart the VSIs that were rebuilt and running before the reset */
4815 i40e_pf_unquiesce_all_vsi(pf);
4816
4817 /* tell the firmware that we're starting */
4818 dv.major_version = DRV_VERSION_MAJOR;
4819 dv.minor_version = DRV_VERSION_MINOR;
4820 dv.build_version = DRV_VERSION_BUILD;
4821 dv.subbuild_version = 0;
4822 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
4823
4824 dev_info(&pf->pdev->dev, "PF reset done\n");
4825
4826end_core_reset:
4827 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
4828}
4829
4830/**
4831 * i40e_handle_mdd_event - Handle a Malicious Driver Detection event
4832 * @pf: pointer to the pf structure
4833 *
4834 * Called from the MDD irq handler to identify possibly malicious VFs
4835 **/
4836static void i40e_handle_mdd_event(struct i40e_pf *pf)
4837{
4838 struct i40e_hw *hw = &pf->hw;
4839 bool mdd_detected = false;
4840 struct i40e_vf *vf;
4841 u32 reg;
4842 int i;
4843
4844 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
4845 return;
4846
4847 /* find what triggered the MDD event */
4848 reg = rd32(hw, I40E_GL_MDET_TX);
4849 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
4850 u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK)
4851 >> I40E_GL_MDET_TX_FUNCTION_SHIFT;
4852 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK)
4853 >> I40E_GL_MDET_TX_EVENT_SHIFT;
4854 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
4855 >> I40E_GL_MDET_TX_QUEUE_SHIFT;
4856 dev_info(&pf->pdev->dev,
4857 "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n",
4858 event, queue, func);
4859 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
4860 mdd_detected = true;
4861 }
4862 reg = rd32(hw, I40E_GL_MDET_RX);
4863 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
4864 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK)
4865 >> I40E_GL_MDET_RX_FUNCTION_SHIFT;
4866 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK)
4867 >> I40E_GL_MDET_RX_EVENT_SHIFT;
4868 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
4869 >> I40E_GL_MDET_RX_QUEUE_SHIFT;
4870 dev_info(&pf->pdev->dev,
4871 "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n",
4872 event, queue, func);
4873 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
4874 mdd_detected = true;
4875 }
4876
4877 /* see if one of the VFs needs its hand slapped */
4878 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
4879 vf = &(pf->vf[i]);
4880 reg = rd32(hw, I40E_VP_MDET_TX(i));
4881 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
4882 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
4883 vf->num_mdd_events++;
4884 dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i);
4885 }
4886
4887 reg = rd32(hw, I40E_VP_MDET_RX(i));
4888 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
4889 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
4890 vf->num_mdd_events++;
4891 dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i);
4892 }
4893
4894 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
4895 dev_info(&pf->pdev->dev,
4896 "Too many MDD events on VF %d, disabled\n", i);
4897 dev_info(&pf->pdev->dev,
4898 "Use PF Control I/F to re-enable the VF\n");
4899 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
4900 }
4901 }
4902
4903 /* re-enable mdd interrupt cause */
4904 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
4905 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4906 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4907 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4908 i40e_flush(hw);
4909}
4910
4911/**
4912 * i40e_service_task - Run the driver's async subtasks
4913 * @work: pointer to work_struct containing our data
4914 **/
4915static void i40e_service_task(struct work_struct *work)
4916{
4917 struct i40e_pf *pf = container_of(work,
4918 struct i40e_pf,
4919 service_task);
4920 unsigned long start_time = jiffies;
4921
4922 i40e_reset_subtask(pf);
4923 i40e_handle_mdd_event(pf);
4924 i40e_vc_process_vflr_event(pf);
4925 i40e_watchdog_subtask(pf);
4926 i40e_fdir_reinit_subtask(pf);
4927 i40e_check_hang_subtask(pf);
4928 i40e_sync_filters_subtask(pf);
4929 i40e_clean_adminq_subtask(pf);
4930
4931 i40e_service_event_complete(pf);
4932
4933 /* If the tasks have taken longer than one timer cycle or there
4934 * is more work to be done, reschedule the service task now
4935 * rather than wait for the timer to tick again.
4936 */
4937 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
4938 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
4939 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
4940 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
4941 i40e_service_event_schedule(pf);
4942}
4943
4944/**
4945 * i40e_service_timer - timer callback
4946 * @data: pointer to PF struct
4947 **/
4948static void i40e_service_timer(unsigned long data)
4949{
4950 struct i40e_pf *pf = (struct i40e_pf *)data;
4951
4952 mod_timer(&pf->service_timer,
4953 round_jiffies(jiffies + pf->service_timer_period));
4954 i40e_service_event_schedule(pf);
4955}
4956
4957/**
4958 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
4959 * @vsi: the VSI being configured
4960 **/
4961static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
4962{
4963 struct i40e_pf *pf = vsi->back;
4964
4965 switch (vsi->type) {
4966 case I40E_VSI_MAIN:
4967 vsi->alloc_queue_pairs = pf->num_lan_qps;
4968 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
4969 I40E_REQ_DESCRIPTOR_MULTIPLE);
4970 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4971 vsi->num_q_vectors = pf->num_lan_msix;
4972 else
4973 vsi->num_q_vectors = 1;
4974
4975 break;
4976
4977 case I40E_VSI_FDIR:
4978 vsi->alloc_queue_pairs = 1;
4979 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
4980 I40E_REQ_DESCRIPTOR_MULTIPLE);
4981 vsi->num_q_vectors = 1;
4982 break;
4983
4984 case I40E_VSI_VMDQ2:
4985 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
4986 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
4987 I40E_REQ_DESCRIPTOR_MULTIPLE);
4988 vsi->num_q_vectors = pf->num_vmdq_msix;
4989 break;
4990
4991 case I40E_VSI_SRIOV:
4992 vsi->alloc_queue_pairs = pf->num_vf_qps;
4993 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
4994 I40E_REQ_DESCRIPTOR_MULTIPLE);
4995 break;
4996
4997 default:
4998 WARN_ON(1);
4999 return -ENODATA;
5000 }
5001
5002 return 0;
5003}
5004
5005/**
5006 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
5007 * @pf: board private structure
5008 * @type: type of VSI
5009 *
5010 * On error: returns error code (negative)
5011 * On success: returns vsi index in PF (positive)
5012 **/
5013static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
5014{
5015 int ret = -ENODEV;
5016 struct i40e_vsi *vsi;
5017 int sz_vectors;
5018 int sz_rings;
5019 int vsi_idx;
5020 int i;
5021
5022 /* Need to protect the allocation of the VSIs at the PF level */
5023 mutex_lock(&pf->switch_mutex);
5024
5025 /* VSI list may be fragmented if VSI creation/destruction has
5026 * been happening. We can afford to do a quick scan to look
5027 * for any free VSIs in the list.
5028 *
5029 * find next empty vsi slot, looping back around if necessary
5030 */
5031 i = pf->next_vsi;
5032 while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
5033 i++;
5034 if (i >= pf->hw.func_caps.num_vsis) {
5035 i = 0;
5036 while (i < pf->next_vsi && pf->vsi[i])
5037 i++;
5038 }
5039
5040 if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
5041 vsi_idx = i; /* Found one! */
5042 } else {
5043 ret = -ENODEV;
5044 goto unlock_pf; /* out of VSI slots! */
5045 }
5046 pf->next_vsi = ++i;
5047
5048 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
5049 if (!vsi) {
5050 ret = -ENOMEM;
5051 goto unlock_pf;
5052 }
5053 vsi->type = type;
5054 vsi->back = pf;
5055 set_bit(__I40E_DOWN, &vsi->state);
5056 vsi->flags = 0;
5057 vsi->idx = vsi_idx;
5058 vsi->rx_itr_setting = pf->rx_itr_default;
5059 vsi->tx_itr_setting = pf->tx_itr_default;
5060 vsi->netdev_registered = false;
5061 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
5062 INIT_LIST_HEAD(&vsi->mac_filter_list);
5063
5064 ret = i40e_set_num_rings_in_vsi(vsi);
5065 if (ret)
5066 goto err_rings;
5067
5068 /* allocate memory for ring pointers */
5069 sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
5070 vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL);
5071 if (!vsi->tx_rings) {
5072 ret = -ENOMEM;
5073 goto err_rings;
5074 }
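/* the Rx ring pointers occupy the second half of the same allocation */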
5075 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
5076
5077 /* allocate memory for q_vector pointers */
5078 sz_vectors = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
5079 vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL);
5080 if (!vsi->q_vectors) {
5081 ret = -ENOMEM;
5082 goto err_vectors;
5083 }
5084
5085 /* Setup default MSIX irq handler for VSI */
5086 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
5087
5088 pf->vsi[vsi_idx] = vsi;
5089 ret = vsi_idx;
5090 goto unlock_pf;
5091
5092err_vectors:
5093 kfree(vsi->tx_rings);
5094err_rings:
5095 pf->next_vsi = i - 1;
5096 kfree(vsi);
5097unlock_pf:
5098 mutex_unlock(&pf->switch_mutex);
5099 return ret;
5100}
5101
5102/**
5103 * i40e_vsi_clear - Deallocate the VSI provided
5104 * @vsi: the VSI being un-configured
5105 **/
5106static int i40e_vsi_clear(struct i40e_vsi *vsi)
5107{
5108 struct i40e_pf *pf;
5109
5110 if (!vsi)
5111 return 0;
5112
5113 if (!vsi->back)
5114 goto free_vsi;
5115 pf = vsi->back;
5116
5117 mutex_lock(&pf->switch_mutex);
5118 if (!pf->vsi[vsi->idx]) {
5119 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
5120 vsi->idx, vsi->idx, vsi, vsi->type);
5121 goto unlock_vsi;
5122 }
5123
5124 if (pf->vsi[vsi->idx] != vsi) {
5125 dev_err(&pf->pdev->dev,
5126 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
5127 pf->vsi[vsi->idx]->idx,
5128 pf->vsi[vsi->idx],
5129 pf->vsi[vsi->idx]->type,
5130 vsi->idx, vsi, vsi->type);
5131 goto unlock_vsi;
5132 }
5133
5134 /* updates the pf for this cleared vsi */
5135 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
5136 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
5137
5138 /* free the ring and vector containers */
5139 kfree(vsi->q_vectors);
5140 kfree(vsi->tx_rings);
5141
5142 pf->vsi[vsi->idx] = NULL;
5143 if (vsi->idx < pf->next_vsi)
5144 pf->next_vsi = vsi->idx;
5145
5146unlock_vsi:
5147 mutex_unlock(&pf->switch_mutex);
5148free_vsi:
5149 kfree(vsi);
5150
5151 return 0;
5152}
5153
5154/**
5155 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
5156 * @vsi: the VSI being cleaned
5157 **/
5158static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
5159{
5160 int i;
5161
5162 if (vsi->tx_rings[0])
5163 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
5164 kfree_rcu(vsi->tx_rings[i], rcu);
5165 vsi->tx_rings[i] = NULL;
5166 vsi->rx_rings[i] = NULL;
5167 }
5168
5169 return 0;
5170}
5171
5172/**
5173 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
5174 * @vsi: the VSI being configured
5175 **/
5176static int i40e_alloc_rings(struct i40e_vsi *vsi)
5177{
5178 struct i40e_pf *pf = vsi->back;
5179 int i;
5180
5181 /* Set basic values in the rings to be used later during open() */
5182 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
5183 struct i40e_ring *tx_ring;
5184 struct i40e_ring *rx_ring;
5185
5186 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
5187 if (!tx_ring)
5188 goto err_out;
5189
5190 tx_ring->queue_index = i;
5191 tx_ring->reg_idx = vsi->base_queue + i;
5192 tx_ring->ring_active = false;
5193 tx_ring->vsi = vsi;
5194 tx_ring->netdev = vsi->netdev;
5195 tx_ring->dev = &pf->pdev->dev;
5196 tx_ring->count = vsi->num_desc;
5197 tx_ring->size = 0;
5198 tx_ring->dcb_tc = 0;
5199 vsi->tx_rings[i] = tx_ring;
5200
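/* the paired Rx ring immediately follows the Tx ring in the
 * two-ring block allocated above
 */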
5201 rx_ring = &tx_ring[1];
5202 rx_ring->queue_index = i;
5203 rx_ring->reg_idx = vsi->base_queue + i;
5204 rx_ring->ring_active = false;
5205 rx_ring->vsi = vsi;
5206 rx_ring->netdev = vsi->netdev;
5207 rx_ring->dev = &pf->pdev->dev;
5208 rx_ring->count = vsi->num_desc;
5209 rx_ring->size = 0;
5210 rx_ring->dcb_tc = 0;
5211 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
5212 set_ring_16byte_desc_enabled(rx_ring);
5213 else
5214 clear_ring_16byte_desc_enabled(rx_ring);
5215 vsi->rx_rings[i] = rx_ring;
5216 }
5217
5218 return 0;
5219
5220err_out:
5221 i40e_vsi_clear_rings(vsi);
5222 return -ENOMEM;
5223}
5224
5225/**
5226 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
5227 * @pf: board private structure
5228 * @vectors: the number of MSI-X vectors to request
5229 *
5230 * Returns the number of vectors reserved, or error
5231 **/
5232static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
5233{
5234 int err = 0;
5235
5236 pf->num_msix_entries = 0;
5237 while (vectors >= I40E_MIN_MSIX) {
5238 err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
5239 if (err == 0) {
5240 /* good to go */
5241 pf->num_msix_entries = vectors;
5242 break;
5243 } else if (err < 0) {
5244 /* total failure */
5245 dev_info(&pf->pdev->dev,
5246 "MSI-X vector reservation failed: %d\n", err);
5247 vectors = 0;
5248 break;
5249 } else {
5250 /* err > 0 is the hint for retry */
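/* e.g. asking for 16 vectors when only 8 are free returns 8 */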
5251 dev_info(&pf->pdev->dev,
5252 "MSI-X vectors wanted %d, retrying with %d\n",
5253 vectors, err);
5254 vectors = err;
5255 }
5256 }
5257
5258 if (vectors > 0 && vectors < I40E_MIN_MSIX) {
5259 dev_info(&pf->pdev->dev,
5260 "Couldn't get enough vectors, only %d available\n",
5261 vectors);
5262 vectors = 0;
5263 }
5264
5265 return vectors;
5266}
5267
5268/**
5269 * i40e_init_msix - Setup the MSIX capability
5270 * @pf: board private structure
5271 *
5272 * Work with the OS to set up the MSIX vectors needed.
5273 *
5274 * Returns 0 on success, negative on failure
5275 **/
5276static int i40e_init_msix(struct i40e_pf *pf)
5277{
5278 i40e_status err = 0;
5279 struct i40e_hw *hw = &pf->hw;
5280 int v_budget, i;
5281 int vec;
5282
5283 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
5284 return -ENODEV;
5285
5286 /* The number of vectors we'll request will consist of:
5287 * - Add 1 for "other" cause for Admin Queue events, etc.
5288 * - The number of LAN queue pairs
5289 * already adjusted for the NUMA node
5290 * assumes symmetric Tx/Rx pairing
5291 * - The number of VMDq pairs
5292 * Once we count this up, try the request.
5293 *
5294 * If we can't get what we want, we'll simplify to nearly nothing
5295 * and try again. If that still fails, we punt.
5296 */
5297 pf->num_lan_msix = pf->num_lan_qps;
5298 pf->num_vmdq_msix = pf->num_vmdq_qps;
5299 v_budget = 1 + pf->num_lan_msix;
5300 v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
5301 if (pf->flags & I40E_FLAG_FDIR_ENABLED)
5302 v_budget++;
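/* e.g. 4 LAN queues, 2 VMDq VSIs of 2 queues each, FDIR on:
 * v_budget = 1 + 4 + (2 * 2) + 1 = 10
 */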
5303
5304 /* Scale down if necessary, and the rings will share vectors */
5305 v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);
5306
5307 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
5308 GFP_KERNEL);
5309 if (!pf->msix_entries)
5310 return -ENOMEM;
5311
5312 for (i = 0; i < v_budget; i++)
5313 pf->msix_entries[i].entry = i;
5314 vec = i40e_reserve_msix_vectors(pf, v_budget);
5315 if (vec < I40E_MIN_MSIX) {
5316 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
5317 kfree(pf->msix_entries);
5318 pf->msix_entries = NULL;
5319 return -ENODEV;
5320
5321 } else if (vec == I40E_MIN_MSIX) {
5322 /* Adjust for minimal MSIX use */
5323 dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n");
5324 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
5325 pf->num_vmdq_vsis = 0;
5326 pf->num_vmdq_qps = 0;
5327 pf->num_vmdq_msix = 0;
5328 pf->num_lan_qps = 1;
5329 pf->num_lan_msix = 1;
5330
5331 } else if (vec != v_budget) {
5332 /* Scale vector usage down */
5333 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
5334 vec--; /* reserve the misc vector */
5335
5336 /* partition out the remaining vectors */
5337 switch (vec) {
5338 case 2:
5339 pf->num_vmdq_vsis = 1;
5340 pf->num_lan_msix = 1;
5341 break;
5342 case 3:
5343 pf->num_vmdq_vsis = 1;
5344 pf->num_lan_msix = 2;
5345 break;
5346 default:
5347 pf->num_lan_msix = min_t(int, (vec / 2),
5348 pf->num_lan_qps);
5349 pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
5350 I40E_DEFAULT_NUM_VMDQ_VSI);
5351 break;
5352 }
5353 }
5354
5355 return err;
5356}
5357
5358/**
5359 * i40e_alloc_q_vector - Allocate memory for a single interrupt vector
5360 * @vsi: the VSI being configured
5361 * @v_idx: index of the vector in the vsi struct
5362 *
5363 * We allocate one q_vector. If allocation fails we return -ENOMEM.
5364 **/
5365static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
5366{
5367 struct i40e_q_vector *q_vector;
5368
5369 /* allocate q_vector */
5370 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
5371 if (!q_vector)
5372 return -ENOMEM;
5373
5374 q_vector->vsi = vsi;
5375 q_vector->v_idx = v_idx;
5376 cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
5377 if (vsi->netdev)
5378 netif_napi_add(vsi->netdev, &q_vector->napi,
5379 i40e_napi_poll, vsi->work_limit);
5380
5381 q_vector->rx.latency_range = I40E_LOW_LATENCY;
5382 q_vector->tx.latency_range = I40E_LOW_LATENCY;
5383
5384 /* tie q_vector and vsi together */
5385 vsi->q_vectors[v_idx] = q_vector;
5386
5387 return 0;
5388}
5389
5390/**
5391 * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
5392 * @vsi: the VSI being configured
5393 *
5394 * We allocate one q_vector per queue interrupt. If allocation fails we
5395 * return -ENOMEM.
5396 **/
5397static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
5398{
5399 struct i40e_pf *pf = vsi->back;
5400 int v_idx, num_q_vectors;
5401 int err;
5402
5403 /* if not MSIX, give the one vector only to the LAN VSI */
5404 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5405 num_q_vectors = vsi->num_q_vectors;
5406 else if (vsi == pf->vsi[pf->lan_vsi])
5407 num_q_vectors = 1;
5408 else
5409 return -EINVAL;
5410
5411 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
5412 err = i40e_alloc_q_vector(vsi, v_idx);
5413 if (err)
5414 goto err_out;
5415 }
5416
5417 return 0;
5418
5419err_out:
5420 while (v_idx--)
5421 i40e_free_q_vector(vsi, v_idx);
5422
5423 return err;
5424}
5425
5426/**
5427 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
5428 * @pf: board private structure to initialize
5429 **/
5430static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
5431{
5432 int err = 0;
5433
5434 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
5435 err = i40e_init_msix(pf);
5436 if (err) {
5437 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
5438 I40E_FLAG_RSS_ENABLED |
5439 I40E_FLAG_MQ_ENABLED |
5440 I40E_FLAG_DCB_ENABLED |
5441 I40E_FLAG_SRIOV_ENABLED |
5442 I40E_FLAG_FDIR_ENABLED |
5443 I40E_FLAG_FDIR_ATR_ENABLED |
5444 I40E_FLAG_VMDQ_ENABLED);
5445
5446 /* rework the queue expectations without MSIX */
5447 i40e_determine_queue_usage(pf);
5448 }
5449 }
5450
5451 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
5452 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
5453 dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
5454 err = pci_enable_msi(pf->pdev);
5455 if (err) {
5456 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
5457 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
5458 }
5459 }
5460
5461 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
5462 dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");
5463
5464 /* track first vector for misc interrupts */
5465 err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
5466}
5467
5468/**
5469 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
5470 * @pf: board private structure
5471 *
5472 * This sets up the handler for MSIX 0, which is used to manage the
5473 * non-queue interrupts, e.g. AdminQ and errors. This is not used
5474 * when in MSI or Legacy interrupt mode.
5475 **/
5476static int i40e_setup_misc_vector(struct i40e_pf *pf)
5477{
5478 struct i40e_hw *hw = &pf->hw;
5479 int err = 0;
5480
5481 /* Only request the irq if this is the first time through, and
5482 * not when we're rebuilding after a Reset
5483 */
5484 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
5485 err = request_irq(pf->msix_entries[0].vector,
5486 i40e_intr, 0, pf->misc_int_name, pf);
5487 if (err) {
5488 dev_info(&pf->pdev->dev,
5489 "request_irq for msix_misc failed: %d\n", err);
5490 return -EFAULT;
5491 }
5492 }
5493
5494 i40e_enable_misc_int_causes(hw);
5495
5496 /* associate no queues to the misc vector */
5497 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
5498 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
5499
5500 i40e_flush(hw);
5501
5502 i40e_irq_dynamic_enable_icr0(pf);
5503
5504 return err;
5505}
5506
5507/**
5508 * i40e_config_rss - Prepare for RSS if used
5509 * @pf: board private structure
5510 **/
5511static int i40e_config_rss(struct i40e_pf *pf)
5512{
5513 struct i40e_hw *hw = &pf->hw;
5514 u32 lut = 0;
5515 int i, j;
5516 u64 hena;
5517 /* Set of random keys generated using kernel random number generator */
5518 static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
5519 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
5520 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
5521 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
5522
5523 /* Fill out hash function seed */
5524 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
5525 wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
5526
5527 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
5528 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
5529 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
5530 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
5531 ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
5532 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
5533 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
5534 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
5535 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
5536 ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
5537 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
5538 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)|
5539 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
5540 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
5541 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
5542
5543 /* Populate the LUT with max no. of queues in round robin fashion */
5544 for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
5545
5546 /* The assumption is that lan qp count will be the highest
5547 * qp count for any PF VSI that needs RSS.
5548 * If multiple VSIs need RSS support, all the qp counts
5549 * for those VSIs should be a power of 2 for RSS to work.
5550 * If LAN VSI is the only consumer for RSS then this requirement
5551 * is not necessary.
5552 */
5553 if (j == pf->rss_size)
5554 j = 0;
5555 /* lut = 4-byte sliding window of 4 lut entries */
5556 lut = (lut << 8) | (j &
5557 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
5558 /* On i = 3, we have 4 entries in lut; write to the register */
5559 if ((i & 3) == 3)
5560 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
5561 }
5562 i40e_flush(hw);
5563
5564 return 0;
5565}
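/* Editor's note: a worked example of the sliding-window packing above,
 * assuming rss_size = 4 and rss_table_entry_width = 7 (mask 0x7f):
 *
 *   i=0: j=0, lut = 0x00000000
 *   i=1: j=1, lut = 0x00000001
 *   i=2: j=2, lut = 0x00000102
 *   i=3: j=3, lut = 0x00010203  ->  wr32(hw, I40E_PFQF_HLUT(0), lut)
 *
 * Each 32-bit HLUT register thus carries four consecutive LUT entries,
 * one per byte, and is written on every fourth iteration.
 */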
5566
5567/**
5568 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
5569 * @pf: board private structure to initialize
5570 *
5571 * i40e_sw_init initializes the Adapter private data structure.
5572 * Fields are initialized based on PCI device information and
5573 * OS network device settings (MTU size).
5574 **/
5575static int i40e_sw_init(struct i40e_pf *pf)
5576{
5577 int err = 0;
5578 int size;
5579
5580 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
5581 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
5582 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
5583 if (I40E_DEBUG_USER & debug)
5584 pf->hw.debug_mask = debug;
5585 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
5586 I40E_DEFAULT_MSG_ENABLE);
5587 }
5588
5589 /* Set default capability flags */
5590 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
5591 I40E_FLAG_MSI_ENABLED |
5592 I40E_FLAG_MSIX_ENABLED |
5593 I40E_FLAG_RX_PS_ENABLED |
5594 I40E_FLAG_MQ_ENABLED |
5595 I40E_FLAG_RX_1BUF_ENABLED;
5596
5597 pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
5598 if (pf->hw.func_caps.rss) {
5599 pf->flags |= I40E_FLAG_RSS_ENABLED;
5600 pf->rss_size = min_t(int, pf->rss_size_max,
5601 nr_cpus_node(numa_node_id()));
5602 } else {
5603 pf->rss_size = 1;
5604 }
5605
5606 if (pf->hw.func_caps.dcb)
5607 pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC;
5608 else
5609 pf->num_tc_qps = 0;
5610
5611 if (pf->hw.func_caps.fd) {
5612 /* FW/NVM is not yet fixed in this regard */
5613 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
5614 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
5615 pf->flags |= I40E_FLAG_FDIR_ATR_ENABLED;
5616 dev_info(&pf->pdev->dev,
5617 "Flow Director ATR mode Enabled\n");
5618 pf->flags |= I40E_FLAG_FDIR_ENABLED;
5619 dev_info(&pf->pdev->dev,
5620 "Flow Director Side Band mode Enabled\n");
5621 pf->fdir_pf_filter_count =
5622 pf->hw.func_caps.fd_filters_guaranteed;
5623 }
5624 } else {
5625 pf->fdir_pf_filter_count = 0;
5626 }
5627
5628 if (pf->hw.func_caps.vmdq) {
5629 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
5630 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
5631 pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
5632 }
5633
5634 /* MFP mode enabled */
5635 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
5636 pf->flags |= I40E_FLAG_MFP_ENABLED;
5637 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
5638 }
5639
5640#ifdef CONFIG_PCI_IOV
5641 if (pf->hw.func_caps.num_vfs) {
5642 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
5643 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
5644 pf->num_req_vfs = min_t(int,
5645 pf->hw.func_caps.num_vfs,
5646 I40E_MAX_VF_COUNT);
5647 }
5648#endif /* CONFIG_PCI_IOV */
5649 pf->eeprom_version = 0xDEAD;
5650 pf->lan_veb = I40E_NO_VEB;
5651 pf->lan_vsi = I40E_NO_VSI;
5652
5653 /* set up queue assignment tracking */
5654 size = sizeof(struct i40e_lump_tracking)
5655 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
5656 pf->qp_pile = kzalloc(size, GFP_KERNEL);
5657 if (!pf->qp_pile) {
5658 err = -ENOMEM;
5659 goto sw_init_done;
5660 }
5661 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
5662 pf->qp_pile->search_hint = 0;
5663
5664 /* set up vector assignment tracking */
5665 size = sizeof(struct i40e_lump_tracking)
5666 + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
5667 pf->irq_pile = kzalloc(size, GFP_KERNEL);
5668 if (!pf->irq_pile) {
5669 kfree(pf->qp_pile);
5670 err = -ENOMEM;
5671 goto sw_init_done;
5672 }
5673 pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
5674 pf->irq_pile->search_hint = 0;
5675
5676 mutex_init(&pf->switch_mutex);
5677
5678sw_init_done:
5679 return err;
5680}
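/* Editor's note: the two "pile" allocations above size a fixed header
 * plus a trailing u16 array in a single kzalloc(). A minimal sketch of
 * the layout being assumed here (the real definition lives in i40e.h):
 *
 *   struct i40e_lump_tracking {
 *           u16 num_entries;    // slots in list[] below
 *           u16 search_hint;    // where the next search starts
 *           u16 list[];         // one slot per queue pair or vector
 *   };
 *
 * i40e_get_lump() then hands out contiguous runs of list[] slots and
 * tags them with the requesting owner's index.
 */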
5681
5682/**
5683 * i40e_set_features - set the netdev feature flags
5684 * @netdev: ptr to the netdev being adjusted
5685 * @features: the feature set that the stack is suggesting
5686 **/
5687static int i40e_set_features(struct net_device *netdev,
5688 netdev_features_t features)
5689{
5690 struct i40e_netdev_priv *np = netdev_priv(netdev);
5691 struct i40e_vsi *vsi = np->vsi;
5692
5693 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5694 i40e_vlan_stripping_enable(vsi);
5695 else
5696 i40e_vlan_stripping_disable(vsi);
5697
5698 return 0;
5699}
5700
5701static const struct net_device_ops i40e_netdev_ops = {
5702 .ndo_open = i40e_open,
5703 .ndo_stop = i40e_close,
5704 .ndo_start_xmit = i40e_lan_xmit_frame,
5705 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
5706 .ndo_set_rx_mode = i40e_set_rx_mode,
5707 .ndo_validate_addr = eth_validate_addr,
5708 .ndo_set_mac_address = i40e_set_mac,
5709 .ndo_change_mtu = i40e_change_mtu,
5710 .ndo_tx_timeout = i40e_tx_timeout,
5711 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
5712 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
5713#ifdef CONFIG_NET_POLL_CONTROLLER
5714 .ndo_poll_controller = i40e_netpoll,
5715#endif
5716 .ndo_setup_tc = i40e_setup_tc,
5717 .ndo_set_features = i40e_set_features,
5718 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
5719 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
5720 .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
5721 .ndo_get_vf_config = i40e_ndo_get_vf_config,
5722};
5723
5724/**
5725 * i40e_config_netdev - Setup the netdev flags
5726 * @vsi: the VSI being configured
5727 *
5728 * Returns 0 on success, negative value on failure
5729 **/
5730static int i40e_config_netdev(struct i40e_vsi *vsi)
5731{
5732 struct i40e_pf *pf = vsi->back;
5733 struct i40e_hw *hw = &pf->hw;
5734 struct i40e_netdev_priv *np;
5735 struct net_device *netdev;
5736 u8 mac_addr[ETH_ALEN];
5737 int etherdev_size;
5738
5739 etherdev_size = sizeof(struct i40e_netdev_priv);
5740 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
5741 if (!netdev)
5742 return -ENOMEM;
5743
5744 vsi->netdev = netdev;
5745 np = netdev_priv(netdev);
5746 np->vsi = vsi;
5747
5748 netdev->hw_enc_features = NETIF_F_IP_CSUM |
5749 NETIF_F_GSO_UDP_TUNNEL |
5750 NETIF_F_TSO |
5751 NETIF_F_SG;
5752
5753 netdev->features = NETIF_F_SG |
5754 NETIF_F_IP_CSUM |
5755 NETIF_F_SCTP_CSUM |
5756 NETIF_F_HIGHDMA |
5757 NETIF_F_GSO_UDP_TUNNEL |
5758 NETIF_F_HW_VLAN_CTAG_TX |
5759 NETIF_F_HW_VLAN_CTAG_RX |
5760 NETIF_F_HW_VLAN_CTAG_FILTER |
5761 NETIF_F_IPV6_CSUM |
5762 NETIF_F_TSO |
5763 NETIF_F_TSO6 |
5764 NETIF_F_RXCSUM |
5765 NETIF_F_RXHASH |
5766 0;
5767
5768 /* copy netdev features into list of user selectable features */
5769 netdev->hw_features |= netdev->features;
5770
5771 if (vsi->type == I40E_VSI_MAIN) {
5772 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
5773 memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
5774 } else {
5775 /* relate the VSI_VMDQ name to the VSI_MAIN name */
5776 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
5777 pf->vsi[pf->lan_vsi]->netdev->name);
5778 random_ether_addr(mac_addr);
5779 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
5780 }
5781
5782 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
5783 memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
5784 /* vlan gets same features (except vlan offload)
5785 * after any tweaks for specific VSI types
5786 */
5787 netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
5788 NETIF_F_HW_VLAN_CTAG_RX |
5789 NETIF_F_HW_VLAN_CTAG_FILTER);
5790 netdev->priv_flags |= IFF_UNICAST_FLT;
5791 netdev->priv_flags |= IFF_SUPP_NOFCS;
5792 /* Setup netdev TC information */
5793 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
5794
5795 netdev->netdev_ops = &i40e_netdev_ops;
5796 netdev->watchdog_timeo = 5 * HZ;
5797 i40e_set_ethtool_ops(netdev);
5798
5799 return 0;
5800}
5801
5802/**
5803 * i40e_vsi_delete - Delete a VSI from the switch
5804 * @vsi: the VSI being removed
5805 *
6806 * Nothing is returned; the default and FDIR VSIs are skipped below.
5807 **/
5808static void i40e_vsi_delete(struct i40e_vsi *vsi)
5809{
5810 /* removal of the default VSI is not allowed */
5811 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
5812 return;
5813
5814 /* there is no HW VSI for FDIR */
5815 if (vsi->type == I40E_VSI_FDIR)
5816 return;
5817
5818 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
5819 return;
5820}
5821
5822/**
5823 * i40e_add_vsi - Add a VSI to the switch
5824 * @vsi: the VSI being configured
5825 *
5826 * This initializes a VSI context depending on the VSI type to be added and
5827 * passes it down to the add_vsi aq command.
5828 **/
5829static int i40e_add_vsi(struct i40e_vsi *vsi)
5830{
5831 int ret = -ENODEV;
5832 struct i40e_mac_filter *f, *ftmp;
5833 struct i40e_pf *pf = vsi->back;
5834 struct i40e_hw *hw = &pf->hw;
5835 struct i40e_vsi_context ctxt;
5836 u8 enabled_tc = 0x1; /* TC0 enabled */
5837 int f_count = 0;
5838
5839 memset(&ctxt, 0, sizeof(ctxt));
5840 switch (vsi->type) {
5841 case I40E_VSI_MAIN:
5842 /* The PF's main VSI is already setup as part of the
5843 * device initialization, so we'll not bother with
5844 * the add_vsi call, but we will retrieve the current
5845 * VSI context.
5846 */
5847 ctxt.seid = pf->main_vsi_seid;
5848 ctxt.pf_num = pf->hw.pf_id;
5849 ctxt.vf_num = 0;
5850 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
5851 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5852 if (ret) {
5853 dev_info(&pf->pdev->dev,
5854 "couldn't get pf vsi config, err %d, aq_err %d\n",
5855 ret, pf->hw.aq.asq_last_status);
5856 return -ENOENT;
5857 }
5858 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5859 vsi->info.valid_sections = 0;
5860
5861 vsi->seid = ctxt.seid;
5862 vsi->id = ctxt.vsi_number;
5863
5864 enabled_tc = i40e_pf_get_tc_map(pf);
5865
5866 /* MFP mode setup queue map and update VSI */
5867 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
5868 memset(&ctxt, 0, sizeof(ctxt));
5869 ctxt.seid = pf->main_vsi_seid;
5870 ctxt.pf_num = pf->hw.pf_id;
5871 ctxt.vf_num = 0;
5872 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5873 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5874 if (ret) {
5875 dev_info(&pf->pdev->dev,
5876 "update vsi failed, aq_err=%d\n",
5877 pf->hw.aq.asq_last_status);
5878 ret = -ENOENT;
5879 goto err;
5880 }
5881 /* update the local VSI info queue map */
5882 i40e_vsi_update_queue_map(vsi, &ctxt);
5883 vsi->info.valid_sections = 0;
5884 } else {
5885 /* Default/Main VSI is only enabled for TC0
5886 * reconfigure it to enable all TCs that are
5887 * available on the port in SFP mode.
5888 */
5889 ret = i40e_vsi_config_tc(vsi, enabled_tc);
5890 if (ret) {
5891 dev_info(&pf->pdev->dev,
5892 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
5893 enabled_tc, ret,
5894 pf->hw.aq.asq_last_status);
5895 ret = -ENOENT;
5896 }
5897 }
5898 break;
5899
5900 case I40E_VSI_FDIR:
5901 /* no queue mapping or actual HW VSI needed */
5902 vsi->info.valid_sections = 0;
5903 vsi->seid = 0;
5904 vsi->id = 0;
5905 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
5906 return 0;
5907 break;
5908
5909 case I40E_VSI_VMDQ2:
5910 ctxt.pf_num = hw->pf_id;
5911 ctxt.vf_num = 0;
5912 ctxt.uplink_seid = vsi->uplink_seid;
5913 ctxt.connection_type = 0x1; /* regular data port */
5914 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5915
5916 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5917
5918 /* This VSI is connected to VEB so the switch_id
5919 * should be set to zero by default.
5920 */
5921 ctxt.info.switch_id = 0;
5922 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5923 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5924
5925 /* Setup the VSI tx/rx queue map for TC0 only for now */
5926 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
5927 break;
5928
5929 case I40E_VSI_SRIOV:
5930 ctxt.pf_num = hw->pf_id;
5931 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
5932 ctxt.uplink_seid = vsi->uplink_seid;
5933 ctxt.connection_type = 0x1; /* regular data port */
5934 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5935
5936 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5937
5938 /* This VSI is connected to VEB so the switch_id
5939 * should be set to zero by default.
5940 */
5941 ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5942
5943 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
5944 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5945 /* Setup the VSI tx/rx queue map for TC0 only for now */
5946 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
5947 break;
5948
5949 default:
5950 return -ENODEV;
5951 }
5952
5953 if (vsi->type != I40E_VSI_MAIN) {
5954 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5955 if (ret) {
5956 dev_info(&vsi->back->pdev->dev,
5957 "add vsi failed, aq_err=%d\n",
5958 vsi->back->hw.aq.asq_last_status);
5959 ret = -ENOENT;
5960 goto err;
5961 }
5962 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5963 vsi->info.valid_sections = 0;
5964 vsi->seid = ctxt.seid;
5965 vsi->id = ctxt.vsi_number;
5966 }
5967
5968 /* If macvlan filters already exist, force them to get loaded */
5969 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
5970 f->changed = true;
5971 f_count++;
5972 }
5973 if (f_count) {
5974 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
5975 pf->flags |= I40E_FLAG_FILTER_SYNC;
5976 }
5977
5978 /* Update VSI BW information */
5979 ret = i40e_vsi_get_bw_info(vsi);
5980 if (ret) {
5981 dev_info(&pf->pdev->dev,
5982 "couldn't get vsi bw info, err %d, aq_err %d\n",
5983 ret, pf->hw.aq.asq_last_status);
5984 /* VSI is already added so not tearing that up */
5985 ret = 0;
5986 }
5987
5988err:
5989 return ret;
5990}
5991
5992/**
5993 * i40e_vsi_release - Delete a VSI and free its resources
5994 * @vsi: the VSI being removed
5995 *
5996 * Returns 0 on success or < 0 on error
5997 **/
5998int i40e_vsi_release(struct i40e_vsi *vsi)
5999{
6000 struct i40e_mac_filter *f, *ftmp;
6001 struct i40e_veb *veb = NULL;
6002 struct i40e_pf *pf;
6003 u16 uplink_seid;
6004 int i, n;
6005
6006 pf = vsi->back;
6007
6008 /* release of a VEB-owner or last VSI is not allowed */
6009 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
6010 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
6011 vsi->seid, vsi->uplink_seid);
6012 return -ENODEV;
6013 }
6014 if (vsi == pf->vsi[pf->lan_vsi] &&
6015 !test_bit(__I40E_DOWN, &pf->state)) {
6016 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
6017 return -ENODEV;
6018 }
6019
6020 uplink_seid = vsi->uplink_seid;
6021 if (vsi->type != I40E_VSI_SRIOV) {
6022 if (vsi->netdev_registered) {
6023 vsi->netdev_registered = false;
6024 if (vsi->netdev) {
6025 /* results in a call to i40e_close() */
6026 unregister_netdev(vsi->netdev);
6027 free_netdev(vsi->netdev);
6028 vsi->netdev = NULL;
6029 }
6030 } else {
6031 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
6032 i40e_down(vsi);
6033 i40e_vsi_free_irq(vsi);
6034 i40e_vsi_free_tx_resources(vsi);
6035 i40e_vsi_free_rx_resources(vsi);
6036 }
6037 i40e_vsi_disable_irq(vsi);
6038 }
6039
6040 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
6041 i40e_del_filter(vsi, f->macaddr, f->vlan,
6042 f->is_vf, f->is_netdev);
6043 i40e_sync_vsi_filters(vsi);
6044
6045 i40e_vsi_delete(vsi);
6046 i40e_vsi_free_q_vectors(vsi);
6047 i40e_vsi_clear_rings(vsi);
6048 i40e_vsi_clear(vsi);
6049
6050 /* If this was the last thing on the VEB, except for the
6051 * controlling VSI, remove the VEB, which puts the controlling
6052 * VSI onto the next level down in the switch.
6053 *
6054 * Well, okay, there's one more exception here: don't remove
6055 * the orphan VEBs yet. We'll wait for an explicit remove request
6056 * from up the network stack.
6057 */
6058 for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6059 if (pf->vsi[i] &&
6060 pf->vsi[i]->uplink_seid == uplink_seid &&
6061 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
6062 n++; /* count the VSIs */
6063 }
6064 }
6065 for (i = 0; i < I40E_MAX_VEB; i++) {
6066 if (!pf->veb[i])
6067 continue;
6068 if (pf->veb[i]->uplink_seid == uplink_seid)
6069 n++; /* count the VEBs */
6070 if (pf->veb[i]->seid == uplink_seid)
6071 veb = pf->veb[i];
6072 }
6073 if (n == 0 && veb && veb->uplink_seid != 0)
6074 i40e_veb_release(veb);
6075
6076 return 0;
6077}
6078
6079/**
6080 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
6081 * @vsi: ptr to the VSI
6082 *
6083 * This should only be called after i40e_vsi_mem_alloc() which allocates the
6084 * corresponding SW VSI structure and initializes num_queue_pairs for the
6085 * newly allocated VSI.
6086 *
6087 * Returns 0 on success or negative on failure
6088 **/
6089static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
6090{
6091 int ret = -ENOENT;
6092 struct i40e_pf *pf = vsi->back;
6093
6094 if (vsi->q_vectors[0]) {
6095 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
6096 vsi->seid);
6097 return -EEXIST;
6098 }
6099
6100 if (vsi->base_vector) {
6101 dev_info(&pf->pdev->dev,
6102 "VSI %d has non-zero base vector %d\n",
6103 vsi->seid, vsi->base_vector);
6104 return -EEXIST;
6105 }
6106
6107 ret = i40e_alloc_q_vectors(vsi);
6108 if (ret) {
6109 dev_info(&pf->pdev->dev,
6110 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
6111 vsi->num_q_vectors, vsi->seid, ret);
6112 vsi->num_q_vectors = 0;
6113 goto vector_setup_out;
6114 }
6115
6116 if (vsi->num_q_vectors)
6117 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
6118 vsi->num_q_vectors, vsi->idx);
6119 if (vsi->base_vector < 0) {
6120 dev_info(&pf->pdev->dev,
6121 "failed to get q tracking for VSI %d, err=%d\n",
6122 vsi->seid, vsi->base_vector);
6123 i40e_vsi_free_q_vectors(vsi);
6124 ret = -ENOENT;
6125 goto vector_setup_out;
6126 }
6127
6128vector_setup_out:
6129 return ret;
6130}
6131
6132/**
6133 * i40e_vsi_setup - Set up a VSI by a given type
6134 * @pf: board private structure
6135 * @type: VSI type
6136 * @uplink_seid: the switch element to link to
6137 * @param1: usage depends upon VSI type. For VF types, indicates VF id
6138 *
6139 * This allocates the sw VSI structure and its queue resources, then adds a VSI
6140 * to the identified VEB.
6141 *
6142 * Returns pointer to the successfully allocated and configured VSI sw struct on
6143 * success, otherwise returns NULL on failure.
6144 **/
6145struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
6146 u16 uplink_seid, u32 param1)
6147{
6148 struct i40e_vsi *vsi = NULL;
6149 struct i40e_veb *veb = NULL;
6150 int ret, i;
6151 int v_idx;
6152
6153 /* The requested uplink_seid must be either
6154 * - the PF's port seid
6155 * no VEB is needed because this is the PF
6156 * or this is a Flow Director special case VSI
6157 * - seid of an existing VEB
6158 * - seid of a VSI that owns an existing VEB
6159 * - seid of a VSI that doesn't own a VEB
6160 * a new VEB is created and the VSI becomes the owner
6161 * - seid of the PF VSI, which is what creates the first VEB
6162 * this is a special case of the previous
6163 *
6164 * Find which uplink_seid we were given and create a new VEB if needed
6165 */
6166 for (i = 0; i < I40E_MAX_VEB; i++) {
6167 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
6168 veb = pf->veb[i];
6169 break;
6170 }
6171 }
6172
6173 if (!veb && uplink_seid != pf->mac_seid) {
6174
6175 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6176 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
6177 vsi = pf->vsi[i];
6178 break;
6179 }
6180 }
6181 if (!vsi) {
6182 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
6183 uplink_seid);
6184 return NULL;
6185 }
6186
6187 if (vsi->uplink_seid == pf->mac_seid)
6188 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
6189 vsi->tc_config.enabled_tc);
6190 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
6191 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
6192 vsi->tc_config.enabled_tc);
6193
6194 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
6195 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
6196 veb = pf->veb[i];
6197 }
6198 if (!veb) {
6199 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
6200 return NULL;
6201 }
6202
6203 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
6204 uplink_seid = veb->seid;
6205 }
6206
6207 /* get vsi sw struct */
6208 v_idx = i40e_vsi_mem_alloc(pf, type);
6209 if (v_idx < 0)
6210 goto err_alloc;
6211 vsi = pf->vsi[v_idx];
6212 vsi->type = type;
6213 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
6214
6215 if (type == I40E_VSI_MAIN)
6216 pf->lan_vsi = v_idx;
6217 else if (type == I40E_VSI_SRIOV)
6218 vsi->vf_id = param1;
6219 /* assign it some queues */
6220 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
6221 if (ret < 0) {
6222 dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
6223 vsi->seid, ret);
6224 goto err_vsi;
6225 }
6226 vsi->base_queue = ret;
6227
6228 /* get a VSI from the hardware */
6229 vsi->uplink_seid = uplink_seid;
6230 ret = i40e_add_vsi(vsi);
6231 if (ret)
6232 goto err_vsi;
6233
6234 switch (vsi->type) {
6235 /* setup the netdev if needed */
6236 case I40E_VSI_MAIN:
6237 case I40E_VSI_VMDQ2:
6238 ret = i40e_config_netdev(vsi);
6239 if (ret)
6240 goto err_netdev;
6241 ret = register_netdev(vsi->netdev);
6242 if (ret)
6243 goto err_netdev;
6244 vsi->netdev_registered = true;
6245 netif_carrier_off(vsi->netdev);
6246 /* fall through */
6247
6248 case I40E_VSI_FDIR:
6249 /* set up vectors and rings if needed */
6250 ret = i40e_vsi_setup_vectors(vsi);
6251 if (ret)
6252 goto err_msix;
6253
6254 ret = i40e_alloc_rings(vsi);
6255 if (ret)
6256 goto err_rings;
6257
6258 /* map all of the rings to the q_vectors */
6259 i40e_vsi_map_rings_to_vectors(vsi);
6260
6261 i40e_vsi_reset_stats(vsi);
6262 break;
6263
6264 default:
6265 /* no netdev or rings for the other VSI types */
6266 break;
6267 }
6268
6269 return vsi;
6270
6271err_rings:
6272 i40e_vsi_free_q_vectors(vsi);
6273err_msix:
6274 if (vsi->netdev_registered) {
6275 vsi->netdev_registered = false;
6276 unregister_netdev(vsi->netdev);
6277 free_netdev(vsi->netdev);
6278 vsi->netdev = NULL;
6279 }
6280err_netdev:
6281 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
6282err_vsi:
6283 i40e_vsi_clear(vsi);
6284err_alloc:
6285 return NULL;
6286}
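/* Editor's note: a hypothetical caller, to illustrate the contract
 * described in the comment block at the top of i40e_vsi_setup(): a
 * VMDq VSI parented to the LAN VSI would be created with
 *
 *   struct i40e_vsi *vsi;
 *
 *   vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2,
 *                        pf->vsi[pf->lan_vsi]->seid, 0);
 *   if (!vsi)
 *           return -ENOMEM;   // nothing to unwind, setup cleaned up
 *
 * Handing in a VSI seid (rather than a VEB seid) makes that VSI a VEB
 * owner on demand; param1 is only meaningful for I40E_VSI_SRIOV.
 */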
6287
6288/**
6289 * i40e_veb_get_bw_info - Query VEB BW information
6290 * @veb: the veb to query
6291 *
6292 * Query the Tx scheduler BW configuration data for given VEB
6293 **/
6294static int i40e_veb_get_bw_info(struct i40e_veb *veb)
6295{
6296 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
6297 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
6298 struct i40e_pf *pf = veb->pf;
6299 struct i40e_hw *hw = &pf->hw;
6300 u32 tc_bw_max;
6301 int ret = 0;
6302 int i;
6303
6304 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
6305 &bw_data, NULL);
6306 if (ret) {
6307 dev_info(&pf->pdev->dev,
6308 "query veb bw config failed, aq_err=%d\n",
6309 hw->aq.asq_last_status);
6310 goto out;
6311 }
6312
6313 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
6314 &ets_data, NULL);
6315 if (ret) {
6316 dev_info(&pf->pdev->dev,
6317 "query veb bw ets config failed, aq_err=%d\n",
6318 hw->aq.asq_last_status);
6319 goto out;
6320 }
6321
6322 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
6323 veb->bw_max_quanta = ets_data.tc_bw_max;
6324 veb->is_abs_credits = bw_data.absolute_credits_enable;
6325 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
6326 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
6327 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6328 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
6329 veb->bw_tc_limit_credits[i] =
6330 le16_to_cpu(bw_data.tc_bw_limits[i]);
6331 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
6332 }
6333
6334out:
6335 return ret;
6336}
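/* Editor's note: tc_bw_max above packs eight 3-bit max-quanta fields,
 * one nibble per traffic class, into a u32 assembled from two
 * little-endian u16s. For example, tc_bw_max = 0x00000321 unpacks to
 * bw_tc_max_quanta[] = {1, 2, 3, 0, 0, 0, 0, 0}.
 */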
6337
6338/**
6339 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
6340 * @pf: board private structure
6341 *
6342 * On error: returns error code (negative)
6343 * On success: returns veb index in PF (positive)
6344 **/
6345static int i40e_veb_mem_alloc(struct i40e_pf *pf)
6346{
6347 int ret = -ENOENT;
6348 struct i40e_veb *veb;
6349 int i;
6350
6351 /* Need to protect the allocation of switch elements at the PF level */
6352 mutex_lock(&pf->switch_mutex);
6353
6354 /* VEB list may be fragmented if VEB creation/destruction has
6355 * been happening. We can afford to do a quick scan to look
6356 * for any free slots in the list.
6357 *
6358 * find next empty veb slot, looping back around if necessary
6359 */
6360 i = 0;
6361 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
6362 i++;
6363 if (i >= I40E_MAX_VEB) {
6364 ret = -ENOMEM;
6365 goto err_alloc_veb; /* out of VEB slots! */
6366 }
6367
6368 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
6369 if (!veb) {
6370 ret = -ENOMEM;
6371 goto err_alloc_veb;
6372 }
6373 veb->pf = pf;
6374 veb->idx = i;
6375 veb->enabled_tc = 1;
6376
6377 pf->veb[i] = veb;
6378 ret = i;
6379err_alloc_veb:
6380 mutex_unlock(&pf->switch_mutex);
6381 return ret;
6382}
6383
6384/**
6385 * i40e_switch_branch_release - Delete a branch of the switch tree
6386 * @branch: where to start deleting
6387 *
6388 * This uses recursion to find the tips of the branch to be
6389 * removed, deleting until we get back to and can delete this VEB.
6390 **/
6391static void i40e_switch_branch_release(struct i40e_veb *branch)
6392{
6393 struct i40e_pf *pf = branch->pf;
6394 u16 branch_seid = branch->seid;
6395 u16 veb_idx = branch->idx;
6396 int i;
6397
6398 /* release any VEBs on this VEB - RECURSION */
6399 for (i = 0; i < I40E_MAX_VEB; i++) {
6400 if (!pf->veb[i])
6401 continue;
6402 if (pf->veb[i]->uplink_seid == branch->seid)
6403 i40e_switch_branch_release(pf->veb[i]);
6404 }
6405
6406 /* Release the VSIs on this VEB, but not the owner VSI.
6407 *
6408 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
6409 * the VEB itself, so don't use (*branch) after this loop.
6410 */
6411 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6412 if (!pf->vsi[i])
6413 continue;
6414 if (pf->vsi[i]->uplink_seid == branch_seid &&
6415 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
6416 i40e_vsi_release(pf->vsi[i]);
6417 }
6418 }
6419
6420 /* There's one corner case where the VEB might not have been
6421 * removed, so double check it here and remove it if needed.
6422 * This case happens if the veb was created from the debugfs
6423 * commands and no VSIs were added to it.
6424 */
6425 if (pf->veb[veb_idx])
6426 i40e_veb_release(pf->veb[veb_idx]);
6427}
6428
6429/**
6430 * i40e_veb_clear - remove veb struct
6431 * @veb: the veb to remove
6432 **/
6433static void i40e_veb_clear(struct i40e_veb *veb)
6434{
6435 if (!veb)
6436 return;
6437
6438 if (veb->pf) {
6439 struct i40e_pf *pf = veb->pf;
6440
6441 mutex_lock(&pf->switch_mutex);
6442 if (pf->veb[veb->idx] == veb)
6443 pf->veb[veb->idx] = NULL;
6444 mutex_unlock(&pf->switch_mutex);
6445 }
6446
6447 kfree(veb);
6448}
6449
6450/**
6451 * i40e_veb_release - Delete a VEB and free its resources
6452 * @veb: the VEB being removed
6453 **/
6454void i40e_veb_release(struct i40e_veb *veb)
6455{
6456 struct i40e_vsi *vsi = NULL;
6457 struct i40e_pf *pf;
6458 int i, n = 0;
6459
6460 pf = veb->pf;
6461
6462 /* find the remaining VSI and check for extras */
6463 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6464 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
6465 n++;
6466 vsi = pf->vsi[i];
6467 }
6468 }
6469 if (n != 1) {
6470 dev_info(&pf->pdev->dev,
6471 "can't remove VEB %d with %d VSIs left\n",
6472 veb->seid, n);
6473 return;
6474 }
6475
6476 /* move the remaining VSI to uplink veb */
6477 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
6478 if (veb->uplink_seid) {
6479 vsi->uplink_seid = veb->uplink_seid;
6480 if (veb->uplink_seid == pf->mac_seid)
6481 vsi->veb_idx = I40E_NO_VEB;
6482 else
6483 vsi->veb_idx = veb->veb_idx;
6484 } else {
6485 /* floating VEB */
6486 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6487 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
6488 }
6489
6490 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
6491 i40e_veb_clear(veb);
6492
6493 return;
6494}
6495
6496/**
6497 * i40e_add_veb - create the VEB in the switch
6498 * @veb: the VEB to be instantiated
6499 * @vsi: the controlling VSI
6500 **/
6501static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
6502{
6503 bool is_default = (vsi->idx == vsi->back->lan_vsi);
6504 int ret;
6505
6506 /* get a VEB from the hardware */
6507 ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
6508 veb->enabled_tc, is_default, &veb->seid, NULL);
6509 if (ret) {
6510 dev_info(&veb->pf->pdev->dev,
6511 "couldn't add VEB, err %d, aq_err %d\n",
6512 ret, veb->pf->hw.aq.asq_last_status);
6513 return -EPERM;
6514 }
6515
6516 /* get statistics counter */
6517 ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
6518 &veb->stats_idx, NULL, NULL, NULL);
6519 if (ret) {
6520 dev_info(&veb->pf->pdev->dev,
6521 "couldn't get VEB statistics idx, err %d, aq_err %d\n",
6522 ret, veb->pf->hw.aq.asq_last_status);
6523 return -EPERM;
6524 }
6525 ret = i40e_veb_get_bw_info(veb);
6526 if (ret) {
6527 dev_info(&veb->pf->pdev->dev,
6528 "couldn't get VEB bw info, err %d, aq_err %d\n",
6529 ret, veb->pf->hw.aq.asq_last_status);
6530 i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
6531 return -ENOENT;
6532 }
6533
6534 vsi->uplink_seid = veb->seid;
6535 vsi->veb_idx = veb->idx;
6536 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
6537
6538 return 0;
6539}
6540
6541/**
6542 * i40e_veb_setup - Set up a VEB
6543 * @pf: board private structure
6544 * @flags: VEB setup flags
6545 * @uplink_seid: the switch element to link to
6546 * @vsi_seid: the initial VSI seid
6547 * @enabled_tc: Enabled TC bit-map
6548 *
6549 * This allocates the sw VEB structure and links it into the switch
6550 * It is possible and legal for this to be a duplicate of an already
6551 * existing VEB. It is also possible for both uplink and vsi seids
6552 * to be zero, in order to create a floating VEB.
6553 *
6554 * Returns pointer to the successfully allocated VEB sw struct on
6555 * success, otherwise returns NULL on failure.
6556 **/
6557struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
6558 u16 uplink_seid, u16 vsi_seid,
6559 u8 enabled_tc)
6560{
6561 struct i40e_veb *veb, *uplink_veb = NULL;
6562 int vsi_idx, veb_idx;
6563 int ret;
6564
6565 /* if one seid is 0, the other must be 0 to create a floating relay */
6566 if ((uplink_seid == 0 || vsi_seid == 0) &&
6567 (uplink_seid + vsi_seid != 0)) {
6568 dev_info(&pf->pdev->dev,
6569 "one, not both seid's are 0: uplink=%d vsi=%d\n",
6570 uplink_seid, vsi_seid);
6571 return NULL;
6572 }
6573
6574 /* make sure there is such a vsi and uplink */
6575 for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
6576 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
6577 break;
6578 if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
6579 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
6580 vsi_seid);
6581 return NULL;
6582 }
6583
6584 if (uplink_seid && uplink_seid != pf->mac_seid) {
6585 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6586 if (pf->veb[veb_idx] &&
6587 pf->veb[veb_idx]->seid == uplink_seid) {
6588 uplink_veb = pf->veb[veb_idx];
6589 break;
6590 }
6591 }
6592 if (!uplink_veb) {
6593 dev_info(&pf->pdev->dev,
6594 "uplink seid %d not found\n", uplink_seid);
6595 return NULL;
6596 }
6597 }
6598
6599 /* get veb sw struct */
6600 veb_idx = i40e_veb_mem_alloc(pf);
6601 if (veb_idx < 0)
6602 goto err_alloc;
6603 veb = pf->veb[veb_idx];
6604 veb->flags = flags;
6605 veb->uplink_seid = uplink_seid;
6606 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
6607 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
6608
6609 /* create the VEB in the switch */
6610 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
6611 if (ret)
6612 goto err_veb;
6613
6614 return veb;
6615
6616err_veb:
6617 i40e_veb_clear(veb);
6618err_alloc:
6619 return NULL;
6620}
6621
6622/**
6623 * i40e_setup_pf_switch_element - set pf vars based on switch type
6624 * @pf: board private structure
6625 * @ele: element we are building info from
6626 * @num_reported: total number of elements
6627 * @printconfig: should we print the contents
6628 *
6629 * helper function to assist in extracting a few useful SEID values.
6630 **/
6631static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
6632 struct i40e_aqc_switch_config_element_resp *ele,
6633 u16 num_reported, bool printconfig)
6634{
6635 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
6636 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
6637 u8 element_type = ele->element_type;
6638 u16 seid = le16_to_cpu(ele->seid);
6639
6640 if (printconfig)
6641 dev_info(&pf->pdev->dev,
6642 "type=%d seid=%d uplink=%d downlink=%d\n",
6643 element_type, seid, uplink_seid, downlink_seid);
6644
6645 switch (element_type) {
6646 case I40E_SWITCH_ELEMENT_TYPE_MAC:
6647 pf->mac_seid = seid;
6648 break;
6649 case I40E_SWITCH_ELEMENT_TYPE_VEB:
6650 /* Main VEB? */
6651 if (uplink_seid != pf->mac_seid)
6652 break;
6653 if (pf->lan_veb == I40E_NO_VEB) {
6654 int v;
6655
6656 /* find existing or else empty VEB */
6657 for (v = 0; v < I40E_MAX_VEB; v++) {
6658 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
6659 pf->lan_veb = v;
6660 break;
6661 }
6662 }
6663 if (pf->lan_veb == I40E_NO_VEB) {
6664 v = i40e_veb_mem_alloc(pf);
6665 if (v < 0)
6666 break;
6667 pf->lan_veb = v;
6668 }
6669 }
6670
6671 pf->veb[pf->lan_veb]->seid = seid;
6672 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
6673 pf->veb[pf->lan_veb]->pf = pf;
6674 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
6675 break;
6676 case I40E_SWITCH_ELEMENT_TYPE_VSI:
6677 if (num_reported != 1)
6678 break;
6679 /* This is immediately after a reset so we can assume this is
6680 * the PF's VSI
6681 */
6682 pf->mac_seid = uplink_seid;
6683 pf->pf_seid = downlink_seid;
6684 pf->main_vsi_seid = seid;
6685 if (printconfig)
6686 dev_info(&pf->pdev->dev,
6687 "pf_seid=%d main_vsi_seid=%d\n",
6688 pf->pf_seid, pf->main_vsi_seid);
6689 break;
6690 case I40E_SWITCH_ELEMENT_TYPE_PF:
6691 case I40E_SWITCH_ELEMENT_TYPE_VF:
6692 case I40E_SWITCH_ELEMENT_TYPE_EMP:
6693 case I40E_SWITCH_ELEMENT_TYPE_BMC:
6694 case I40E_SWITCH_ELEMENT_TYPE_PE:
6695 case I40E_SWITCH_ELEMENT_TYPE_PA:
6696 /* ignore these for now */
6697 break;
6698 default:
6699 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
6700 element_type, seid);
6701 break;
6702 }
6703}
6704
6705/**
6706 * i40e_fetch_switch_configuration - Get switch config from firmware
6707 * @pf: board private structure
6708 * @printconfig: should we print the contents
6709 *
6710 * Get the current switch configuration from the device and
6711 * extract a few useful SEID values.
6712 **/
6713int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
6714{
6715 struct i40e_aqc_get_switch_config_resp *sw_config;
6716 u16 next_seid = 0;
6717 int ret = 0;
6718 u8 *aq_buf;
6719 int i;
6720
6721 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
6722 if (!aq_buf)
6723 return -ENOMEM;
6724
6725 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
6726 do {
6727 u16 num_reported, num_total;
6728
6729 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
6730 I40E_AQ_LARGE_BUF,
6731 &next_seid, NULL);
6732 if (ret) {
6733 dev_info(&pf->pdev->dev,
6734 "get switch config failed %d aq_err=%x\n",
6735 ret, pf->hw.aq.asq_last_status);
6736 kfree(aq_buf);
6737 return -ENOENT;
6738 }
6739
6740 num_reported = le16_to_cpu(sw_config->header.num_reported);
6741 num_total = le16_to_cpu(sw_config->header.num_total);
6742
6743 if (printconfig)
6744 dev_info(&pf->pdev->dev,
6745 "header: %d reported %d total\n",
6746 num_reported, num_total);
6747
6748 if (num_reported) {
6749 int sz = sizeof(*sw_config) * num_reported;
6750
6751 kfree(pf->sw_config);
6752 pf->sw_config = kzalloc(sz, GFP_KERNEL);
6753 if (pf->sw_config)
6754 memcpy(pf->sw_config, sw_config, sz);
6755 }
6756
6757 for (i = 0; i < num_reported; i++) {
6758 struct i40e_aqc_switch_config_element_resp *ele =
6759 &sw_config->element[i];
6760
6761 i40e_setup_pf_switch_element(pf, ele, num_reported,
6762 printconfig);
6763 }
6764 } while (next_seid != 0);
6765
6766 kfree(aq_buf);
6767 return ret;
6768}
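/* Editor's note: the AdminQ pages its answer; each call fills in up to
 * num_reported elements and advances next_seid, and next_seid == 0
 * marks the final chunk. As written, the pf->sw_config snapshot kept
 * in the loop retains only the most recent chunk of a multi-chunk
 * response.
 */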
6769
6770/**
6771 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
6772 * @pf: board private structure
6773 *
6774 * Returns 0 on success, negative value on failure
6775 **/
6776static int i40e_setup_pf_switch(struct i40e_pf *pf)
6777{
6778 int ret;
6779
6780 /* find out what's out there already */
6781 ret = i40e_fetch_switch_configuration(pf, false);
6782 if (ret) {
6783 dev_info(&pf->pdev->dev,
6784 "couldn't fetch switch config, err %d, aq_err %d\n",
6785 ret, pf->hw.aq.asq_last_status);
6786 return ret;
6787 }
6788 i40e_pf_reset_stats(pf);
6789
6790 /* fdir VSI must happen first to be sure it gets queue 0, but only
6791 * if there is enough room for the fdir VSI
6792 */
6793 if (pf->num_lan_qps > 1)
6794 i40e_fdir_setup(pf);
6795
6796 /* first time setup */
6797 if (pf->lan_vsi == I40E_NO_VSI) {
6798 struct i40e_vsi *vsi = NULL;
6799 u16 uplink_seid;
6800
6801 /* Set up the PF VSI associated with the PF's main VSI
6802 * that is already in the HW switch
6803 */
6804 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
6805 uplink_seid = pf->veb[pf->lan_veb]->seid;
6806 else
6807 uplink_seid = pf->mac_seid;
6808
6809 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
6810 if (!vsi) {
6811 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
6812 i40e_fdir_teardown(pf);
6813 return -EAGAIN;
6814 }
6815 /* accommodate kcompat by copying the main VSI queue count
6816 * into the pf, since this newer code pushes the pf queue
6817 * info down a level into a VSI
6818 */
6819 pf->num_rx_queues = vsi->alloc_queue_pairs;
6820 pf->num_tx_queues = vsi->alloc_queue_pairs;
6821 } else {
6822 /* force a reset of TC and queue layout configurations */
6823 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
6824 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
6825 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
6826 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
6827 }
6828 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
6829
6830 /* Setup static PF queue filter control settings */
6831 ret = i40e_setup_pf_filter_control(pf);
6832 if (ret) {
6833 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
6834 ret);
6835 /* Failure here should not stop continuing other steps */
6836 }
6837
6838 /* enable RSS in the HW, even for only one queue, as the stack can use
6839 * the hash
6840 */
6841 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
6842 i40e_config_rss(pf);
6843
6844 /* fill in link information and enable LSE reporting */
6845 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
6846 i40e_link_event(pf);
6847
7048 /* Initialize user-specific link properties */
6849 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
6850 I40E_AQ_AN_COMPLETED) ? true : false);
6851 pf->hw.fc.requested_mode = I40E_FC_DEFAULT;
6852 if (pf->hw.phy.link_info.an_info &
6853 (I40E_AQ_LINK_PAUSE_TX | I40E_AQ_LINK_PAUSE_RX))
6854 pf->hw.fc.current_mode = I40E_FC_FULL;
6855 else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
6856 pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
6857 else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
6858 pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
6859 else
6860 pf->hw.fc.current_mode = I40E_FC_DEFAULT;
6861
6862 return ret;
6863}
6864
6865/**
6866 * i40e_set_rss_size - helper to set rss_size
6867 * @pf: board private structure
6868 * @queues_left: how many queues
6869 */
6870static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
6871{
6872 int num_tc0;
6873
6874 num_tc0 = min_t(int, queues_left, pf->rss_size_max);
6875 num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id()));
6876 num_tc0 = rounddown_pow_of_two(num_tc0);
6877
6878 return num_tc0;
6879}
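/* Editor's note: a worked example of the helper above, assuming
 * queues_left = 10, rss_size_max = 64 and 6 CPUs on the local node:
 *
 *   min(10, 64) = 10, min(10, 6) = 6, rounddown_pow_of_two(6) = 4
 *
 * so four queue pairs would be devoted to RSS in TC0.
 */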
6880
6881/**
6882 * i40e_determine_queue_usage - Work out queue distribution
6883 * @pf: board private structure
6884 **/
6885static void i40e_determine_queue_usage(struct i40e_pf *pf)
6886{
6887 int accum_tc_size;
6888 int queues_left;
6889
6890 pf->num_lan_qps = 0;
6891 pf->num_tc_qps = rounddown_pow_of_two(pf->num_tc_qps);
6892 accum_tc_size = (I40E_MAX_TRAFFIC_CLASS - 1) * pf->num_tc_qps;
6893
6894 /* Find the max queues to be put into basic use. We'll always be
6895 * using TC0, whether or not DCB is running, and TC0 will get the
6896 * big RSS set.
6897 */
6898 queues_left = pf->hw.func_caps.num_tx_qp;
6899
6900 if (!((pf->flags & I40E_FLAG_MSIX_ENABLED) &&
6901 (pf->flags & I40E_FLAG_MQ_ENABLED)) ||
6902 !(pf->flags & (I40E_FLAG_RSS_ENABLED |
6903 I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) ||
6904 (queues_left == 1)) {
6905
6906 /* one qp for PF, no queues for anything else */
6907 queues_left = 0;
6908 pf->rss_size = pf->num_lan_qps = 1;
6909
6910 /* make sure all the fancies are disabled */
6911 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
6912 I40E_FLAG_MQ_ENABLED |
6913 I40E_FLAG_FDIR_ENABLED |
6914 I40E_FLAG_FDIR_ATR_ENABLED |
6915 I40E_FLAG_DCB_ENABLED |
6916 I40E_FLAG_SRIOV_ENABLED |
6917 I40E_FLAG_VMDQ_ENABLED);
6918
6919 } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
6920 !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
6921 !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
6922
6923 pf->rss_size = i40e_set_rss_size(pf, queues_left);
6924
6925 queues_left -= pf->rss_size;
6926 pf->num_lan_qps = pf->rss_size;
6927
6928 } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
6929 !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
6930 (pf->flags & I40E_FLAG_DCB_ENABLED)) {
6931
6932 /* save num_tc_qps queues for TCs 1 thru 7 and the rest
6933 * are set up for RSS in TC0
6934 */
6935 queues_left -= accum_tc_size;
6936
6937 pf->rss_size = i40e_set_rss_size(pf, queues_left);
6938
6939 queues_left -= pf->rss_size;
6940 if (queues_left < 0) {
6941 dev_info(&pf->pdev->dev, "not enough queues for DCB\n");
6942 return;
6943 }
6944
6945 pf->num_lan_qps = pf->rss_size + accum_tc_size;
6946
6947 } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
6948 (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
6949 !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
6950
6951 queues_left -= 1; /* save 1 queue for FD */
6952
6953 pf->rss_size = i40e_set_rss_size(pf, queues_left);
6954
6955 queues_left -= pf->rss_size;
6956 if (queues_left < 0) {
6957 dev_info(&pf->pdev->dev, "not enough queues for Flow Director\n");
6958 return;
6959 }
6960
6961 pf->num_lan_qps = pf->rss_size;
6962
6963 } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
6964 (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
6965 (pf->flags & I40E_FLAG_DCB_ENABLED)) {
6966
6967 /* save 1 queue for TCs 1 thru 7,
6968 * 1 queue for flow director,
6969 * and the rest are set up for RSS in TC0
6970 */
6971 queues_left -= 1;
6972 queues_left -= accum_tc_size;
6973
6974 pf->rss_size = i40e_set_rss_size(pf, queues_left);
6975 queues_left -= pf->rss_size;
6976 if (queues_left < 0) {
6977 dev_info(&pf->pdev->dev, "not enough queues for DCB and Flow Director\n");
6978 return;
6979 }
6980
6981 pf->num_lan_qps = pf->rss_size + accum_tc_size;
6982
6983 } else {
6984 dev_info(&pf->pdev->dev,
6985 "Invalid configuration, flags=0x%08llx\n", pf->flags);
6986 return;
6987 }
6988
6989 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
6990 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
6991 pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left /
6992 pf->num_vf_qps));
6993 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
6994 }
6995
6996 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
6997 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
6998 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
6999 (queues_left / pf->num_vmdq_qps));
7000 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
7001 }
7002
7003 return;
7004}
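/* Editor's note: a worked example of the RSS + DCB branch above,
 * assuming num_tx_qp = 64, num_tc_qps = 8 and an 8-CPU node:
 *
 *   accum_tc_size = (8 - 1) * 8 = 56 queues held back for TCs 1-7
 *   queues_left   = 64 - 56   = 8, so rss_size = 8
 *   num_lan_qps   = 8 + 56    = 64, leaving no queues for VFs or VMDq
 */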
7005
7006/**
7007 * i40e_setup_pf_filter_control - Setup PF static filter control
7008 * @pf: PF to be setup
7009 *
7010 * i40e_setup_pf_filter_control sets up a pf's initial filter control
7011 * settings. If PE/FCoE are enabled then it will also set the per PF
7012 * based filter sizes required for them. It also enables Flow director,
7013 * ethertype and macvlan type filter settings for the pf.
7014 *
7015 * Returns 0 on success, negative on failure
7016 **/
7017static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
7018{
7019 struct i40e_filter_control_settings *settings = &pf->filter_settings;
7020
7021 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
7022
7023 /* Flow Director is enabled */
7024 if (pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED))
7025 settings->enable_fdir = true;
7026
7027 /* Ethtype and MACVLAN filters enabled for PF */
7028 settings->enable_ethtype = true;
7029 settings->enable_macvlan = true;
7030
7031 if (i40e_set_filter_control(&pf->hw, settings))
7032 return -ENOENT;
7033
7034 return 0;
7035}
7036
7037/**
7038 * i40e_probe - Device initialization routine
7039 * @pdev: PCI device information struct
7040 * @ent: entry in i40e_pci_tbl
7041 *
7042 * i40e_probe initializes a pf identified by a pci_dev structure.
7043 * The OS initialization, configuring of the pf private structure,
7044 * and a hardware reset occur.
7045 *
7046 * Returns 0 on success, negative on failure
7047 **/
7048static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7049{
7050 struct i40e_driver_version dv;
7051 struct i40e_pf *pf;
7052 struct i40e_hw *hw;
7053 int err = 0;
7054 u32 len;
7055
7056 err = pci_enable_device_mem(pdev);
7057 if (err)
7058 return err;
7059
7060 /* set up for high or low dma */
7061 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
7062 /* coherent mask for the same size will always succeed if
7063 * dma_set_mask does
7064 */
7065 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
7066 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
7067 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
7068 } else {
7069 err = -EIO;
7070 dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
7071 goto err_dma;
7072 }
7073
7074 /* set up pci connections */
7075 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
7076 IORESOURCE_MEM), i40e_driver_name);
7077 if (err) {
7078 dev_info(&pdev->dev,
7079 "pci_request_selected_regions failed %d\n", err);
7080 goto err_pci_reg;
7081 }
7082
7083 pci_enable_pcie_error_reporting(pdev);
7084 pci_set_master(pdev);
7085
7086 /* Now that we have a PCI connection, we need to do the
7087 * low level device setup. This is primarily setting up
7088 * the Admin Queue structures and then querying for the
7089 * device's current profile information.
7090 */
7091 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
7092 if (!pf) {
7093 err = -ENOMEM;
7094 goto err_pf_alloc;
7095 }
7096 pf->next_vsi = 0;
7097 pf->pdev = pdev;
7098 set_bit(__I40E_DOWN, &pf->state);
7099
7100 hw = &pf->hw;
7101 hw->back = pf;
7102 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
7103 pci_resource_len(pdev, 0));
7104 if (!hw->hw_addr) {
7105 err = -EIO;
7106 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
7107 (unsigned int)pci_resource_start(pdev, 0),
7108 (unsigned int)pci_resource_len(pdev, 0), err);
7109 goto err_ioremap;
7110 }
7111 hw->vendor_id = pdev->vendor;
7112 hw->device_id = pdev->device;
7113 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
7114 hw->subsystem_vendor_id = pdev->subsystem_vendor;
7115 hw->subsystem_device_id = pdev->subsystem_device;
7116 hw->bus.device = PCI_SLOT(pdev->devfn);
7117 hw->bus.func = PCI_FUNC(pdev->devfn);
7118
7119 /* Reset here to make sure all is clean and to define PF 'n' */
7120 err = i40e_pf_reset(hw);
7121 if (err) {
7122 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
7123 goto err_pf_reset;
7124 }
7125 pf->pfr_count++;
7126
7127 hw->aq.num_arq_entries = I40E_AQ_LEN;
7128 hw->aq.num_asq_entries = I40E_AQ_LEN;
7129 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
7130 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
7131 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
7132 snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1,
7133 "%s-pf%d:misc",
7134 dev_driver_string(&pf->pdev->dev), pf->hw.pf_id);
7135
7136 err = i40e_init_shared_code(hw);
7137 if (err) {
7138 dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
7139 goto err_pf_reset;
7140 }
7141
7142 err = i40e_init_adminq(hw);
7143 dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
7144 if (err) {
7145 dev_info(&pdev->dev,
7146 "init_adminq failed: %d expecting API %02x.%02x\n",
7147 err,
7148 I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
7149 goto err_pf_reset;
7150 }
7151
7152 err = i40e_get_capabilities(pf);
7153 if (err)
7154 goto err_adminq_setup;
7155
7156 err = i40e_sw_init(pf);
7157 if (err) {
7158 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
7159 goto err_sw_init;
7160 }
7161
7162 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
7163 hw->func_caps.num_rx_qp,
7164 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
7165 if (err) {
7166 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
7167 goto err_init_lan_hmc;
7168 }
7169
7170 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
7171 if (err) {
7172 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
7173 err = -ENOENT;
7174 goto err_configure_lan_hmc;
7175 }
7176
7177 i40e_get_mac_addr(hw, hw->mac.addr);
7178 if (i40e_validate_mac_addr(hw->mac.addr)) {
7179 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
7180 err = -EIO;
7181 goto err_mac_addr;
7182 }
7183 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
7184 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
7185
7186 pci_set_drvdata(pdev, pf);
7187 pci_save_state(pdev);
7188
7189 /* set up periodic task facility */
7190 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
7191 pf->service_timer_period = HZ;
7192
7193 INIT_WORK(&pf->service_task, i40e_service_task);
7194 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
7195 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
7196 pf->link_check_timeout = jiffies;
7197
7198 /* set up the main switch operations */
7199 i40e_determine_queue_usage(pf);
7200 i40e_init_interrupt_scheme(pf);
7201
7202 /* Set up the *vsi struct based on the number of VSIs in the HW,
7203 * and set up our local tracking of the MAIN PF vsi.
7204 */
7205 len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
7206 pf->vsi = kzalloc(len, GFP_KERNEL);
7207 if (!pf->vsi) {
7208 err = -ENOMEM;
7209 goto err_switch_setup;
7210 }
7211
7212 err = i40e_setup_pf_switch(pf);
7213 if (err) {
7214 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
7215 goto err_vsis;
7216 }
7217
7218 /* The main driver is (mostly) up and happy. We need to set this state
7219 * before setting up the misc vector or we get a race and the vector
7220 * ends up disabled forever.
7221 */
7222 clear_bit(__I40E_DOWN, &pf->state);
7223
7224 /* In case of MSIX we are going to setup the misc vector right here
7225 * to handle admin queue events etc. In case of legacy and MSI
7226 * the misc functionality and queue processing is combined in
7227 * the same vector and that gets setup at open.
7228 */
7229 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
7230 err = i40e_setup_misc_vector(pf);
7231 if (err) {
7232 dev_info(&pdev->dev,
7233 "setup of misc vector failed: %d\n", err);
7234 goto err_vsis;
7235 }
7236 }
7237
7238 /* prep for VF support */
7239 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
7240 (pf->flags & I40E_FLAG_MSIX_ENABLED)) {
7241 u32 val;
7242
7243 /* disable link interrupts for VFs */
7244 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
7245 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
7246 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
7247 i40e_flush(hw);
7248 }
7249
7250 i40e_dbg_pf_init(pf);
7251
7252 /* tell the firmware that we're starting */
7253 dv.major_version = DRV_VERSION_MAJOR;
7254 dv.minor_version = DRV_VERSION_MINOR;
7255 dv.build_version = DRV_VERSION_BUILD;
7256 dv.subbuild_version = 0;
7257 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
7258
7259 /* since everything's happy, start the service_task timer */
7260 mod_timer(&pf->service_timer,
7261 round_jiffies(jiffies + pf->service_timer_period));
7262
7263 return 0;
7264
7265 /* Unwind what we've done if something failed in the setup */
7266err_vsis:
7267 set_bit(__I40E_DOWN, &pf->state);
7268err_switch_setup:
7269 i40e_clear_interrupt_scheme(pf);
7270 kfree(pf->vsi);
7271 del_timer_sync(&pf->service_timer);
7272err_mac_addr:
7273err_configure_lan_hmc:
7274 (void)i40e_shutdown_lan_hmc(hw);
7275err_init_lan_hmc:
7276 kfree(pf->qp_pile);
7277 kfree(pf->irq_pile);
7278err_sw_init:
7279err_adminq_setup:
7280 (void)i40e_shutdown_adminq(hw);
7281err_pf_reset:
7282 iounmap(hw->hw_addr);
7283err_ioremap:
7284 kfree(pf);
7285err_pf_alloc:
7286 pci_disable_pcie_error_reporting(pdev);
7287 pci_release_selected_regions(pdev,
7288 pci_select_bars(pdev, IORESOURCE_MEM));
7289err_pci_reg:
7290err_dma:
7291 pci_disable_device(pdev);
7292 return err;
7293}
7294
7295/**
7296 * i40e_remove - Device removal routine
7297 * @pdev: PCI device information struct
7298 *
7299 * i40e_remove is called by the PCI subsystem to alert the driver
7300 * that it should release a PCI device. This could be caused by a
7301 * Hot-Plug event, or because the driver is going to be removed from
7302 * memory.
7303 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	i40e_status ret_code;
	u32 reg;
	int i;

	i40e_dbg_pf_exit(pf);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	/* no more scheduling of any task */
	set_bit(__I40E_DOWN, &pf->state);
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shut down the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	i40e_stop_misc_vector(pf);
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		synchronize_irq(pf->msix_entries[0].vector);
		free_irq(pf->msix_entries[0].vector, pf);
	}

	/* shut down and destroy the HMC */
	ret_code = i40e_shutdown_lan_hmc(&pf->hw);
	if (ret_code)
		dev_warn(&pdev->dev,
			 "Failed to destroy the HMC resources: %d\n", ret_code);

	/* shut down the adminq */
	i40e_aq_queue_shutdown(&pf->hw, true);
	ret_code = i40e_shutdown_adminq(&pf->hw);
	if (ret_code)
		dev_warn(&pdev->dev,
			 "Failed to destroy the Admin Queue resources: %d\n",
			 ret_code);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->irq_pile);
	kfree(pf->sw_config);
	kfree(pf->vsi);

	/* force a PF reset to clean anything leftover */
	reg = rd32(&pf->hw, I40E_PFGEN_CTRL);
	wr32(&pf->hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
	i40e_flush(&pf->hw);

	iounmap(pf->hw.hw_addr);
	kfree(pf);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
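
/* For illustration: the PFGEN_CTRL read-modify-write at the end of
 * i40e_remove(), factored into a helper. rd32()/wr32() are the
 * driver's register accessors and i40e_flush() posts the write with a
 * read-back; the helper name is hypothetical, not part of the driver.
 */
static inline void i40e_request_pf_sw_reset(struct i40e_hw *hw)
{
	u32 reg = rd32(hw, I40E_PFGEN_CTRL);

	/* setting PFSWR requests a PF software reset from the hardware */
	wr32(hw, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
	i40e_flush(hw);
}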

/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 * @error: the PCI channel state reported by the core
 *
 * Called to warn that something happened and the error handling steps
 * are in progress. Allows the driver to quiesce things and be ready
 * for remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						enum pci_channel_state error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	/* shut down all operations */
	i40e_pf_quiesce_all_vsi(pf);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
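
/* Returning PCI_ERS_RESULT_NEED_RESET above asks the PCI core to reset
 * the slot; after the reset the core calls the .slot_reset handler
 * below to find out whether the device came back in a usable state.
 */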

/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find out whether the driver can work with the device now
 * that the PCI slot has been reset. If a basic connection seems good
 * (registers are readable and have sane content) then return a happy
 * little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;
	u32 reg;

	dev_info(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		/* restore the saved config space, then save it again so
		 * it remains available if another reset comes along
		 */
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		/* a non-zero reset-trigger register means a reset request
		 * is still outstanding and the device is not usable yet
		 */
		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s\n", __func__);
	i40e_handle_reset_warning(pf);
}

static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};
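
/* Recovery flow, as driven by the PCI core: .error_detected is called
 * first so the driver can quiesce the device, .slot_reset is called
 * after the slot has been reset, and .resume is called once recovery
 * has finished and normal operation may restart.
 */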

static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};
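
/* The .sriov_configure hook above is invoked by the PCI core when the
 * VF count is changed through the device's "sriov_numvfs" sysfs
 * attribute.
 */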

/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. It registers with the PCI subsystem and sets up the
 * driver's debugfs hooks.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);