Commit | Line | Data |
---|---|---|
41c445ff JB |
1 | /******************************************************************************* |
2 | * | |
3 | * Intel Ethernet Controller XL710 Family Linux Driver | |
4 | * Copyright(c) 2013 Intel Corporation. | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify it | |
7 | * under the terms and conditions of the GNU General Public License, | |
8 | * version 2, as published by the Free Software Foundation. | |
9 | * | |
10 | * This program is distributed in the hope it will be useful, but WITHOUT | |
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
13 | * more details. | |
14 | * | |
15 | * You should have received a copy of the GNU General Public License along with | |
16 | * this program; if not, write to the Free Software Foundation, Inc., | |
17 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | |
18 | * | |
19 | * The full GNU General Public License is included in this distribution in | |
20 | * the file called "COPYING". | |
21 | * | |
22 | * Contact Information: | |
23 | * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | |
24 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | |
25 | * | |
26 | ******************************************************************************/ | |
27 | ||
28 | /* Local includes */ | |
29 | #include "i40e.h" | |
30 | ||
31 | const char i40e_driver_name[] = "i40e"; | |
32 | static const char i40e_driver_string[] = | |
33 | "Intel(R) Ethernet Connection XL710 Network Driver"; | |
34 | ||
35 | #define DRV_KERN "-k" | |
36 | ||
37 | #define DRV_VERSION_MAJOR 0 | |
38 | #define DRV_VERSION_MINOR 3 | |
1de046b9 | 39 | #define DRV_VERSION_BUILD 11 |
41c445ff JB |
40 | #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ |
41 | __stringify(DRV_VERSION_MINOR) "." \ | |
42 | __stringify(DRV_VERSION_BUILD) DRV_KERN | |
43 | const char i40e_driver_version_str[] = DRV_VERSION; | |
44 | static const char i40e_copyright[] = "Copyright (c) 2013 Intel Corporation."; | |
45 | ||
46 | /* a few forward declarations */ |
47 | static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi); | |
48 | static void i40e_handle_reset_warning(struct i40e_pf *pf); | |
49 | static int i40e_add_vsi(struct i40e_vsi *vsi); | |
50 | static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi); | |
51 | static int i40e_setup_pf_switch(struct i40e_pf *pf); | |
52 | static int i40e_setup_misc_vector(struct i40e_pf *pf); | |
53 | static void i40e_determine_queue_usage(struct i40e_pf *pf); | |
54 | static int i40e_setup_pf_filter_control(struct i40e_pf *pf); | |
55 | ||
56 | /* i40e_pci_tbl - PCI Device ID Table | |
57 | * | |
58 | * Last entry must be all 0s | |
59 | * | |
60 | * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, | |
61 | * Class, Class Mask, private data (not used) } | |
62 | */ | |
63 | static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = { | |
64 | {PCI_VDEVICE(INTEL, I40E_SFP_XL710_DEVICE_ID), 0}, | |
65 | {PCI_VDEVICE(INTEL, I40E_SFP_X710_DEVICE_ID), 0}, | |
66 | {PCI_VDEVICE(INTEL, I40E_QEMU_DEVICE_ID), 0}, | |
67 | {PCI_VDEVICE(INTEL, I40E_KX_A_DEVICE_ID), 0}, | |
68 | {PCI_VDEVICE(INTEL, I40E_KX_B_DEVICE_ID), 0}, | |
69 | {PCI_VDEVICE(INTEL, I40E_KX_C_DEVICE_ID), 0}, | |
70 | {PCI_VDEVICE(INTEL, I40E_KX_D_DEVICE_ID), 0}, | |
71 | {PCI_VDEVICE(INTEL, I40E_QSFP_A_DEVICE_ID), 0}, | |
72 | {PCI_VDEVICE(INTEL, I40E_QSFP_B_DEVICE_ID), 0}, | |
73 | {PCI_VDEVICE(INTEL, I40E_QSFP_C_DEVICE_ID), 0}, | |
74 | /* required last entry */ | |
75 | {0, } | |
76 | }; | |
77 | MODULE_DEVICE_TABLE(pci, i40e_pci_tbl); | |
78 | ||
79 | #define I40E_MAX_VF_COUNT 128 | |
80 | static int debug = -1; | |
81 | module_param(debug, int, 0); | |
82 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); | |
83 | ||
84 | MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); | |
85 | MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver"); | |
86 | MODULE_LICENSE("GPL"); | |
87 | MODULE_VERSION(DRV_VERSION); | |
88 | ||
89 | /** | |
90 | * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code | |
91 | * @hw: pointer to the HW structure | |
92 | * @mem: ptr to mem struct to fill out | |
93 | * @size: size of memory requested | |
94 | * @alignment: what to align the allocation to | |
95 | **/ | |
96 | int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem, | |
97 | u64 size, u32 alignment) | |
98 | { | |
99 | struct i40e_pf *pf = (struct i40e_pf *)hw->back; | |
100 | ||
101 | mem->size = ALIGN(size, alignment); | |
102 | mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, | |
103 | &mem->pa, GFP_KERNEL); | |
93bc73b8 JB |
104 | if (!mem->va) |
105 | return -ENOMEM; | |
41c445ff | 106 | |
93bc73b8 | 107 | return 0; |
41c445ff JB |
108 | } |
109 | ||
110 | /** | |
111 | * i40e_free_dma_mem_d - OS specific memory free for shared code | |
112 | * @hw: pointer to the HW structure | |
113 | * @mem: ptr to mem struct to free | |
114 | **/ | |
115 | int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) | |
116 | { | |
117 | struct i40e_pf *pf = (struct i40e_pf *)hw->back; | |
118 | ||
119 | dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa); | |
120 | mem->va = NULL; | |
121 | mem->pa = 0; | |
122 | mem->size = 0; | |
123 | ||
124 | return 0; | |
125 | } | |
126 | ||
127 | /** | |
128 | * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code | |
129 | * @hw: pointer to the HW structure | |
130 | * @mem: ptr to mem struct to fill out | |
131 | * @size: size of memory requested | |
132 | **/ | |
133 | int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem, | |
134 | u32 size) | |
135 | { | |
136 | mem->size = size; | |
137 | mem->va = kzalloc(size, GFP_KERNEL); | |
138 | ||
93bc73b8 JB |
139 | if (!mem->va) |
140 | return -ENOMEM; | |
41c445ff | 141 | |
93bc73b8 | 142 | return 0; |
41c445ff JB |
143 | } |
144 | ||
145 | /** | |
146 | * i40e_free_virt_mem_d - OS specific memory free for shared code | |
147 | * @hw: pointer to the HW structure | |
148 | * @mem: ptr to mem struct to free | |
149 | **/ | |
150 | int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem) | |
151 | { | |
152 | /* it's ok to kfree a NULL pointer */ | |
153 | kfree(mem->va); | |
154 | mem->va = NULL; | |
155 | mem->size = 0; | |
156 | ||
157 | return 0; | |
158 | } | |
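These four helpers are the OS-specific backing the shared, hardware-independent code uses for its memory requests. A minimal sketch of how a caller might pair the DMA hooks up, assuming a 4 KiB descriptor-ring allocation; the size, alignment and the `example_` name are illustrative, not driver code:

```c
/* Illustration only: pairing the alloc/free hooks defined above.
 * The 4 KiB size/alignment and the example_ prefix are assumptions. */
static int example_ring_mem(struct i40e_hw *hw, struct i40e_dma_mem *ring_mem)
{
	int err;

	/* one 4 KiB, 4 KiB-aligned coherent DMA block for a descriptor ring */
	err = i40e_allocate_dma_mem_d(hw, ring_mem, 4096, 4096);
	if (err)
		return err;

	/* ... hardware would be pointed at ring_mem->pa here ... */

	i40e_free_dma_mem_d(hw, ring_mem);	/* release it again */
	return 0;
}
```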
159 | ||
160 | /** | |
161 | * i40e_get_lump - find a lump of free generic resource | |
162 | * @pf: board private structure | |
163 | * @pile: the pile of resource to search | |
164 | * @needed: the number of items needed | |
165 | * @id: an owner id to stick on the items assigned | |
166 | * | |
167 | * Returns the base item index of the lump, or negative for error | |
168 | * | |
169 | * The search_hint trick and lack of advanced fit-finding only work | |
170 | * because we're highly likely to have all the same size lump requests. | |
171 | * Linear search time and any fragmentation should be minimal. | |
172 | **/ | |
173 | static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, | |
174 | u16 needed, u16 id) | |
175 | { | |
176 | int ret = -ENOMEM; | |
ddf434ac | 177 | int i, j; |
41c445ff JB |
178 | |
179 | if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) { | |
180 | dev_info(&pf->pdev->dev, | |
181 | "param err: pile=%p needed=%d id=0x%04x\n", | |
182 | pile, needed, id); | |
183 | return -EINVAL; | |
184 | } | |
185 | ||
186 | /* start the linear search with an imperfect hint */ | |
187 | i = pile->search_hint; | |
ddf434ac | 188 | while (i < pile->num_entries) { |
41c445ff JB |
189 | /* skip already allocated entries */ |
190 | if (pile->list[i] & I40E_PILE_VALID_BIT) { | |
191 | i++; | |
192 | continue; | |
193 | } | |
194 | ||
195 | /* do we have enough in this lump? */ | |
196 | for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) { | |
197 | if (pile->list[i+j] & I40E_PILE_VALID_BIT) | |
198 | break; | |
199 | } | |
200 | ||
201 | if (j == needed) { | |
202 | /* there was enough, so assign it to the requestor */ | |
203 | for (j = 0; j < needed; j++) | |
204 | pile->list[i+j] = id | I40E_PILE_VALID_BIT; | |
205 | ret = i; | |
206 | pile->search_hint = i + j; | |
ddf434ac | 207 | break; |
41c445ff JB |
208 | } else { |
209 | /* not enough, so skip over it and continue looking */ | |
210 | i += j; | |
211 | } | |
212 | } | |
213 | ||
214 | return ret; | |
215 | } | |
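A hedged sketch of the calling pattern this allocator serves: reserving a contiguous block of queue pairs for a VSI and recording where it starts. The `qp_pile` and `idx` members are assumed from the wider driver and do not appear in this excerpt; teardown would hand the block back with i40e_put_lump() (defined just below).

```c
/* Sketch only: typical use of the lump helpers.  pf->qp_pile and
 * vsi->idx are assumptions based on the rest of the driver. */
static int example_reserve_queues(struct i40e_pf *pf, struct i40e_vsi *vsi,
				  u16 num_qps)
{
	int base;

	base = i40e_get_lump(pf, pf->qp_pile, num_qps, vsi->idx);
	if (base < 0)
		return -ENOSPC;		/* pile exhausted or too fragmented */

	vsi->base_queue = base;		/* first queue owned by this VSI */
	return 0;
}
```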
216 | ||
217 | /** | |
218 | * i40e_put_lump - return a lump of generic resource | |
219 | * @pile: the pile of resource to search | |
220 | * @index: the base item index | |
221 | * @id: the owner id of the items assigned | |
222 | * | |
223 | * Returns the count of items in the lump | |
224 | **/ | |
225 | static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id) | |
226 | { | |
227 | int valid_id = (id | I40E_PILE_VALID_BIT); | |
228 | int count = 0; | |
229 | int i; | |
230 | ||
231 | if (!pile || index >= pile->num_entries) | |
232 | return -EINVAL; | |
233 | ||
234 | for (i = index; | |
235 | i < pile->num_entries && pile->list[i] == valid_id; | |
236 | i++) { | |
237 | pile->list[i] = 0; | |
238 | count++; | |
239 | } | |
240 | ||
241 | if (count && index < pile->search_hint) | |
242 | pile->search_hint = index; | |
243 | ||
244 | return count; | |
245 | } | |
246 | ||
247 | /** | |
248 | * i40e_service_event_schedule - Schedule the service task to wake up | |
249 | * @pf: board private structure | |
250 | * | |
251 | * If not already scheduled, this puts the task into the work queue | |
252 | **/ | |
253 | static void i40e_service_event_schedule(struct i40e_pf *pf) | |
254 | { | |
255 | if (!test_bit(__I40E_DOWN, &pf->state) && | |
256 | !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) && | |
257 | !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state)) | |
258 | schedule_work(&pf->service_task); | |
259 | } | |
260 | ||
261 | /** | |
262 | * i40e_tx_timeout - Respond to a Tx Hang | |
263 | * @netdev: network interface device structure | |
264 | * | |
265 | * If any port has noticed a Tx timeout, it is likely that the whole | |
266 | * device is munged, not just the one netdev port, so go for the full | |
267 | * reset. | |
268 | **/ | |
269 | static void i40e_tx_timeout(struct net_device *netdev) | |
270 | { | |
271 | struct i40e_netdev_priv *np = netdev_priv(netdev); | |
272 | struct i40e_vsi *vsi = np->vsi; | |
273 | struct i40e_pf *pf = vsi->back; | |
274 | ||
275 | pf->tx_timeout_count++; | |
276 | ||
277 | if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20))) | |
278 | pf->tx_timeout_recovery_level = 0; | |
279 | pf->tx_timeout_last_recovery = jiffies; | |
280 | netdev_info(netdev, "tx_timeout recovery level %d\n", | |
281 | pf->tx_timeout_recovery_level); | |
282 | ||
283 | switch (pf->tx_timeout_recovery_level) { | |
284 | case 0: | |
285 | /* disable and re-enable queues for the VSI */ | |
286 | if (in_interrupt()) { | |
287 | set_bit(__I40E_REINIT_REQUESTED, &pf->state); | |
288 | set_bit(__I40E_REINIT_REQUESTED, &vsi->state); | |
289 | } else { | |
290 | i40e_vsi_reinit_locked(vsi); | |
291 | } | |
292 | break; | |
293 | case 1: | |
294 | set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); | |
295 | break; | |
296 | case 2: | |
297 | set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); | |
298 | break; | |
299 | case 3: | |
300 | set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); | |
301 | break; | |
302 | default: | |
303 | netdev_err(netdev, "tx_timeout recovery unsuccessful\n"); | |
304 | i40e_down(vsi); | |
305 | break; | |
306 | } | |
307 | i40e_service_event_schedule(pf); | |
308 | pf->tx_timeout_recovery_level++; | |
309 | } | |
310 | ||
311 | /** | |
312 | * i40e_release_rx_desc - Store the new tail value |
313 | * @rx_ring: ring to bump |
314 | * @val: new tail (next_to_use) index |
315 | **/ | |
316 | static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) | |
317 | { | |
318 | rx_ring->next_to_use = val; | |
319 | ||
320 | /* Force memory writes to complete before letting h/w | |
321 | * know there are new descriptors to fetch. (Only | |
322 | * applicable for weak-ordered memory model archs, | |
323 | * such as IA-64). | |
324 | */ | |
325 | wmb(); | |
326 | writel(val, rx_ring->tail); | |
327 | } | |
328 | ||
329 | /** | |
330 | * i40e_get_vsi_stats_struct - Get System Network Statistics | |
331 | * @vsi: the VSI we care about | |
332 | * | |
333 | * Returns the address of the device statistics structure. | |
334 | * The statistics are actually updated from the service task. | |
335 | **/ | |
336 | struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi) | |
337 | { | |
338 | return &vsi->net_stats; | |
339 | } | |
340 | ||
341 | /** | |
342 | * i40e_get_netdev_stats_struct - Get statistics for netdev interface | |
343 | * @netdev: network interface device structure | |
344 | * | |
345 | * Returns the address of the device statistics structure. | |
346 | * The statistics are actually updated from the service task. | |
347 | **/ | |
348 | static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( | |
349 | struct net_device *netdev, | |
980e9b11 | 350 | struct rtnl_link_stats64 *stats) |
41c445ff JB |
351 | { |
352 | struct i40e_netdev_priv *np = netdev_priv(netdev); | |
353 | struct i40e_vsi *vsi = np->vsi; | |
980e9b11 AD |
354 | struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); |
355 | int i; | |
356 | ||
357 | rcu_read_lock(); | |
358 | for (i = 0; i < vsi->num_queue_pairs; i++) { | |
359 | struct i40e_ring *tx_ring, *rx_ring; | |
360 | u64 bytes, packets; | |
361 | unsigned int start; | |
362 | ||
363 | tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); | |
364 | if (!tx_ring) | |
365 | continue; | |
366 | ||
367 | do { | |
368 | start = u64_stats_fetch_begin_bh(&tx_ring->syncp); | |
369 | packets = tx_ring->stats.packets; | |
370 | bytes = tx_ring->stats.bytes; | |
371 | } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start)); | |
372 | ||
373 | stats->tx_packets += packets; | |
374 | stats->tx_bytes += bytes; | |
375 | rx_ring = &tx_ring[1]; | |
376 | ||
377 | do { | |
378 | start = u64_stats_fetch_begin_bh(&rx_ring->syncp); | |
379 | packets = rx_ring->stats.packets; | |
380 | bytes = rx_ring->stats.bytes; | |
381 | } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start)); | |
41c445ff | 382 | |
980e9b11 AD |
383 | stats->rx_packets += packets; |
384 | stats->rx_bytes += bytes; | |
385 | } | |
386 | rcu_read_unlock(); | |
387 | ||
388 | /* following stats updated by i40e_watchdog_subtask() */ |
389 | stats->multicast = vsi_stats->multicast; | |
390 | stats->tx_errors = vsi_stats->tx_errors; | |
391 | stats->tx_dropped = vsi_stats->tx_dropped; | |
392 | stats->rx_errors = vsi_stats->rx_errors; | |
393 | stats->rx_crc_errors = vsi_stats->rx_crc_errors; | |
394 | stats->rx_length_errors = vsi_stats->rx_length_errors; | |
41c445ff | 395 | |
980e9b11 | 396 | return stats; |
41c445ff JB |
397 | } |
398 | ||
399 | /** | |
400 | * i40e_vsi_reset_stats - Resets all stats of the given vsi | |
401 | * @vsi: the VSI to have its stats reset | |
402 | **/ | |
403 | void i40e_vsi_reset_stats(struct i40e_vsi *vsi) | |
404 | { | |
405 | struct rtnl_link_stats64 *ns; | |
406 | int i; | |
407 | ||
408 | if (!vsi) | |
409 | return; | |
410 | ||
411 | ns = i40e_get_vsi_stats_struct(vsi); | |
412 | memset(ns, 0, sizeof(*ns)); | |
413 | memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets)); | |
414 | memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats)); | |
415 | memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets)); | |
416 | if (vsi->rx_rings) | |
417 | for (i = 0; i < vsi->num_queue_pairs; i++) { | |
9f65e15b AD |
418 | memset(&vsi->rx_rings[i]->stats, 0, |
419 | sizeof(vsi->rx_rings[i]->stats)); |
420 | memset(&vsi->rx_rings[i]->rx_stats, 0, |
421 | sizeof(vsi->rx_rings[i]->rx_stats)); |
422 | memset(&vsi->tx_rings[i]->stats, 0, |
423 | sizeof(vsi->tx_rings[i]->stats)); | |
424 | memset(&vsi->tx_rings[i]->tx_stats, 0, | |
425 | sizeof(vsi->tx_rings[i]->tx_stats)); | |
41c445ff JB |
426 | } |
427 | vsi->stat_offsets_loaded = false; | |
428 | } | |
429 | ||
430 | /** | |
431 | * i40e_pf_reset_stats - Reset all of the stats for the given pf | |
432 | * @pf: the PF to be reset | |
433 | **/ | |
434 | void i40e_pf_reset_stats(struct i40e_pf *pf) | |
435 | { | |
436 | memset(&pf->stats, 0, sizeof(pf->stats)); | |
437 | memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets)); | |
438 | pf->stat_offsets_loaded = false; | |
439 | } | |
440 | ||
441 | /** | |
442 | * i40e_stat_update48 - read and update a 48 bit stat from the chip | |
443 | * @hw: ptr to the hardware info | |
444 | * @hireg: the high 32 bit reg to read | |
445 | * @loreg: the low 32 bit reg to read | |
446 | * @offset_loaded: has the initial offset been loaded yet | |
447 | * @offset: ptr to current offset value | |
448 | * @stat: ptr to the stat | |
449 | * | |
450 | * Since the device stats are not reset at PFReset, they likely will not | |
451 | * be zeroed when the driver starts. We'll save the first values read | |
452 | * and use them as offsets to be subtracted from the raw values in order | |
453 | * to report stats that count from zero. In the process, we also manage | |
454 | * the potential roll-over. | |
455 | **/ | |
456 | static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, | |
457 | bool offset_loaded, u64 *offset, u64 *stat) | |
458 | { | |
459 | u64 new_data; | |
460 | ||
461 | if (hw->device_id == I40E_QEMU_DEVICE_ID) { | |
462 | new_data = rd32(hw, loreg); | |
463 | new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; | |
464 | } else { | |
465 | new_data = rd64(hw, loreg); | |
466 | } | |
467 | if (!offset_loaded) | |
468 | *offset = new_data; | |
469 | if (likely(new_data >= *offset)) | |
470 | *stat = new_data - *offset; | |
471 | else | |
472 | *stat = (new_data + ((u64)1 << 48)) - *offset; | |
473 | *stat &= 0xFFFFFFFFFFFFULL; | |
474 | } | |
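A worked example of the roll-over branch above, written as a standalone user-space snippet rather than driver code: with the offset latched near the top of the 48-bit range and a small current reading, the reported delta is still correct.

```c
/* Illustration only: the 48-bit roll-over arithmetic used above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t offset   = 0xFFFFFFFFFFF0ULL;	/* value latched at driver load */
	uint64_t new_data = 0x10ULL;		/* counter has since wrapped */
	uint64_t stat;

	if (new_data >= offset)
		stat = new_data - offset;
	else
		stat = (new_data + (1ULL << 48)) - offset;	/* roll-over case */
	stat &= 0xFFFFFFFFFFFFULL;

	/* prints 0x20: the counter advanced by 0x20 across the wrap */
	printf("delta since load: 0x%llx\n", (unsigned long long)stat);
	return 0;
}
```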
475 | ||
476 | /** | |
477 | * i40e_stat_update32 - read and update a 32 bit stat from the chip | |
478 | * @hw: ptr to the hardware info | |
479 | * @reg: the hw reg to read | |
480 | * @offset_loaded: has the initial offset been loaded yet | |
481 | * @offset: ptr to current offset value | |
482 | * @stat: ptr to the stat | |
483 | **/ | |
484 | static void i40e_stat_update32(struct i40e_hw *hw, u32 reg, | |
485 | bool offset_loaded, u64 *offset, u64 *stat) | |
486 | { | |
487 | u32 new_data; | |
488 | ||
489 | new_data = rd32(hw, reg); | |
490 | if (!offset_loaded) | |
491 | *offset = new_data; | |
492 | if (likely(new_data >= *offset)) | |
493 | *stat = (u32)(new_data - *offset); | |
494 | else | |
495 | *stat = (u32)((new_data + ((u64)1 << 32)) - *offset); | |
496 | } | |
497 | ||
498 | /** | |
499 | * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters. | |
500 | * @vsi: the VSI to be updated | |
501 | **/ | |
502 | void i40e_update_eth_stats(struct i40e_vsi *vsi) | |
503 | { | |
504 | int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx); | |
505 | struct i40e_pf *pf = vsi->back; | |
506 | struct i40e_hw *hw = &pf->hw; | |
507 | struct i40e_eth_stats *oes; | |
508 | struct i40e_eth_stats *es; /* device's eth stats */ | |
509 | ||
510 | es = &vsi->eth_stats; | |
511 | oes = &vsi->eth_stats_offsets; | |
512 | ||
513 | /* Gather up the stats that the hw collects */ | |
514 | i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx), | |
515 | vsi->stat_offsets_loaded, | |
516 | &oes->tx_errors, &es->tx_errors); | |
517 | i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), | |
518 | vsi->stat_offsets_loaded, | |
519 | &oes->rx_discards, &es->rx_discards); | |
520 | ||
521 | i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx), | |
522 | I40E_GLV_GORCL(stat_idx), | |
523 | vsi->stat_offsets_loaded, | |
524 | &oes->rx_bytes, &es->rx_bytes); | |
525 | i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx), | |
526 | I40E_GLV_UPRCL(stat_idx), | |
527 | vsi->stat_offsets_loaded, | |
528 | &oes->rx_unicast, &es->rx_unicast); | |
529 | i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx), | |
530 | I40E_GLV_MPRCL(stat_idx), | |
531 | vsi->stat_offsets_loaded, | |
532 | &oes->rx_multicast, &es->rx_multicast); | |
533 | i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx), | |
534 | I40E_GLV_BPRCL(stat_idx), | |
535 | vsi->stat_offsets_loaded, | |
536 | &oes->rx_broadcast, &es->rx_broadcast); | |
537 | ||
538 | i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx), | |
539 | I40E_GLV_GOTCL(stat_idx), | |
540 | vsi->stat_offsets_loaded, | |
541 | &oes->tx_bytes, &es->tx_bytes); | |
542 | i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx), | |
543 | I40E_GLV_UPTCL(stat_idx), | |
544 | vsi->stat_offsets_loaded, | |
545 | &oes->tx_unicast, &es->tx_unicast); | |
546 | i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx), | |
547 | I40E_GLV_MPTCL(stat_idx), | |
548 | vsi->stat_offsets_loaded, | |
549 | &oes->tx_multicast, &es->tx_multicast); | |
550 | i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx), | |
551 | I40E_GLV_BPTCL(stat_idx), | |
552 | vsi->stat_offsets_loaded, | |
553 | &oes->tx_broadcast, &es->tx_broadcast); | |
554 | vsi->stat_offsets_loaded = true; | |
555 | } | |
556 | ||
557 | /** | |
558 | * i40e_update_veb_stats - Update Switch component statistics | |
559 | * @veb: the VEB being updated | |
560 | **/ | |
561 | static void i40e_update_veb_stats(struct i40e_veb *veb) | |
562 | { | |
563 | struct i40e_pf *pf = veb->pf; | |
564 | struct i40e_hw *hw = &pf->hw; | |
565 | struct i40e_eth_stats *oes; | |
566 | struct i40e_eth_stats *es; /* device's eth stats */ | |
567 | int idx = 0; | |
568 | ||
569 | idx = veb->stats_idx; | |
570 | es = &veb->stats; | |
571 | oes = &veb->stats_offsets; | |
572 | ||
573 | /* Gather up the stats that the hw collects */ | |
574 | i40e_stat_update32(hw, I40E_GLSW_TDPC(idx), | |
575 | veb->stat_offsets_loaded, | |
576 | &oes->tx_discards, &es->tx_discards); | |
577 | i40e_stat_update32(hw, I40E_GLSW_RUPP(idx), | |
578 | veb->stat_offsets_loaded, | |
579 | &oes->rx_unknown_protocol, &es->rx_unknown_protocol); | |
580 | ||
581 | i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx), | |
582 | veb->stat_offsets_loaded, | |
583 | &oes->rx_bytes, &es->rx_bytes); | |
584 | i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx), | |
585 | veb->stat_offsets_loaded, | |
586 | &oes->rx_unicast, &es->rx_unicast); | |
587 | i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx), | |
588 | veb->stat_offsets_loaded, | |
589 | &oes->rx_multicast, &es->rx_multicast); | |
590 | i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx), | |
591 | veb->stat_offsets_loaded, | |
592 | &oes->rx_broadcast, &es->rx_broadcast); | |
593 | ||
594 | i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx), | |
595 | veb->stat_offsets_loaded, | |
596 | &oes->tx_bytes, &es->tx_bytes); | |
597 | i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx), | |
598 | veb->stat_offsets_loaded, | |
599 | &oes->tx_unicast, &es->tx_unicast); | |
600 | i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx), | |
601 | veb->stat_offsets_loaded, | |
602 | &oes->tx_multicast, &es->tx_multicast); | |
603 | i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx), | |
604 | veb->stat_offsets_loaded, | |
605 | &oes->tx_broadcast, &es->tx_broadcast); | |
606 | veb->stat_offsets_loaded = true; | |
607 | } | |
608 | ||
609 | /** | |
610 | * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode | |
611 | * @pf: the corresponding PF | |
612 | * | |
613 | * Update the Rx XOFF counter (PAUSE frames) in link flow control mode | |
614 | **/ | |
615 | static void i40e_update_link_xoff_rx(struct i40e_pf *pf) | |
616 | { | |
617 | struct i40e_hw_port_stats *osd = &pf->stats_offsets; | |
618 | struct i40e_hw_port_stats *nsd = &pf->stats; | |
619 | struct i40e_hw *hw = &pf->hw; | |
620 | u64 xoff = 0; | |
621 | u16 i, v; | |
622 | ||
623 | if ((hw->fc.current_mode != I40E_FC_FULL) && | |
624 | (hw->fc.current_mode != I40E_FC_RX_PAUSE)) | |
625 | return; | |
626 | ||
627 | xoff = nsd->link_xoff_rx; | |
628 | i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), | |
629 | pf->stat_offsets_loaded, | |
630 | &osd->link_xoff_rx, &nsd->link_xoff_rx); | |
631 | ||
632 | /* No new LFC xoff rx */ | |
633 | if (!(nsd->link_xoff_rx - xoff)) | |
634 | return; | |
635 | ||
636 | /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */ | |
637 | for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { | |
638 | struct i40e_vsi *vsi = pf->vsi[v]; | |
639 | ||
640 | if (!vsi) | |
641 | continue; | |
642 | ||
643 | for (i = 0; i < vsi->num_queue_pairs; i++) { | |
9f65e15b | 644 | struct i40e_ring *ring = vsi->tx_rings[i]; |
41c445ff JB |
645 | clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state); |
646 | } | |
647 | } | |
648 | } | |
649 | ||
650 | /** | |
651 | * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode | |
652 | * @pf: the corresponding PF | |
653 | * | |
654 | * Update the Rx XOFF counter (PAUSE frames) in PFC mode | |
655 | **/ | |
656 | static void i40e_update_prio_xoff_rx(struct i40e_pf *pf) | |
657 | { | |
658 | struct i40e_hw_port_stats *osd = &pf->stats_offsets; | |
659 | struct i40e_hw_port_stats *nsd = &pf->stats; | |
660 | bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false}; | |
661 | struct i40e_dcbx_config *dcb_cfg; | |
662 | struct i40e_hw *hw = &pf->hw; | |
663 | u16 i, v; | |
664 | u8 tc; | |
665 | ||
666 | dcb_cfg = &hw->local_dcbx_config; | |
667 | ||
668 | /* See if DCB enabled with PFC TC */ | |
669 | if (!(pf->flags & I40E_FLAG_DCB_ENABLED) || | |
670 | !(dcb_cfg->pfc.pfcenable)) { | |
671 | i40e_update_link_xoff_rx(pf); | |
672 | return; | |
673 | } | |
674 | ||
675 | for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { | |
676 | u64 prio_xoff = nsd->priority_xoff_rx[i]; | |
677 | i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i), | |
678 | pf->stat_offsets_loaded, | |
679 | &osd->priority_xoff_rx[i], | |
680 | &nsd->priority_xoff_rx[i]); | |
681 | ||
682 | /* No new PFC xoff rx */ | |
683 | if (!(nsd->priority_xoff_rx[i] - prio_xoff)) | |
684 | continue; | |
685 | /* Get the TC for given priority */ | |
686 | tc = dcb_cfg->etscfg.prioritytable[i]; | |
687 | xoff[tc] = true; | |
688 | } | |
689 | ||
690 | /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */ | |
691 | for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { | |
692 | struct i40e_vsi *vsi = pf->vsi[v]; | |
693 | ||
694 | if (!vsi) | |
695 | continue; | |
696 | ||
697 | for (i = 0; i < vsi->num_queue_pairs; i++) { | |
9f65e15b | 698 | struct i40e_ring *ring = vsi->tx_rings[i]; |
41c445ff JB |
699 | |
700 | tc = ring->dcb_tc; | |
701 | if (xoff[tc]) | |
702 | clear_bit(__I40E_HANG_CHECK_ARMED, | |
703 | &ring->state); | |
704 | } | |
705 | } | |
706 | } | |
707 | ||
708 | /** | |
709 | * i40e_update_stats - Update the board statistics counters. | |
710 | * @vsi: the VSI to be updated | |
711 | * | |
712 | * There are a few instances where we store the same stat in a | |
713 | * couple of different structs. This is partly because we have | |
714 | * the netdev stats that need to be filled out, which is slightly | |
715 | * different from the "eth_stats" defined by the chip and used in | |
716 | * VF communications. We sort it all out here in a central place. | |
717 | **/ | |
718 | void i40e_update_stats(struct i40e_vsi *vsi) | |
719 | { | |
720 | struct i40e_pf *pf = vsi->back; | |
721 | struct i40e_hw *hw = &pf->hw; | |
722 | struct rtnl_link_stats64 *ons; | |
723 | struct rtnl_link_stats64 *ns; /* netdev stats */ | |
724 | struct i40e_eth_stats *oes; | |
725 | struct i40e_eth_stats *es; /* device's eth stats */ | |
726 | u32 tx_restart, tx_busy; | |
727 | u32 rx_page, rx_buf; | |
728 | u64 rx_p, rx_b; | |
729 | u64 tx_p, tx_b; | |
730 | int i; | |
731 | u16 q; | |
732 | ||
733 | if (test_bit(__I40E_DOWN, &vsi->state) || | |
734 | test_bit(__I40E_CONFIG_BUSY, &pf->state)) | |
735 | return; | |
736 | ||
737 | ns = i40e_get_vsi_stats_struct(vsi); | |
738 | ons = &vsi->net_stats_offsets; | |
739 | es = &vsi->eth_stats; | |
740 | oes = &vsi->eth_stats_offsets; | |
741 | ||
742 | /* Gather up the netdev and vsi stats that the driver collects | |
743 | * on the fly during packet processing | |
744 | */ | |
745 | rx_b = rx_p = 0; | |
746 | tx_b = tx_p = 0; | |
747 | tx_restart = tx_busy = 0; | |
748 | rx_page = 0; | |
749 | rx_buf = 0; | |
980e9b11 | 750 | rcu_read_lock(); |
41c445ff JB |
751 | for (q = 0; q < vsi->num_queue_pairs; q++) { |
752 | struct i40e_ring *p; | |
980e9b11 AD |
753 | u64 bytes, packets; |
754 | unsigned int start; | |
755 | ||
756 | /* locate Tx ring */ | |
757 | p = ACCESS_ONCE(vsi->tx_rings[q]); | |
758 | ||
759 | do { | |
760 | start = u64_stats_fetch_begin_bh(&p->syncp); | |
761 | packets = p->stats.packets; | |
762 | bytes = p->stats.bytes; | |
763 | } while (u64_stats_fetch_retry_bh(&p->syncp, start)); | |
764 | tx_b += bytes; | |
765 | tx_p += packets; | |
766 | tx_restart += p->tx_stats.restart_queue; | |
767 | tx_busy += p->tx_stats.tx_busy; | |
41c445ff | 768 | |
980e9b11 AD |
769 | /* Rx queue is part of the same block as Tx queue */ |
770 | p = &p[1]; | |
771 | do { | |
772 | start = u64_stats_fetch_begin_bh(&p->syncp); | |
773 | packets = p->stats.packets; | |
774 | bytes = p->stats.bytes; | |
775 | } while (u64_stats_fetch_retry_bh(&p->syncp, start)); | |
776 | rx_b += bytes; | |
777 | rx_p += packets; | |
41c445ff JB |
778 | rx_buf += p->rx_stats.alloc_rx_buff_failed; |
779 | rx_page += p->rx_stats.alloc_rx_page_failed; | |
41c445ff | 780 | } |
980e9b11 | 781 | rcu_read_unlock(); |
41c445ff JB |
782 | vsi->tx_restart = tx_restart; |
783 | vsi->tx_busy = tx_busy; | |
784 | vsi->rx_page_failed = rx_page; | |
785 | vsi->rx_buf_failed = rx_buf; | |
786 | ||
787 | ns->rx_packets = rx_p; | |
788 | ns->rx_bytes = rx_b; | |
789 | ns->tx_packets = tx_p; | |
790 | ns->tx_bytes = tx_b; | |
791 | ||
792 | i40e_update_eth_stats(vsi); | |
793 | /* update netdev stats from eth stats */ | |
794 | ons->rx_errors = oes->rx_errors; | |
795 | ns->rx_errors = es->rx_errors; | |
796 | ons->tx_errors = oes->tx_errors; | |
797 | ns->tx_errors = es->tx_errors; | |
798 | ons->multicast = oes->rx_multicast; | |
799 | ns->multicast = es->rx_multicast; | |
800 | ons->tx_dropped = oes->tx_discards; | |
801 | ns->tx_dropped = es->tx_discards; | |
802 | ||
803 | /* Get the port data only if this is the main PF VSI */ | |
804 | if (vsi == pf->vsi[pf->lan_vsi]) { | |
805 | struct i40e_hw_port_stats *nsd = &pf->stats; | |
806 | struct i40e_hw_port_stats *osd = &pf->stats_offsets; | |
807 | ||
808 | i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), | |
809 | I40E_GLPRT_GORCL(hw->port), | |
810 | pf->stat_offsets_loaded, | |
811 | &osd->eth.rx_bytes, &nsd->eth.rx_bytes); | |
812 | i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port), | |
813 | I40E_GLPRT_GOTCL(hw->port), | |
814 | pf->stat_offsets_loaded, | |
815 | &osd->eth.tx_bytes, &nsd->eth.tx_bytes); | |
816 | i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), | |
817 | pf->stat_offsets_loaded, | |
818 | &osd->eth.rx_discards, | |
819 | &nsd->eth.rx_discards); | |
820 | i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port), | |
821 | pf->stat_offsets_loaded, | |
822 | &osd->eth.tx_discards, | |
823 | &nsd->eth.tx_discards); | |
824 | i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), | |
825 | I40E_GLPRT_MPRCL(hw->port), | |
826 | pf->stat_offsets_loaded, | |
827 | &osd->eth.rx_multicast, | |
828 | &nsd->eth.rx_multicast); | |
829 | ||
830 | i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), | |
831 | pf->stat_offsets_loaded, | |
832 | &osd->tx_dropped_link_down, | |
833 | &nsd->tx_dropped_link_down); | |
834 | ||
835 | i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), | |
836 | pf->stat_offsets_loaded, | |
837 | &osd->crc_errors, &nsd->crc_errors); | |
838 | ns->rx_crc_errors = nsd->crc_errors; | |
839 | ||
840 | i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), | |
841 | pf->stat_offsets_loaded, | |
842 | &osd->illegal_bytes, &nsd->illegal_bytes); | |
843 | ns->rx_errors = nsd->crc_errors | |
844 | + nsd->illegal_bytes; | |
845 | ||
846 | i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), | |
847 | pf->stat_offsets_loaded, | |
848 | &osd->mac_local_faults, | |
849 | &nsd->mac_local_faults); | |
850 | i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), | |
851 | pf->stat_offsets_loaded, | |
852 | &osd->mac_remote_faults, | |
853 | &nsd->mac_remote_faults); | |
854 | ||
855 | i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), | |
856 | pf->stat_offsets_loaded, | |
857 | &osd->rx_length_errors, | |
858 | &nsd->rx_length_errors); | |
859 | ns->rx_length_errors = nsd->rx_length_errors; | |
860 | ||
861 | i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), | |
862 | pf->stat_offsets_loaded, | |
863 | &osd->link_xon_rx, &nsd->link_xon_rx); | |
864 | i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), | |
865 | pf->stat_offsets_loaded, | |
866 | &osd->link_xon_tx, &nsd->link_xon_tx); | |
867 | i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */ | |
868 | i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), | |
869 | pf->stat_offsets_loaded, | |
870 | &osd->link_xoff_tx, &nsd->link_xoff_tx); | |
871 | ||
872 | for (i = 0; i < 8; i++) { | |
873 | i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i), | |
874 | pf->stat_offsets_loaded, | |
875 | &osd->priority_xon_rx[i], | |
876 | &nsd->priority_xon_rx[i]); | |
877 | i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i), | |
878 | pf->stat_offsets_loaded, | |
879 | &osd->priority_xon_tx[i], | |
880 | &nsd->priority_xon_tx[i]); | |
881 | i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i), | |
882 | pf->stat_offsets_loaded, | |
883 | &osd->priority_xoff_tx[i], | |
884 | &nsd->priority_xoff_tx[i]); | |
885 | i40e_stat_update32(hw, | |
886 | I40E_GLPRT_RXON2OFFCNT(hw->port, i), | |
887 | pf->stat_offsets_loaded, | |
888 | &osd->priority_xon_2_xoff[i], | |
889 | &nsd->priority_xon_2_xoff[i]); | |
890 | } | |
891 | ||
892 | i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), | |
893 | I40E_GLPRT_PRC64L(hw->port), | |
894 | pf->stat_offsets_loaded, | |
895 | &osd->rx_size_64, &nsd->rx_size_64); | |
896 | i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), | |
897 | I40E_GLPRT_PRC127L(hw->port), | |
898 | pf->stat_offsets_loaded, | |
899 | &osd->rx_size_127, &nsd->rx_size_127); | |
900 | i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), | |
901 | I40E_GLPRT_PRC255L(hw->port), | |
902 | pf->stat_offsets_loaded, | |
903 | &osd->rx_size_255, &nsd->rx_size_255); | |
904 | i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), | |
905 | I40E_GLPRT_PRC511L(hw->port), | |
906 | pf->stat_offsets_loaded, | |
907 | &osd->rx_size_511, &nsd->rx_size_511); | |
908 | i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), | |
909 | I40E_GLPRT_PRC1023L(hw->port), | |
910 | pf->stat_offsets_loaded, | |
911 | &osd->rx_size_1023, &nsd->rx_size_1023); | |
912 | i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), | |
913 | I40E_GLPRT_PRC1522L(hw->port), | |
914 | pf->stat_offsets_loaded, | |
915 | &osd->rx_size_1522, &nsd->rx_size_1522); | |
916 | i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), | |
917 | I40E_GLPRT_PRC9522L(hw->port), | |
918 | pf->stat_offsets_loaded, | |
919 | &osd->rx_size_big, &nsd->rx_size_big); | |
920 | ||
921 | i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), | |
922 | I40E_GLPRT_PTC64L(hw->port), | |
923 | pf->stat_offsets_loaded, | |
924 | &osd->tx_size_64, &nsd->tx_size_64); | |
925 | i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), | |
926 | I40E_GLPRT_PTC127L(hw->port), | |
927 | pf->stat_offsets_loaded, | |
928 | &osd->tx_size_127, &nsd->tx_size_127); | |
929 | i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), | |
930 | I40E_GLPRT_PTC255L(hw->port), | |
931 | pf->stat_offsets_loaded, | |
932 | &osd->tx_size_255, &nsd->tx_size_255); | |
933 | i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), | |
934 | I40E_GLPRT_PTC511L(hw->port), | |
935 | pf->stat_offsets_loaded, | |
936 | &osd->tx_size_511, &nsd->tx_size_511); | |
937 | i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), | |
938 | I40E_GLPRT_PTC1023L(hw->port), | |
939 | pf->stat_offsets_loaded, | |
940 | &osd->tx_size_1023, &nsd->tx_size_1023); | |
941 | i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), | |
942 | I40E_GLPRT_PTC1522L(hw->port), | |
943 | pf->stat_offsets_loaded, | |
944 | &osd->tx_size_1522, &nsd->tx_size_1522); | |
945 | i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), | |
946 | I40E_GLPRT_PTC9522L(hw->port), | |
947 | pf->stat_offsets_loaded, | |
948 | &osd->tx_size_big, &nsd->tx_size_big); | |
949 | ||
950 | i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port), | |
951 | pf->stat_offsets_loaded, | |
952 | &osd->rx_undersize, &nsd->rx_undersize); | |
953 | i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port), | |
954 | pf->stat_offsets_loaded, | |
955 | &osd->rx_fragments, &nsd->rx_fragments); | |
956 | i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port), | |
957 | pf->stat_offsets_loaded, | |
958 | &osd->rx_oversize, &nsd->rx_oversize); | |
959 | i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port), | |
960 | pf->stat_offsets_loaded, | |
961 | &osd->rx_jabber, &nsd->rx_jabber); | |
962 | } | |
963 | ||
964 | pf->stat_offsets_loaded = true; | |
965 | } | |
966 | ||
967 | /** | |
968 | * i40e_find_filter - Search VSI filter list for specific mac/vlan filter | |
969 | * @vsi: the VSI to be searched | |
970 | * @macaddr: the MAC address | |
971 | * @vlan: the vlan | |
972 | * @is_vf: make sure it's a vf filter, else doesn't matter |
973 | * @is_netdev: make sure it's a netdev filter, else doesn't matter |
974 | * | |
975 | * Returns ptr to the filter object or NULL | |
976 | **/ | |
977 | static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi, | |
978 | u8 *macaddr, s16 vlan, | |
979 | bool is_vf, bool is_netdev) | |
980 | { | |
981 | struct i40e_mac_filter *f; | |
982 | ||
983 | if (!vsi || !macaddr) | |
984 | return NULL; | |
985 | ||
986 | list_for_each_entry(f, &vsi->mac_filter_list, list) { | |
987 | if ((ether_addr_equal(macaddr, f->macaddr)) && | |
988 | (vlan == f->vlan) && | |
989 | (!is_vf || f->is_vf) && | |
990 | (!is_netdev || f->is_netdev)) | |
991 | return f; | |
992 | } | |
993 | return NULL; | |
994 | } | |
995 | ||
996 | /** | |
997 | * i40e_find_mac - Find a mac addr in the macvlan filters list | |
998 | * @vsi: the VSI to be searched | |
999 | * @macaddr: the MAC address we are searching for | |
1000 | * @is_vf: make sure it's a vf filter, else doesn't matter |
1001 | * @is_netdev: make sure it's a netdev filter, else doesn't matter |
1002 | * | |
1003 | * Returns the first filter with the provided MAC address or NULL if | |
1004 | * MAC address was not found | |
1005 | **/ | |
1006 | struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, | |
1007 | bool is_vf, bool is_netdev) | |
1008 | { | |
1009 | struct i40e_mac_filter *f; | |
1010 | ||
1011 | if (!vsi || !macaddr) | |
1012 | return NULL; | |
1013 | ||
1014 | list_for_each_entry(f, &vsi->mac_filter_list, list) { | |
1015 | if ((ether_addr_equal(macaddr, f->macaddr)) && | |
1016 | (!is_vf || f->is_vf) && | |
1017 | (!is_netdev || f->is_netdev)) | |
1018 | return f; | |
1019 | } | |
1020 | return NULL; | |
1021 | } | |
1022 | ||
1023 | /** | |
1024 | * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode | |
1025 | * @vsi: the VSI to be searched | |
1026 | * | |
1027 | * Returns true if VSI is in vlan mode or false otherwise | |
1028 | **/ | |
1029 | bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi) | |
1030 | { | |
1031 | struct i40e_mac_filter *f; | |
1032 | ||
1033 | /* Only -1 on every filter means the VSI is not in vlan mode, |
1034 | * so we have to walk the whole list to be sure. |
1035 | */ | |
1036 | list_for_each_entry(f, &vsi->mac_filter_list, list) { | |
1037 | if (f->vlan >= 0) | |
1038 | return true; | |
1039 | } | |
1040 | ||
1041 | return false; | |
1042 | } | |
1043 | ||
1044 | /** | |
1045 | * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans | |
1046 | * @vsi: the VSI to be searched | |
1047 | * @macaddr: the mac address to be filtered | |
1048 | * @is_vf: true if it is a vf | |
1049 | * @is_netdev: true if it is a netdev | |
1050 | * | |
1051 | * Goes through all the macvlan filters and adds a | |
1052 | * macvlan filter for each unique vlan that already exists | |
1053 | * | |
1054 | * Returns first filter found on success, else NULL | |
1055 | **/ | |
1056 | struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr, | |
1057 | bool is_vf, bool is_netdev) | |
1058 | { | |
1059 | struct i40e_mac_filter *f; | |
1060 | ||
1061 | list_for_each_entry(f, &vsi->mac_filter_list, list) { | |
1062 | if (!i40e_find_filter(vsi, macaddr, f->vlan, | |
1063 | is_vf, is_netdev)) { | |
1064 | if (!i40e_add_filter(vsi, macaddr, f->vlan, | |
1065 | is_vf, is_netdev)) | |
1066 | return NULL; | |
1067 | } | |
1068 | } | |
1069 | ||
1070 | return list_first_entry_or_null(&vsi->mac_filter_list, | |
1071 | struct i40e_mac_filter, list); | |
1072 | } | |
1073 | ||
1074 | /** | |
1075 | * i40e_add_filter - Add a mac/vlan filter to the VSI | |
1076 | * @vsi: the VSI to be searched | |
1077 | * @macaddr: the MAC address | |
1078 | * @vlan: the vlan | |
1079 | * @is_vf: make sure it's a vf filter, else doesn't matter |
1080 | * @is_netdev: make sure it's a netdev filter, else doesn't matter |
1081 | * | |
1082 | * Returns ptr to the filter object or NULL when no memory available. | |
1083 | **/ | |
1084 | struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, | |
1085 | u8 *macaddr, s16 vlan, | |
1086 | bool is_vf, bool is_netdev) | |
1087 | { | |
1088 | struct i40e_mac_filter *f; | |
1089 | ||
1090 | if (!vsi || !macaddr) | |
1091 | return NULL; | |
1092 | ||
1093 | f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev); | |
1094 | if (!f) { | |
1095 | f = kzalloc(sizeof(*f), GFP_ATOMIC); | |
1096 | if (!f) | |
1097 | goto add_filter_out; | |
1098 | ||
1099 | memcpy(f->macaddr, macaddr, ETH_ALEN); | |
1100 | f->vlan = vlan; | |
1101 | f->changed = true; | |
1102 | ||
1103 | INIT_LIST_HEAD(&f->list); | |
1104 | list_add(&f->list, &vsi->mac_filter_list); | |
1105 | } | |
1106 | ||
1107 | /* increment counter and add a new flag if needed */ | |
1108 | if (is_vf) { | |
1109 | if (!f->is_vf) { | |
1110 | f->is_vf = true; | |
1111 | f->counter++; | |
1112 | } | |
1113 | } else if (is_netdev) { | |
1114 | if (!f->is_netdev) { | |
1115 | f->is_netdev = true; | |
1116 | f->counter++; | |
1117 | } | |
1118 | } else { | |
1119 | f->counter++; | |
1120 | } | |
1121 | ||
1122 | /* changed tells sync_filters_subtask to | |
1123 | * push the filter down to the firmware | |
1124 | */ | |
1125 | if (f->changed) { | |
1126 | vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; | |
1127 | vsi->back->flags |= I40E_FLAG_FILTER_SYNC; | |
1128 | } | |
1129 | ||
1130 | add_filter_out: | |
1131 | return f; | |
1132 | } | |
1133 | ||
1134 | /** | |
1135 | * i40e_del_filter - Remove a mac/vlan filter from the VSI | |
1136 | * @vsi: the VSI to be searched | |
1137 | * @macaddr: the MAC address | |
1138 | * @vlan: the vlan | |
1139 | * @is_vf: make sure it's a vf filter, else doesn't matter | |
1140 | * @is_netdev: make sure it's a netdev filter, else doesn't matter | |
1141 | **/ | |
1142 | void i40e_del_filter(struct i40e_vsi *vsi, | |
1143 | u8 *macaddr, s16 vlan, | |
1144 | bool is_vf, bool is_netdev) | |
1145 | { | |
1146 | struct i40e_mac_filter *f; | |
1147 | ||
1148 | if (!vsi || !macaddr) | |
1149 | return; | |
1150 | ||
1151 | f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev); | |
1152 | if (!f || f->counter == 0) | |
1153 | return; | |
1154 | ||
1155 | if (is_vf) { | |
1156 | if (f->is_vf) { | |
1157 | f->is_vf = false; | |
1158 | f->counter--; | |
1159 | } | |
1160 | } else if (is_netdev) { | |
1161 | if (f->is_netdev) { | |
1162 | f->is_netdev = false; | |
1163 | f->counter--; | |
1164 | } | |
1165 | } else { | |
1166 | /* make sure we don't remove a filter in use by vf or netdev */ | |
1167 | int min_f = 0; | |
1168 | min_f += (f->is_vf ? 1 : 0); | |
1169 | min_f += (f->is_netdev ? 1 : 0); | |
1170 | ||
1171 | if (f->counter > min_f) | |
1172 | f->counter--; | |
1173 | } | |
1174 | ||
1175 | /* counter == 0 tells sync_filters_subtask to | |
1176 | * remove the filter from the firmware's list | |
1177 | */ | |
1178 | if (f->counter == 0) { | |
1179 | f->changed = true; | |
1180 | vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; | |
1181 | vsi->back->flags |= I40E_FLAG_FILTER_SYNC; | |
1182 | } | |
1183 | } | |
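The counter/flag bookkeeping above means each consumer's i40e_add_filter() must eventually be balanced by an i40e_del_filter() with the same macaddr/vlan/is_vf/is_netdev tuple; only when the last user drops the filter is it queued for removal from the firmware. A minimal sketch of that pairing (the `example_` function is illustrative, not driver code):

```c
/* Sketch only: balanced use of the filter add/del helpers above. */
static void example_filter_lifetime(struct i40e_vsi *vsi, u8 *mac)
{
	struct i40e_mac_filter *f;

	/* netdev consumer takes a reference; counter becomes 1 */
	f = i40e_add_filter(vsi, mac, I40E_VLAN_ANY, false, true);
	if (!f)
		return;		/* out of memory */

	/* a second add from the same consumer does not bump the counter */
	i40e_add_filter(vsi, mac, I40E_VLAN_ANY, false, true);

	/* the matching delete drops the counter to 0, so the filter sync
	 * task will remove it from the firmware on its next pass */
	i40e_del_filter(vsi, mac, I40E_VLAN_ANY, false, true);
}
```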
1184 | ||
1185 | /** | |
1186 | * i40e_set_mac - NDO callback to set mac address | |
1187 | * @netdev: network interface device structure | |
1188 | * @p: pointer to an address structure | |
1189 | * | |
1190 | * Returns 0 on success, negative on failure | |
1191 | **/ | |
1192 | static int i40e_set_mac(struct net_device *netdev, void *p) | |
1193 | { | |
1194 | struct i40e_netdev_priv *np = netdev_priv(netdev); | |
1195 | struct i40e_vsi *vsi = np->vsi; | |
1196 | struct sockaddr *addr = p; | |
1197 | struct i40e_mac_filter *f; | |
1198 | ||
1199 | if (!is_valid_ether_addr(addr->sa_data)) | |
1200 | return -EADDRNOTAVAIL; | |
1201 | ||
1202 | netdev_info(netdev, "set mac address=%pM\n", addr->sa_data); | |
1203 | ||
1204 | if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) | |
1205 | return 0; | |
1206 | ||
1207 | if (vsi->type == I40E_VSI_MAIN) { | |
1208 | i40e_status ret; | |
1209 | ret = i40e_aq_mac_address_write(&vsi->back->hw, | |
1210 | I40E_AQC_WRITE_TYPE_LAA_ONLY, | |
1211 | addr->sa_data, NULL); | |
1212 | if (ret) { | |
1213 | netdev_info(netdev, | |
1214 | "Addr change for Main VSI failed: %d\n", | |
1215 | ret); | |
1216 | return -EADDRNOTAVAIL; | |
1217 | } | |
1218 | ||
1219 | memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len); | |
1220 | } | |
1221 | ||
1222 | /* In order to be sure to not drop any packets, add the new address | |
1223 | * then delete the old one. | |
1224 | */ | |
1225 | f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false); | |
1226 | if (!f) | |
1227 | return -ENOMEM; | |
1228 | ||
1229 | i40e_sync_vsi_filters(vsi); | |
1230 | i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false); | |
1231 | i40e_sync_vsi_filters(vsi); | |
1232 | ||
1233 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | |
1234 | ||
1235 | return 0; | |
1236 | } | |
1237 | ||
1238 | /** | |
1239 | * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc | |
1240 | * @vsi: the VSI being setup | |
1241 | * @ctxt: VSI context structure | |
1242 | * @enabled_tc: Enabled TCs bitmap | |
1243 | * @is_add: True if called before Add VSI | |
1244 | * | |
1245 | * Setup VSI queue mapping for enabled traffic classes. | |
1246 | **/ | |
1247 | static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, | |
1248 | struct i40e_vsi_context *ctxt, | |
1249 | u8 enabled_tc, | |
1250 | bool is_add) | |
1251 | { | |
1252 | struct i40e_pf *pf = vsi->back; | |
1253 | u16 sections = 0; | |
1254 | u8 netdev_tc = 0; | |
1255 | u16 numtc = 0; | |
1256 | u16 qcount; | |
1257 | u8 offset; | |
1258 | u16 qmap; | |
1259 | int i; | |
1260 | ||
1261 | sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; | |
1262 | offset = 0; | |
1263 | ||
1264 | if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { | |
1265 | /* Find numtc from enabled TC bitmap */ | |
1266 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { | |
1267 | if (enabled_tc & (1 << i)) /* TC is enabled */ | |
1268 | numtc++; | |
1269 | } | |
1270 | if (!numtc) { | |
1271 | dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n"); | |
1272 | numtc = 1; | |
1273 | } | |
1274 | } else { | |
1275 | /* At least TC0 is enabled in the non-DCB case */ |
1276 | numtc = 1; | |
1277 | } | |
1278 | ||
1279 | vsi->tc_config.numtc = numtc; | |
1280 | vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; | |
1281 | ||
1282 | /* Setup queue offset/count for all TCs for given VSI */ | |
1283 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { | |
1284 | /* See if the given TC is enabled for the given VSI */ | |
1285 | if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */ | |
1286 | int pow, num_qps; | |
1287 | ||
1288 | vsi->tc_config.tc_info[i].qoffset = offset; | |
1289 | switch (vsi->type) { | |
1290 | case I40E_VSI_MAIN: | |
1291 | if (i == 0) | |
1292 | qcount = pf->rss_size; | |
1293 | else | |
1294 | qcount = pf->num_tc_qps; | |
1295 | vsi->tc_config.tc_info[i].qcount = qcount; | |
1296 | break; | |
1297 | case I40E_VSI_FDIR: | |
1298 | case I40E_VSI_SRIOV: | |
1299 | case I40E_VSI_VMDQ2: | |
1300 | default: | |
1301 | qcount = vsi->alloc_queue_pairs; | |
1302 | vsi->tc_config.tc_info[i].qcount = qcount; | |
1303 | WARN_ON(i != 0); | |
1304 | break; | |
1305 | } | |
1306 | ||
1307 | /* find the power-of-2 of the number of queue pairs */ | |
1308 | num_qps = vsi->tc_config.tc_info[i].qcount; | |
1309 | pow = 0; | |
1310 | while (num_qps && | |
1311 | ((1 << pow) < vsi->tc_config.tc_info[i].qcount)) { | |
1312 | pow++; | |
1313 | num_qps >>= 1; | |
1314 | } | |
1315 | ||
1316 | vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++; | |
1317 | qmap = | |
1318 | (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | | |
1319 | (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); | |
1320 | ||
1321 | offset += vsi->tc_config.tc_info[i].qcount; | |
1322 | } else { | |
1323 | /* TC is not enabled so set the offset to | |
1324 | * default queue and allocate one queue | |
1325 | * for the given TC. | |
1326 | */ | |
1327 | vsi->tc_config.tc_info[i].qoffset = 0; | |
1328 | vsi->tc_config.tc_info[i].qcount = 1; | |
1329 | vsi->tc_config.tc_info[i].netdev_tc = 0; | |
1330 | ||
1331 | qmap = 0; | |
1332 | } | |
1333 | ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); | |
1334 | } | |
1335 | ||
1336 | /* Set actual Tx/Rx queue pairs */ | |
1337 | vsi->num_queue_pairs = offset; | |
1338 | ||
1339 | /* Scheduler section valid can only be set for ADD VSI */ | |
1340 | if (is_add) { | |
1341 | sections |= I40E_AQ_VSI_PROP_SCHED_VALID; | |
1342 | ||
1343 | ctxt->info.up_enable_bits = enabled_tc; | |
1344 | } | |
1345 | if (vsi->type == I40E_VSI_SRIOV) { | |
1346 | ctxt->info.mapping_flags |= | |
1347 | cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG); | |
1348 | for (i = 0; i < vsi->num_queue_pairs; i++) | |
1349 | ctxt->info.queue_mapping[i] = | |
1350 | cpu_to_le16(vsi->base_queue + i); | |
1351 | } else { | |
1352 | ctxt->info.mapping_flags |= | |
1353 | cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); | |
1354 | ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); | |
1355 | } | |
1356 | ctxt->info.valid_sections |= cpu_to_le16(sections); | |
1357 | } | |
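As a worked example of the queue-map encoding above (values illustrative): with offset = 0 and qcount = 8 the loop leaves pow = 3, so qmap = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT), i.e. the TC starts at queue 0 and advertises 2^3 = 8 queue pairs. A non-power-of-two qcount such as 12 is rounded up (pow = 4), since the queue-number field in the map only carries a power-of-two exponent.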
1358 | ||
1359 | /** | |
1360 | * i40e_set_rx_mode - NDO callback to set the netdev filters | |
1361 | * @netdev: network interface device structure | |
1362 | **/ | |
1363 | static void i40e_set_rx_mode(struct net_device *netdev) | |
1364 | { | |
1365 | struct i40e_netdev_priv *np = netdev_priv(netdev); | |
1366 | struct i40e_mac_filter *f, *ftmp; | |
1367 | struct i40e_vsi *vsi = np->vsi; | |
1368 | struct netdev_hw_addr *uca; | |
1369 | struct netdev_hw_addr *mca; | |
1370 | struct netdev_hw_addr *ha; | |
1371 | ||
1372 | /* add addr if not already in the filter list */ | |
1373 | netdev_for_each_uc_addr(uca, netdev) { | |
1374 | if (!i40e_find_mac(vsi, uca->addr, false, true)) { | |
1375 | if (i40e_is_vsi_in_vlan(vsi)) | |
1376 | i40e_put_mac_in_vlan(vsi, uca->addr, | |
1377 | false, true); | |
1378 | else | |
1379 | i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY, | |
1380 | false, true); | |
1381 | } | |
1382 | } | |
1383 | ||
1384 | netdev_for_each_mc_addr(mca, netdev) { | |
1385 | if (!i40e_find_mac(vsi, mca->addr, false, true)) { | |
1386 | if (i40e_is_vsi_in_vlan(vsi)) | |
1387 | i40e_put_mac_in_vlan(vsi, mca->addr, | |
1388 | false, true); | |
1389 | else | |
1390 | i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY, | |
1391 | false, true); | |
1392 | } | |
1393 | } | |
1394 | ||
1395 | /* remove filter if not in netdev list */ | |
1396 | list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { | |
1397 | bool found = false; | |
1398 | ||
1399 | if (!f->is_netdev) | |
1400 | continue; | |
1401 | ||
1402 | if (is_multicast_ether_addr(f->macaddr)) { | |
1403 | netdev_for_each_mc_addr(mca, netdev) { | |
1404 | if (ether_addr_equal(mca->addr, f->macaddr)) { | |
1405 | found = true; | |
1406 | break; | |
1407 | } | |
1408 | } | |
1409 | } else { | |
1410 | netdev_for_each_uc_addr(uca, netdev) { | |
1411 | if (ether_addr_equal(uca->addr, f->macaddr)) { | |
1412 | found = true; | |
1413 | break; | |
1414 | } | |
1415 | } | |
1416 | ||
1417 | for_each_dev_addr(netdev, ha) { | |
1418 | if (ether_addr_equal(ha->addr, f->macaddr)) { | |
1419 | found = true; | |
1420 | break; | |
1421 | } | |
1422 | } | |
1423 | } | |
1424 | if (!found) | |
1425 | i40e_del_filter( | |
1426 | vsi, f->macaddr, I40E_VLAN_ANY, false, true); | |
1427 | } | |
1428 | ||
1429 | /* check for other flag changes */ | |
1430 | if (vsi->current_netdev_flags != vsi->netdev->flags) { | |
1431 | vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; | |
1432 | vsi->back->flags |= I40E_FLAG_FILTER_SYNC; | |
1433 | } | |
1434 | } | |
1435 | ||
1436 | /** | |
1437 | * i40e_sync_vsi_filters - Update the VSI filter list to the HW | |
1438 | * @vsi: ptr to the VSI | |
1439 | * | |
1440 | * Push any outstanding VSI filter changes through the AdminQ. | |
1441 | * | |
1442 | * Returns 0 or error value | |
1443 | **/ | |
1444 | int i40e_sync_vsi_filters(struct i40e_vsi *vsi) | |
1445 | { | |
1446 | struct i40e_mac_filter *f, *ftmp; | |
1447 | bool promisc_forced_on = false; | |
1448 | bool add_happened = false; | |
1449 | int filter_list_len = 0; | |
1450 | u32 changed_flags = 0; | |
dcae29be | 1451 | i40e_status aq_ret = 0; |
41c445ff JB |
1452 | struct i40e_pf *pf; |
1453 | int num_add = 0; | |
1454 | int num_del = 0; | |
1455 | u16 cmd_flags; | |
1456 | ||
1457 | /* empty, array-typed pointers; allocated with kcalloc below */ |
1458 | struct i40e_aqc_add_macvlan_element_data *add_list; | |
1459 | struct i40e_aqc_remove_macvlan_element_data *del_list; | |
1460 | ||
1461 | while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state)) | |
1462 | usleep_range(1000, 2000); | |
1463 | pf = vsi->back; | |
1464 | ||
1465 | if (vsi->netdev) { | |
1466 | changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; | |
1467 | vsi->current_netdev_flags = vsi->netdev->flags; | |
1468 | } | |
1469 | ||
1470 | if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { | |
1471 | vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; | |
1472 | ||
1473 | filter_list_len = pf->hw.aq.asq_buf_size / | |
1474 | sizeof(struct i40e_aqc_remove_macvlan_element_data); | |
1475 | del_list = kcalloc(filter_list_len, | |
1476 | sizeof(struct i40e_aqc_remove_macvlan_element_data), | |
1477 | GFP_KERNEL); | |
1478 | if (!del_list) | |
1479 | return -ENOMEM; | |
1480 | ||
1481 | list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { | |
1482 | if (!f->changed) | |
1483 | continue; | |
1484 | ||
1485 | if (f->counter != 0) | |
1486 | continue; | |
1487 | f->changed = false; | |
1488 | cmd_flags = 0; | |
1489 | ||
1490 | /* add to delete list */ | |
1491 | memcpy(del_list[num_del].mac_addr, | |
1492 | f->macaddr, ETH_ALEN); | |
1493 | del_list[num_del].vlan_tag = | |
1494 | cpu_to_le16((u16)(f->vlan == | |
1495 | I40E_VLAN_ANY ? 0 : f->vlan)); | |
1496 | ||
1497 | /* vlan0 as wild card to allow packets from all vlans */ | |
1498 | if (f->vlan == I40E_VLAN_ANY || | |
1499 | (vsi->netdev && !(vsi->netdev->features & | |
1500 | NETIF_F_HW_VLAN_CTAG_FILTER))) | |
1501 | cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; | |
1502 | cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; | |
1503 | del_list[num_del].flags = cmd_flags; | |
1504 | num_del++; | |
1505 | ||
1506 | /* unlink from filter list */ | |
1507 | list_del(&f->list); | |
1508 | kfree(f); | |
1509 | ||
1510 | /* flush a full buffer */ | |
1511 | if (num_del == filter_list_len) { | |
dcae29be | 1512 | aq_ret = i40e_aq_remove_macvlan(&pf->hw, |
41c445ff JB |
1513 | vsi->seid, del_list, num_del, |
1514 | NULL); | |
1515 | num_del = 0; | |
1516 | memset(del_list, 0, sizeof(*del_list)); | |
1517 | ||
dcae29be | 1518 | if (aq_ret) |
41c445ff JB |
1519 | dev_info(&pf->pdev->dev, |
1520 | "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n", | |
dcae29be | 1521 | aq_ret, |
41c445ff JB |
1522 | pf->hw.aq.asq_last_status); |
1523 | } | |
1524 | } | |
1525 | if (num_del) { | |
dcae29be | 1526 | aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, |
41c445ff JB |
1527 | del_list, num_del, NULL); |
1528 | num_del = 0; | |
1529 | ||
dcae29be | 1530 | if (aq_ret) |
41c445ff JB |
1531 | dev_info(&pf->pdev->dev, |
1532 | "ignoring delete macvlan error, err %d, aq_err %d\n", | |
dcae29be | 1533 | aq_ret, pf->hw.aq.asq_last_status); |
41c445ff JB |
1534 | } |
1535 | ||
1536 | kfree(del_list); | |
1537 | del_list = NULL; | |
1538 | ||
1539 | /* do all the adds now */ | |
1540 | filter_list_len = pf->hw.aq.asq_buf_size / | |
1541 | sizeof(struct i40e_aqc_add_macvlan_element_data); | |
1542 | add_list = kcalloc(filter_list_len, | |
1543 | sizeof(struct i40e_aqc_add_macvlan_element_data), | |
1544 | GFP_KERNEL); | |
1545 | if (!add_list) | |
1546 | return -ENOMEM; | |
1547 | ||
1548 | list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { | |
1549 | if (!f->changed) | |
1550 | continue; | |
1551 | ||
1552 | if (f->counter == 0) | |
1553 | continue; | |
1554 | f->changed = false; | |
1555 | add_happened = true; | |
1556 | cmd_flags = 0; | |
1557 | ||
1558 | /* add to add array */ | |
1559 | memcpy(add_list[num_add].mac_addr, | |
1560 | f->macaddr, ETH_ALEN); | |
1561 | add_list[num_add].vlan_tag = | |
1562 | cpu_to_le16( | |
1563 | (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan)); | |
1564 | add_list[num_add].queue_number = 0; | |
1565 | ||
1566 | cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; | |
1567 | ||
1568 | /* vlan0 as wild card to allow packets from all vlans */ | |
1569 | if (f->vlan == I40E_VLAN_ANY || (vsi->netdev && | |
1570 | !(vsi->netdev->features & | |
1571 | NETIF_F_HW_VLAN_CTAG_FILTER))) | |
1572 | cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; | |
1573 | add_list[num_add].flags = cpu_to_le16(cmd_flags); | |
1574 | num_add++; | |
1575 | ||
1576 | /* flush a full buffer */ | |
1577 | if (num_add == filter_list_len) { | |
dcae29be JB |
1578 | aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, |
1579 | add_list, num_add, | |
1580 | NULL); | |
41c445ff JB |
1581 | num_add = 0; |
1582 | ||
dcae29be | 1583 | if (aq_ret) |
41c445ff JB |
1584 | break; |
1585 | memset(add_list, 0, sizeof(*add_list)); | |
1586 | } | |
1587 | } | |
1588 | if (num_add) { | |
dcae29be JB |
1589 | aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, |
1590 | add_list, num_add, NULL); | |
41c445ff JB |
1591 | num_add = 0; |
1592 | } | |
1593 | kfree(add_list); | |
1594 | add_list = NULL; | |
1595 | ||
dcae29be | 1596 | if (add_happened && (!aq_ret)) { |
41c445ff | 1597 | /* do nothing */; |
dcae29be | 1598 | } else if (add_happened && (aq_ret)) { |
41c445ff JB |
1599 | dev_info(&pf->pdev->dev, |
1600 | "add filter failed, err %d, aq_err %d\n", | |
dcae29be | 1601 | aq_ret, pf->hw.aq.asq_last_status); |
41c445ff JB |
1602 | if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && |
1603 | !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, | |
1604 | &vsi->state)) { | |
1605 | promisc_forced_on = true; | |
1606 | set_bit(__I40E_FILTER_OVERFLOW_PROMISC, | |
1607 | &vsi->state); | |
1608 | dev_info(&pf->pdev->dev, "promiscuous mode forced on\n"); | |
1609 | } | |
1610 | } | |
1611 | } | |
1612 | ||
1613 | /* check for changes in promiscuous modes */ | |
1614 | if (changed_flags & IFF_ALLMULTI) { | |
1615 | bool cur_multipromisc; | |
1616 | cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); | |
dcae29be JB |
1617 | aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, |
1618 | vsi->seid, | |
1619 | cur_multipromisc, | |
1620 | NULL); | |
1621 | if (aq_ret) | |
41c445ff JB |
1622 | dev_info(&pf->pdev->dev, |
1623 | "set multi promisc failed, err %d, aq_err %d\n", | |
dcae29be | 1624 | aq_ret, pf->hw.aq.asq_last_status); |
41c445ff JB |
1625 | } |
1626 | if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { | |
1627 | bool cur_promisc; | |
1628 | cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || | |
1629 | test_bit(__I40E_FILTER_OVERFLOW_PROMISC, | |
1630 | &vsi->state)); | |
dcae29be JB |
1631 | aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw, |
1632 | vsi->seid, | |
1633 | cur_promisc, NULL); | |
1634 | if (aq_ret) | |
41c445ff JB |
1635 | dev_info(&pf->pdev->dev, |
1636 | "set uni promisc failed, err %d, aq_err %d\n", | |
dcae29be | 1637 | aq_ret, pf->hw.aq.asq_last_status); |
41c445ff JB |
1638 | } |
1639 | ||
1640 | clear_bit(__I40E_CONFIG_BUSY, &vsi->state); | |
1641 | return 0; | |
1642 | } | |
1643 | ||
1644 | /** | |
1645 | * i40e_sync_filters_subtask - Sync the VSI filter list with HW | |
1646 | * @pf: board private structure | |
1647 | **/ | |
1648 | static void i40e_sync_filters_subtask(struct i40e_pf *pf) | |
1649 | { | |
1650 | int v; | |
1651 | ||
1652 | if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC)) | |
1653 | return; | |
1654 | pf->flags &= ~I40E_FLAG_FILTER_SYNC; | |
1655 | ||
1656 | for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { | |
1657 | if (pf->vsi[v] && | |
1658 | (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) | |
1659 | i40e_sync_vsi_filters(pf->vsi[v]); | |
1660 | } | |
1661 | } | |
1662 | ||
1663 | /** | |
1664 | * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit | |
1665 | * @netdev: network interface device structure | |
1666 | * @new_mtu: new value for maximum frame size | |
1667 | * | |
1668 | * Returns 0 on success, negative on failure | |
1669 | **/ | |
1670 | static int i40e_change_mtu(struct net_device *netdev, int new_mtu) | |
1671 | { | |
1672 | struct i40e_netdev_priv *np = netdev_priv(netdev); | |
1673 | int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; | |
1674 | struct i40e_vsi *vsi = np->vsi; | |
1675 | ||
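/* e.g. the common MTU of 1500 gives max_frame = 1500 + ETH_HLEN (14)
 * + ETH_FCS_LEN (4) = 1518 bytes; VLAN headroom is accounted for
 * separately when the Rx buffers are sized.
 */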
1676 | /* MTU < 68 is an error and causes problems on some kernels */ | |
1677 | if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER)) | |
1678 | return -EINVAL; | |
1679 | ||
1680 | netdev_info(netdev, "changing MTU from %d to %d\n", | |
1681 | netdev->mtu, new_mtu); | |
1682 | netdev->mtu = new_mtu; | |
1683 | if (netif_running(netdev)) | |
1684 | i40e_vsi_reinit_locked(vsi); | |
1685 | ||
1686 | return 0; | |
1687 | } | |
1688 | ||
1689 | /** | |
1690 | * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI | |
1691 | * @vsi: the vsi being adjusted | |
1692 | **/ | |
1693 | void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) | |
1694 | { | |
1695 | struct i40e_vsi_context ctxt; | |
1696 | i40e_status ret; | |
1697 | ||
1698 | if ((vsi->info.valid_sections & | |
1699 | cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && | |
1700 | ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) | |
1701 | return; /* already enabled */ | |
1702 | ||
1703 | vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); | |
1704 | vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | | |
1705 | I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; | |
1706 | ||
1707 | ctxt.seid = vsi->seid; | |
1708 | memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); | |
1709 | ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); | |
1710 | if (ret) { | |
1711 | dev_info(&vsi->back->pdev->dev, | |
1712 | "%s: update vsi failed, aq_err=%d\n", | |
1713 | __func__, vsi->back->hw.aq.asq_last_status); | |
1714 | } | |
1715 | } | |
1716 | ||
1717 | /** | |
1718 | * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI | |
1719 | * @vsi: the vsi being adjusted | |
1720 | **/ | |
1721 | void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) | |
1722 | { | |
1723 | struct i40e_vsi_context ctxt; | |
1724 | i40e_status ret; | |
1725 | ||
1726 | if ((vsi->info.valid_sections & | |
1727 | cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && | |
1728 | ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) == | |
1729 | I40E_AQ_VSI_PVLAN_EMOD_MASK)) | |
1730 | return; /* already disabled */ | |
1731 | ||
1732 | vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); | |
1733 | vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | | |
1734 | I40E_AQ_VSI_PVLAN_EMOD_NOTHING; | |
1735 | ||
1736 | ctxt.seid = vsi->seid; | |
1737 | memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); | |
1738 | ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); | |
1739 | if (ret) { | |
1740 | dev_info(&vsi->back->pdev->dev, | |
1741 | "%s: update vsi failed, aq_err=%d\n", | |
1742 | __func__, vsi->back->hw.aq.asq_last_status); | |
1743 | } | |
1744 | } | |
1745 | ||
1746 | /** | |
1747 | * i40e_vlan_rx_register - Set up or shut down vlan offload | |
1748 | * @netdev: network interface to be adjusted | |
1749 | * @features: netdev features to test if VLAN offload is enabled or not | |
1750 | **/ | |
1751 | static void i40e_vlan_rx_register(struct net_device *netdev, u32 features) | |
1752 | { | |
1753 | struct i40e_netdev_priv *np = netdev_priv(netdev); | |
1754 | struct i40e_vsi *vsi = np->vsi; | |
1755 | ||
1756 | if (features & NETIF_F_HW_VLAN_CTAG_RX) | |
1757 | i40e_vlan_stripping_enable(vsi); | |
1758 | else | |
1759 | i40e_vlan_stripping_disable(vsi); | |
1760 | } | |
1761 | ||
1762 | /** | |
1763 | * i40e_vsi_add_vlan - Add vsi membership for given vlan | |
1764 | * @vsi: the vsi being configured | |
1765 | * @vid: vlan id to be added (0 = untagged only, -1 = any) | |
1766 | **/ | |
1767 | int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) | |
1768 | { | |
1769 | struct i40e_mac_filter *f, *add_f; | |
1770 | bool is_netdev, is_vf; | |
1771 | int ret; | |
1772 | ||
1773 | is_vf = (vsi->type == I40E_VSI_SRIOV); | |
1774 | is_netdev = !!(vsi->netdev); | |
1775 | ||
1776 | if (is_netdev) { | |
1777 | add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid, | |
1778 | is_vf, is_netdev); | |
1779 | if (!add_f) { | |
1780 | dev_info(&vsi->back->pdev->dev, | |
1781 | "Could not add vlan filter %d for %pM\n", | |
1782 | vid, vsi->netdev->dev_addr); | |
1783 | return -ENOMEM; | |
1784 | } | |
1785 | } | |
1786 | ||
1787 | list_for_each_entry(f, &vsi->mac_filter_list, list) { | |
1788 | add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev); | |
1789 | if (!add_f) { | |
1790 | dev_info(&vsi->back->pdev->dev, | |
1791 | "Could not add vlan filter %d for %pM\n", | |
1792 | vid, f->macaddr); | |
1793 | return -ENOMEM; | |
1794 | } | |
1795 | } | |
1796 | ||
1797 | ret = i40e_sync_vsi_filters(vsi); | |
1798 | if (ret) { | |
1799 | dev_info(&vsi->back->pdev->dev, | |
1800 | "Could not sync filters for vid %d\n", vid); | |
1801 | return ret; | |
1802 | } | |
1803 | ||
1804 | /* Now if we added a vlan tag, check whether it is the first one | |
1805 | * (i.e. a wildcard "-1" filter still exists) and if so replace the | |
1806 | * -1 filter with a vid 0 filter, so we now accept untagged and the | |
1807 | * specified tagged traffic (and no longer any tagged and untagged) | |
1808 | */ | |
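/* For example: starting from only the wildcard (-1) filter, adding
 * vid 100 leaves a vid 0 filter plus the vid 100 filter behind, so
 * only untagged frames and vid 100 frames are accepted.
 */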
1809 | if (vid > 0) { | |
1810 | if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr, | |
1811 | I40E_VLAN_ANY, | |
1812 | is_vf, is_netdev)) { | |
1813 | i40e_del_filter(vsi, vsi->netdev->dev_addr, | |
1814 | I40E_VLAN_ANY, is_vf, is_netdev); | |
1815 | add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0, | |
1816 | is_vf, is_netdev); | |
1817 | if (!add_f) { | |
1818 | dev_info(&vsi->back->pdev->dev, | |
1819 | "Could not add filter 0 for %pM\n", | |
1820 | vsi->netdev->dev_addr); | |
1821 | return -ENOMEM; | |
1822 | } | |
1823 | } | |
1824 | ||
1825 | list_for_each_entry(f, &vsi->mac_filter_list, list) { | |
1826 | if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY, | |
1827 | is_vf, is_netdev)) { | |
1828 | i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, | |
1829 | is_vf, is_netdev); | |
1830 | add_f = i40e_add_filter(vsi, f->macaddr, | |
1831 | 0, is_vf, is_netdev); | |
1832 | if (!add_f) { | |
1833 | dev_info(&vsi->back->pdev->dev, | |
1834 | "Could not add filter 0 for %pM\n", | |
1835 | f->macaddr); | |
1836 | return -ENOMEM; | |
1837 | } | |
1838 | } | |
1839 | } | |
1840 | ret = i40e_sync_vsi_filters(vsi); | |
1841 | } | |
1842 | ||
1843 | return ret; | |
1844 | } | |
1845 | ||
1846 | /** | |
1847 | * i40e_vsi_kill_vlan - Remove vsi membership for given vlan | |
1848 | * @vsi: the vsi being configured | |
1849 | * @vid: vlan id to be removed (0 = untagged only, -1 = any) | |
078b5876 JB |
1850 | * |
1851 | * Return: 0 on success or negative otherwise | |
41c445ff JB |
1852 | **/ |
1853 | int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) | |
1854 | { | |
1855 | struct net_device *netdev = vsi->netdev; | |
1856 | struct i40e_mac_filter *f, *add_f; | |
1857 | bool is_vf, is_netdev; | |
1858 | int filter_count = 0; | |
1859 | int ret; | |
1860 | ||
1861 | is_vf = (vsi->type == I40E_VSI_SRIOV); | |
1862 | is_netdev = !!(netdev); | |
1863 | ||
1864 | if (is_netdev) | |
1865 | i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev); | |
1866 | ||
1867 | list_for_each_entry(f, &vsi->mac_filter_list, list) | |
1868 | i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev); | |
1869 | ||
1870 | ret = i40e_sync_vsi_filters(vsi); | |
1871 | if (ret) { | |
1872 | dev_info(&vsi->back->pdev->dev, "Could not sync filters\n"); | |
1873 | return ret; | |
1874 | } | |
1875 | ||
1876 | /* go through all the filters for this VSI and if there is only | |
1877 | * vid == 0 it means there are no other filters, so vid 0 must | |
1878 | * be replaced with -1. This signifies that we should from now | |
1879 | * on accept any traffic (with any tag present, or untagged) | |
1880 | */ | |
1881 | list_for_each_entry(f, &vsi->mac_filter_list, list) { | |
1882 | if (is_netdev) { | |
1883 | if (f->vlan && | |
1884 | ether_addr_equal(netdev->dev_addr, f->macaddr)) | |
1885 | filter_count++; | |
1886 | } | |
1887 | ||
1888 | if (f->vlan) | |
1889 | filter_count++; | |
1890 | } | |
1891 | ||
1892 | if (!filter_count && is_netdev) { | |
1893 | i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev); | |
1894 | f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, | |
1895 | is_vf, is_netdev); | |
1896 | if (!f) { | |
1897 | dev_info(&vsi->back->pdev->dev, | |
1898 | "Could not add filter %d for %pM\n", | |
1899 | I40E_VLAN_ANY, netdev->dev_addr); | |
1900 | return -ENOMEM; | |
1901 | } | |
1902 | } | |
1903 | ||
1904 | if (!filter_count) { | |
1905 | list_for_each_entry(f, &vsi->mac_filter_list, list) { | |
1906 | i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev); | |
1907 | add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY, | |
1908 | is_vf, is_netdev); | |
1909 | if (!add_f) { | |
1910 | dev_info(&vsi->back->pdev->dev, | |
1911 | "Could not add filter %d for %pM\n", | |
1912 | I40E_VLAN_ANY, f->macaddr); | |
1913 | return -ENOMEM; | |
1914 | } | |
1915 | } | |
1916 | } | |
1917 | ||
1918 | return i40e_sync_vsi_filters(vsi); | |
1919 | } | |
1920 | ||
1921 | /** | |
1922 | * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload | |
1923 | * @netdev: network interface to be adjusted | |
1924 | * @vid: vlan id to be added | |
078b5876 JB |
1925 | * |
1926 | * net_device_ops implementation for adding vlan ids | |
41c445ff JB |
1927 | **/ |
1928 | static int i40e_vlan_rx_add_vid(struct net_device *netdev, | |
1929 | __always_unused __be16 proto, u16 vid) | |
1930 | { | |
1931 | struct i40e_netdev_priv *np = netdev_priv(netdev); | |
1932 | struct i40e_vsi *vsi = np->vsi; | |
078b5876 | 1933 | int ret = 0; |
41c445ff JB |
1934 | |
1935 | if (vid > 4095) | |
078b5876 JB |
1936 | return -EINVAL; |
1937 | ||
1938 | netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); | |
41c445ff | 1939 | |
41c445ff JB |
1940 | /* If the network stack called us with vid = 0, we should |
1941 | * indicate to i40e_vsi_add_vlan() that we want to receive | |
1942 | * any traffic (i.e. with any vlan tag, or untagged) | |
1943 | */ | |
1944 | ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY); | |
1945 | ||
078b5876 JB |
1946 | if (!ret && (vid < VLAN_N_VID)) |
1947 | set_bit(vid, vsi->active_vlans); | |
41c445ff | 1948 | |
078b5876 | 1949 | return ret; |
41c445ff JB |
1950 | } |
1951 | ||
1952 | /** | |
1953 | * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload | |
1954 | * @netdev: network interface to be adjusted | |
1955 | * @vid: vlan id to be removed | |
078b5876 JB |
1956 | * |
1957 | * net_device_ops implementation for removing vlan ids | |
41c445ff JB |
1958 | **/ |
1959 | static int i40e_vlan_rx_kill_vid(struct net_device *netdev, | |
1960 | __always_unused __be16 proto, u16 vid) | |
1961 | { | |
1962 | struct i40e_netdev_priv *np = netdev_priv(netdev); | |
1963 | struct i40e_vsi *vsi = np->vsi; | |
1964 | ||
078b5876 JB |
1965 | netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid); |
1966 | ||
41c445ff JB |
1967 | /* return code is ignored as there is nothing a user |
1968 | * can do about failure to remove and a log message was | |
078b5876 | 1969 | * already printed from the other function |
41c445ff JB |
1970 | */ |
1971 | i40e_vsi_kill_vlan(vsi, vid); | |
1972 | ||
1973 | clear_bit(vid, vsi->active_vlans); | |
078b5876 | 1974 | |
41c445ff JB |
1975 | return 0; |
1976 | } | |
1977 | ||
1978 | /** | |
1979 | * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up | |
1980 | * @vsi: the vsi being brought back up | |
1981 | **/ | |
1982 | static void i40e_restore_vlan(struct i40e_vsi *vsi) | |
1983 | { | |
1984 | u16 vid; | |
1985 | ||
1986 | if (!vsi->netdev) | |
1987 | return; | |
1988 | ||
1989 | i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features); | |
1990 | ||
1991 | for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) | |
1992 | i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), | |
1993 | vid); | |
1994 | } | |
1995 | ||
1996 | /** | |
1997 | * i40e_vsi_add_pvid - Add pvid for the VSI | |
1998 | * @vsi: the vsi being adjusted | |
1999 | * @vid: the vlan id to set as a PVID | |
2000 | **/ | |
dcae29be | 2001 | int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) |
41c445ff JB |
2002 | { |
2003 | struct i40e_vsi_context ctxt; | |
dcae29be | 2004 | i40e_status aq_ret; |
41c445ff JB |
2005 | |
2006 | vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); | |
2007 | vsi->info.pvid = cpu_to_le16(vid); | |
2008 | vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID; | |
2009 | vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED; | |
2010 | ||
2011 | ctxt.seid = vsi->seid; | |
2012 | memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); | |
dcae29be JB |
2013 | aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); |
2014 | if (aq_ret) { | |
41c445ff JB |
2015 | dev_info(&vsi->back->pdev->dev, |
2016 | "%s: update vsi failed, aq_err=%d\n", | |
2017 | __func__, vsi->back->hw.aq.asq_last_status); | |
dcae29be | 2018 | return -ENOENT; |
41c445ff JB |
2019 | } |
2020 | ||
dcae29be | 2021 | return 0; |
41c445ff JB |
2022 | } |
2023 | ||
2024 | /** | |
2025 | * i40e_vsi_remove_pvid - Remove the pvid from the VSI | |
2026 | * @vsi: the vsi being adjusted | |
2027 | * | |
2028 | * Just use the vlan_rx_register() service to put it back to normal | |
2029 | **/ | |
2030 | void i40e_vsi_remove_pvid(struct i40e_vsi *vsi) | |
2031 | { | |
2032 | vsi->info.pvid = 0; | |
2033 | i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features); | |
2034 | } | |
2035 | ||
2036 | /** | |
2037 | * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources | |
2038 | * @vsi: ptr to the VSI | |
2039 | * | |
2040 | * If this function returns with an error, then it's possible one or | |
2041 | * more of the rings is populated (while the rest are not). It is the | |
2042 | * caller's duty to clean up those orphaned rings. | |
2043 | * | |
2044 | * Return 0 on success, negative on failure | |
2045 | **/ | |
2046 | static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi) | |
2047 | { | |
2048 | int i, err = 0; | |
2049 | ||
2050 | for (i = 0; i < vsi->num_queue_pairs && !err; i++) | |
9f65e15b | 2051 | err = i40e_setup_tx_descriptors(vsi->tx_rings[i]); |
41c445ff JB |
2052 | |
2053 | return err; | |
2054 | } | |
2055 | ||
2056 | /** | |
2057 | * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues | |
2058 | * @vsi: ptr to the VSI | |
2059 | * | |
2060 | * Free VSI's transmit software resources | |
2061 | **/ | |
2062 | static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi) | |
2063 | { | |
2064 | int i; | |
2065 | ||
2066 | for (i = 0; i < vsi->num_queue_pairs; i++) | |
9f65e15b AD |
2067 | if (vsi->tx_rings[i]->desc) |
2068 | i40e_free_tx_resources(vsi->tx_rings[i]); | |
41c445ff JB |
2069 | } |
2070 | ||
2071 | /** | |
2072 | * i40e_vsi_setup_rx_resources - Allocate VSI Rx queue resources | |
2073 | * @vsi: ptr to the VSI | |
2074 | * | |
2075 | * If this function returns with an error, then it's possible one or | |
2076 | * more of the rings is populated (while the rest are not). It is the | |
2077 | * caller's duty to clean up those orphaned rings. | |
2078 | * | |
2079 | * Return 0 on success, negative on failure | |
2080 | **/ | |
2081 | static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi) | |
2082 | { | |
2083 | int i, err = 0; | |
2084 | ||
2085 | for (i = 0; i < vsi->num_queue_pairs && !err; i++) | |
9f65e15b | 2086 | err = i40e_setup_rx_descriptors(vsi->rx_rings[i]); |
41c445ff JB |
2087 | return err; |
2088 | } | |
2089 | ||
2090 | /** | |
2091 | * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues | |
2092 | * @vsi: ptr to the VSI | |
2093 | * | |
2094 | * Free all receive software resources | |
2095 | **/ | |
2096 | static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) | |
2097 | { | |
2098 | int i; | |
2099 | ||
2100 | for (i = 0; i < vsi->num_queue_pairs; i++) | |
9f65e15b AD |
2101 | if (vsi->rx_rings[i]->desc) |
2102 | i40e_free_rx_resources(vsi->rx_rings[i]); | |
41c445ff JB |
2103 | } |
2104 | ||
2105 | /** | |
2106 | * i40e_configure_tx_ring - Configure a transmit ring context | |
2107 | * @ring: The Tx ring to configure | |
2108 | * | |
2109 | * Configure the Tx descriptor ring in the HMC context. | |
2110 | **/ | |
2111 | static int i40e_configure_tx_ring(struct i40e_ring *ring) | |
2112 | { | |
2113 | struct i40e_vsi *vsi = ring->vsi; | |
2114 | u16 pf_q = vsi->base_queue + ring->queue_index; | |
2115 | struct i40e_hw *hw = &vsi->back->hw; | |
2116 | struct i40e_hmc_obj_txq tx_ctx; | |
2117 | i40e_status err = 0; | |
2118 | u32 qtx_ctl = 0; | |
2119 | ||
2120 | /* some ATR related tx ring init */ | |
2121 | if (vsi->back->flags & I40E_FLAG_FDIR_ATR_ENABLED) { | |
2122 | ring->atr_sample_rate = vsi->back->atr_sample_rate; | |
2123 | ring->atr_count = 0; | |
2124 | } else { | |
2125 | ring->atr_sample_rate = 0; | |
2126 | } | |
2127 | ||
2128 | /* initialize XPS */ | |
2129 | if (ring->q_vector && ring->netdev && | |
2130 | !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) | |
2131 | netif_set_xps_queue(ring->netdev, | |
2132 | &ring->q_vector->affinity_mask, | |
2133 | ring->queue_index); | |
2134 | ||
2135 | /* clear the context structure first */ | |
2136 | memset(&tx_ctx, 0, sizeof(tx_ctx)); | |
2137 | ||
2138 | tx_ctx.new_context = 1; | |
2139 | tx_ctx.base = (ring->dma / 128); | |
2140 | tx_ctx.qlen = ring->count; | |
2141 | tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED | | |
2142 | I40E_FLAG_FDIR_ATR_ENABLED)); | |
2143 | ||
2144 | /* As part of VSI creation/update, FW allocates certain | |
2145 | * Tx arbitration queue sets for each TC enabled for | |
2146 | * the VSI. The FW returns the handles to these queue | |
2147 | * sets as part of the response buffer to Add VSI, | |
2148 | * Update VSI, etc. AQ commands. It is expected that | |
2149 | * these queue set handles be associated with the Tx | |
2150 | * queues by the driver as part of the TX queue context | |
2151 | * initialization. This has to be done regardless of | |
2152 | * DCB as by default everything is mapped to TC0. | |
2153 | */ | |
2154 | tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]); | |
2155 | tx_ctx.rdylist_act = 0; | |
2156 | ||
2157 | /* clear the context in the HMC */ | |
2158 | err = i40e_clear_lan_tx_queue_context(hw, pf_q); | |
2159 | if (err) { | |
2160 | dev_info(&vsi->back->pdev->dev, | |
2161 | "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n", | |
2162 | ring->queue_index, pf_q, err); | |
2163 | return -ENOMEM; | |
2164 | } | |
2165 | ||
2166 | /* set the context in the HMC */ | |
2167 | err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx); | |
2168 | if (err) { | |
2169 | dev_info(&vsi->back->pdev->dev, | |
2170 | "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n", | |
2171 | ring->queue_index, pf_q, err); | |
2172 | return -ENOMEM; | |
2173 | } | |
2174 | ||
2175 | /* Now associate this queue with this PCI function */ | |
2176 | qtx_ctl = I40E_QTX_CTL_PF_QUEUE; | |
13fd9774 SN |
2177 | qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & |
2178 | I40E_QTX_CTL_PF_INDX_MASK); | |
41c445ff JB |
2179 | wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); |
2180 | i40e_flush(hw); | |
2181 | ||
2182 | clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state); | |
2183 | ||
2184 | /* cache tail off for easier writes later */ | |
2185 | ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q); | |
2186 | ||
2187 | return 0; | |
2188 | } | |
2189 | ||
2190 | /** | |
2191 | * i40e_configure_rx_ring - Configure a receive ring context | |
2192 | * @ring: The Rx ring to configure | |
2193 | * | |
2194 | * Configure the Rx descriptor ring in the HMC context. | |
2195 | **/ | |
2196 | static int i40e_configure_rx_ring(struct i40e_ring *ring) | |
2197 | { | |
2198 | struct i40e_vsi *vsi = ring->vsi; | |
2199 | u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; | |
2200 | u16 pf_q = vsi->base_queue + ring->queue_index; | |
2201 | struct i40e_hw *hw = &vsi->back->hw; | |
2202 | struct i40e_hmc_obj_rxq rx_ctx; | |
2203 | i40e_status err = 0; | |
2204 | ||
2205 | ring->state = 0; | |
2206 | ||
2207 | /* clear the context structure first */ | |
2208 | memset(&rx_ctx, 0, sizeof(rx_ctx)); | |
2209 | ||
2210 | ring->rx_buf_len = vsi->rx_buf_len; | |
2211 | ring->rx_hdr_len = vsi->rx_hdr_len; | |
2212 | ||
2213 | rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; | |
2214 | rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT; | |
2215 | ||
2216 | rx_ctx.base = (ring->dma / 128); | |
2217 | rx_ctx.qlen = ring->count; | |
2218 | ||
2219 | if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) { | |
2220 | set_ring_16byte_desc_enabled(ring); | |
2221 | rx_ctx.dsize = 0; | |
2222 | } else { | |
2223 | rx_ctx.dsize = 1; | |
2224 | } | |
2225 | ||
2226 | rx_ctx.dtype = vsi->dtype; | |
2227 | if (vsi->dtype) { | |
2228 | set_ring_ps_enabled(ring); | |
2229 | rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | | |
2230 | I40E_RX_SPLIT_IP | | |
2231 | I40E_RX_SPLIT_TCP_UDP | | |
2232 | I40E_RX_SPLIT_SCTP; | |
2233 | } else { | |
2234 | rx_ctx.hsplit_0 = 0; | |
2235 | } | |
2236 | ||
2237 | rx_ctx.rxmax = min_t(u16, vsi->max_frame, | |
2238 | (chain_len * ring->rx_buf_len)); | |
2239 | rx_ctx.tphrdesc_ena = 1; | |
2240 | rx_ctx.tphwdesc_ena = 1; | |
2241 | rx_ctx.tphdata_ena = 1; | |
2242 | rx_ctx.tphhead_ena = 1; | |
2243 | rx_ctx.lrxqthresh = 2; | |
2244 | rx_ctx.crcstrip = 1; | |
2245 | rx_ctx.l2tsel = 1; | |
2246 | rx_ctx.showiv = 1; | |
2247 | ||
2248 | /* clear the context in the HMC */ | |
2249 | err = i40e_clear_lan_rx_queue_context(hw, pf_q); | |
2250 | if (err) { | |
2251 | dev_info(&vsi->back->pdev->dev, | |
2252 | "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", | |
2253 | ring->queue_index, pf_q, err); | |
2254 | return -ENOMEM; | |
2255 | } | |
2256 | ||
2257 | /* set the context in the HMC */ | |
2258 | err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx); | |
2259 | if (err) { | |
2260 | dev_info(&vsi->back->pdev->dev, | |
2261 | "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", | |
2262 | ring->queue_index, pf_q, err); | |
2263 | return -ENOMEM; | |
2264 | } | |
2265 | ||
2266 | /* cache tail for quicker writes, and clear the reg before use */ | |
2267 | ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); | |
2268 | writel(0, ring->tail); | |
2269 | ||
2270 | i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); | |
2271 | ||
2272 | return 0; | |
2273 | } | |
2274 | ||
2275 | /** | |
2276 | * i40e_vsi_configure_tx - Configure the VSI for Tx | |
2277 | * @vsi: VSI structure describing this set of rings and resources | |
2278 | * | |
2279 | * Configure the Tx VSI for operation. | |
2280 | **/ | |
2281 | static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) | |
2282 | { | |
2283 | int err = 0; | |
2284 | u16 i; | |
2285 | ||
9f65e15b AD |
2286 | for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) |
2287 | err = i40e_configure_tx_ring(vsi->tx_rings[i]); | |
41c445ff JB |
2288 | |
2289 | return err; | |
2290 | } | |
2291 | ||
2292 | /** | |
2293 | * i40e_vsi_configure_rx - Configure the VSI for Rx | |
2294 | * @vsi: the VSI being configured | |
2295 | * | |
2296 | * Configure the Rx VSI for operation. | |
2297 | **/ | |
2298 | static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) | |
2299 | { | |
2300 | int err = 0; | |
2301 | u16 i; | |
2302 | ||
2303 | if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN)) | |
2304 | vsi->max_frame = vsi->netdev->mtu + ETH_HLEN | |
2305 | + ETH_FCS_LEN + VLAN_HLEN; | |
2306 | else | |
2307 | vsi->max_frame = I40E_RXBUFFER_2048; | |
2308 | ||
2309 | /* figure out correct receive buffer length */ | |
2310 | switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED | | |
2311 | I40E_FLAG_RX_PS_ENABLED)) { | |
2312 | case I40E_FLAG_RX_1BUF_ENABLED: | |
2313 | vsi->rx_hdr_len = 0; | |
2314 | vsi->rx_buf_len = vsi->max_frame; | |
2315 | vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; | |
2316 | break; | |
2317 | case I40E_FLAG_RX_PS_ENABLED: | |
2318 | vsi->rx_hdr_len = I40E_RX_HDR_SIZE; | |
2319 | vsi->rx_buf_len = I40E_RXBUFFER_2048; | |
2320 | vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT; | |
2321 | break; | |
2322 | default: | |
2323 | vsi->rx_hdr_len = I40E_RX_HDR_SIZE; | |
2324 | vsi->rx_buf_len = I40E_RXBUFFER_2048; | |
2325 | vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS; | |
2326 | break; | |
2327 | } | |
2328 | ||
2329 | /* round up for the chip's needs */ | |
2330 | vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len, | |
2331 | (1 << I40E_RXQ_CTX_HBUFF_SHIFT)); | |
2332 | vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, | |
2333 | (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); | |
2334 | ||
2335 | /* set up individual rings */ | |
2336 | for (i = 0; i < vsi->num_queue_pairs && !err; i++) | |
9f65e15b | 2337 | err = i40e_configure_rx_ring(vsi->rx_rings[i]); |
41c445ff JB |
2338 | |
2339 | return err; | |
2340 | } | |
2341 | ||
2342 | /** | |
2343 | * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC | |
2344 | * @vsi: ptr to the VSI | |
2345 | **/ | |
2346 | static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) | |
2347 | { | |
2348 | u16 qoffset, qcount; | |
2349 | int i, n; | |
2350 | ||
2351 | if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) | |
2352 | return; | |
2353 | ||
2354 | for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { | |
2355 | if (!(vsi->tc_config.enabled_tc & (1 << n))) | |
2356 | continue; | |
2357 | ||
2358 | qoffset = vsi->tc_config.tc_info[n].qoffset; | |
2359 | qcount = vsi->tc_config.tc_info[n].qcount; | |
2360 | for (i = qoffset; i < (qoffset + qcount); i++) { | |
9f65e15b AD |
2361 | struct i40e_ring *rx_ring = vsi->rx_rings[i]; |
2362 | struct i40e_ring *tx_ring = vsi->tx_rings[i]; | |
41c445ff JB |
2363 | rx_ring->dcb_tc = n; |
2364 | tx_ring->dcb_tc = n; | |
2365 | } | |
2366 | } | |
2367 | } | |
2368 | ||
2369 | /** | |
2370 | * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI | |
2371 | * @vsi: ptr to the VSI | |
2372 | **/ | |
2373 | static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) | |
2374 | { | |
2375 | if (vsi->netdev) | |
2376 | i40e_set_rx_mode(vsi->netdev); | |
2377 | } | |
2378 | ||
2379 | /** | |
2380 | * i40e_vsi_configure - Set up the VSI for action | |
2381 | * @vsi: the VSI being configured | |
2382 | **/ | |
2383 | static int i40e_vsi_configure(struct i40e_vsi *vsi) | |
2384 | { | |
2385 | int err; | |
2386 | ||
2387 | i40e_set_vsi_rx_mode(vsi); | |
2388 | i40e_restore_vlan(vsi); | |
2389 | i40e_vsi_config_dcb_rings(vsi); | |
2390 | err = i40e_vsi_configure_tx(vsi); | |
2391 | if (!err) | |
2392 | err = i40e_vsi_configure_rx(vsi); | |
2393 | ||
2394 | return err; | |
2395 | } | |
2396 | ||
2397 | /** | |
2398 | * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW | |
2399 | * @vsi: the VSI being configured | |
2400 | **/ | |
2401 | static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) | |
2402 | { | |
2403 | struct i40e_pf *pf = vsi->back; | |
2404 | struct i40e_q_vector *q_vector; | |
2405 | struct i40e_hw *hw = &pf->hw; | |
2406 | u16 vector; | |
2407 | int i, q; | |
2408 | u32 val; | |
2409 | u32 qp; | |
2410 | ||
2411 | /* The interrupt indexing is offset by 1 in the PFINT_ITRn | |
2412 | * and PFINT_LNKLSTn registers, e.g.: | |
2413 | * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts) | |
2414 | */ | |
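/* Each vector's PFINT_LNKLSTN entry points at its first Rx queue; the
 * QINT_RQCTL/QINT_TQCTL NEXTQ fields then chain Rx -> Tx -> next Rx,
 * and the final Tx entry is terminated with I40E_QUEUE_END_OF_LIST.
 */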
2415 | qp = vsi->base_queue; | |
2416 | vector = vsi->base_vector; | |
493fb300 AD |
2417 | for (i = 0; i < vsi->num_q_vectors; i++, vector++) { |
2418 | q_vector = vsi->q_vectors[i]; | |
41c445ff JB |
2419 | q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); |
2420 | q_vector->rx.latency_range = I40E_LOW_LATENCY; | |
2421 | wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), | |
2422 | q_vector->rx.itr); | |
2423 | q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); | |
2424 | q_vector->tx.latency_range = I40E_LOW_LATENCY; | |
2425 | wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), | |
2426 | q_vector->tx.itr); | |
2427 | ||
2428 | /* Linked list for the queuepairs assigned to this vector */ | |
2429 | wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); | |
2430 | for (q = 0; q < q_vector->num_ringpairs; q++) { | |
2431 | val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | | |
2432 | (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | | |
2433 | (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | | |
2434 | (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)| | |
2435 | (I40E_QUEUE_TYPE_TX | |
2436 | << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); | |
2437 | ||
2438 | wr32(hw, I40E_QINT_RQCTL(qp), val); | |
2439 | ||
2440 | val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | | |
2441 | (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | | |
2442 | (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | | |
2443 | ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)| | |
2444 | (I40E_QUEUE_TYPE_RX | |
2445 | << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); | |
2446 | ||
2447 | /* Terminate the linked list */ | |
2448 | if (q == (q_vector->num_ringpairs - 1)) | |
2449 | val |= (I40E_QUEUE_END_OF_LIST | |
2450 | << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); | |
2451 | ||
2452 | wr32(hw, I40E_QINT_TQCTL(qp), val); | |
2453 | qp++; | |
2454 | } | |
2455 | } | |
2456 | ||
2457 | i40e_flush(hw); | |
2458 | } | |
2459 | ||
2460 | /** | |
2461 | * i40e_enable_misc_int_causes - enable the non-queue interrupts | |
2462 | * @hw: ptr to the hardware info | |
2463 | **/ | |
2464 | static void i40e_enable_misc_int_causes(struct i40e_hw *hw) | |
2465 | { | |
2466 | u32 val; | |
2467 | ||
2468 | /* clear things first */ | |
2469 | wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ | |
2470 | rd32(hw, I40E_PFINT_ICR0); /* read to clear */ | |
2471 | ||
2472 | val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | | |
2473 | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | | |
2474 | I40E_PFINT_ICR0_ENA_GRST_MASK | | |
2475 | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | | |
2476 | I40E_PFINT_ICR0_ENA_GPIO_MASK | | |
2477 | I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | | |
2478 | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | | |
2479 | I40E_PFINT_ICR0_ENA_VFLR_MASK | | |
2480 | I40E_PFINT_ICR0_ENA_ADMINQ_MASK; | |
2481 | ||
2482 | wr32(hw, I40E_PFINT_ICR0_ENA, val); | |
2483 | ||
2484 | /* SW_ITR_IDX = 0, but don't change INTENA */ | |
2485 | wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK | | |
2486 | I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK); | |
2487 | ||
2488 | /* OTHER_ITR_IDX = 0 */ | |
2489 | wr32(hw, I40E_PFINT_STAT_CTL0, 0); | |
2490 | } | |
2491 | ||
2492 | /** | |
2493 | * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW | |
2494 | * @vsi: the VSI being configured | |
2495 | **/ | |
2496 | static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) | |
2497 | { | |
493fb300 | 2498 | struct i40e_q_vector *q_vector = vsi->q_vectors[0]; |
41c445ff JB |
2499 | struct i40e_pf *pf = vsi->back; |
2500 | struct i40e_hw *hw = &pf->hw; | |
2501 | u32 val; | |
2502 | ||
2503 | /* set the ITR configuration */ | |
2504 | q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); | |
2505 | q_vector->rx.latency_range = I40E_LOW_LATENCY; | |
2506 | wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); | |
2507 | q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); | |
2508 | q_vector->tx.latency_range = I40E_LOW_LATENCY; | |
2509 | wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr); | |
2510 | ||
2511 | i40e_enable_misc_int_causes(hw); | |
2512 | ||
2513 | /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ | |
2514 | wr32(hw, I40E_PFINT_LNKLST0, 0); | |
2515 | ||
2516 | /* Associate the queue pair to the vector and enable the q int */ | |
2517 | val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | | |
2518 | (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | | |
2519 | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); | |
2520 | ||
2521 | wr32(hw, I40E_QINT_RQCTL(0), val); | |
2522 | ||
2523 | val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | | |
2524 | (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | | |
2525 | (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); | |
2526 | ||
2527 | wr32(hw, I40E_QINT_TQCTL(0), val); | |
2528 | i40e_flush(hw); | |
2529 | } | |
2530 | ||
2531 | /** | |
2532 | * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 | |
2533 | * @pf: board private structure | |
2534 | **/ | |
116a57d4 | 2535 | void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) |
41c445ff JB |
2536 | { |
2537 | struct i40e_hw *hw = &pf->hw; | |
2538 | u32 val; | |
2539 | ||
2540 | val = I40E_PFINT_DYN_CTL0_INTENA_MASK | | |
2541 | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | | |
2542 | (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); | |
2543 | ||
2544 | wr32(hw, I40E_PFINT_DYN_CTL0, val); | |
2545 | i40e_flush(hw); | |
2546 | } | |
2547 | ||
2548 | /** | |
2549 | * i40e_irq_dynamic_enable - Enable default interrupt generation settings | |
2550 | * @vsi: pointer to a vsi | |
2551 | * @vector: the hardware interrupt vector to enable | |
2552 | **/ | |
2553 | void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) | |
2554 | { | |
2555 | struct i40e_pf *pf = vsi->back; | |
2556 | struct i40e_hw *hw = &pf->hw; | |
2557 | u32 val; | |
2558 | ||
2559 | val = I40E_PFINT_DYN_CTLN_INTENA_MASK | | |
2560 | I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | | |
2561 | (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); | |
2562 | wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); | |
1022cb6c | 2563 | /* skip the flush */ |
41c445ff JB |
2564 | } |
2565 | ||
2566 | /** | |
2567 | * i40e_msix_clean_rings - MSIX mode Interrupt Handler | |
2568 | * @irq: interrupt number | |
2569 | * @data: pointer to a q_vector | |
2570 | **/ | |
2571 | static irqreturn_t i40e_msix_clean_rings(int irq, void *data) | |
2572 | { | |
2573 | struct i40e_q_vector *q_vector = data; | |
2574 | ||
cd0b6fa6 | 2575 | if (!q_vector->tx.ring && !q_vector->rx.ring) |
41c445ff JB |
2576 | return IRQ_HANDLED; |
2577 | ||
2578 | napi_schedule(&q_vector->napi); | |
2579 | ||
2580 | return IRQ_HANDLED; | |
2581 | } | |
2582 | ||
2583 | /** | |
2584 | * i40e_fdir_clean_rings - Interrupt Handler for FDIR rings | |
2585 | * @irq: interrupt number | |
2586 | * @data: pointer to a q_vector | |
2587 | **/ | |
2588 | static irqreturn_t i40e_fdir_clean_rings(int irq, void *data) | |
2589 | { | |
2590 | struct i40e_q_vector *q_vector = data; | |
2591 | ||
cd0b6fa6 | 2592 | if (!q_vector->tx.ring && !q_vector->rx.ring) |
41c445ff JB |
2593 | return IRQ_HANDLED; |
2594 | ||
2595 | pr_info("fdir ring cleaning needed\n"); | |
2596 | ||
2597 | return IRQ_HANDLED; | |
2598 | } | |
2599 | ||
2600 | /** | |
2601 | * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts | |
2602 | * @vsi: the VSI being configured | |
2603 | * @basename: name for the vector | |
2604 | * | |
2605 | * Requests the per-vector MSI-X interrupts from the kernel. | |
2606 | **/ | |
2607 | static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) | |
2608 | { | |
2609 | int q_vectors = vsi->num_q_vectors; | |
2610 | struct i40e_pf *pf = vsi->back; | |
2611 | int base = vsi->base_vector; | |
2612 | int rx_int_idx = 0; | |
2613 | int tx_int_idx = 0; | |
2614 | int vector, err; | |
2615 | ||
2616 | for (vector = 0; vector < q_vectors; vector++) { | |
493fb300 | 2617 | struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; |
41c445ff | 2618 | |
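/* build a descriptive IRQ name; a vector serving both a Tx and an Rx
 * ring ends up as e.g. "<basename>-TxRx-0" (illustrative)
 */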
cd0b6fa6 | 2619 | if (q_vector->tx.ring && q_vector->rx.ring) { |
41c445ff JB |
2620 | snprintf(q_vector->name, sizeof(q_vector->name) - 1, |
2621 | "%s-%s-%d", basename, "TxRx", rx_int_idx++); | |
2622 | tx_int_idx++; | |
cd0b6fa6 | 2623 | } else if (q_vector->rx.ring) { |
41c445ff JB |
2624 | snprintf(q_vector->name, sizeof(q_vector->name) - 1, |
2625 | "%s-%s-%d", basename, "rx", rx_int_idx++); | |
cd0b6fa6 | 2626 | } else if (q_vector->tx.ring) { |
41c445ff JB |
2627 | snprintf(q_vector->name, sizeof(q_vector->name) - 1, |
2628 | "%s-%s-%d", basename, "tx", tx_int_idx++); | |
2629 | } else { | |
2630 | /* skip this unused q_vector */ | |
2631 | continue; | |
2632 | } | |
2633 | err = request_irq(pf->msix_entries[base + vector].vector, | |
2634 | vsi->irq_handler, | |
2635 | 0, | |
2636 | q_vector->name, | |
2637 | q_vector); | |
2638 | if (err) { | |
2639 | dev_info(&pf->pdev->dev, | |
2640 | "%s: request_irq failed, error: %d\n", | |
2641 | __func__, err); | |
2642 | goto free_queue_irqs; | |
2643 | } | |
2644 | /* assign the mask for this irq */ | |
2645 | irq_set_affinity_hint(pf->msix_entries[base + vector].vector, | |
2646 | &q_vector->affinity_mask); | |
2647 | } | |
2648 | ||
2649 | return 0; | |
2650 | ||
2651 | free_queue_irqs: | |
2652 | while (vector) { | |
2653 | vector--; | |
2654 | irq_set_affinity_hint(pf->msix_entries[base + vector].vector, | |
2655 | NULL); | |
2656 | free_irq(pf->msix_entries[base + vector].vector, | |
2657 | &(vsi->q_vectors[vector])); | |
2658 | } | |
2659 | return err; | |
2660 | } | |
2661 | ||
2662 | /** | |
2663 | * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI | |
2664 | * @vsi: the VSI being un-configured | |
2665 | **/ | |
2666 | static void i40e_vsi_disable_irq(struct i40e_vsi *vsi) | |
2667 | { | |
2668 | struct i40e_pf *pf = vsi->back; | |
2669 | struct i40e_hw *hw = &pf->hw; | |
2670 | int base = vsi->base_vector; | |
2671 | int i; | |
2672 | ||
2673 | for (i = 0; i < vsi->num_queue_pairs; i++) { | |
9f65e15b AD |
2674 | wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0); |
2675 | wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0); | |
41c445ff JB |
2676 | } |
2677 | ||
2678 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) { | |
2679 | for (i = vsi->base_vector; | |
2680 | i < (vsi->num_q_vectors + vsi->base_vector); i++) | |
2681 | wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0); | |
2682 | ||
2683 | i40e_flush(hw); | |
2684 | for (i = 0; i < vsi->num_q_vectors; i++) | |
2685 | synchronize_irq(pf->msix_entries[i + base].vector); | |
2686 | } else { | |
2687 | /* Legacy and MSI mode - this stops all interrupt handling */ | |
2688 | wr32(hw, I40E_PFINT_ICR0_ENA, 0); | |
2689 | wr32(hw, I40E_PFINT_DYN_CTL0, 0); | |
2690 | i40e_flush(hw); | |
2691 | synchronize_irq(pf->pdev->irq); | |
2692 | } | |
2693 | } | |
2694 | ||
2695 | /** | |
2696 | * i40e_vsi_enable_irq - Enable IRQ for the given VSI | |
2697 | * @vsi: the VSI being configured | |
2698 | **/ | |
2699 | static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) | |
2700 | { | |
2701 | struct i40e_pf *pf = vsi->back; | |
2702 | int i; | |
2703 | ||
2704 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) { | |
2705 | for (i = vsi->base_vector; | |
2706 | i < (vsi->num_q_vectors + vsi->base_vector); i++) | |
2707 | i40e_irq_dynamic_enable(vsi, i); | |
2708 | } else { | |
2709 | i40e_irq_dynamic_enable_icr0(pf); | |
2710 | } | |
2711 | ||
1022cb6c | 2712 | i40e_flush(&pf->hw); |
41c445ff JB |
2713 | return 0; |
2714 | } | |
2715 | ||
2716 | /** | |
2717 | * i40e_stop_misc_vector - Stop the vector that handles non-queue events | |
2718 | * @pf: board private structure | |
2719 | **/ | |
2720 | static void i40e_stop_misc_vector(struct i40e_pf *pf) | |
2721 | { | |
2722 | /* Disable ICR 0 */ | |
2723 | wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); | |
2724 | i40e_flush(&pf->hw); | |
2725 | } | |
2726 | ||
2727 | /** | |
2728 | * i40e_intr - MSI/Legacy and non-queue interrupt handler | |
2729 | * @irq: interrupt number | |
2730 | * @data: pointer to the board private structure (struct i40e_pf) | |
2731 | * | |
2732 | * This is the handler used for all MSI/Legacy interrupts, and deals | |
2733 | * with both queue and non-queue interrupts. This is also used in | |
2734 | * MSIX mode to handle the non-queue interrupts. | |
2735 | **/ | |
2736 | static irqreturn_t i40e_intr(int irq, void *data) | |
2737 | { | |
2738 | struct i40e_pf *pf = (struct i40e_pf *)data; | |
2739 | struct i40e_hw *hw = &pf->hw; | |
2740 | u32 icr0, icr0_remaining; | |
2741 | u32 val, ena_mask; | |
2742 | ||
2743 | icr0 = rd32(hw, I40E_PFINT_ICR0); | |
2744 | ||
41c445ff JB |
2745 | val = rd32(hw, I40E_PFINT_DYN_CTL0); |
2746 | val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; | |
2747 | wr32(hw, I40E_PFINT_DYN_CTL0, val); | |
2748 | ||
116a57d4 SN |
2749 | /* if sharing a legacy IRQ, we might get called w/o an intr pending */ |
2750 | if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0) | |
2751 | return IRQ_NONE; | |
2752 | ||
41c445ff JB |
2753 | ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA); |
2754 | ||
2755 | /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ | |
2756 | if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { | |
2757 | ||
2758 | /* temporarily disable queue cause for NAPI processing */ | |
2759 | u32 qval = rd32(hw, I40E_QINT_RQCTL(0)); | |
2760 | qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK; | |
2761 | wr32(hw, I40E_QINT_RQCTL(0), qval); | |
2762 | ||
2763 | qval = rd32(hw, I40E_QINT_TQCTL(0)); | |
2764 | qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK; | |
2765 | wr32(hw, I40E_QINT_TQCTL(0), qval); | |
41c445ff JB |
2766 | |
2767 | if (!test_bit(__I40E_DOWN, &pf->state)) | |
493fb300 | 2768 | napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi); |
41c445ff JB |
2769 | } |
2770 | ||
2771 | if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { | |
2772 | ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; | |
2773 | set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); | |
2774 | } | |
2775 | ||
2776 | if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { | |
2777 | ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; | |
2778 | set_bit(__I40E_MDD_EVENT_PENDING, &pf->state); | |
2779 | } | |
2780 | ||
2781 | if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { | |
2782 | ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; | |
2783 | set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); | |
2784 | } | |
2785 | ||
2786 | if (icr0 & I40E_PFINT_ICR0_GRST_MASK) { | |
2787 | if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) | |
2788 | set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); | |
2789 | ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK; | |
2790 | val = rd32(hw, I40E_GLGEN_RSTAT); | |
2791 | val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) | |
2792 | >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; | |
d52cf0a9 | 2793 | if (val == I40E_RESET_CORER) |
41c445ff | 2794 | pf->corer_count++; |
d52cf0a9 | 2795 | else if (val == I40E_RESET_GLOBR) |
41c445ff | 2796 | pf->globr_count++; |
d52cf0a9 | 2797 | else if (val == I40E_RESET_EMPR) |
41c445ff JB |
2798 | pf->empr_count++; |
2799 | } | |
2800 | ||
2801 | /* If a critical error is pending we have no choice but to reset the | |
2802 | * device. | |
2803 | * Report and mask out any remaining unexpected interrupts. | |
2804 | */ | |
2805 | icr0_remaining = icr0 & ena_mask; | |
2806 | if (icr0_remaining) { | |
2807 | dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", | |
2808 | icr0_remaining); | |
2809 | if ((icr0_remaining & I40E_PFINT_ICR0_HMC_ERR_MASK) || | |
2810 | (icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || | |
2811 | (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || | |
2812 | (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) || | |
2813 | (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) { | |
2814 | if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { | |
2815 | dev_info(&pf->pdev->dev, "HMC error interrupt\n"); | |
2816 | } else { | |
2817 | dev_info(&pf->pdev->dev, "device will be reset\n"); | |
2818 | set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); | |
2819 | i40e_service_event_schedule(pf); | |
2820 | } | |
2821 | } | |
2822 | ena_mask &= ~icr0_remaining; | |
2823 | } | |
2824 | ||
2825 | /* re-enable interrupt causes */ | |
2826 | wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); | |
41c445ff JB |
2827 | if (!test_bit(__I40E_DOWN, &pf->state)) { |
2828 | i40e_service_event_schedule(pf); | |
2829 | i40e_irq_dynamic_enable_icr0(pf); | |
2830 | } | |
2831 | ||
2832 | return IRQ_HANDLED; | |
2833 | } | |
2834 | ||
2835 | /** | |
cd0b6fa6 | 2836 | * map_vector_to_qp - Assigns the queue pair to the vector
41c445ff JB |
2837 | * @vsi: the VSI being configured |
2838 | * @v_idx: vector index | |
cd0b6fa6 | 2839 | * @qp_idx: queue pair index |
41c445ff | 2840 | **/ |
cd0b6fa6 | 2841 | static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx) |
41c445ff | 2842 | { |
493fb300 | 2843 | struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; |
9f65e15b AD |
2844 | struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; |
2845 | struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; | |
41c445ff JB |
2846 | |
2847 | tx_ring->q_vector = q_vector; | |
cd0b6fa6 AD |
2848 | tx_ring->next = q_vector->tx.ring; |
2849 | q_vector->tx.ring = tx_ring; | |
41c445ff | 2850 | q_vector->tx.count++; |
cd0b6fa6 AD |
2851 | |
2852 | rx_ring->q_vector = q_vector; | |
2853 | rx_ring->next = q_vector->rx.ring; | |
2854 | q_vector->rx.ring = rx_ring; | |
2855 | q_vector->rx.count++; | |
41c445ff JB |
2856 | } |
2857 | ||
2858 | /** | |
2859 | * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors | |
2860 | * @vsi: the VSI being configured | |
2861 | * | |
2862 | * This function maps descriptor rings to the queue-specific vectors | |
2863 | * we were allotted through the MSI-X enabling code. Ideally, we'd have | |
2864 | * one vector per queue pair, but on a constrained vector budget, we | |
2865 | * group the queue pairs as "efficiently" as possible. | |
2866 | **/ | |
2867 | static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) | |
2868 | { | |
2869 | int qp_remaining = vsi->num_queue_pairs; | |
2870 | int q_vectors = vsi->num_q_vectors; | |
cd0b6fa6 | 2871 | int num_ringpairs; |
41c445ff JB |
2872 | int v_start = 0; |
2873 | int qp_idx = 0; | |
2874 | ||
2875 | /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to | |
2876 | * group them so there are multiple queues per vector. | |
2877 | */ | |
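/* e.g. 10 queue pairs spread over 4 vectors come out as 3/3/2/2,
 * because DIV_ROUND_UP() re-balances the remainder on every pass
 * (illustrative numbers)
 */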
2878 | for (; v_start < q_vectors && qp_remaining; v_start++) { | |
cd0b6fa6 AD |
2879 | struct i40e_q_vector *q_vector = vsi->q_vectors[v_start]; |
2880 | ||
2881 | num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); | |
2882 | ||
2883 | q_vector->num_ringpairs = num_ringpairs; | |
2884 | ||
2885 | q_vector->rx.count = 0; | |
2886 | q_vector->tx.count = 0; | |
2887 | q_vector->rx.ring = NULL; | |
2888 | q_vector->tx.ring = NULL; | |
2889 | ||
2890 | while (num_ringpairs--) { | |
2891 | map_vector_to_qp(vsi, v_start, qp_idx); | |
2892 | qp_idx++; | |
2893 | qp_remaining--; | |
41c445ff JB |
2894 | } |
2895 | } | |
2896 | } | |
2897 | ||
2898 | /** | |
2899 | * i40e_vsi_request_irq - Request IRQ from the OS | |
2900 | * @vsi: the VSI being configured | |
2901 | * @basename: name for the vector | |
2902 | **/ | |
2903 | static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) | |
2904 | { | |
2905 | struct i40e_pf *pf = vsi->back; | |
2906 | int err; | |
2907 | ||
2908 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) | |
2909 | err = i40e_vsi_request_irq_msix(vsi, basename); | |
2910 | else if (pf->flags & I40E_FLAG_MSI_ENABLED) | |
2911 | err = request_irq(pf->pdev->irq, i40e_intr, 0, | |
2912 | pf->misc_int_name, pf); | |
2913 | else | |
2914 | err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, | |
2915 | pf->misc_int_name, pf); | |
2916 | ||
2917 | if (err) | |
2918 | dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); | |
2919 | ||
2920 | return err; | |
2921 | } | |
2922 | ||
2923 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
2924 | /** | |
2925 | * i40e_netpoll - A polling 'interrupt' handler | |
2926 | * @netdev: network interface device structure | |
2927 | * | |
2928 | * This is used by netconsole to send skbs without having to re-enable | |
2929 | * interrupts. It's not called while the normal interrupt routine is executing. | |
2930 | **/ | |
2931 | static void i40e_netpoll(struct net_device *netdev) | |
2932 | { | |
2933 | struct i40e_netdev_priv *np = netdev_priv(netdev); | |
2934 | struct i40e_vsi *vsi = np->vsi; | |
2935 | struct i40e_pf *pf = vsi->back; | |
2936 | int i; | |
2937 | ||
2938 | /* if interface is down do nothing */ | |
2939 | if (test_bit(__I40E_DOWN, &vsi->state)) | |
2940 | return; | |
2941 | ||
2942 | pf->flags |= I40E_FLAG_IN_NETPOLL; | |
2943 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) { | |
2944 | for (i = 0; i < vsi->num_q_vectors; i++) | |
493fb300 | 2945 | i40e_msix_clean_rings(0, vsi->q_vectors[i]); |
41c445ff JB |
2946 | } else { |
2947 | i40e_intr(pf->pdev->irq, pf); | |
2948 | } | |
2949 | pf->flags &= ~I40E_FLAG_IN_NETPOLL; | |
2950 | } | |
2951 | #endif | |
2952 | ||
2953 | /** | |
2954 | * i40e_vsi_control_tx - Start or stop a VSI's rings | |
2955 | * @vsi: the VSI being configured | |
2956 | * @enable: start or stop the rings | |
2957 | **/ | |
2958 | static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) | |
2959 | { | |
2960 | struct i40e_pf *pf = vsi->back; | |
2961 | struct i40e_hw *hw = &pf->hw; | |
2962 | int i, j, pf_q; | |
2963 | u32 tx_reg; | |
2964 | ||
2965 | pf_q = vsi->base_queue; | |
2966 | for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { | |
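/* wait for a prior enable/disable request to settle: QENA_REQ is
 * what the driver last asked for and QENA_STAT is what the hardware
 * reports, so poll until the two bits agree
 */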
2967 | j = 1000; | |
2968 | do { | |
2969 | usleep_range(1000, 2000); | |
2970 | tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); | |
2971 | } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) | |
2972 | ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1); | |
2973 | ||
2974 | if (enable) { | |
2975 | /* is STAT set ? */ | |
2976 | if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) { | |
2977 | dev_info(&pf->pdev->dev, | |
2978 | "Tx %d already enabled\n", i); | |
2979 | continue; | |
2980 | } | |
2981 | } else { | |
2982 | /* is !STAT set ? */ | |
2983 | if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) { | |
2984 | dev_info(&pf->pdev->dev, | |
2985 | "Tx %d already disabled\n", i); | |
2986 | continue; | |
2987 | } | |
2988 | } | |
2989 | ||
2990 | /* turn on/off the queue */ | |
2991 | if (enable) | |
2992 | tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK | | |
2993 | I40E_QTX_ENA_QENA_STAT_MASK; | |
2994 | else | |
2995 | tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; | |
2996 | ||
2997 | wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); | |
2998 | ||
2999 | /* wait for the change to finish */ | |
3000 | for (j = 0; j < 10; j++) { | |
3001 | tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); | |
3002 | if (enable) { | |
3003 | if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) | |
3004 | break; | |
3005 | } else { | |
3006 | if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) | |
3007 | break; | |
3008 | } | |
3009 | ||
3010 | udelay(10); | |
3011 | } | |
3012 | if (j >= 10) { | |
3013 | dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n", | |
3014 | pf_q, (enable ? "en" : "dis")); | |
3015 | return -ETIMEDOUT; | |
3016 | } | |
3017 | } | |
3018 | ||
3019 | return 0; | |
3020 | } | |
3021 | ||
3022 | /** | |
3023 | * i40e_vsi_control_rx - Start or stop a VSI's rings | |
3024 | * @vsi: the VSI being configured | |
3025 | * @enable: start or stop the rings | |
3026 | **/ | |
3027 | static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) | |
3028 | { | |
3029 | struct i40e_pf *pf = vsi->back; | |
3030 | struct i40e_hw *hw = &pf->hw; | |
3031 | int i, j, pf_q; | |
3032 | u32 rx_reg; | |
3033 | ||
3034 | pf_q = vsi->base_queue; | |
3035 | for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { | |
3036 | j = 1000; | |
3037 | do { | |
3038 | usleep_range(1000, 2000); | |
3039 | rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); | |
3040 | } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) | |
3041 | ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1); | |
3042 | ||
3043 | if (enable) { | |
3044 | /* is STAT set ? */ | |
3045 | if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) | |
3046 | continue; | |
3047 | } else { | |
3048 | /* is !STAT set ? */ | |
3049 | if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) | |
3050 | continue; | |
3051 | } | |
3052 | ||
3053 | /* turn on/off the queue */ | |
3054 | if (enable) | |
3055 | rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK | | |
3056 | I40E_QRX_ENA_QENA_STAT_MASK; | |
3057 | else | |
3058 | rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK | | |
3059 | I40E_QRX_ENA_QENA_STAT_MASK); | |
3060 | wr32(hw, I40E_QRX_ENA(pf_q), rx_reg); | |
3061 | ||
3062 | /* wait for the change to finish */ | |
3063 | for (j = 0; j < 10; j++) { | |
3064 | rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); | |
3065 | ||
3066 | if (enable) { | |
3067 | if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) | |
3068 | break; | |
3069 | } else { | |
3070 | if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) | |
3071 | break; | |
3072 | } | |
3073 | ||
3074 | udelay(10); | |
3075 | } | |
3076 | if (j >= 10) { | |
3077 | dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n", | |
3078 | pf_q, (enable ? "en" : "dis")); | |
3079 | return -ETIMEDOUT; | |
3080 | } | |
3081 | } | |
3082 | ||
3083 | return 0; | |
3084 | } | |
3085 | ||
3086 | /** | |
3087 | * i40e_vsi_control_rings - Start or stop a VSI's rings | |
3088 | * @vsi: the VSI being configured | |
3089 | * @request: start or stop the rings | |
3090 | **/ | |
3091 | static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request) | |
3092 | { | |
3093 | int ret; | |
3094 | ||
3095 | /* do rx first for enable and last for disable */ | |
3096 | if (request) { | |
3097 | ret = i40e_vsi_control_rx(vsi, request); | |
3098 | if (ret) | |
3099 | return ret; | |
3100 | ret = i40e_vsi_control_tx(vsi, request); | |
3101 | } else { | |
3102 | ret = i40e_vsi_control_tx(vsi, request); | |
3103 | if (ret) | |
3104 | return ret; | |
3105 | ret = i40e_vsi_control_rx(vsi, request); | |
3106 | } | |
3107 | ||
3108 | return ret; | |
3109 | } | |
3110 | ||
3111 | /** | |
3112 | * i40e_vsi_free_irq - Free the irq association with the OS | |
3113 | * @vsi: the VSI being configured | |
3114 | **/ | |
3115 | static void i40e_vsi_free_irq(struct i40e_vsi *vsi) | |
3116 | { | |
3117 | struct i40e_pf *pf = vsi->back; | |
3118 | struct i40e_hw *hw = &pf->hw; | |
3119 | int base = vsi->base_vector; | |
3120 | u32 val, qp; | |
3121 | int i; | |
3122 | ||
3123 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) { | |
3124 | if (!vsi->q_vectors) | |
3125 | return; | |
3126 | ||
3127 | for (i = 0; i < vsi->num_q_vectors; i++) { | |
3128 | u16 vector = i + base; | |
3129 | ||
3130 | /* free only the irqs that were actually requested */ | |
493fb300 | 3131 | if (vsi->q_vectors[i]->num_ringpairs == 0) |
41c445ff JB |
3132 | continue; |
3133 | ||
3134 | /* clear the affinity_mask in the IRQ descriptor */ | |
3135 | irq_set_affinity_hint(pf->msix_entries[vector].vector, | |
3136 | NULL); | |
3137 | free_irq(pf->msix_entries[vector].vector, | |
493fb300 | 3138 | vsi->q_vectors[i]); |
41c445ff JB |
3139 | |
3140 | /* Tear down the interrupt queue linked list | |
3141 | * | |
3142 | * We know that they come in pairs and always | |
3143 | * the Rx first, then the Tx. To clear the | |
3144 | * linked list, stick the EOL value into the | |
3145 | * next_q field of the registers. | |
3146 | */ | |
3147 | val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); | |
3148 | qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) | |
3149 | >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; | |
3150 | val |= I40E_QUEUE_END_OF_LIST | |
3151 | << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; | |
3152 | wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); | |
3153 | ||
3154 | while (qp != I40E_QUEUE_END_OF_LIST) { | |
3155 | u32 next; | |
3156 | ||
3157 | val = rd32(hw, I40E_QINT_RQCTL(qp)); | |
3158 | ||
3159 | val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | | |
3160 | I40E_QINT_RQCTL_MSIX0_INDX_MASK | | |
3161 | I40E_QINT_RQCTL_CAUSE_ENA_MASK | | |
3162 | I40E_QINT_RQCTL_INTEVENT_MASK); | |
3163 | ||
3164 | val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | | |
3165 | I40E_QINT_RQCTL_NEXTQ_INDX_MASK); | |
3166 | ||
3167 | wr32(hw, I40E_QINT_RQCTL(qp), val); | |
3168 | ||
3169 | val = rd32(hw, I40E_QINT_TQCTL(qp)); | |
3170 | ||
3171 | next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK) | |
3172 | >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT; | |
3173 | ||
3174 | val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | | |
3175 | I40E_QINT_TQCTL_MSIX0_INDX_MASK | | |
3176 | I40E_QINT_TQCTL_CAUSE_ENA_MASK | | |
3177 | I40E_QINT_TQCTL_INTEVENT_MASK); | |
3178 | ||
3179 | val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | | |
3180 | I40E_QINT_TQCTL_NEXTQ_INDX_MASK); | |
3181 | ||
3182 | wr32(hw, I40E_QINT_TQCTL(qp), val); | |
3183 | qp = next; | |
3184 | } | |
3185 | } | |
3186 | } else { | |
3187 | free_irq(pf->pdev->irq, pf); | |
3188 | ||
3189 | val = rd32(hw, I40E_PFINT_LNKLST0); | |
3190 | qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) | |
3191 | >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; | |
3192 | val |= I40E_QUEUE_END_OF_LIST | |
3193 | << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; | |
3194 | wr32(hw, I40E_PFINT_LNKLST0, val); | |
3195 | ||
3196 | val = rd32(hw, I40E_QINT_RQCTL(qp)); | |
3197 | val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | | |
3198 | I40E_QINT_RQCTL_MSIX0_INDX_MASK | | |
3199 | I40E_QINT_RQCTL_CAUSE_ENA_MASK | | |
3200 | I40E_QINT_RQCTL_INTEVENT_MASK); | |
3201 | ||
3202 | val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | | |
3203 | I40E_QINT_RQCTL_NEXTQ_INDX_MASK); | |
3204 | ||
3205 | wr32(hw, I40E_QINT_RQCTL(qp), val); | |
3206 | ||
3207 | val = rd32(hw, I40E_QINT_TQCTL(qp)); | |
3208 | ||
3209 | val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | | |
3210 | I40E_QINT_TQCTL_MSIX0_INDX_MASK | | |
3211 | I40E_QINT_TQCTL_CAUSE_ENA_MASK | | |
3212 | I40E_QINT_TQCTL_INTEVENT_MASK); | |
3213 | ||
3214 | val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | | |
3215 | I40E_QINT_TQCTL_NEXTQ_INDX_MASK); | |
3216 | ||
3217 | wr32(hw, I40E_QINT_TQCTL(qp), val); | |
3218 | } | |
3219 | } | |
3220 | ||
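The MSI-X teardown above is, at heart, a singly linked list walk: each vector's queue pairs hang off its LNKLSTN register and are threaded through the NEXTQ_INDX field of the Tx queue control register, with I40E_QUEUE_END_OF_LIST as the terminator. Stripped of the cause-enable bookkeeping, the traversal reduces to the sketch below; walk_vector_queue_list is a hypothetical name used only for illustration:

/* Illustrative sketch only: follow the per-vector queue list until EOL */
static void walk_vector_queue_list(struct i40e_hw *hw, u32 first_qp)
{
	u32 qp = first_qp;

	while (qp != I40E_QUEUE_END_OF_LIST) {
		u32 val = rd32(hw, I40E_QINT_TQCTL(qp));

		/* the Tx control register links to the next queue pair */
		qp = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
		      >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
	}
}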
493fb300 AD |
3221 | /** |
3222 | * i40e_free_q_vector - Free memory allocated for specific interrupt vector | |
3223 | * @vsi: the VSI being configured | |
3224 | * @v_idx: Index of vector to be freed | |
3225 | * | |
3226 | * This function frees the memory allocated to the q_vector. In addition if | |
3227 | * NAPI is enabled it will delete any references to the NAPI struct prior | |
3228 | * to freeing the q_vector. | |
3229 | **/ | |
3230 | static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) | |
3231 | { | |
3232 | struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; | |
cd0b6fa6 | 3233 | struct i40e_ring *ring; |
493fb300 AD |
3234 | |
3235 | if (!q_vector) | |
3236 | return; | |
3237 | ||
3238 | /* disassociate q_vector from rings */ | |
cd0b6fa6 AD |
3239 | i40e_for_each_ring(ring, q_vector->tx) |
3240 | ring->q_vector = NULL; | |
3241 | ||
3242 | i40e_for_each_ring(ring, q_vector->rx) | |
3243 | ring->q_vector = NULL; | |
493fb300 AD |
3244 | |
3245 | /* only VSI w/ an associated netdev is set up w/ NAPI */ | |
3246 | if (vsi->netdev) | |
3247 | netif_napi_del(&q_vector->napi); | |
3248 | ||
3249 | vsi->q_vectors[v_idx] = NULL; | |
3250 | ||
3251 | kfree_rcu(q_vector, rcu); | |
3252 | } | |
3253 | ||
41c445ff JB |
3254 | /** |
3255 | * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors | |
3256 | * @vsi: the VSI being un-configured | |
3257 | * | |
3258 | * This frees the memory allocated to the q_vectors and | |
3259 | * deletes references to the NAPI struct. | |
3260 | **/ | |
3261 | static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) | |
3262 | { | |
3263 | int v_idx; | |
3264 | ||
493fb300 AD |
3265 | for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) |
3266 | i40e_free_q_vector(vsi, v_idx); | |
41c445ff JB |
3267 | } |
3268 | ||
3269 | /** | |
3270 | * i40e_reset_interrupt_capability - Disable interrupt setup in OS | |
3271 | * @pf: board private structure | |
3272 | **/ | |
3273 | static void i40e_reset_interrupt_capability(struct i40e_pf *pf) | |
3274 | { | |
3275 | /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */ | |
3276 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) { | |
3277 | pci_disable_msix(pf->pdev); | |
3278 | kfree(pf->msix_entries); | |
3279 | pf->msix_entries = NULL; | |
3280 | } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { | |
3281 | pci_disable_msi(pf->pdev); | |
3282 | } | |
3283 | pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); | |
3284 | } | |
3285 | ||
3286 | /** | |
3287 | * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings | |
3288 | * @pf: board private structure | |
3289 | * | |
3290 | * We go through and clear interrupt specific resources and reset the structure | |
3291 | * to pre-load conditions | |
3292 | **/ | |
3293 | static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) | |
3294 | { | |
3295 | int i; | |
3296 | ||
3297 | i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); | |
3298 | for (i = 0; i < pf->hw.func_caps.num_vsis; i++) | |
3299 | if (pf->vsi[i]) | |
3300 | i40e_vsi_free_q_vectors(pf->vsi[i]); | |
3301 | i40e_reset_interrupt_capability(pf); | |
3302 | } | |
3303 | ||
3304 | /** | |
3305 | * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI | |
3306 | * @vsi: the VSI being configured | |
3307 | **/ | |
3308 | static void i40e_napi_enable_all(struct i40e_vsi *vsi) | |
3309 | { | |
3310 | int q_idx; | |
3311 | ||
3312 | if (!vsi->netdev) | |
3313 | return; | |
3314 | ||
3315 | for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) | |
493fb300 | 3316 | napi_enable(&vsi->q_vectors[q_idx]->napi); |
41c445ff JB |
3317 | } |
3318 | ||
3319 | /** | |
3320 | * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI | |
3321 | * @vsi: the VSI being configured | |
3322 | **/ | |
3323 | static void i40e_napi_disable_all(struct i40e_vsi *vsi) | |
3324 | { | |
3325 | int q_idx; | |
3326 | ||
3327 | if (!vsi->netdev) | |
3328 | return; | |
3329 | ||
3330 | for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) | |
493fb300 | 3331 | napi_disable(&vsi->q_vectors[q_idx]->napi); |
41c445ff JB |
3332 | } |
3333 | ||
3334 | /** | |
3335 | * i40e_quiesce_vsi - Pause a given VSI | |
3336 | * @vsi: the VSI being paused | |
3337 | **/ | |
3338 | static void i40e_quiesce_vsi(struct i40e_vsi *vsi) | |
3339 | { | |
3340 | if (test_bit(__I40E_DOWN, &vsi->state)) | |
3341 | return; | |
3342 | ||
3343 | set_bit(__I40E_NEEDS_RESTART, &vsi->state); | |
3344 | if (vsi->netdev && netif_running(vsi->netdev)) { | |
3345 | vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); | |
3346 | } else { | |
3347 | set_bit(__I40E_DOWN, &vsi->state); | |
3348 | i40e_down(vsi); | |
3349 | } | |
3350 | } | |
3351 | ||
3352 | /** | |
3353 | * i40e_unquiesce_vsi - Resume a given VSI | |
3354 | * @vsi: the VSI being resumed | |
3355 | **/ | |
3356 | static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) | |
3357 | { | |
3358 | if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state)) | |
3359 | return; | |
3360 | ||
3361 | clear_bit(__I40E_NEEDS_RESTART, &vsi->state); | |
3362 | if (vsi->netdev && netif_running(vsi->netdev)) | |
3363 | vsi->netdev->netdev_ops->ndo_open(vsi->netdev); | |
3364 | else | |
3365 | i40e_up(vsi); /* this clears the DOWN bit */ | |
3366 | } | |
3367 | ||
3368 | /** | |
3369 | * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF | |
3370 | * @pf: the PF | |
3371 | **/ | |
3372 | static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) | |
3373 | { | |
3374 | int v; | |
3375 | ||
3376 | for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { | |
3377 | if (pf->vsi[v]) | |
3378 | i40e_quiesce_vsi(pf->vsi[v]); | |
3379 | } | |
3380 | } | |
3381 | ||
3382 | /** | |
3383 | * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF | |
3384 | * @pf: the PF | |
3385 | **/ | |
3386 | static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) | |
3387 | { | |
3388 | int v; | |
3389 | ||
3390 | for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { | |
3391 | if (pf->vsi[v]) | |
3392 | i40e_unquiesce_vsi(pf->vsi[v]); | |
3393 | } | |
3394 | } | |
3395 | ||
3396 | /** | |
3397 | * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config | |
3398 | * @dcbcfg: the corresponding DCBx configuration structure | |
3399 | * | |
3400 | * Return the number of TCs from given DCBx configuration | |
3401 | **/ | |
3402 | static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) | |
3403 | { | |
078b5876 JB |
3404 | u8 num_tc = 0; |
3405 | int i; | |
41c445ff JB |
3406 | |
3407 | /* Scan the ETS Config Priority Table to find the | |
3408 | * traffic class enabled for each priority, and use | |
3409 | * the highest traffic class index seen to derive the | |
3410 | * number of traffic classes enabled | |
3411 | */ | |
3412 | for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { | |
3413 | if (dcbcfg->etscfg.prioritytable[i] > num_tc) | |
3414 | num_tc = dcbcfg->etscfg.prioritytable[i]; | |
3415 | } | |
3416 | ||
3417 | /* Traffic class index starts from zero so | |
3418 | * increment to return the actual count | |
3419 | */ | |
078b5876 | 3420 | return num_tc + 1; |
41c445ff JB |
3421 | } |
3422 | ||
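A worked example of the scan above, with made-up table contents: if the ETS priority table maps the eight user priorities to traffic classes {0, 0, 1, 1, 2, 2, 0, 0}, the largest TC index seen is 2 and the function returns 3.

/* Illustrative values only, not taken from real hardware */
u8 prioritytable[I40E_MAX_USER_PRIORITY] = { 0, 0, 1, 1, 2, 2, 0, 0 };
/* highest TC index is 2, so i40e_dcb_get_num_tc() reports 3 TCs */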
3423 | /** | |
3424 | * i40e_dcb_get_enabled_tc - Get enabled traffic classes | |
3425 | * @dcbcfg: the corresponding DCBx configuration structure | |
3426 | * | |
3427 | * Query the current DCB configuration and return a bitmap of the | |
3428 | * traffic classes enabled in the given DCBX config | |
3429 | **/ | |
3430 | static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) | |
3431 | { | |
3432 | u8 num_tc = i40e_dcb_get_num_tc(dcbcfg); | |
3433 | u8 enabled_tc = 1; | |
3434 | u8 i; | |
3435 | ||
3436 | for (i = 0; i < num_tc; i++) | |
3437 | enabled_tc |= 1 << i; | |
3438 | ||
3439 | return enabled_tc; | |
3440 | } | |
3441 | ||
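Continuing that example: with three traffic classes reported, the loop sets one bit per TC, so the returned bitmap is 0x7.

/* Illustrative example only: for num_tc == 3 the loop produces */
u8 enabled_tc = (1 << 0) | (1 << 1) | (1 << 2);	/* 0x7: TC0, TC1 and TC2 */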
3442 | /** | |
3443 | * i40e_pf_get_num_tc - Get enabled traffic classes for PF | |
3444 | * @pf: PF being queried | |
3445 | * | |
3446 | * Return number of traffic classes enabled for the given PF | |
3447 | **/ | |
3448 | static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) | |
3449 | { | |
3450 | struct i40e_hw *hw = &pf->hw; | |
3451 | u8 i, enabled_tc; | |
3452 | u8 num_tc = 0; | |
3453 | struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; | |
3454 | ||
3455 | /* If DCB is not enabled then always in single TC */ | |
3456 | if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) | |
3457 | return 1; | |
3458 | ||
3459 | /* MFP mode return count of enabled TCs for this PF */ | |
3460 | if (pf->flags & I40E_FLAG_MFP_ENABLED) { | |
3461 | enabled_tc = pf->hw.func_caps.enabled_tcmap; | |
3462 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { | |
3463 | if (enabled_tc & (1 << i)) | |
3464 | num_tc++; | |
3465 | } | |
3466 | return num_tc; | |
3467 | } | |
3468 | ||
3469 | /* SFP mode will be enabled for all TCs on port */ | |
3470 | return i40e_dcb_get_num_tc(dcbcfg); | |
3471 | } | |
3472 | ||
3473 | /** | |
3474 | * i40e_pf_get_default_tc - Get bitmap for first enabled TC | |
3475 | * @pf: PF being queried | |
3476 | * | |
3477 | * Return a bitmap for first enabled traffic class for this PF. | |
3478 | **/ | |
3479 | static u8 i40e_pf_get_default_tc(struct i40e_pf *pf) | |
3480 | { | |
3481 | u8 enabled_tc = pf->hw.func_caps.enabled_tcmap; | |
3482 | u8 i = 0; | |
3483 | ||
3484 | if (!enabled_tc) | |
3485 | return 0x1; /* TC0 */ | |
3486 | ||
3487 | /* Find the first enabled TC */ | |
3488 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { | |
3489 | if (enabled_tc & (1 << i)) | |
3490 | break; | |
3491 | } | |
3492 | ||
3493 | return 1 << i; | |
3494 | } | |
3495 | ||
3496 | /** | |
3497 | * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes | |
3498 | * @pf: PF being queried | |
3499 | * | |
3500 | * Return a bitmap for enabled traffic classes for this PF. | |
3501 | **/ | |
3502 | static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) | |
3503 | { | |
3504 | /* If DCB is not enabled for this PF then just return default TC */ | |
3505 | if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) | |
3506 | return i40e_pf_get_default_tc(pf); | |
3507 | ||
3508 | /* MFP mode will have enabled TCs set by FW */ | |
3509 | if (pf->flags & I40E_FLAG_MFP_ENABLED) | |
3510 | return pf->hw.func_caps.enabled_tcmap; | |
3511 | ||
3512 | /* SFP mode we want PF to be enabled for all TCs */ | |
3513 | return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); | |
3514 | } | |
3515 | ||
3516 | /** | |
3517 | * i40e_vsi_get_bw_info - Query VSI BW Information | |
3518 | * @vsi: the VSI being queried | |
3519 | * | |
3520 | * Returns 0 on success, negative value on failure | |
3521 | **/ | |
3522 | static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) | |
3523 | { | |
3524 | struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0}; | |
3525 | struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; | |
3526 | struct i40e_pf *pf = vsi->back; | |
3527 | struct i40e_hw *hw = &pf->hw; | |
dcae29be | 3528 | i40e_status aq_ret; |
41c445ff | 3529 | u32 tc_bw_max; |
41c445ff JB |
3530 | int i; |
3531 | ||
3532 | /* Get the VSI level BW configuration */ | |
dcae29be JB |
3533 | aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); |
3534 | if (aq_ret) { | |
41c445ff JB |
3535 | dev_info(&pf->pdev->dev, |
3536 | "couldn't get pf vsi bw config, err %d, aq_err %d\n", | |
dcae29be JB |
3537 | aq_ret, pf->hw.aq.asq_last_status); |
3538 | return -EINVAL; | |
41c445ff JB |
3539 | } |
3540 | ||
3541 | /* Get the VSI level BW configuration per TC */ | |
dcae29be JB |
3542 | aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, |
3543 | NULL); | |
3544 | if (aq_ret) { | |
41c445ff JB |
3545 | dev_info(&pf->pdev->dev, |
3546 | "couldn't get pf vsi ets bw config, err %d, aq_err %d\n", | |
dcae29be JB |
3547 | aq_ret, pf->hw.aq.asq_last_status); |
3548 | return -EINVAL; | |
41c445ff JB |
3549 | } |
3550 | ||
3551 | if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) { | |
3552 | dev_info(&pf->pdev->dev, | |
3553 | "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n", | |
3554 | bw_config.tc_valid_bits, | |
3555 | bw_ets_config.tc_valid_bits); | |
3556 | /* Still continuing */ | |
3557 | } | |
3558 | ||
3559 | vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit); | |
3560 | vsi->bw_max_quanta = bw_config.max_bw; | |
3561 | tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) | | |
3562 | (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16); | |
3563 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { | |
3564 | vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i]; | |
3565 | vsi->bw_ets_limit_credits[i] = | |
3566 | le16_to_cpu(bw_ets_config.credits[i]); | |
3567 | /* 3 bits out of 4 for each TC */ | |
3568 | vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); | |
3569 | } | |
078b5876 | 3570 | |
dcae29be | 3571 | return 0; |
41c445ff JB |
3572 | } |
3573 | ||
3574 | /** | |
3575 | * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC | |
3576 | * @vsi: the VSI being configured | |
3577 | * @enabled_tc: TC bitmap | |
3578 | * @bw_credits: BW shared credits per TC | |
3579 | * | |
3580 | * Returns 0 on success, negative value on failure | |
3581 | **/ | |
dcae29be | 3582 | static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, |
41c445ff JB |
3583 | u8 *bw_share) |
3584 | { | |
3585 | struct i40e_aqc_configure_vsi_tc_bw_data bw_data; | |
dcae29be JB |
3586 | i40e_status aq_ret; |
3587 | int i; | |
41c445ff JB |
3588 | |
3589 | bw_data.tc_valid_bits = enabled_tc; | |
3590 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) | |
3591 | bw_data.tc_bw_credits[i] = bw_share[i]; | |
3592 | ||
dcae29be JB |
3593 | aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, |
3594 | NULL); | |
3595 | if (aq_ret) { | |
41c445ff JB |
3596 | dev_info(&vsi->back->pdev->dev, |
3597 | "%s: AQ command Config VSI BW allocation per TC failed = %d\n", | |
3598 | __func__, vsi->back->hw.aq.asq_last_status); | |
dcae29be | 3599 | return -EINVAL; |
41c445ff JB |
3600 | } |
3601 | ||
3602 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) | |
3603 | vsi->info.qs_handle[i] = bw_data.qs_handles[i]; | |
3604 | ||
dcae29be | 3605 | return 0; |
41c445ff JB |
3606 | } |
3607 | ||
3608 | /** | |
3609 | * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration | |
3610 | * @vsi: the VSI being configured | |
3611 | * @enabled_tc: TC map to be enabled | |
3612 | * | |
3613 | **/ | |
3614 | static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) | |
3615 | { | |
3616 | struct net_device *netdev = vsi->netdev; | |
3617 | struct i40e_pf *pf = vsi->back; | |
3618 | struct i40e_hw *hw = &pf->hw; | |
3619 | u8 netdev_tc = 0; | |
3620 | int i; | |
3621 | struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; | |
3622 | ||
3623 | if (!netdev) | |
3624 | return; | |
3625 | ||
3626 | if (!enabled_tc) { | |
3627 | netdev_reset_tc(netdev); | |
3628 | return; | |
3629 | } | |
3630 | ||
3631 | /* Set up actual enabled TCs on the VSI */ | |
3632 | if (netdev_set_num_tc(netdev, vsi->tc_config.numtc)) | |
3633 | return; | |
3634 | ||
3635 | /* set per TC queues for the VSI */ | |
3636 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { | |
3637 | /* Only set TC queues for enabled tcs | |
3638 | * | |
3639 | * e.g. For a VSI that has TC0 and TC3 enabled the | |
3640 | * enabled_tc bitmap would be 0x9 (binary 1001); the driver | |
3641 | * will set numtc for the netdev to 2, and the netdev layer | |
3642 | * will reference them as TC 0 and TC 1. | |
3643 | */ | |
3644 | if (vsi->tc_config.enabled_tc & (1 << i)) | |
3645 | netdev_set_tc_queue(netdev, | |
3646 | vsi->tc_config.tc_info[i].netdev_tc, | |
3647 | vsi->tc_config.tc_info[i].qcount, | |
3648 | vsi->tc_config.tc_info[i].qoffset); | |
3649 | } | |
3650 | ||
3651 | /* Assign UP2TC map for the VSI */ | |
3652 | for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { | |
3653 | /* Get the actual TC# for the UP */ | |
3654 | u8 ets_tc = dcbcfg->etscfg.prioritytable[i]; | |
3655 | /* Get the mapped netdev TC# for the UP */ | |
3656 | netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc; | |
3657 | netdev_set_prio_tc_map(netdev, i, netdev_tc); | |
3658 | } | |
3659 | } | |
3660 | ||
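Tying this back to the TC0/TC3 example in the comment above, the two enabled hardware TCs become netdev TCs 0 and 1. The queue counts and offsets below are hypothetical, chosen only to show the shape of the calls:

/* Illustrative sketch only: hypothetical queue layout for TC0 and TC3 */
netdev_set_num_tc(netdev, 2);		/* two netdev TCs */
netdev_set_tc_queue(netdev, 0, 4, 0);	/* HW TC0 -> netdev TC0: 4 queues at offset 0 */
netdev_set_tc_queue(netdev, 1, 4, 4);	/* HW TC3 -> netdev TC1: 4 queues at offset 4 */
/* plus one netdev_set_prio_tc_map(netdev, prio, tc) call per user priority */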
3661 | /** | |
3662 | * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map | |
3663 | * @vsi: the VSI being configured | |
3664 | * @ctxt: the ctxt buffer returned from AQ VSI update param command | |
3665 | **/ | |
3666 | static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi, | |
3667 | struct i40e_vsi_context *ctxt) | |
3668 | { | |
3669 | /* copy just the sections touched, not the entire info, | |
3670 | * since not all sections are valid as returned by | |
3671 | * update vsi params | |
3672 | */ | |
3673 | vsi->info.mapping_flags = ctxt->info.mapping_flags; | |
3674 | memcpy(&vsi->info.queue_mapping, | |
3675 | &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping)); | |
3676 | memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping, | |
3677 | sizeof(vsi->info.tc_mapping)); | |
3678 | } | |
3679 | ||
3680 | /** | |
3681 | * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map | |
3682 | * @vsi: VSI to be configured | |
3683 | * @enabled_tc: TC bitmap | |
3684 | * | |
3685 | * This configures a particular VSI for TCs that are mapped to the | |
3686 | * given TC bitmap. It uses default bandwidth share for TCs across | |
3687 | * VSIs to configure TC for a particular VSI. | |
3688 | * | |
3689 | * NOTE: | |
3690 | * It is expected that the VSI queues have been quiesced before calling | |
3691 | * this function. | |
3692 | **/ | |
3693 | static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) | |
3694 | { | |
3695 | u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0}; | |
3696 | struct i40e_vsi_context ctxt; | |
3697 | int ret = 0; | |
3698 | int i; | |
3699 | ||
3700 | /* Check if enabled_tc is same as existing or new TCs */ | |
3701 | if (vsi->tc_config.enabled_tc == enabled_tc) | |
3702 | return ret; | |
3703 | ||
3704 | /* Enable ETS TCs with equal BW Share for now across all VSIs */ | |
3705 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { | |
3706 | if (enabled_tc & (1 << i)) | |
3707 | bw_share[i] = 1; | |
3708 | } | |
3709 | ||
3710 | ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share); | |
3711 | if (ret) { | |
3712 | dev_info(&vsi->back->pdev->dev, | |
3713 | "Failed configuring TC map %d for VSI %d\n", | |
3714 | enabled_tc, vsi->seid); | |
3715 | goto out; | |
3716 | } | |
3717 | ||
3718 | /* Update Queue Pairs Mapping for currently enabled UPs */ | |
3719 | ctxt.seid = vsi->seid; | |
3720 | ctxt.pf_num = vsi->back->hw.pf_id; | |
3721 | ctxt.vf_num = 0; | |
3722 | ctxt.uplink_seid = vsi->uplink_seid; | |
3723 | memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); | |
3724 | i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); | |
3725 | ||
3726 | /* Update the VSI after updating the VSI queue-mapping information */ | |
3727 | ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); | |
3728 | if (ret) { | |
3729 | dev_info(&vsi->back->pdev->dev, | |
3730 | "update vsi failed, aq_err=%d\n", | |
3731 | vsi->back->hw.aq.asq_last_status); | |
3732 | goto out; | |
3733 | } | |
3734 | /* update the local VSI info with updated queue map */ | |
3735 | i40e_vsi_update_queue_map(vsi, &ctxt); | |
3736 | vsi->info.valid_sections = 0; | |
3737 | ||
3738 | /* Update current VSI BW information */ | |
3739 | ret = i40e_vsi_get_bw_info(vsi); | |
3740 | if (ret) { | |
3741 | dev_info(&vsi->back->pdev->dev, | |
3742 | "Failed updating vsi bw info, aq_err=%d\n", | |
3743 | vsi->back->hw.aq.asq_last_status); | |
3744 | goto out; | |
3745 | } | |
3746 | ||
3747 | /* Update the netdev TC setup */ | |
3748 | i40e_vsi_config_netdev_tc(vsi, enabled_tc); | |
3749 | out: | |
3750 | return ret; | |
3751 | } | |
3752 | ||
3753 | /** | |
3754 | * i40e_up_complete - Finish the last steps of bringing up a connection | |
3755 | * @vsi: the VSI being configured | |
3756 | **/ | |
3757 | static int i40e_up_complete(struct i40e_vsi *vsi) | |
3758 | { | |
3759 | struct i40e_pf *pf = vsi->back; | |
3760 | int err; | |
3761 | ||
3762 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) | |
3763 | i40e_vsi_configure_msix(vsi); | |
3764 | else | |
3765 | i40e_configure_msi_and_legacy(vsi); | |
3766 | ||
3767 | /* start rings */ | |
3768 | err = i40e_vsi_control_rings(vsi, true); | |
3769 | if (err) | |
3770 | return err; | |
3771 | ||
3772 | clear_bit(__I40E_DOWN, &vsi->state); | |
3773 | i40e_napi_enable_all(vsi); | |
3774 | i40e_vsi_enable_irq(vsi); | |
3775 | ||
3776 | if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && | |
3777 | (vsi->netdev)) { | |
6d779b41 | 3778 | netdev_info(vsi->netdev, "NIC Link is Up\n"); |
41c445ff JB |
3779 | netif_tx_start_all_queues(vsi->netdev); |
3780 | netif_carrier_on(vsi->netdev); | |
6d779b41 AS |
3781 | } else if (vsi->netdev) { |
3782 | netdev_info(vsi->netdev, "NIC Link is Down\n"); | |
41c445ff JB |
3783 | } |
3784 | i40e_service_event_schedule(pf); | |
3785 | ||
3786 | return 0; | |
3787 | } | |
3788 | ||
3789 | /** | |
3790 | * i40e_vsi_reinit_locked - Reset the VSI | |
3791 | * @vsi: the VSI being configured | |
3792 | * | |
3793 | * Rebuild the ring structs after some configuration | |
3794 | * has changed, e.g. MTU size. | |
3795 | **/ | |
3796 | static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) | |
3797 | { | |
3798 | struct i40e_pf *pf = vsi->back; | |
3799 | ||
3800 | WARN_ON(in_interrupt()); | |
3801 | while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) | |
3802 | usleep_range(1000, 2000); | |
3803 | i40e_down(vsi); | |
3804 | ||
3805 | /* Give a VF some time to respond to the reset. The | |
3806 | * two second wait is based upon the watchdog cycle in | |
3807 | * the VF driver. | |
3808 | */ | |
3809 | if (vsi->type == I40E_VSI_SRIOV) | |
3810 | msleep(2000); | |
3811 | i40e_up(vsi); | |
3812 | clear_bit(__I40E_CONFIG_BUSY, &pf->state); | |
3813 | } | |
3814 | ||
3815 | /** | |
3816 | * i40e_up - Bring the connection back up after being down | |
3817 | * @vsi: the VSI being configured | |
3818 | **/ | |
3819 | int i40e_up(struct i40e_vsi *vsi) | |
3820 | { | |
3821 | int err; | |
3822 | ||
3823 | err = i40e_vsi_configure(vsi); | |
3824 | if (!err) | |
3825 | err = i40e_up_complete(vsi); | |
3826 | ||
3827 | return err; | |
3828 | } | |
3829 | ||
3830 | /** | |
3831 | * i40e_down - Shutdown the connection processing | |
3832 | * @vsi: the VSI being stopped | |
3833 | **/ | |
3834 | void i40e_down(struct i40e_vsi *vsi) | |
3835 | { | |
3836 | int i; | |
3837 | ||
3838 | /* It is assumed that the caller of this function | |
3839 | * sets the vsi->state __I40E_DOWN bit. | |
3840 | */ | |
3841 | if (vsi->netdev) { | |
3842 | netif_carrier_off(vsi->netdev); | |
3843 | netif_tx_disable(vsi->netdev); | |
3844 | } | |
3845 | i40e_vsi_disable_irq(vsi); | |
3846 | i40e_vsi_control_rings(vsi, false); | |
3847 | i40e_napi_disable_all(vsi); | |
3848 | ||
3849 | for (i = 0; i < vsi->num_queue_pairs; i++) { | |
9f65e15b AD |
3850 | i40e_clean_tx_ring(vsi->tx_rings[i]); |
3851 | i40e_clean_rx_ring(vsi->rx_rings[i]); | |
41c445ff JB |
3852 | } |
3853 | } | |
3854 | ||
3855 | /** | |
3856 | * i40e_setup_tc - configure multiple traffic classes | |
3857 | * @netdev: net device to configure | |
3858 | * @tc: number of traffic classes to enable | |
3859 | **/ | |
3860 | static int i40e_setup_tc(struct net_device *netdev, u8 tc) | |
3861 | { | |
3862 | struct i40e_netdev_priv *np = netdev_priv(netdev); | |
3863 | struct i40e_vsi *vsi = np->vsi; | |
3864 | struct i40e_pf *pf = vsi->back; | |
3865 | u8 enabled_tc = 0; | |
3866 | int ret = -EINVAL; | |
3867 | int i; | |
3868 | ||
3869 | /* Check if DCB enabled to continue */ | |
3870 | if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { | |
3871 | netdev_info(netdev, "DCB is not enabled for adapter\n"); | |
3872 | goto exit; | |
3873 | } | |
3874 | ||
3875 | /* Check if MFP enabled */ | |
3876 | if (pf->flags & I40E_FLAG_MFP_ENABLED) { | |
3877 | netdev_info(netdev, "Configuring TC not supported in MFP mode\n"); | |
3878 | goto exit; | |
3879 | } | |
3880 | ||
3881 | /* Check whether tc count is within enabled limit */ | |
3882 | if (tc > i40e_pf_get_num_tc(pf)) { | |
3883 | netdev_info(netdev, "TC count greater than enabled on link for adapter\n"); | |
3884 | goto exit; | |
3885 | } | |
3886 | ||
3887 | /* Generate TC map for number of tc requested */ | |
3888 | for (i = 0; i < tc; i++) | |
3889 | enabled_tc |= (1 << i); | |
3890 | ||
3891 | /* Requesting same TC configuration as already enabled */ | |
3892 | if (enabled_tc == vsi->tc_config.enabled_tc) | |
3893 | return 0; | |
3894 | ||
3895 | /* Quiesce VSI queues */ | |
3896 | i40e_quiesce_vsi(vsi); | |
3897 | ||
3898 | /* Configure VSI for enabled TCs */ | |
3899 | ret = i40e_vsi_config_tc(vsi, enabled_tc); | |
3900 | if (ret) { | |
3901 | netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n", | |
3902 | vsi->seid); | |
3903 | goto exit; | |
3904 | } | |
3905 | ||
3906 | /* Unquiesce VSI */ | |
3907 | i40e_unquiesce_vsi(vsi); | |
3908 | ||
3909 | exit: | |
3910 | return ret; | |
3911 | } | |
3912 | ||
3913 | /** | |
3914 | * i40e_open - Called when a network interface is made active | |
3915 | * @netdev: network interface device structure | |
3916 | * | |
3917 | * The open entry point is called when a network interface is made | |
3918 | * active by the system (IFF_UP). At this point all resources needed | |
3919 | * for transmit and receive operations are allocated, the interrupt | |
3920 | * handler is registered with the OS, the netdev watchdog subtask is | |
3921 | * enabled, and the stack is notified that the interface is ready. | |
3922 | * | |
3923 | * Returns 0 on success, negative value on failure | |
3924 | **/ | |
3925 | static int i40e_open(struct net_device *netdev) | |
3926 | { | |
3927 | struct i40e_netdev_priv *np = netdev_priv(netdev); | |
3928 | struct i40e_vsi *vsi = np->vsi; | |
3929 | struct i40e_pf *pf = vsi->back; | |
3930 | char int_name[IFNAMSIZ]; | |
3931 | int err; | |
3932 | ||
3933 | /* disallow open during test */ | |
3934 | if (test_bit(__I40E_TESTING, &pf->state)) | |
3935 | return -EBUSY; | |
3936 | ||
3937 | netif_carrier_off(netdev); | |
3938 | ||
3939 | /* allocate descriptors */ | |
3940 | err = i40e_vsi_setup_tx_resources(vsi); | |
3941 | if (err) | |
3942 | goto err_setup_tx; | |
3943 | err = i40e_vsi_setup_rx_resources(vsi); | |
3944 | if (err) | |
3945 | goto err_setup_rx; | |
3946 | ||
3947 | err = i40e_vsi_configure(vsi); | |
3948 | if (err) | |
3949 | goto err_setup_rx; | |
3950 | ||
3951 | snprintf(int_name, sizeof(int_name) - 1, "%s-%s", | |
3952 | dev_driver_string(&pf->pdev->dev), netdev->name); | |
3953 | err = i40e_vsi_request_irq(vsi, int_name); | |
3954 | if (err) | |
3955 | goto err_setup_rx; | |
3956 | ||
3957 | err = i40e_up_complete(vsi); | |
3958 | if (err) | |
3959 | goto err_up_complete; | |
3960 | ||
3961 | if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) { | |
3962 | err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL); | |
3963 | if (err) | |
3964 | netdev_info(netdev, | |
3965 | "couldn't set broadcast err %d aq_err %d\n", | |
3966 | err, pf->hw.aq.asq_last_status); | |
3967 | } | |
3968 | ||
3969 | return 0; | |
3970 | ||
3971 | err_up_complete: | |
3972 | i40e_down(vsi); | |
3973 | i40e_vsi_free_irq(vsi); | |
3974 | err_setup_rx: | |
3975 | i40e_vsi_free_rx_resources(vsi); | |
3976 | err_setup_tx: | |
3977 | i40e_vsi_free_tx_resources(vsi); | |
3978 | if (vsi == pf->vsi[pf->lan_vsi]) | |
3979 | i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); | |
3980 | ||
3981 | return err; | |
3982 | } | |
3983 | ||
3984 | /** | |
3985 | * i40e_close - Disables a network interface | |
3986 | * @netdev: network interface device structure | |
3987 | * | |
3988 | * The close entry point is called when an interface is de-activated | |
3989 | * by the OS. The hardware is still under the driver's control, but | |
3990 | * this netdev interface is disabled. | |
3991 | * | |
3992 | * Returns 0, this is not allowed to fail | |
3993 | **/ | |
3994 | static int i40e_close(struct net_device *netdev) | |
3995 | { | |
3996 | struct i40e_netdev_priv *np = netdev_priv(netdev); | |
3997 | struct i40e_vsi *vsi = np->vsi; | |
3998 | ||
3999 | if (test_and_set_bit(__I40E_DOWN, &vsi->state)) | |
4000 | return 0; | |
4001 | ||
4002 | i40e_down(vsi); | |
4003 | i40e_vsi_free_irq(vsi); | |
4004 | ||
4005 | i40e_vsi_free_tx_resources(vsi); | |
4006 | i40e_vsi_free_rx_resources(vsi); | |
4007 | ||
4008 | return 0; | |
4009 | } | |
4010 | ||
4011 | /** | |
4012 | * i40e_do_reset - Start a PF or Core Reset sequence | |
4013 | * @pf: board private structure | |
4014 | * @reset_flags: which reset is requested | |
4015 | * | |
4016 | * The essential difference in resets is that the PF Reset | |
4017 | * doesn't clear the packet buffers, doesn't reset the PE | |
4018 | * firmware, and doesn't bother the other PFs on the chip. | |
4019 | **/ | |
4020 | void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) | |
4021 | { | |
4022 | u32 val; | |
4023 | ||
4024 | WARN_ON(in_interrupt()); | |
4025 | ||
4026 | /* do the biggest reset indicated */ | |
4027 | if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) { | |
4028 | ||
4029 | /* Request a Global Reset | |
4030 | * | |
4031 | * This will start the chip's countdown to the actual full | |
4032 | * chip reset event, and a warning interrupt to be sent | |
4033 | * to all PFs, including the requestor. Our handler | |
4034 | * for the warning interrupt will deal with the shutdown | |
4035 | * and recovery of the switch setup. | |
4036 | */ | |
4037 | dev_info(&pf->pdev->dev, "GlobalR requested\n"); | |
4038 | val = rd32(&pf->hw, I40E_GLGEN_RTRIG); | |
4039 | val |= I40E_GLGEN_RTRIG_GLOBR_MASK; | |
4040 | wr32(&pf->hw, I40E_GLGEN_RTRIG, val); | |
4041 | ||
4042 | } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) { | |
4043 | ||
4044 | /* Request a Core Reset | |
4045 | * | |
4046 | * Same as Global Reset, except does *not* include the MAC/PHY | |
4047 | */ | |
4048 | dev_info(&pf->pdev->dev, "CoreR requested\n"); | |
4049 | val = rd32(&pf->hw, I40E_GLGEN_RTRIG); | |
4050 | val |= I40E_GLGEN_RTRIG_CORER_MASK; | |
4051 | wr32(&pf->hw, I40E_GLGEN_RTRIG, val); | |
4052 | i40e_flush(&pf->hw); | |
4053 | ||
4054 | } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) { | |
4055 | ||
4056 | /* Request a PF Reset | |
4057 | * | |
4058 | * Resets only the PF-specific registers | |
4059 | * | |
4060 | * This goes directly to the tear-down and rebuild of | |
4061 | * the switch, since we need to do all the recovery as | |
4062 | * for the Core Reset. | |
4063 | */ | |
4064 | dev_info(&pf->pdev->dev, "PFR requested\n"); | |
4065 | i40e_handle_reset_warning(pf); | |
4066 | ||
4067 | } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) { | |
4068 | int v; | |
4069 | ||
4070 | /* Find the VSI(s) that requested a re-init */ | |
4071 | dev_info(&pf->pdev->dev, | |
4072 | "VSI reinit requested\n"); | |
4073 | for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { | |
4074 | struct i40e_vsi *vsi = pf->vsi[v]; | |
4075 | if (vsi != NULL && | |
4076 | test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) { | |
4077 | i40e_vsi_reinit_locked(pf->vsi[v]); | |
4078 | clear_bit(__I40E_REINIT_REQUESTED, &vsi->state); | |
4079 | } | |
4080 | } | |
4081 | ||
4082 | /* no further action needed, so return now */ | |
4083 | return; | |
4084 | } else { | |
4085 | dev_info(&pf->pdev->dev, | |
4086 | "bad reset request 0x%08x\n", reset_flags); | |
4087 | return; | |
4088 | } | |
4089 | } | |
4090 | ||
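For reference, callers request a particular reset by passing the matching flag bit; the PF reset used in i40e_open()'s error path is typical:

/* Illustrative usage: ask for a PF reset */
i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));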
4091 | /** | |
4092 | * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event | |
4093 | * @pf: board private structure | |
4094 | * @e: event info posted on ARQ | |
4095 | * | |
4096 | * Handler for LAN Queue Overflow Event generated by the firmware for PF | |
4097 | * and VF queues | |
4098 | **/ | |
4099 | static void i40e_handle_lan_overflow_event(struct i40e_pf *pf, | |
4100 | struct i40e_arq_event_info *e) | |
4101 | { | |
4102 | struct i40e_aqc_lan_overflow *data = | |
4103 | (struct i40e_aqc_lan_overflow *)&e->desc.params.raw; | |
4104 | u32 queue = le32_to_cpu(data->prtdcb_rupto); | |
4105 | u32 qtx_ctl = le32_to_cpu(data->otx_ctl); | |
4106 | struct i40e_hw *hw = &pf->hw; | |
4107 | struct i40e_vf *vf; | |
4108 | u16 vf_id; | |
4109 | ||
4110 | dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n", | |
4111 | __func__, queue, qtx_ctl); | |
4112 | ||
4113 | /* Queue belongs to VF, find the VF and issue VF reset */ | |
4114 | if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK) | |
4115 | >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) { | |
4116 | vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK) | |
4117 | >> I40E_QTX_CTL_VFVM_INDX_SHIFT); | |
4118 | vf_id -= hw->func_caps.vf_base_id; | |
4119 | vf = &pf->vf[vf_id]; | |
4120 | i40e_vc_notify_vf_reset(vf); | |
4121 | /* Allow VF to process pending reset notification */ | |
4122 | msleep(20); | |
4123 | i40e_reset_vf(vf, false); | |
4124 | } | |
4125 | } | |
4126 | ||
4127 | /** | |
4128 | * i40e_service_event_complete - Finish up the service event | |
4129 | * @pf: board private structure | |
4130 | **/ | |
4131 | static void i40e_service_event_complete(struct i40e_pf *pf) | |
4132 | { | |
4133 | BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state)); | |
4134 | ||
4135 | /* flush memory to make sure state is correct before next watchdog */ | |
4136 | smp_mb__before_clear_bit(); | |
4137 | clear_bit(__I40E_SERVICE_SCHED, &pf->state); | |
4138 | } | |
4139 | ||
4140 | /** | |
4141 | * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table | |
4142 | * @pf: board private structure | |
4143 | **/ | |
4144 | static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) | |
4145 | { | |
4146 | if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT)) | |
4147 | return; | |
4148 | ||
4149 | pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT; | |
4150 | ||
4151 | /* if interface is down do nothing */ | |
4152 | if (test_bit(__I40E_DOWN, &pf->state)) | |
4153 | return; | |
4154 | } | |
4155 | ||
4156 | /** | |
4157 | * i40e_vsi_link_event - notify VSI of a link event | |
4158 | * @vsi: vsi to be notified | |
4159 | * @link_up: link up or down | |
4160 | **/ | |
4161 | static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) | |
4162 | { | |
4163 | if (!vsi) | |
4164 | return; | |
4165 | ||
4166 | switch (vsi->type) { | |
4167 | case I40E_VSI_MAIN: | |
4168 | if (!vsi->netdev || !vsi->netdev_registered) | |
4169 | break; | |
4170 | ||
4171 | if (link_up) { | |
4172 | netif_carrier_on(vsi->netdev); | |
4173 | netif_tx_wake_all_queues(vsi->netdev); | |
4174 | } else { | |
4175 | netif_carrier_off(vsi->netdev); | |
4176 | netif_tx_stop_all_queues(vsi->netdev); | |
4177 | } | |
4178 | break; | |
4179 | ||
4180 | case I40E_VSI_SRIOV: | |
4181 | break; | |
4182 | ||
4183 | case I40E_VSI_VMDQ2: | |
4184 | case I40E_VSI_CTRL: | |
4185 | case I40E_VSI_MIRROR: | |
4186 | default: | |
4187 | /* there is no notification for other VSIs */ | |
4188 | break; | |
4189 | } | |
4190 | } | |
4191 | ||
4192 | /** | |
4193 | * i40e_veb_link_event - notify elements on the veb of a link event | |
4194 | * @veb: veb to be notified | |
4195 | * @link_up: link up or down | |
4196 | **/ | |
4197 | static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) | |
4198 | { | |
4199 | struct i40e_pf *pf; | |
4200 | int i; | |
4201 | ||
4202 | if (!veb || !veb->pf) | |
4203 | return; | |
4204 | pf = veb->pf; | |
4205 | ||
4206 | /* depth first... */ | |
4207 | for (i = 0; i < I40E_MAX_VEB; i++) | |
4208 | if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) | |
4209 | i40e_veb_link_event(pf->veb[i], link_up); | |
4210 | ||
4211 | /* ... now the local VSIs */ | |
4212 | for (i = 0; i < pf->hw.func_caps.num_vsis; i++) | |
4213 | if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) | |
4214 | i40e_vsi_link_event(pf->vsi[i], link_up); | |
4215 | } | |
4216 | ||
4217 | /** | |
4218 | * i40e_link_event - Update netif_carrier status | |
4219 | * @pf: board private structure | |
4220 | **/ | |
4221 | static void i40e_link_event(struct i40e_pf *pf) | |
4222 | { | |
4223 | bool new_link, old_link; | |
4224 | ||
4225 | new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP); | |
4226 | old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); | |
4227 | ||
4228 | if (new_link == old_link) | |
4229 | return; | |
4230 | ||
6d779b41 AS |
4231 | if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state)) |
4232 | netdev_info(pf->vsi[pf->lan_vsi]->netdev, | |
4233 | "NIC Link is %s\n", (new_link ? "Up" : "Down")); | |
41c445ff JB |
4234 | |
4235 | /* Notify the base of the switch tree connected to | |
4236 | * the link. Floating VEBs are not notified. | |
4237 | */ | |
4238 | if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) | |
4239 | i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); | |
4240 | else | |
4241 | i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link); | |
4242 | ||
4243 | if (pf->vf) | |
4244 | i40e_vc_notify_link_state(pf); | |
4245 | } | |
4246 | ||
4247 | /** | |
4248 | * i40e_check_hang_subtask - Check for hung queues and dropped interrupts | |
4249 | * @pf: board private structure | |
4250 | * | |
4251 | * Set the per-queue flags to request a check for stuck queues in the irq | |
4252 | * clean functions, then force interrupts to be sure the irq clean is called. | |
4253 | **/ | |
4254 | static void i40e_check_hang_subtask(struct i40e_pf *pf) | |
4255 | { | |
4256 | int i, v; | |
4257 | ||
4258 | /* If we're down or resetting, just bail */ | |
4259 | if (test_bit(__I40E_CONFIG_BUSY, &pf->state)) | |
4260 | return; | |
4261 | ||
4262 | /* for each VSI/netdev | |
4263 | * for each Tx queue | |
4264 | * set the check flag | |
4265 | * for each q_vector | |
4266 | * force an interrupt | |
4267 | */ | |
4268 | for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { | |
4269 | struct i40e_vsi *vsi = pf->vsi[v]; | |
4270 | int armed = 0; | |
4271 | ||
4272 | if (!pf->vsi[v] || | |
4273 | test_bit(__I40E_DOWN, &vsi->state) || | |
4274 | (vsi->netdev && !netif_carrier_ok(vsi->netdev))) | |
4275 | continue; | |
4276 | ||
4277 | for (i = 0; i < vsi->num_queue_pairs; i++) { | |
9f65e15b | 4278 | set_check_for_tx_hang(vsi->tx_rings[i]); |
41c445ff | 4279 | if (test_bit(__I40E_HANG_CHECK_ARMED, |
9f65e15b | 4280 | &vsi->tx_rings[i]->state)) |
41c445ff JB |
4281 | armed++; |
4282 | } | |
4283 | ||
4284 | if (armed) { | |
4285 | if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) { | |
4286 | wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, | |
4287 | (I40E_PFINT_DYN_CTL0_INTENA_MASK | | |
4288 | I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK)); | |
4289 | } else { | |
4290 | u16 vec = vsi->base_vector - 1; | |
4291 | u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK | | |
4292 | I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK); | |
4293 | for (i = 0; i < vsi->num_q_vectors; i++, vec++) | |
4294 | wr32(&vsi->back->hw, | |
4295 | I40E_PFINT_DYN_CTLN(vec), val); | |
4296 | } | |
4297 | i40e_flush(&vsi->back->hw); | |
4298 | } | |
4299 | } | |
4300 | } | |
4301 | ||
4302 | /** | |
4303 | * i40e_watchdog_subtask - Update stats for active netdevs and switch components | |
4304 | * @pf: board private structure | |
4305 | **/ | |
4306 | static void i40e_watchdog_subtask(struct i40e_pf *pf) | |
4307 | { | |
4308 | int i; | |
4309 | ||
4310 | /* if interface is down do nothing */ | |
4311 | if (test_bit(__I40E_DOWN, &pf->state) || | |
4312 | test_bit(__I40E_CONFIG_BUSY, &pf->state)) | |
4313 | return; | |
4314 | ||
4315 | /* Update the stats for active netdevs so the network stack | |
4316 | * can look at updated numbers whenever it cares to | |
4317 | */ | |
4318 | for (i = 0; i < pf->hw.func_caps.num_vsis; i++) | |
4319 | if (pf->vsi[i] && pf->vsi[i]->netdev) | |
4320 | i40e_update_stats(pf->vsi[i]); | |
4321 | ||
4322 | /* Update the stats for the active switching components */ | |
4323 | for (i = 0; i < I40E_MAX_VEB; i++) | |
4324 | if (pf->veb[i]) | |
4325 | i40e_update_veb_stats(pf->veb[i]); | |
4326 | } | |
4327 | ||
4328 | /** | |
4329 | * i40e_reset_subtask - Set up for resetting the device and driver | |
4330 | * @pf: board private structure | |
4331 | **/ | |
4332 | static void i40e_reset_subtask(struct i40e_pf *pf) | |
4333 | { | |
4334 | u32 reset_flags = 0; | |
4335 | ||
4336 | if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { | |
4337 | reset_flags |= (1 << __I40E_REINIT_REQUESTED); | |
4338 | clear_bit(__I40E_REINIT_REQUESTED, &pf->state); | |
4339 | } | |
4340 | if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { | |
4341 | reset_flags |= (1 << __I40E_PF_RESET_REQUESTED); | |
4342 | clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); | |
4343 | } | |
4344 | if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { | |
4345 | reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED); | |
4346 | clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); | |
4347 | } | |
4348 | if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { | |
4349 | reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED); | |
4350 | clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); | |
4351 | } | |
4352 | ||
4353 | /* If there's a recovery already waiting, it takes | |
4354 | * precedence over starting a new reset sequence. | |
4355 | */ | |
4356 | if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) { | |
4357 | i40e_handle_reset_warning(pf); | |
4358 | return; | |
4359 | } | |
4360 | ||
4361 | /* If a reset is needed and we're not already down or resetting, do it */ | |
4362 | if (reset_flags && | |
4363 | !test_bit(__I40E_DOWN, &pf->state) && | |
4364 | !test_bit(__I40E_CONFIG_BUSY, &pf->state)) | |
4365 | i40e_do_reset(pf, reset_flags); | |
4366 | } | |
4367 | ||
4368 | /** | |
4369 | * i40e_handle_link_event - Handle link event | |
4370 | * @pf: board private structure | |
4371 | * @e: event info posted on ARQ | |
4372 | **/ | |
4373 | static void i40e_handle_link_event(struct i40e_pf *pf, | |
4374 | struct i40e_arq_event_info *e) | |
4375 | { | |
4376 | struct i40e_hw *hw = &pf->hw; | |
4377 | struct i40e_aqc_get_link_status *status = | |
4378 | (struct i40e_aqc_get_link_status *)&e->desc.params.raw; | |
4379 | struct i40e_link_status *hw_link_info = &hw->phy.link_info; | |
4380 | ||
4381 | /* save off old link status information */ | |
4382 | memcpy(&pf->hw.phy.link_info_old, hw_link_info, | |
4383 | sizeof(pf->hw.phy.link_info_old)); | |
4384 | ||
4385 | /* update link status */ | |
4386 | hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type; | |
4387 | hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed; | |
4388 | hw_link_info->link_info = status->link_info; | |
4389 | hw_link_info->an_info = status->an_info; | |
4390 | hw_link_info->ext_info = status->ext_info; | |
4391 | hw_link_info->lse_enable = | |
4392 | le16_to_cpu(status->command_flags) & | |
4393 | I40E_AQ_LSE_ENABLE; | |
4394 | ||
4395 | /* process the event */ | |
4396 | i40e_link_event(pf); | |
4397 | ||
4398 | /* Do a new status request to re-enable LSE reporting | |
4399 | * and load new status information into the hw struct, | |
4400 | * then see if the status changed while processing the | |
4401 | * initial event. | |
4402 | */ | |
4403 | i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); | |
4404 | i40e_link_event(pf); | |
4405 | } | |
4406 | ||
4407 | /** | |
4408 | * i40e_clean_adminq_subtask - Clean the AdminQ rings | |
4409 | * @pf: board private structure | |
4410 | **/ | |
4411 | static void i40e_clean_adminq_subtask(struct i40e_pf *pf) | |
4412 | { | |
4413 | struct i40e_arq_event_info event; | |
4414 | struct i40e_hw *hw = &pf->hw; | |
4415 | u16 pending, i = 0; | |
4416 | i40e_status ret; | |
4417 | u16 opcode; | |
4418 | u32 val; | |
4419 | ||
4420 | if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state)) | |
4421 | return; | |
4422 | ||
4423 | event.msg_size = I40E_MAX_AQ_BUF_SIZE; | |
4424 | event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); | |
4425 | if (!event.msg_buf) | |
4426 | return; | |
4427 | ||
4428 | do { | |
4429 | ret = i40e_clean_arq_element(hw, &event, &pending); | |
4430 | if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) { | |
4431 | dev_info(&pf->pdev->dev, "No ARQ event found\n"); | |
4432 | break; | |
4433 | } else if (ret) { | |
4434 | dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); | |
4435 | break; | |
4436 | } | |
4437 | ||
4438 | opcode = le16_to_cpu(event.desc.opcode); | |
4439 | switch (opcode) { | |
4440 | ||
4441 | case i40e_aqc_opc_get_link_status: | |
4442 | i40e_handle_link_event(pf, &event); | |
4443 | break; | |
4444 | case i40e_aqc_opc_send_msg_to_pf: | |
4445 | ret = i40e_vc_process_vf_msg(pf, | |
4446 | le16_to_cpu(event.desc.retval), | |
4447 | le32_to_cpu(event.desc.cookie_high), | |
4448 | le32_to_cpu(event.desc.cookie_low), | |
4449 | event.msg_buf, | |
4450 | event.msg_size); | |
4451 | break; | |
4452 | case i40e_aqc_opc_lldp_update_mib: | |
4453 | dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); | |
4454 | break; | |
4455 | case i40e_aqc_opc_event_lan_overflow: | |
4456 | dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); | |
4457 | i40e_handle_lan_overflow_event(pf, &event); | |
4458 | break; | |
4459 | default: | |
4460 | dev_info(&pf->pdev->dev, | |
4461 | "ARQ Error: Unknown event %d received\n", | |
4462 | event.desc.opcode); | |
4463 | break; | |
4464 | } | |
4465 | } while (pending && (i++ < pf->adminq_work_limit)); | |
4466 | ||
4467 | clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); | |
4468 | /* re-enable Admin queue interrupt cause */ | |
4469 | val = rd32(hw, I40E_PFINT_ICR0_ENA); | |
4470 | val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; | |
4471 | wr32(hw, I40E_PFINT_ICR0_ENA, val); | |
4472 | i40e_flush(hw); | |
4473 | ||
4474 | kfree(event.msg_buf); | |
4475 | } | |
4476 | ||
4477 | /** | |
4478 | * i40e_reconstitute_veb - rebuild the VEB and anything connected to it | |
4479 | * @veb: pointer to the VEB instance | |
4480 | * | |
4481 | * This is a recursive function that first builds the attached VSIs then | |
4482 | * recurses in to build the next layer of VEB. We track the connections | |
4483 | * through our own index numbers because the SEIDs from the HW could | |
4484 | * change across the reset. | |
4485 | **/ | |
4486 | static int i40e_reconstitute_veb(struct i40e_veb *veb) | |
4487 | { | |
4488 | struct i40e_vsi *ctl_vsi = NULL; | |
4489 | struct i40e_pf *pf = veb->pf; | |
4490 | int v, veb_idx; | |
4491 | int ret; | |
4492 | ||
4493 | /* build VSI that owns this VEB, temporarily attached to base VEB */ | |
4494 | for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) { | |
4495 | if (pf->vsi[v] && | |
4496 | pf->vsi[v]->veb_idx == veb->idx && | |
4497 | pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { | |
4498 | ctl_vsi = pf->vsi[v]; | |
4499 | break; | |
4500 | } | |
4501 | } | |
4502 | if (!ctl_vsi) { | |
4503 | dev_info(&pf->pdev->dev, | |
4504 | "missing owner VSI for veb_idx %d\n", veb->idx); | |
4505 | ret = -ENOENT; | |
4506 | goto end_reconstitute; | |
4507 | } | |
4508 | if (ctl_vsi != pf->vsi[pf->lan_vsi]) | |
4509 | ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; | |
4510 | ret = i40e_add_vsi(ctl_vsi); | |
4511 | if (ret) { | |
4512 | dev_info(&pf->pdev->dev, | |
4513 | "rebuild of owner VSI failed: %d\n", ret); | |
4514 | goto end_reconstitute; | |
4515 | } | |
4516 | i40e_vsi_reset_stats(ctl_vsi); | |
4517 | ||
4518 | /* create the VEB in the switch and move the VSI onto the VEB */ | |
4519 | ret = i40e_add_veb(veb, ctl_vsi); | |
4520 | if (ret) | |
4521 | goto end_reconstitute; | |
4522 | ||
4523 | /* create the remaining VSIs attached to this VEB */ | |
4524 | for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { | |
4525 | if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) | |
4526 | continue; | |
4527 | ||
4528 | if (pf->vsi[v]->veb_idx == veb->idx) { | |
4529 | struct i40e_vsi *vsi = pf->vsi[v]; | |
4530 | vsi->uplink_seid = veb->seid; | |
4531 | ret = i40e_add_vsi(vsi); | |
4532 | if (ret) { | |
4533 | dev_info(&pf->pdev->dev, | |
4534 | "rebuild of vsi_idx %d failed: %d\n", | |
4535 | v, ret); | |
4536 | goto end_reconstitute; | |
4537 | } | |
4538 | i40e_vsi_reset_stats(vsi); | |
4539 | } | |
4540 | } | |
4541 | ||
4542 | /* create any VEBs attached to this VEB - RECURSION */ | |
4543 | for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { | |
4544 | if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { | |
4545 | pf->veb[veb_idx]->uplink_seid = veb->seid; | |
4546 | ret = i40e_reconstitute_veb(pf->veb[veb_idx]); | |
4547 | if (ret) | |
4548 | break; | |
4549 | } | |
4550 | } | |
4551 | ||
4552 | end_reconstitute: | |
4553 | return ret; | |
4554 | } | |
4555 | ||
4556 | /** | |
4557 | * i40e_get_capabilities - get info about the HW | |
4558 | * @pf: the PF struct | |
4559 | **/ | |
4560 | static int i40e_get_capabilities(struct i40e_pf *pf) | |
4561 | { | |
4562 | struct i40e_aqc_list_capabilities_element_resp *cap_buf; | |
4563 | u16 data_size; | |
4564 | int buf_len; | |
4565 | int err; | |
4566 | ||
4567 | buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); | |
4568 | do { | |
4569 | cap_buf = kzalloc(buf_len, GFP_KERNEL); | |
4570 | if (!cap_buf) | |
4571 | return -ENOMEM; | |
4572 | ||
4573 | /* this loads the data into the hw struct for us */ | |
4574 | err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, | |
4575 | &data_size, | |
4576 | i40e_aqc_opc_list_func_capabilities, | |
4577 | NULL); | |
4578 | /* data loaded, buffer no longer needed */ | |
4579 | kfree(cap_buf); | |
4580 | ||
4581 | if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { | |
4582 | /* retry with a larger buffer */ | |
4583 | buf_len = data_size; | |
4584 | } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { | |
4585 | dev_info(&pf->pdev->dev, | |
4586 | "capability discovery failed: aq=%d\n", | |
4587 | pf->hw.aq.asq_last_status); | |
4588 | return -ENODEV; | |
4589 | } | |
4590 | } while (err); | |
4591 | ||
4592 | if (pf->hw.debug_mask & I40E_DEBUG_USER) | |
4593 | dev_info(&pf->pdev->dev, | |
4594 | "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", | |
4595 | pf->hw.pf_id, pf->hw.func_caps.num_vfs, | |
4596 | pf->hw.func_caps.num_msix_vectors, | |
4597 | pf->hw.func_caps.num_msix_vectors_vf, | |
4598 | pf->hw.func_caps.fd_filters_guaranteed, | |
4599 | pf->hw.func_caps.fd_filters_best_effort, | |
4600 | pf->hw.func_caps.num_tx_qp, | |
4601 | pf->hw.func_caps.num_vsis); | |
4602 | ||
4603 | return 0; | |
4604 | } | |
4605 | ||
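The discovery loop above is a common admin-queue idiom: guess a buffer size, and if the firmware answers ENOMEM, retry with the size it reported. A stripped-down sketch of the pattern; aq_query() is a hypothetical stand-in for any AQ command that reports the needed size:

/* Illustrative sketch only: grow-and-retry admin queue buffer */
int buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
u16 needed_size;
int err;

do {
	void *buf = kzalloc(buf_len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	err = aq_query(hw, buf, buf_len, &needed_size);	/* hypothetical call */
	kfree(buf);
	if (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM)
		buf_len = needed_size;	/* retry with the size firmware asked for */
	else if (hw->aq.asq_last_status != I40E_AQ_RC_OK)
		return -ENODEV;
} while (err);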
4606 | /** | |
4607 | * i40e_fdir_setup - initialize the Flow Director resources | |
4608 | * @pf: board private structure | |
4609 | **/ | |
4610 | static void i40e_fdir_setup(struct i40e_pf *pf) | |
4611 | { | |
4612 | struct i40e_vsi *vsi; | |
4613 | bool new_vsi = false; | |
4614 | int err, i; | |
4615 | ||
958a3e3b SN |
4616 | if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED | |
4617 | I40E_FLAG_FDIR_ATR_ENABLED))) | |
41c445ff JB |
4618 | return; |
4619 | ||
4620 | pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; | |
4621 | ||
4622 | /* find existing or make new FDIR VSI */ | |
4623 | vsi = NULL; | |
4624 | for (i = 0; i < pf->hw.func_caps.num_vsis; i++) | |
4625 | if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) | |
4626 | vsi = pf->vsi[i]; | |
4627 | if (!vsi) { | |
4628 | vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->mac_seid, 0); | |
4629 | if (!vsi) { | |
4630 | dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); | |
4631 | pf->flags &= ~I40E_FLAG_FDIR_ENABLED; | |
4632 | return; | |
4633 | } | |
4634 | new_vsi = true; | |
4635 | } | |
4636 | WARN_ON(vsi->base_queue != I40E_FDIR_RING); | |
4637 | i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_rings); | |
4638 | ||
4639 | err = i40e_vsi_setup_tx_resources(vsi); | |
4640 | if (!err) | |
4641 | err = i40e_vsi_setup_rx_resources(vsi); | |
4642 | if (!err) | |
4643 | err = i40e_vsi_configure(vsi); | |
4644 | if (!err && new_vsi) { | |
4645 | char int_name[IFNAMSIZ + 9]; | |
4646 | snprintf(int_name, sizeof(int_name) - 1, "%s-fdir", | |
4647 | dev_driver_string(&pf->pdev->dev)); | |
4648 | err = i40e_vsi_request_irq(vsi, int_name); | |
4649 | } | |
4650 | if (!err) | |
4651 | err = i40e_up_complete(vsi); | |
4652 | ||
4653 | clear_bit(__I40E_NEEDS_RESTART, &vsi->state); | |
4654 | } | |
4655 | ||
4656 | /** | |
4657 | * i40e_fdir_teardown - release the Flow Director resources | |
4658 | * @pf: board private structure | |
4659 | **/ | |
4660 | static void i40e_fdir_teardown(struct i40e_pf *pf) | |
4661 | { | |
4662 | int i; | |
4663 | ||
4664 | for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { | |
4665 | if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { | |
4666 | i40e_vsi_release(pf->vsi[i]); | |
4667 | break; | |
4668 | } | |
4669 | } | |
4670 | } | |
4671 | ||
4672 | /** | |
4673 | * i40e_handle_reset_warning - prep for the core to reset | |
4674 | * @pf: board private structure | |
4675 | * | |
4676 | * Close up the VFs and other things in prep for a Core Reset, | |
4677 | * then get ready to rebuild the world. | |
4678 | **/ | |
4679 | static void i40e_handle_reset_warning(struct i40e_pf *pf) | |
4680 | { | |
4681 | struct i40e_driver_version dv; | |
4682 | struct i40e_hw *hw = &pf->hw; | |
4683 | i40e_status ret; | |
4684 | u32 v; | |
4685 | ||
4686 | clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); | |
4687 | if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) | |
4688 | return; | |
4689 | ||
4690 | dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n"); | |
4691 | ||
4692 | i40e_vc_notify_reset(pf); | |
4693 | ||
4694 | /* quiesce the VSIs and their queues that are not already DOWN */ | |
4695 | i40e_pf_quiesce_all_vsi(pf); | |
4696 | ||
4697 | for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { | |
4698 | if (pf->vsi[v]) | |
4699 | pf->vsi[v]->seid = 0; | |
4700 | } | |
4701 | ||
4702 | i40e_shutdown_adminq(&pf->hw); | |
4703 | ||
4704 | /* Now we wait for GRST to settle out. | |
4705 | * We don't have to delete the VEBs or VSIs from the hw switch | |
4706 | * because the reset will make them disappear. | |
4707 | */ | |
4708 | ret = i40e_pf_reset(hw); | |
4709 | if (ret) | |
4710 | dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); | |
4711 | pf->pfr_count++; | |
4712 | ||
4713 | if (test_bit(__I40E_DOWN, &pf->state)) | |
4714 | goto end_core_reset; | |
4715 | dev_info(&pf->pdev->dev, "Rebuilding internal switch\n"); | |
4716 | ||
4717 | /* rebuild the basics for the AdminQ, HMC, and initial HW switch */ | |
4718 | ret = i40e_init_adminq(&pf->hw); | |
4719 | if (ret) { | |
4720 | dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret); | |
4721 | goto end_core_reset; | |
4722 | } | |
4723 | ||
4724 | ret = i40e_get_capabilities(pf); | |
4725 | if (ret) { | |
4726 | dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n", | |
4727 | ret); | |
4728 | goto end_core_reset; | |
4729 | } | |
4730 | ||
4731 | /* call shutdown HMC */ | |
4732 | ret = i40e_shutdown_lan_hmc(hw); | |
4733 | if (ret) { | |
4734 | dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret); | |
4735 | goto end_core_reset; | |
4736 | } | |
4737 | ||
4738 | ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, | |
4739 | hw->func_caps.num_rx_qp, | |
4740 | pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); | |
4741 | if (ret) { | |
4742 | dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret); | |
4743 | goto end_core_reset; | |
4744 | } | |
4745 | ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); | |
4746 | if (ret) { | |
4747 | dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret); | |
4748 | goto end_core_reset; | |
4749 | } | |
4750 | ||
4751 | /* do basic switch setup */ | |
4752 | ret = i40e_setup_pf_switch(pf); | |
4753 | if (ret) | |
4754 | goto end_core_reset; | |
4755 | ||
4756 | /* Rebuild the VSIs and VEBs that existed before reset. | |
4757 | * They are still in our local switch element arrays, so only | |
4758 | * need to rebuild the switch model in the HW. | |
4759 | * | |
4760 | * If there were VEBs but the reconstitution failed, we'll try | |
4761 | * to recover minimal use by getting the basic PF VSI working. | |
4762 | */ | |
4763 | if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) { | |
4764 | dev_info(&pf->pdev->dev, "attempting to rebuild switch\n"); | |
4765 | /* find the one VEB connected to the MAC, and find orphans */ | |
4766 | for (v = 0; v < I40E_MAX_VEB; v++) { | |
4767 | if (!pf->veb[v]) | |
4768 | continue; | |
4769 | ||
4770 | if (pf->veb[v]->uplink_seid == pf->mac_seid || | |
4771 | pf->veb[v]->uplink_seid == 0) { | |
4772 | ret = i40e_reconstitute_veb(pf->veb[v]); | |
4773 | ||
4774 | if (!ret) | |
4775 | continue; | |
4776 | ||
4777 | /* If Main VEB failed, we're in deep doodoo, | |
4778 | * so give up rebuilding the switch and set up | |
4779 | * for minimal rebuild of PF VSI. | |
4780 | * If orphan failed, we'll report the error | |
4781 | * but try to keep going. | |
4782 | */ | |
4783 | if (pf->veb[v]->uplink_seid == pf->mac_seid) { | |
4784 | dev_info(&pf->pdev->dev, | |
4785 | "rebuild of switch failed: %d, will try to set up simple PF connection\n", | |
4786 | ret); | |
4787 | pf->vsi[pf->lan_vsi]->uplink_seid | |
4788 | = pf->mac_seid; | |
4789 | break; | |
4790 | } else if (pf->veb[v]->uplink_seid == 0) { | |
4791 | dev_info(&pf->pdev->dev, | |
4792 | "rebuild of orphan VEB failed: %d\n", | |
4793 | ret); | |
4794 | } | |
4795 | } | |
4796 | } | |
4797 | } | |
4798 | ||
4799 | if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) { | |
4800 | dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); | |
4801 | /* no VEB, so rebuild only the Main VSI */ | |
4802 | ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]); | |
4803 | if (ret) { | |
4804 | dev_info(&pf->pdev->dev, | |
4805 | "rebuild of Main VSI failed: %d\n", ret); | |
4806 | goto end_core_reset; | |
4807 | } | |
4808 | } | |
4809 | ||
4810 | /* reinit the misc interrupt */ | |
4811 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) | |
4812 | ret = i40e_setup_misc_vector(pf); | |
4813 | ||
4814 | /* restart the VSIs that were rebuilt and running before the reset */ | |
4815 | i40e_pf_unquiesce_all_vsi(pf); | |
4816 | ||
4817 | /* tell the firmware that we're starting */ | |
4818 | dv.major_version = DRV_VERSION_MAJOR; | |
4819 | dv.minor_version = DRV_VERSION_MINOR; | |
4820 | dv.build_version = DRV_VERSION_BUILD; | |
4821 | dv.subbuild_version = 0; | |
4822 | i40e_aq_send_driver_version(&pf->hw, &dv, NULL); | |
4823 | ||
4824 | dev_info(&pf->pdev->dev, "PF reset done\n"); | |
4825 | ||
4826 | end_core_reset: | |
4827 | clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); | |
4828 | } | |
4829 | ||
4830 | /** | |
4831 | * i40e_handle_mdd_event - handle a Malicious Driver Detection event | |
4832 | * @pf: pointer to the pf structure | |
4833 | * | |
4834 | * Called from the MDD irq handler to identify possibly malicious VFs | |
4835 | **/ | |
4836 | static void i40e_handle_mdd_event(struct i40e_pf *pf) | |
4837 | { | |
4838 | struct i40e_hw *hw = &pf->hw; | |
4839 | bool mdd_detected = false; | |
4840 | struct i40e_vf *vf; | |
4841 | u32 reg; | |
4842 | int i; | |
4843 | ||
4844 | if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)) | |
4845 | return; | |
4846 | ||
4847 | /* find what triggered the MDD event */ | |
4848 | reg = rd32(hw, I40E_GL_MDET_TX); | |
4849 | if (reg & I40E_GL_MDET_TX_VALID_MASK) { | |
4850 | u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK) | |
4851 | >> I40E_GL_MDET_TX_FUNCTION_SHIFT; | |
4852 | u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) | |
4853 | >> I40E_GL_MDET_TX_EVENT_SHIFT; | |
4854 | u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) | |
4855 | >> I40E_GL_MDET_TX_QUEUE_SHIFT; | |
4856 | dev_info(&pf->pdev->dev, | |
4857 | "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n", | |
4858 | event, queue, func); | |
4859 | wr32(hw, I40E_GL_MDET_TX, 0xffffffff); | |
4860 | mdd_detected = true; | |
4861 | } | |
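/* Illustrative decode of the Tx event above (field values assumed, not
 * taken from a datasheet): if the raw I40E_GL_MDET_TX value holds 0x02
 * in its FUNCTION field, 0x01 in its EVENT field and 12 in its QUEUE
 * field, the mask-and-shift extractions yield func = 0x02, event = 0x01,
 * queue = 12, and the log line reads "Malicious Driver Detection TX
 * event 0x01 on q 12 of function 0x02".
 */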
4862 | reg = rd32(hw, I40E_GL_MDET_RX); | |
4863 | if (reg & I40E_GL_MDET_RX_VALID_MASK) { | |
4864 | u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) | |
4865 | >> I40E_GL_MDET_RX_FUNCTION_SHIFT; | |
4866 | u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) | |
4867 | >> I40E_GL_MDET_RX_EVENT_SHIFT; | |
4868 | u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) | |
4869 | >> I40E_GL_MDET_RX_QUEUE_SHIFT; | |
4870 | dev_info(&pf->pdev->dev, | |
4871 | "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n", | |
4872 | event, queue, func); | |
4873 | wr32(hw, I40E_GL_MDET_RX, 0xffffffff); | |
4874 | mdd_detected = true; | |
4875 | } | |
4876 | ||
4877 | /* see if one of the VFs needs its hand slapped */ | |
4878 | for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { | |
4879 | vf = &(pf->vf[i]); | |
4880 | reg = rd32(hw, I40E_VP_MDET_TX(i)); | |
4881 | if (reg & I40E_VP_MDET_TX_VALID_MASK) { | |
4882 | wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); | |
4883 | vf->num_mdd_events++; | |
4884 | dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i); | |
4885 | } | |
4886 | ||
4887 | reg = rd32(hw, I40E_VP_MDET_RX(i)); | |
4888 | if (reg & I40E_VP_MDET_RX_VALID_MASK) { | |
4889 | wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); | |
4890 | vf->num_mdd_events++; | |
4891 | dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i); | |
4892 | } | |
4893 | ||
4894 | if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) { | |
4895 | dev_info(&pf->pdev->dev, | |
4896 | "Too many MDD events on VF %d, disabled\n", i); | |
4897 | dev_info(&pf->pdev->dev, | |
4898 | "Use PF Control I/F to re-enable the VF\n"); | |
4899 | set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); | |
4900 | } | |
4901 | } | |
4902 | ||
4903 | /* re-enable mdd interrupt cause */ | |
4904 | clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state); | |
4905 | reg = rd32(hw, I40E_PFINT_ICR0_ENA); | |
4906 | reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; | |
4907 | wr32(hw, I40E_PFINT_ICR0_ENA, reg); | |
4908 | i40e_flush(hw); | |
4909 | } | |
4910 | ||
4911 | /** | |
4912 | * i40e_service_task - Run the driver's async subtasks | |
4913 | * @work: pointer to work_struct containing our data | |
4914 | **/ | |
4915 | static void i40e_service_task(struct work_struct *work) | |
4916 | { | |
4917 | struct i40e_pf *pf = container_of(work, | |
4918 | struct i40e_pf, | |
4919 | service_task); | |
4920 | unsigned long start_time = jiffies; | |
4921 | ||
4922 | i40e_reset_subtask(pf); | |
4923 | i40e_handle_mdd_event(pf); | |
4924 | i40e_vc_process_vflr_event(pf); | |
4925 | i40e_watchdog_subtask(pf); | |
4926 | i40e_fdir_reinit_subtask(pf); | |
4927 | i40e_check_hang_subtask(pf); | |
4928 | i40e_sync_filters_subtask(pf); | |
4929 | i40e_clean_adminq_subtask(pf); | |
4930 | ||
4931 | i40e_service_event_complete(pf); | |
4932 | ||
4933 | /* If the tasks have taken longer than one timer cycle or there | |
4934 | * is more work to be done, reschedule the service task now | |
4935 | * rather than wait for the timer to tick again. | |
4936 | */ | |
4937 | if (time_after(jiffies, (start_time + pf->service_timer_period)) || | |
4938 | test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) || | |
4939 | test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) || | |
4940 | test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) | |
4941 | i40e_service_event_schedule(pf); | |
4942 | } | |
4943 | ||
4944 | /** | |
4945 | * i40e_service_timer - timer callback | |
4946 | * @data: pointer to PF struct | |
4947 | **/ | |
4948 | static void i40e_service_timer(unsigned long data) | |
4949 | { | |
4950 | struct i40e_pf *pf = (struct i40e_pf *)data; | |
4951 | ||
4952 | mod_timer(&pf->service_timer, | |
4953 | round_jiffies(jiffies + pf->service_timer_period)); | |
4954 | i40e_service_event_schedule(pf); | |
4955 | } | |
4956 | ||
4957 | /** | |
4958 | * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI | |
4959 | * @vsi: the VSI being configured | |
4960 | **/ | |
4961 | static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) | |
4962 | { | |
4963 | struct i40e_pf *pf = vsi->back; | |
4964 | ||
4965 | switch (vsi->type) { | |
4966 | case I40E_VSI_MAIN: | |
4967 | vsi->alloc_queue_pairs = pf->num_lan_qps; | |
4968 | vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, | |
4969 | I40E_REQ_DESCRIPTOR_MULTIPLE); | |
4970 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) | |
4971 | vsi->num_q_vectors = pf->num_lan_msix; | |
4972 | else | |
4973 | vsi->num_q_vectors = 1; | |
4974 | ||
4975 | break; | |
4976 | ||
4977 | case I40E_VSI_FDIR: | |
4978 | vsi->alloc_queue_pairs = 1; | |
4979 | vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT, | |
4980 | I40E_REQ_DESCRIPTOR_MULTIPLE); | |
4981 | vsi->num_q_vectors = 1; | |
4982 | break; | |
4983 | ||
4984 | case I40E_VSI_VMDQ2: | |
4985 | vsi->alloc_queue_pairs = pf->num_vmdq_qps; | |
4986 | vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, | |
4987 | I40E_REQ_DESCRIPTOR_MULTIPLE); | |
4988 | vsi->num_q_vectors = pf->num_vmdq_msix; | |
4989 | break; | |
4990 | ||
4991 | case I40E_VSI_SRIOV: | |
4992 | vsi->alloc_queue_pairs = pf->num_vf_qps; | |
4993 | vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, | |
4994 | I40E_REQ_DESCRIPTOR_MULTIPLE); | |
4995 | break; | |
4996 | ||
4997 | default: | |
4998 | WARN_ON(1); | |
4999 | return -ENODATA; | |
5000 | } | |
5001 | ||
5002 | return 0; | |
5003 | } | |
5004 | ||
5005 | /** | |
5006 | * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF | |
5007 | * @pf: board private structure | |
5008 | * @type: type of VSI | |
5009 | * | |
5010 | * On error: returns error code (negative) | |
5011 | * On success: returns vsi index in PF (positive) | |
5012 | **/ | |
5013 | static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) | |
5014 | { | |
5015 | int ret = -ENODEV; | |
5016 | struct i40e_vsi *vsi; | |
493fb300 | 5017 | int sz_vectors; |
9f65e15b | 5018 | int sz_rings; |
41c445ff JB |
5019 | int vsi_idx; |
5020 | int i; | |
5021 | ||
5022 | /* Need to protect the allocation of the VSIs at the PF level */ | |
5023 | mutex_lock(&pf->switch_mutex); | |
5024 | ||
5025 | /* VSI list may be fragmented if VSI creation/destruction has | |
5026 | * been happening. We can afford to do a quick scan to look | |
5027 | * for any free VSIs in the list. | |
5028 | * | |
5029 | * find next empty vsi slot, looping back around if necessary | |
5030 | */ | |
5031 | i = pf->next_vsi; | |
5032 | while (i < pf->hw.func_caps.num_vsis && pf->vsi[i]) | |
5033 | i++; | |
5034 | if (i >= pf->hw.func_caps.num_vsis) { | |
5035 | i = 0; | |
5036 | while (i < pf->next_vsi && pf->vsi[i]) | |
5037 | i++; | |
5038 | } | |
5039 | ||
5040 | if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) { | |
5041 | vsi_idx = i; /* Found one! */ | |
5042 | } else { | |
5043 | ret = -ENODEV; | |
493fb300 | 5044 | goto unlock_pf; /* out of VSI slots! */ |
41c445ff JB |
5045 | } |
5046 | pf->next_vsi = ++i; | |
5047 | ||
5048 | vsi = kzalloc(sizeof(*vsi), GFP_KERNEL); | |
5049 | if (!vsi) { | |
5050 | ret = -ENOMEM; | |
493fb300 | 5051 | goto unlock_pf; |
41c445ff JB |
5052 | } |
5053 | vsi->type = type; | |
5054 | vsi->back = pf; | |
5055 | set_bit(__I40E_DOWN, &vsi->state); | |
5056 | vsi->flags = 0; | |
5057 | vsi->idx = vsi_idx; | |
5058 | vsi->rx_itr_setting = pf->rx_itr_default; | |
5059 | vsi->tx_itr_setting = pf->tx_itr_default; | |
5060 | vsi->netdev_registered = false; | |
5061 | vsi->work_limit = I40E_DEFAULT_IRQ_WORK; | |
5062 | INIT_LIST_HEAD(&vsi->mac_filter_list); | |
5063 | ||
9f65e15b AD |
5064 | ret = i40e_set_num_rings_in_vsi(vsi); |
5065 | if (ret) | |
5066 | goto err_rings; | |
5067 | ||
5068 | /* allocate memory for ring pointers */ | |
5069 | sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2; | |
5070 | vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL); | |
5071 | if (!vsi->tx_rings) { | |
5072 | ret = -ENOMEM; | |
5073 | goto err_rings; | |
5074 | } | |
5075 | vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs]; | |
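/* Note: tx_rings and rx_rings share the single allocation made just
 * above - the first alloc_queue_pairs slots hold the Tx ring pointers
 * and rx_rings aliases the second half - which is why i40e_vsi_clear()
 * later frees only vsi->tx_rings.
 */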
41c445ff | 5076 | |
493fb300 AD |
5077 | /* allocate memory for q_vector pointers */ |
5078 | sz_vectors = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors; | |
5079 | vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL); | |
5080 | if (!vsi->q_vectors) { | |
5081 | ret = -ENOMEM; | |
5082 | goto err_vectors; | |
5083 | } | |
5084 | ||
41c445ff JB |
5085 | /* Setup default MSIX irq handler for VSI */ |
5086 | i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); | |
5087 | ||
5088 | pf->vsi[vsi_idx] = vsi; | |
5089 | ret = vsi_idx; | |
493fb300 AD |
5090 | goto unlock_pf; |
5091 | ||
5092 | err_vectors: | |
9f65e15b AD |
5093 | kfree(vsi->tx_rings); |
5094 | err_rings: | |
493fb300 AD |
5095 | pf->next_vsi = i - 1; |
5096 | kfree(vsi); | |
5097 | unlock_pf: | |
41c445ff JB |
5098 | mutex_unlock(&pf->switch_mutex); |
5099 | return ret; | |
5100 | } | |
5101 | ||
5102 | /** | |
5103 | * i40e_vsi_clear - Deallocate the VSI provided | |
5104 | * @vsi: the VSI being un-configured | |
5105 | **/ | |
5106 | static int i40e_vsi_clear(struct i40e_vsi *vsi) | |
5107 | { | |
5108 | struct i40e_pf *pf; | |
5109 | ||
5110 | if (!vsi) | |
5111 | return 0; | |
5112 | ||
5113 | if (!vsi->back) | |
5114 | goto free_vsi; | |
5115 | pf = vsi->back; | |
5116 | ||
5117 | mutex_lock(&pf->switch_mutex); | |
5118 | if (!pf->vsi[vsi->idx]) { | |
5119 | dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n", | |
5120 | vsi->idx, vsi->idx, vsi, vsi->type); | |
5121 | goto unlock_vsi; | |
5122 | } | |
5123 | ||
5124 | if (pf->vsi[vsi->idx] != vsi) { | |
5125 | dev_err(&pf->pdev->dev, | |
5126 | "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n", | |
5127 | pf->vsi[vsi->idx]->idx, | |
5128 | pf->vsi[vsi->idx], | |
5129 | pf->vsi[vsi->idx]->type, | |
5130 | vsi->idx, vsi, vsi->type); | |
5131 | goto unlock_vsi; | |
5132 | } | |
5133 | ||
5134 | /* updates the pf for this cleared vsi */ | |
5135 | i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); | |
5136 | i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); | |
5137 | ||
493fb300 AD |
5138 | /* free the ring and vector containers */ |
5139 | kfree(vsi->q_vectors); | |
9f65e15b | 5140 | kfree(vsi->tx_rings); |
493fb300 | 5141 | |
41c445ff JB |
5142 | pf->vsi[vsi->idx] = NULL; |
5143 | if (vsi->idx < pf->next_vsi) | |
5144 | pf->next_vsi = vsi->idx; | |
5145 | ||
5146 | unlock_vsi: | |
5147 | mutex_unlock(&pf->switch_mutex); | |
5148 | free_vsi: | |
5149 | kfree(vsi); | |
5150 | ||
5151 | return 0; | |
5152 | } | |
5153 | ||
9f65e15b AD |
5154 | /** |
5155 | * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI | |
5156 | * @vsi: the VSI being cleaned | |
5157 | **/ | |
5158 | static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi) | |
5159 | { | |
5160 | int i; | |
5161 | ||
00403f04 MW |
5162 | if (vsi->tx_rings[0]) |
5163 | for (i = 0; i < vsi->alloc_queue_pairs; i++) { | |
5164 | kfree_rcu(vsi->tx_rings[i], rcu); | |
5165 | vsi->tx_rings[i] = NULL; | |
5166 | vsi->rx_rings[i] = NULL; | |
5167 | } | |
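/* Each Tx/Rx pair was carved from one allocation in i40e_alloc_rings()
 * (the Rx ring sits directly behind its Tx ring), so freeing
 * tx_rings[i] releases both and rx_rings[i] only needs to be NULLed.
 */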
9f65e15b AD |
5168 | |
5169 | return 0; | |
5170 | } | |
5171 | ||
41c445ff JB |
5172 | /** |
5173 | * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI | |
5174 | * @vsi: the VSI being configured | |
5175 | **/ | |
5176 | static int i40e_alloc_rings(struct i40e_vsi *vsi) | |
5177 | { | |
5178 | struct i40e_pf *pf = vsi->back; | |
41c445ff JB |
5179 | int i; |
5180 | ||
41c445ff JB |
5181 | /* Set basic values in the rings to be used later during open() */ |
5182 | for (i = 0; i < vsi->alloc_queue_pairs; i++) { | |
9f65e15b AD |
5183 | struct i40e_ring *tx_ring; |
5184 | struct i40e_ring *rx_ring; | |
5185 | ||
5186 | tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL); | |
5187 | if (!tx_ring) | |
5188 | goto err_out; | |
41c445ff JB |
5189 | |
5190 | tx_ring->queue_index = i; | |
5191 | tx_ring->reg_idx = vsi->base_queue + i; | |
5192 | tx_ring->ring_active = false; | |
5193 | tx_ring->vsi = vsi; | |
5194 | tx_ring->netdev = vsi->netdev; | |
5195 | tx_ring->dev = &pf->pdev->dev; | |
5196 | tx_ring->count = vsi->num_desc; | |
5197 | tx_ring->size = 0; | |
5198 | tx_ring->dcb_tc = 0; | |
9f65e15b | 5199 | vsi->tx_rings[i] = tx_ring; |
41c445ff | 5200 | |
9f65e15b | 5201 | rx_ring = &tx_ring[1]; |
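/* The Rx ring lives in the same allocation, immediately after the Tx
 * ring (hence the "* 2" in the kzalloc above); i40e_vsi_clear_rings()
 * relies on this layout when it frees only the Tx side of each pair.
 */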
41c445ff JB |
5202 | rx_ring->queue_index = i; |
5203 | rx_ring->reg_idx = vsi->base_queue + i; | |
5204 | rx_ring->ring_active = false; | |
5205 | rx_ring->vsi = vsi; | |
5206 | rx_ring->netdev = vsi->netdev; | |
5207 | rx_ring->dev = &pf->pdev->dev; | |
5208 | rx_ring->count = vsi->num_desc; | |
5209 | rx_ring->size = 0; | |
5210 | rx_ring->dcb_tc = 0; | |
5211 | if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) | |
5212 | set_ring_16byte_desc_enabled(rx_ring); | |
5213 | else | |
5214 | clear_ring_16byte_desc_enabled(rx_ring); | |
9f65e15b | 5215 | vsi->rx_rings[i] = rx_ring; |
41c445ff JB |
5216 | } |
5217 | ||
5218 | return 0; | |
9f65e15b AD |
5219 | |
5220 | err_out: | |
5221 | i40e_vsi_clear_rings(vsi); | |
5222 | return -ENOMEM; | |
41c445ff JB |
5223 | } |
5224 | ||
5225 | /** | |
5226 | * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel | |
5227 | * @pf: board private structure | |
5228 | * @vectors: the number of MSI-X vectors to request | |
5229 | * | |
5230 | * Returns the number of vectors reserved, or error | |
5231 | **/ | |
5232 | static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) | |
5233 | { | |
5234 | int err = 0; | |
5235 | ||
5236 | pf->num_msix_entries = 0; | |
5237 | while (vectors >= I40E_MIN_MSIX) { | |
5238 | err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors); | |
5239 | if (err == 0) { | |
5240 | /* good to go */ | |
5241 | pf->num_msix_entries = vectors; | |
5242 | break; | |
5243 | } else if (err < 0) { | |
5244 | /* total failure */ | |
5245 | dev_info(&pf->pdev->dev, | |
5246 | "MSI-X vector reservation failed: %d\n", err); | |
5247 | vectors = 0; | |
5248 | break; | |
5249 | } else { | |
5250 | /* err > 0 is the hint for retry */ | |
5251 | dev_info(&pf->pdev->dev, | |
5252 | "MSI-X vectors wanted %d, retrying with %d\n", | |
5253 | vectors, err); | |
5254 | vectors = err; | |
5255 | } | |
5256 | } | |
5257 | ||
5258 | if (vectors > 0 && vectors < I40E_MIN_MSIX) { | |
5259 | dev_info(&pf->pdev->dev, | |
5260 | "Couldn't get enough vectors, only %d available\n", | |
5261 | vectors); | |
5262 | vectors = 0; | |
5263 | } | |
5264 | ||
5265 | return vectors; | |
5266 | } | |
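/* Usage sketch (numbers illustrative): asking for 26 vectors on a system
 * that can only spare 16 makes pci_enable_msix() return 16 as a hint,
 * the loop retries with 16 and, on success, num_msix_entries ends up at
 * 16.  Anything that falls below I40E_MIN_MSIX is treated as a failure.
 */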
5267 | ||
5268 | /** | |
5269 | * i40e_init_msix - Setup the MSIX capability | |
5270 | * @pf: board private structure | |
5271 | * | |
5272 | * Work with the OS to set up the MSIX vectors needed. | |
5273 | * | |
5274 | * Returns 0 on success, negative on failure | |
5275 | **/ | |
5276 | static int i40e_init_msix(struct i40e_pf *pf) | |
5277 | { | |
5278 | i40e_status err = 0; | |
5279 | struct i40e_hw *hw = &pf->hw; | |
5280 | int v_budget, i; | |
5281 | int vec; | |
5282 | ||
5283 | if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) | |
5284 | return -ENODEV; | |
5285 | ||
5286 | /* The number of vectors we'll request is made up of: | |
5287 | * - Add 1 for "other" cause for Admin Queue events, etc. | |
5288 | * - The number of LAN queue pairs | |
5289 | * already adjusted for the NUMA node | |
5290 | * assumes symmetric Tx/Rx pairing | |
5291 | * - The number of VMDq pairs | |
5292 | * Once we count this up, try the request. | |
5293 | * | |
5294 | * If we can't get what we want, we'll simplify to nearly nothing | |
5295 | * and try again. If that still fails, we punt. | |
5296 | */ | |
5297 | pf->num_lan_msix = pf->num_lan_qps; | |
5298 | pf->num_vmdq_msix = pf->num_vmdq_qps; | |
5299 | v_budget = 1 + pf->num_lan_msix; | |
5300 | v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix); | |
5301 | if (pf->flags & I40E_FLAG_FDIR_ENABLED) | |
5302 | v_budget++; | |
5303 | ||
5304 | /* Scale down if necessary, and the rings will share vectors */ | |
5305 | v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors); | |
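/* For illustration (the counts are assumed, not driver defaults): with
 * 8 LAN queue pairs, 8 VMDq VSIs of 2 queue pairs each and Flow Director
 * enabled, v_budget = 1 + 8 + 8 * 2 + 1 = 26; the min_t() above then
 * caps that at whatever MSI-X the function actually reports.
 */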
5306 | ||
5307 | pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), | |
5308 | GFP_KERNEL); | |
5309 | if (!pf->msix_entries) | |
5310 | return -ENOMEM; | |
5311 | ||
5312 | for (i = 0; i < v_budget; i++) | |
5313 | pf->msix_entries[i].entry = i; | |
5314 | vec = i40e_reserve_msix_vectors(pf, v_budget); | |
5315 | if (vec < I40E_MIN_MSIX) { | |
5316 | pf->flags &= ~I40E_FLAG_MSIX_ENABLED; | |
5317 | kfree(pf->msix_entries); | |
5318 | pf->msix_entries = NULL; | |
5319 | return -ENODEV; | |
5320 | ||
5321 | } else if (vec == I40E_MIN_MSIX) { | |
5322 | /* Adjust for minimal MSIX use */ | |
5323 | dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n"); | |
5324 | pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; | |
5325 | pf->num_vmdq_vsis = 0; | |
5326 | pf->num_vmdq_qps = 0; | |
5327 | pf->num_vmdq_msix = 0; | |
5328 | pf->num_lan_qps = 1; | |
5329 | pf->num_lan_msix = 1; | |
5330 | ||
5331 | } else if (vec != v_budget) { | |
5332 | /* Scale vector usage down */ | |
5333 | pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ | |
5334 | vec--; /* reserve the misc vector */ | |
5335 | ||
5336 | /* partition out the remaining vectors */ | |
5337 | switch (vec) { | |
5338 | case 2: | |
5339 | pf->num_vmdq_vsis = 1; | |
5340 | pf->num_lan_msix = 1; | |
5341 | break; | |
5342 | case 3: | |
5343 | pf->num_vmdq_vsis = 1; | |
5344 | pf->num_lan_msix = 2; | |
5345 | break; | |
5346 | default: | |
5347 | pf->num_lan_msix = min_t(int, (vec / 2), | |
5348 | pf->num_lan_qps); | |
5349 | pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix), | |
5350 | I40E_DEFAULT_NUM_VMDQ_VSI); | |
5351 | break; | |
5352 | } | |
5353 | } | |
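/* Continuing the illustration: if only 10 of those vectors are granted,
 * one is held back for the misc/AdminQ cause and the default case above
 * splits the remaining 9 into num_lan_msix = min(9 / 2, num_lan_qps)
 * LAN vectors, with the rest offered to VMDq VSIs at one vector apiece.
 */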
5354 | ||
5355 | return err; | |
5356 | } | |
5357 | ||
493fb300 AD |
5358 | /** |
5359 | * i40e_alloc_q_vector - Allocate memory for a single interrupt vector | |
5360 | * @vsi: the VSI being configured | |
5361 | * @v_idx: index of the vector in the vsi struct | |
5362 | * | |
5363 | * We allocate one q_vector. If allocation fails we return -ENOMEM. | |
5364 | **/ | |
5365 | static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) | |
5366 | { | |
5367 | struct i40e_q_vector *q_vector; | |
5368 | ||
5369 | /* allocate q_vector */ | |
5370 | q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); | |
5371 | if (!q_vector) | |
5372 | return -ENOMEM; | |
5373 | ||
5374 | q_vector->vsi = vsi; | |
5375 | q_vector->v_idx = v_idx; | |
5376 | cpumask_set_cpu(v_idx, &q_vector->affinity_mask); | |
5377 | if (vsi->netdev) | |
5378 | netif_napi_add(vsi->netdev, &q_vector->napi, | |
5379 | i40e_napi_poll, vsi->work_limit); | |
5380 | ||
cd0b6fa6 AD |
5381 | q_vector->rx.latency_range = I40E_LOW_LATENCY; |
5382 | q_vector->tx.latency_range = I40E_LOW_LATENCY; | |
5383 | ||
493fb300 AD |
5384 | /* tie q_vector and vsi together */ |
5385 | vsi->q_vectors[v_idx] = q_vector; | |
5386 | ||
5387 | return 0; | |
5388 | } | |
5389 | ||
41c445ff JB |
5390 | /** |
5391 | * i40e_alloc_q_vectors - Allocate memory for interrupt vectors | |
5392 | * @vsi: the VSI being configured | |
5393 | * | |
5394 | * We allocate one q_vector per queue interrupt. If allocation fails we | |
5395 | * return -ENOMEM. | |
5396 | **/ | |
5397 | static int i40e_alloc_q_vectors(struct i40e_vsi *vsi) | |
5398 | { | |
5399 | struct i40e_pf *pf = vsi->back; | |
5400 | int v_idx, num_q_vectors; | |
493fb300 | 5401 | int err; |
41c445ff JB |
5402 | |
5403 | /* if not MSIX, give the one vector only to the LAN VSI */ | |
5404 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) | |
5405 | num_q_vectors = vsi->num_q_vectors; | |
5406 | else if (vsi == pf->vsi[pf->lan_vsi]) | |
5407 | num_q_vectors = 1; | |
5408 | else | |
5409 | return -EINVAL; | |
5410 | ||
41c445ff | 5411 | for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { |
493fb300 AD |
5412 | err = i40e_alloc_q_vector(vsi, v_idx); |
5413 | if (err) | |
5414 | goto err_out; | |
41c445ff JB |
5415 | } |
5416 | ||
5417 | return 0; | |
493fb300 AD |
5418 | |
5419 | err_out: | |
5420 | while (v_idx--) | |
5421 | i40e_free_q_vector(vsi, v_idx); | |
5422 | ||
5423 | return err; | |
41c445ff JB |
5424 | } |
5425 | ||
5426 | /** | |
5427 | * i40e_init_interrupt_scheme - Determine proper interrupt scheme | |
5428 | * @pf: board private structure to initialize | |
5429 | **/ | |
5430 | static void i40e_init_interrupt_scheme(struct i40e_pf *pf) | |
5431 | { | |
5432 | int err = 0; | |
5433 | ||
5434 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) { | |
5435 | err = i40e_init_msix(pf); | |
5436 | if (err) { | |
958a3e3b SN |
5437 | pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | |
5438 | I40E_FLAG_RSS_ENABLED | | |
41c445ff JB |
5439 | I40E_FLAG_MQ_ENABLED | |
5440 | I40E_FLAG_DCB_ENABLED | | |
5441 | I40E_FLAG_SRIOV_ENABLED | | |
5442 | I40E_FLAG_FDIR_ENABLED | | |
5443 | I40E_FLAG_FDIR_ATR_ENABLED | | |
5444 | I40E_FLAG_VMDQ_ENABLED); | |
5445 | ||
5446 | /* rework the queue expectations without MSIX */ | |
5447 | i40e_determine_queue_usage(pf); | |
5448 | } | |
5449 | } | |
5450 | ||
5451 | if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && | |
5452 | (pf->flags & I40E_FLAG_MSI_ENABLED)) { | |
958a3e3b | 5453 | dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n"); |
41c445ff JB |
5454 | err = pci_enable_msi(pf->pdev); |
5455 | if (err) { | |
958a3e3b | 5456 | dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err); |
41c445ff JB |
5457 | pf->flags &= ~I40E_FLAG_MSI_ENABLED; |
5458 | } | |
5459 | } | |
5460 | ||
958a3e3b SN |
5461 | if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) |
5462 | dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n"); | |
5463 | ||
41c445ff JB |
5464 | /* track first vector for misc interrupts */ |
5465 | err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1); | |
5466 | } | |
5467 | ||
5468 | /** | |
5469 | * i40e_setup_misc_vector - Setup the misc vector to handle non queue events | |
5470 | * @pf: board private structure | |
5471 | * | |
5472 | * This sets up the handler for MSIX 0, which is used to manage the | |
5473 | * non-queue interrupts, e.g. AdminQ and errors. This is not used | |
5474 | * when in MSI or Legacy interrupt mode. | |
5475 | **/ | |
5476 | static int i40e_setup_misc_vector(struct i40e_pf *pf) | |
5477 | { | |
5478 | struct i40e_hw *hw = &pf->hw; | |
5479 | int err = 0; | |
5480 | ||
5481 | /* Only request the irq if this is the first time through, and | |
5482 | * not when we're rebuilding after a Reset | |
5483 | */ | |
5484 | if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { | |
5485 | err = request_irq(pf->msix_entries[0].vector, | |
5486 | i40e_intr, 0, pf->misc_int_name, pf); | |
5487 | if (err) { | |
5488 | dev_info(&pf->pdev->dev, | |
5489 | "request_irq for msix_misc failed: %d\n", err); | |
5490 | return -EFAULT; | |
5491 | } | |
5492 | } | |
5493 | ||
5494 | i40e_enable_misc_int_causes(hw); | |
5495 | ||
5496 | /* associate no queues to the misc vector */ | |
5497 | wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST); | |
5498 | wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K); | |
5499 | ||
5500 | i40e_flush(hw); | |
5501 | ||
5502 | i40e_irq_dynamic_enable_icr0(pf); | |
5503 | ||
5504 | return err; | |
5505 | } | |
5506 | ||
5507 | /** | |
5508 | * i40e_config_rss - Prepare for RSS if used | |
5509 | * @pf: board private structure | |
5510 | **/ | |
5511 | static int i40e_config_rss(struct i40e_pf *pf) | |
5512 | { | |
5513 | struct i40e_hw *hw = &pf->hw; | |
5514 | u32 lut = 0; | |
5515 | int i, j; | |
5516 | u64 hena; | |
5517 | /* Set of random keys generated using kernel random number generator */ | |
5518 | static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687, | |
5519 | 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377, | |
5520 | 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d, | |
5521 | 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be}; | |
5522 | ||
5523 | /* Fill out hash function seed */ | |
5524 | for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) | |
5525 | wr32(hw, I40E_PFQF_HKEY(i), seed[i]); | |
5526 | ||
5527 | /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ | |
5528 | hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | | |
5529 | ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); | |
5530 | hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | | |
5531 | ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | | |
5532 | ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | | |
5533 | ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | | |
5534 | ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | | |
5535 | ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | | |
5536 | ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | | |
5537 | ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | | |
5538 | ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)| | |
5539 | ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); | |
5540 | wr32(hw, I40E_PFQF_HENA(0), (u32)hena); | |
5541 | wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); | |
5542 | ||
5543 | /* Populate the LUT with max no. of queues in round robin fashion */ | |
5544 | for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) { | |
5545 | ||
5546 | /* The assumption is that lan qp count will be the highest | |
5547 | * qp count for any PF VSI that needs RSS. | |
5548 | * If multiple VSIs need RSS support, all the qp counts | |
5549 | * for those VSIs should be a power of 2 for RSS to work. | |
5550 | * If LAN VSI is the only consumer for RSS then this requirement | |
5551 | * is not necessary. | |
5552 | */ | |
5553 | if (j == pf->rss_size) | |
5554 | j = 0; | |
5555 | /* lut = 4-byte sliding window of 4 lut entries */ | |
5556 | lut = (lut << 8) | (j & | |
5557 | ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1)); | |
5558 | /* On i = 3, we have 4 entries in lut; write to the register */ | |
5559 | if ((i & 3) == 3) | |
5560 | wr32(hw, I40E_PFQF_HLUT(i >> 2), lut); | |
5561 | } | |
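/* Worked example of the packing above with rss_size = 4: j cycles
 * 0, 1, 2, 3, so at every fourth iteration lut == 0x00010203 (the first
 * index of each group lands in the most significant byte) and that value
 * is written to the next HLUT register, spreading the four queues evenly
 * across the whole table.
 */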
5562 | i40e_flush(hw); | |
5563 | ||
5564 | return 0; | |
5565 | } | |
5566 | ||
5567 | /** | |
5568 | * i40e_sw_init - Initialize general software structures (struct i40e_pf) | |
5569 | * @pf: board private structure to initialize | |
5570 | * | |
5571 | * i40e_sw_init initializes the Adapter private data structure. | |
5572 | * Fields are initialized based on PCI device information and | |
5573 | * OS network device settings (MTU size). | |
5574 | **/ | |
5575 | static int i40e_sw_init(struct i40e_pf *pf) | |
5576 | { | |
5577 | int err = 0; | |
5578 | int size; | |
5579 | ||
5580 | pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE, | |
5581 | (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)); | |
2759997b | 5582 | pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG; |
41c445ff JB |
5583 | if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) { |
5584 | if (I40E_DEBUG_USER & debug) | |
5585 | pf->hw.debug_mask = debug; | |
5586 | pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER), | |
5587 | I40E_DEFAULT_MSG_ENABLE); | |
5588 | } | |
5589 | ||
5590 | /* Set default capability flags */ | |
5591 | pf->flags = I40E_FLAG_RX_CSUM_ENABLED | | |
5592 | I40E_FLAG_MSI_ENABLED | | |
5593 | I40E_FLAG_MSIX_ENABLED | | |
5594 | I40E_FLAG_RX_PS_ENABLED | | |
5595 | I40E_FLAG_MQ_ENABLED | | |
5596 | I40E_FLAG_RX_1BUF_ENABLED; | |
5597 | ||
5598 | pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width; | |
5599 | if (pf->hw.func_caps.rss) { | |
5600 | pf->flags |= I40E_FLAG_RSS_ENABLED; | |
5601 | pf->rss_size = min_t(int, pf->rss_size_max, | |
5602 | nr_cpus_node(numa_node_id())); | |
5603 | } else { | |
5604 | pf->rss_size = 1; | |
5605 | } | |
5606 | ||
5607 | if (pf->hw.func_caps.dcb) | |
5608 | pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC; | |
5609 | else | |
5610 | pf->num_tc_qps = 0; | |
5611 | ||
5612 | if (pf->hw.func_caps.fd) { | |
5613 | /* FW/NVM is not yet fixed in this regard */ | |
5614 | if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || | |
5615 | (pf->hw.func_caps.fd_filters_best_effort > 0)) { | |
5616 | pf->flags |= I40E_FLAG_FDIR_ATR_ENABLED; | |
5617 | dev_info(&pf->pdev->dev, | |
5618 | "Flow Director ATR mode Enabled\n"); | |
5619 | pf->flags |= I40E_FLAG_FDIR_ENABLED; | |
5620 | dev_info(&pf->pdev->dev, | |
5621 | "Flow Director Side Band mode Enabled\n"); | |
5622 | pf->fdir_pf_filter_count = | |
5623 | pf->hw.func_caps.fd_filters_guaranteed; | |
5624 | } | |
5625 | } else { | |
5626 | pf->fdir_pf_filter_count = 0; | |
5627 | } | |
5628 | ||
5629 | if (pf->hw.func_caps.vmdq) { | |
5630 | pf->flags |= I40E_FLAG_VMDQ_ENABLED; | |
5631 | pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; | |
5632 | pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ; | |
5633 | } | |
5634 | ||
5635 | /* MFP mode enabled */ | |
5636 | if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) { | |
5637 | pf->flags |= I40E_FLAG_MFP_ENABLED; | |
5638 | dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); | |
5639 | } | |
5640 | ||
5641 | #ifdef CONFIG_PCI_IOV | |
5642 | if (pf->hw.func_caps.num_vfs) { | |
5643 | pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; | |
5644 | pf->flags |= I40E_FLAG_SRIOV_ENABLED; | |
5645 | pf->num_req_vfs = min_t(int, | |
5646 | pf->hw.func_caps.num_vfs, | |
5647 | I40E_MAX_VF_COUNT); | |
5648 | } | |
5649 | #endif /* CONFIG_PCI_IOV */ | |
5650 | pf->eeprom_version = 0xDEAD; | |
5651 | pf->lan_veb = I40E_NO_VEB; | |
5652 | pf->lan_vsi = I40E_NO_VSI; | |
5653 | ||
5654 | /* set up queue assignment tracking */ | |
5655 | size = sizeof(struct i40e_lump_tracking) | |
5656 | + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); | |
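/* The pile is one flat allocation: the i40e_lump_tracking bookkeeping
 * header followed by one u16 slot per queue pair, so 128 Tx queue pairs,
 * for example, add 256 bytes of tracking list to the structure size.
 */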
5657 | pf->qp_pile = kzalloc(size, GFP_KERNEL); | |
5658 | if (!pf->qp_pile) { | |
5659 | err = -ENOMEM; | |
5660 | goto sw_init_done; | |
5661 | } | |
5662 | pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; | |
5663 | pf->qp_pile->search_hint = 0; | |
5664 | ||
5665 | /* set up vector assignment tracking */ | |
5666 | size = sizeof(struct i40e_lump_tracking) | |
5667 | + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors); | |
5668 | pf->irq_pile = kzalloc(size, GFP_KERNEL); | |
5669 | if (!pf->irq_pile) { | |
5670 | kfree(pf->qp_pile); | |
5671 | err = -ENOMEM; | |
5672 | goto sw_init_done; | |
5673 | } | |
5674 | pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors; | |
5675 | pf->irq_pile->search_hint = 0; | |
5676 | ||
5677 | mutex_init(&pf->switch_mutex); | |
5678 | ||
5679 | sw_init_done: | |
5680 | return err; | |
5681 | } | |
5682 | ||
5683 | /** | |
5684 | * i40e_set_features - set the netdev feature flags | |
5685 | * @netdev: ptr to the netdev being adjusted | |
5686 | * @features: the feature set that the stack is suggesting | |
5687 | **/ | |
5688 | static int i40e_set_features(struct net_device *netdev, | |
5689 | netdev_features_t features) | |
5690 | { | |
5691 | struct i40e_netdev_priv *np = netdev_priv(netdev); | |
5692 | struct i40e_vsi *vsi = np->vsi; | |
5693 | ||
5694 | if (features & NETIF_F_HW_VLAN_CTAG_RX) | |
5695 | i40e_vlan_stripping_enable(vsi); | |
5696 | else | |
5697 | i40e_vlan_stripping_disable(vsi); | |
5698 | ||
5699 | return 0; | |
5700 | } | |
5701 | ||
5702 | static const struct net_device_ops i40e_netdev_ops = { | |
5703 | .ndo_open = i40e_open, | |
5704 | .ndo_stop = i40e_close, | |
5705 | .ndo_start_xmit = i40e_lan_xmit_frame, | |
5706 | .ndo_get_stats64 = i40e_get_netdev_stats_struct, | |
5707 | .ndo_set_rx_mode = i40e_set_rx_mode, | |
5708 | .ndo_validate_addr = eth_validate_addr, | |
5709 | .ndo_set_mac_address = i40e_set_mac, | |
5710 | .ndo_change_mtu = i40e_change_mtu, | |
5711 | .ndo_tx_timeout = i40e_tx_timeout, | |
5712 | .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, | |
5713 | .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, | |
5714 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
5715 | .ndo_poll_controller = i40e_netpoll, | |
5716 | #endif | |
5717 | .ndo_setup_tc = i40e_setup_tc, | |
5718 | .ndo_set_features = i40e_set_features, | |
5719 | .ndo_set_vf_mac = i40e_ndo_set_vf_mac, | |
5720 | .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, | |
5721 | .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw, | |
5722 | .ndo_get_vf_config = i40e_ndo_get_vf_config, | |
5723 | }; | |
5724 | ||
5725 | /** | |
5726 | * i40e_config_netdev - Setup the netdev flags | |
5727 | * @vsi: the VSI being configured | |
5728 | * | |
5729 | * Returns 0 on success, negative value on failure | |
5730 | **/ | |
5731 | static int i40e_config_netdev(struct i40e_vsi *vsi) | |
5732 | { | |
5733 | struct i40e_pf *pf = vsi->back; | |
5734 | struct i40e_hw *hw = &pf->hw; | |
5735 | struct i40e_netdev_priv *np; | |
5736 | struct net_device *netdev; | |
5737 | u8 mac_addr[ETH_ALEN]; | |
5738 | int etherdev_size; | |
5739 | ||
5740 | etherdev_size = sizeof(struct i40e_netdev_priv); | |
5741 | netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); | |
5742 | if (!netdev) | |
5743 | return -ENOMEM; | |
5744 | ||
5745 | vsi->netdev = netdev; | |
5746 | np = netdev_priv(netdev); | |
5747 | np->vsi = vsi; | |
5748 | ||
5749 | netdev->hw_enc_features = NETIF_F_IP_CSUM | | |
5750 | NETIF_F_GSO_UDP_TUNNEL | | |
5751 | NETIF_F_TSO | | |
5752 | NETIF_F_SG; | |
5753 | ||
5754 | netdev->features = NETIF_F_SG | | |
5755 | NETIF_F_IP_CSUM | | |
5756 | NETIF_F_SCTP_CSUM | | |
5757 | NETIF_F_HIGHDMA | | |
5758 | NETIF_F_GSO_UDP_TUNNEL | | |
5759 | NETIF_F_HW_VLAN_CTAG_TX | | |
5760 | NETIF_F_HW_VLAN_CTAG_RX | | |
5761 | NETIF_F_HW_VLAN_CTAG_FILTER | | |
5762 | NETIF_F_IPV6_CSUM | | |
5763 | NETIF_F_TSO | | |
5764 | NETIF_F_TSO6 | | |
5765 | NETIF_F_RXCSUM | | |
5766 | NETIF_F_RXHASH | | |
5767 | 0; | |
5768 | ||
5769 | /* copy netdev features into list of user selectable features */ | |
5770 | netdev->hw_features |= netdev->features; | |
5771 | ||
5772 | if (vsi->type == I40E_VSI_MAIN) { | |
5773 | SET_NETDEV_DEV(netdev, &pf->pdev->dev); | |
5774 | memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN); | |
5775 | } else { | |
5776 | /* relate the VSI_VMDQ name to the VSI_MAIN name */ | |
5777 | snprintf(netdev->name, IFNAMSIZ, "%sv%%d", | |
5778 | pf->vsi[pf->lan_vsi]->netdev->name); | |
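/* e.g. if the main VSI's netdev is "eth2", a VMDq netdev created here
 * ends up registered as "eth2v0", "eth2v1", ... once register_netdev()
 * expands the %d.
 */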
5779 | random_ether_addr(mac_addr); | |
5780 | i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false); | |
5781 | } | |
5782 | ||
5783 | memcpy(netdev->dev_addr, mac_addr, ETH_ALEN); | |
5784 | memcpy(netdev->perm_addr, mac_addr, ETH_ALEN); | |
5785 | /* vlan gets same features (except vlan offload) | |
5786 | * after any tweaks for specific VSI types | |
5787 | */ | |
5788 | netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | | |
5789 | NETIF_F_HW_VLAN_CTAG_RX | | |
5790 | NETIF_F_HW_VLAN_CTAG_FILTER); | |
5791 | netdev->priv_flags |= IFF_UNICAST_FLT; | |
5792 | netdev->priv_flags |= IFF_SUPP_NOFCS; | |
5793 | /* Setup netdev TC information */ | |
5794 | i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc); | |
5795 | ||
5796 | netdev->netdev_ops = &i40e_netdev_ops; | |
5797 | netdev->watchdog_timeo = 5 * HZ; | |
5798 | i40e_set_ethtool_ops(netdev); | |
5799 | ||
5800 | return 0; | |
5801 | } | |
5802 | ||
5803 | /** | |
5804 | * i40e_vsi_delete - Delete a VSI from the switch | |
5805 | * @vsi: the VSI being removed | |
5808 | **/ | |
5809 | static void i40e_vsi_delete(struct i40e_vsi *vsi) | |
5810 | { | |
5811 | /* removing the default VSI is not allowed */ | |
5812 | if (vsi == vsi->back->vsi[vsi->back->lan_vsi]) | |
5813 | return; | |
5814 | ||
5815 | /* there is no HW VSI for FDIR */ | |
5816 | if (vsi->type == I40E_VSI_FDIR) | |
5817 | return; | |
5818 | ||
5819 | i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL); | |
5820 | return; | |
5821 | } | |
5822 | ||
5823 | /** | |
5824 | * i40e_add_vsi - Add a VSI to the switch | |
5825 | * @vsi: the VSI being configured | |
5826 | * | |
5827 | * This initializes a VSI context depending on the VSI type to be added and | |
5828 | * passes it down to the add_vsi aq command. | |
5829 | **/ | |
5830 | static int i40e_add_vsi(struct i40e_vsi *vsi) | |
5831 | { | |
5832 | int ret = -ENODEV; | |
5833 | struct i40e_mac_filter *f, *ftmp; | |
5834 | struct i40e_pf *pf = vsi->back; | |
5835 | struct i40e_hw *hw = &pf->hw; | |
5836 | struct i40e_vsi_context ctxt; | |
5837 | u8 enabled_tc = 0x1; /* TC0 enabled */ | |
5838 | int f_count = 0; | |
5839 | ||
5840 | memset(&ctxt, 0, sizeof(ctxt)); | |
5841 | switch (vsi->type) { | |
5842 | case I40E_VSI_MAIN: | |
5843 | /* The PF's main VSI is already setup as part of the | |
5844 | * device initialization, so we'll not bother with | |
5845 | * the add_vsi call, but we will retrieve the current | |
5846 | * VSI context. | |
5847 | */ | |
5848 | ctxt.seid = pf->main_vsi_seid; | |
5849 | ctxt.pf_num = pf->hw.pf_id; | |
5850 | ctxt.vf_num = 0; | |
5851 | ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); | |
5852 | ctxt.flags = I40E_AQ_VSI_TYPE_PF; | |
5853 | if (ret) { | |
5854 | dev_info(&pf->pdev->dev, | |
5855 | "couldn't get pf vsi config, err %d, aq_err %d\n", | |
5856 | ret, pf->hw.aq.asq_last_status); | |
5857 | return -ENOENT; | |
5858 | } | |
5859 | memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info)); | |
5860 | vsi->info.valid_sections = 0; | |
5861 | ||
5862 | vsi->seid = ctxt.seid; | |
5863 | vsi->id = ctxt.vsi_number; | |
5864 | ||
5865 | enabled_tc = i40e_pf_get_tc_map(pf); | |
5866 | ||
5867 | /* MFP mode setup queue map and update VSI */ | |
5868 | if (pf->flags & I40E_FLAG_MFP_ENABLED) { | |
5869 | memset(&ctxt, 0, sizeof(ctxt)); | |
5870 | ctxt.seid = pf->main_vsi_seid; | |
5871 | ctxt.pf_num = pf->hw.pf_id; | |
5872 | ctxt.vf_num = 0; | |
5873 | i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); | |
5874 | ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); | |
5875 | if (ret) { | |
5876 | dev_info(&pf->pdev->dev, | |
5877 | "update vsi failed, aq_err=%d\n", | |
5878 | pf->hw.aq.asq_last_status); | |
5879 | ret = -ENOENT; | |
5880 | goto err; | |
5881 | } | |
5882 | /* update the local VSI info queue map */ | |
5883 | i40e_vsi_update_queue_map(vsi, &ctxt); | |
5884 | vsi->info.valid_sections = 0; | |
5885 | } else { | |
5886 | /* Default/Main VSI is only enabled for TC0; | |
5887 | * reconfigure it to enable all TCs that are | |
5888 | * available on the port in SFP mode. | |
5889 | */ | |
5890 | ret = i40e_vsi_config_tc(vsi, enabled_tc); | |
5891 | if (ret) { | |
5892 | dev_info(&pf->pdev->dev, | |
5893 | "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n", | |
5894 | enabled_tc, ret, | |
5895 | pf->hw.aq.asq_last_status); | |
5896 | ret = -ENOENT; | |
5897 | } | |
5898 | } | |
5899 | break; | |
5900 | ||
5901 | case I40E_VSI_FDIR: | |
5902 | /* no queue mapping or actual HW VSI needed */ | |
5903 | vsi->info.valid_sections = 0; | |
5904 | vsi->seid = 0; | |
5905 | vsi->id = 0; | |
5906 | i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); | |
5907 | return 0; | |
5909 | ||
5910 | case I40E_VSI_VMDQ2: | |
5911 | ctxt.pf_num = hw->pf_id; | |
5912 | ctxt.vf_num = 0; | |
5913 | ctxt.uplink_seid = vsi->uplink_seid; | |
5914 | ctxt.connection_type = 0x1; /* regular data port */ | |
5915 | ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; | |
5916 | ||
5917 | ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); | |
5918 | ||
5919 | /* This VSI is connected to VEB so the switch_id | |
5920 | * should be set to zero by default. | |
5921 | */ | |
5922 | ctxt.info.switch_id = 0; | |
5923 | ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB); | |
5924 | ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); | |
5925 | ||
5926 | /* Setup the VSI tx/rx queue map for TC0 only for now */ | |
5927 | i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); | |
5928 | break; | |
5929 | ||
5930 | case I40E_VSI_SRIOV: | |
5931 | ctxt.pf_num = hw->pf_id; | |
5932 | ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; | |
5933 | ctxt.uplink_seid = vsi->uplink_seid; | |
5934 | ctxt.connection_type = 0x1; /* regular data port */ | |
5935 | ctxt.flags = I40E_AQ_VSI_TYPE_VF; | |
5936 | ||
5937 | ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); | |
5938 | ||
5939 | /* This VSI is connected to VEB so the switch_id | |
5940 | * should be set to zero by default. | |
5941 | */ | |
5942 | ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); | |
5943 | ||
5944 | ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); | |
5945 | ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; | |
5946 | /* Setup the VSI tx/rx queue map for TC0 only for now */ | |
5947 | i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); | |
5948 | break; | |
5949 | ||
5950 | default: | |
5951 | return -ENODEV; | |
5952 | } | |
5953 | ||
5954 | if (vsi->type != I40E_VSI_MAIN) { | |
5955 | ret = i40e_aq_add_vsi(hw, &ctxt, NULL); | |
5956 | if (ret) { | |
5957 | dev_info(&vsi->back->pdev->dev, | |
5958 | "add vsi failed, aq_err=%d\n", | |
5959 | vsi->back->hw.aq.asq_last_status); | |
5960 | ret = -ENOENT; | |
5961 | goto err; | |
5962 | } | |
5963 | memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info)); | |
5964 | vsi->info.valid_sections = 0; | |
5965 | vsi->seid = ctxt.seid; | |
5966 | vsi->id = ctxt.vsi_number; | |
5967 | } | |
5968 | ||
5969 | /* If macvlan filters already exist, force them to get loaded */ | |
5970 | list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { | |
5971 | f->changed = true; | |
5972 | f_count++; | |
5973 | } | |
5974 | if (f_count) { | |
5975 | vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; | |
5976 | pf->flags |= I40E_FLAG_FILTER_SYNC; | |
5977 | } | |
5978 | ||
5979 | /* Update VSI BW information */ | |
5980 | ret = i40e_vsi_get_bw_info(vsi); | |
5981 | if (ret) { | |
5982 | dev_info(&pf->pdev->dev, | |
5983 | "couldn't get vsi bw info, err %d, aq_err %d\n", | |
5984 | ret, pf->hw.aq.asq_last_status); | |
5985 | /* VSI is already added so not tearing that up */ | |
5986 | ret = 0; | |
5987 | } | |
5988 | ||
5989 | err: | |
5990 | return ret; | |
5991 | } | |
5992 | ||
5993 | /** | |
5994 | * i40e_vsi_release - Delete a VSI and free its resources | |
5995 | * @vsi: the VSI being removed | |
5996 | * | |
5997 | * Returns 0 on success or < 0 on error | |
5998 | **/ | |
5999 | int i40e_vsi_release(struct i40e_vsi *vsi) | |
6000 | { | |
6001 | struct i40e_mac_filter *f, *ftmp; | |
6002 | struct i40e_veb *veb = NULL; | |
6003 | struct i40e_pf *pf; | |
6004 | u16 uplink_seid; | |
6005 | int i, n; | |
6006 | ||
6007 | pf = vsi->back; | |
6008 | ||
6009 | /* release of a VEB-owner or last VSI is not allowed */ | |
6010 | if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { | |
6011 | dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", | |
6012 | vsi->seid, vsi->uplink_seid); | |
6013 | return -ENODEV; | |
6014 | } | |
6015 | if (vsi == pf->vsi[pf->lan_vsi] && | |
6016 | !test_bit(__I40E_DOWN, &pf->state)) { | |
6017 | dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); | |
6018 | return -ENODEV; | |
6019 | } | |
6020 | ||
6021 | uplink_seid = vsi->uplink_seid; | |
6022 | if (vsi->type != I40E_VSI_SRIOV) { | |
6023 | if (vsi->netdev_registered) { | |
6024 | vsi->netdev_registered = false; | |
6025 | if (vsi->netdev) { | |
6026 | /* results in a call to i40e_close() */ | |
6027 | unregister_netdev(vsi->netdev); | |
6028 | free_netdev(vsi->netdev); | |
6029 | vsi->netdev = NULL; | |
6030 | } | |
6031 | } else { | |
6032 | if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) | |
6033 | i40e_down(vsi); | |
6034 | i40e_vsi_free_irq(vsi); | |
6035 | i40e_vsi_free_tx_resources(vsi); | |
6036 | i40e_vsi_free_rx_resources(vsi); | |
6037 | } | |
6038 | i40e_vsi_disable_irq(vsi); | |
6039 | } | |
6040 | ||
6041 | list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) | |
6042 | i40e_del_filter(vsi, f->macaddr, f->vlan, | |
6043 | f->is_vf, f->is_netdev); | |
6044 | i40e_sync_vsi_filters(vsi); | |
6045 | ||
6046 | i40e_vsi_delete(vsi); | |
6047 | i40e_vsi_free_q_vectors(vsi); | |
6048 | i40e_vsi_clear_rings(vsi); | |
6049 | i40e_vsi_clear(vsi); | |
6050 | ||
6051 | /* If this was the last thing on the VEB, except for the | |
6052 | * controlling VSI, remove the VEB, which puts the controlling | |
6053 | * VSI onto the next level down in the switch. | |
6054 | * | |
6055 | * Well, okay, there's one more exception here: don't remove | |
6056 | * the orphan VEBs yet. We'll wait for an explicit remove request | |
6057 | * from up the network stack. | |
6058 | */ | |
6059 | for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) { | |
6060 | if (pf->vsi[i] && | |
6061 | pf->vsi[i]->uplink_seid == uplink_seid && | |
6062 | (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { | |
6063 | n++; /* count the VSIs */ | |
6064 | } | |
6065 | } | |
6066 | for (i = 0; i < I40E_MAX_VEB; i++) { | |
6067 | if (!pf->veb[i]) | |
6068 | continue; | |
6069 | if (pf->veb[i]->uplink_seid == uplink_seid) | |
6070 | n++; /* count the VEBs */ | |
6071 | if (pf->veb[i]->seid == uplink_seid) | |
6072 | veb = pf->veb[i]; | |
6073 | } | |
6074 | if (n == 0 && veb && veb->uplink_seid != 0) | |
6075 | i40e_veb_release(veb); | |
6076 | ||
6077 | return 0; | |
6078 | } | |
6079 | ||
6080 | /** | |
6081 | * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI | |
6082 | * @vsi: ptr to the VSI | |
6083 | * | |
6084 | * This should only be called after i40e_vsi_mem_alloc() which allocates the | |
6085 | * corresponding SW VSI structure and initializes num_queue_pairs for the | |
6086 | * newly allocated VSI. | |
6087 | * | |
6088 | * Returns 0 on success or negative on failure | |
6089 | **/ | |
6090 | static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) | |
6091 | { | |
6092 | int ret = -ENOENT; | |
6093 | struct i40e_pf *pf = vsi->back; | |
6094 | ||
493fb300 | 6095 | if (vsi->q_vectors[0]) { |
41c445ff JB |
6096 | dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", |
6097 | vsi->seid); | |
6098 | return -EEXIST; | |
6099 | } | |
6100 | ||
6101 | if (vsi->base_vector) { | |
6102 | dev_info(&pf->pdev->dev, | |
6103 | "VSI %d has non-zero base vector %d\n", | |
6104 | vsi->seid, vsi->base_vector); | |
6105 | return -EEXIST; | |
6106 | } | |
6107 | ||
6108 | ret = i40e_alloc_q_vectors(vsi); | |
6109 | if (ret) { | |
6110 | dev_info(&pf->pdev->dev, | |
6111 | "failed to allocate %d q_vector for VSI %d, ret=%d\n", | |
6112 | vsi->num_q_vectors, vsi->seid, ret); | |
6113 | vsi->num_q_vectors = 0; | |
6114 | goto vector_setup_out; | |
6115 | } | |
6116 | ||
958a3e3b SN |
6117 | if (vsi->num_q_vectors) |
6118 | vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, | |
6119 | vsi->num_q_vectors, vsi->idx); | |
41c445ff JB |
6120 | if (vsi->base_vector < 0) { |
6121 | dev_info(&pf->pdev->dev, | |
6122 | "failed to get q tracking for VSI %d, err=%d\n", | |
6123 | vsi->seid, vsi->base_vector); | |
6124 | i40e_vsi_free_q_vectors(vsi); | |
6125 | ret = -ENOENT; | |
6126 | goto vector_setup_out; | |
6127 | } | |
6128 | ||
6129 | vector_setup_out: | |
6130 | return ret; | |
6131 | } | |
6132 | ||
6133 | /** | |
6134 | * i40e_vsi_setup - Set up a VSI by a given type | |
6135 | * @pf: board private structure | |
6136 | * @type: VSI type | |
6137 | * @uplink_seid: the switch element to link to | |
6138 | * @param1: usage depends upon VSI type. For VF types, indicates VF id | |
6139 | * | |
6140 | * This allocates the sw VSI structure and its queue resources, then adds the VSI | |
6141 | * to the identified VEB. | |
6142 | * | |
6143 | * Returns pointer to the successfully allocated and configured VSI sw struct on | |
6144 | * success, otherwise returns NULL on failure. | |
6145 | **/ | |
6146 | struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, | |
6147 | u16 uplink_seid, u32 param1) | |
6148 | { | |
6149 | struct i40e_vsi *vsi = NULL; | |
6150 | struct i40e_veb *veb = NULL; | |
6151 | int ret, i; | |
6152 | int v_idx; | |
6153 | ||
6154 | /* The requested uplink_seid must be either | |
6155 | * - the PF's port seid | |
6156 | * no VEB is needed because this is the PF | |
6157 | * or this is a Flow Director special case VSI | |
6158 | * - seid of an existing VEB | |
6159 | * - seid of a VSI that owns an existing VEB | |
6160 | * - seid of a VSI that doesn't own a VEB | |
6161 | * a new VEB is created and the VSI becomes the owner | |
6162 | * - seid of the PF VSI, which is what creates the first VEB | |
6163 | * this is a special case of the previous | |
6164 | * | |
6165 | * Find which uplink_seid we were given and create a new VEB if needed | |
6166 | */ | |
6167 | for (i = 0; i < I40E_MAX_VEB; i++) { | |
6168 | if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) { | |
6169 | veb = pf->veb[i]; | |
6170 | break; | |
6171 | } | |
6172 | } | |
6173 | ||
6174 | if (!veb && uplink_seid != pf->mac_seid) { | |
6175 | ||
6176 | for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { | |
6177 | if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { | |
6178 | vsi = pf->vsi[i]; | |
6179 | break; | |
6180 | } | |
6181 | } | |
6182 | if (!vsi) { | |
6183 | dev_info(&pf->pdev->dev, "no such uplink_seid %d\n", | |
6184 | uplink_seid); | |
6185 | return NULL; | |
6186 | } | |
6187 | ||
6188 | if (vsi->uplink_seid == pf->mac_seid) | |
6189 | veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, | |
6190 | vsi->tc_config.enabled_tc); | |
6191 | else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) | |
6192 | veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, | |
6193 | vsi->tc_config.enabled_tc); | |
6194 | ||
6195 | for (i = 0; i < I40E_MAX_VEB && !veb; i++) { | |
6196 | if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) | |
6197 | veb = pf->veb[i]; | |
6198 | } | |
6199 | if (!veb) { | |
6200 | dev_info(&pf->pdev->dev, "couldn't add VEB\n"); | |
6201 | return NULL; | |
6202 | } | |
6203 | ||
6204 | vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; | |
6205 | uplink_seid = veb->seid; | |
6206 | } | |
6207 | ||
6208 | /* get vsi sw struct */ | |
6209 | v_idx = i40e_vsi_mem_alloc(pf, type); | |
6210 | if (v_idx < 0) | |
6211 | goto err_alloc; | |
6212 | vsi = pf->vsi[v_idx]; | |
6213 | vsi->type = type; | |
6214 | vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB); | |
6215 | ||
6216 | if (type == I40E_VSI_MAIN) | |
6217 | pf->lan_vsi = v_idx; | |
6218 | else if (type == I40E_VSI_SRIOV) | |
6219 | vsi->vf_id = param1; | |
6220 | /* assign it some queues */ | |
6221 | ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); | |
6222 | if (ret < 0) { | |
6223 | dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n", | |
6224 | vsi->seid, ret); | |
6225 | goto err_vsi; | |
6226 | } | |
6227 | vsi->base_queue = ret; | |
6228 | ||
6229 | /* get a VSI from the hardware */ | |
6230 | vsi->uplink_seid = uplink_seid; | |
6231 | ret = i40e_add_vsi(vsi); | |
6232 | if (ret) | |
6233 | goto err_vsi; | |
6234 | ||
6235 | switch (vsi->type) { | |
6236 | /* setup the netdev if needed */ | |
6237 | case I40E_VSI_MAIN: | |
6238 | case I40E_VSI_VMDQ2: | |
6239 | ret = i40e_config_netdev(vsi); | |
6240 | if (ret) | |
6241 | goto err_netdev; | |
6242 | ret = register_netdev(vsi->netdev); | |
6243 | if (ret) | |
6244 | goto err_netdev; | |
6245 | vsi->netdev_registered = true; | |
6246 | netif_carrier_off(vsi->netdev); | |
6247 | /* fall through */ | |
6248 | ||
6249 | case I40E_VSI_FDIR: | |
6250 | /* set up vectors and rings if needed */ | |
6251 | ret = i40e_vsi_setup_vectors(vsi); | |
6252 | if (ret) | |
6253 | goto err_msix; | |
6254 | ||
6255 | ret = i40e_alloc_rings(vsi); | |
6256 | if (ret) | |
6257 | goto err_rings; | |
6258 | ||
6259 | /* map all of the rings to the q_vectors */ | |
6260 | i40e_vsi_map_rings_to_vectors(vsi); | |
6261 | ||
6262 | i40e_vsi_reset_stats(vsi); | |
6263 | break; | |
6264 | ||
6265 | default: | |
6266 | /* no netdev or rings for the other VSI types */ | |
6267 | break; | |
6268 | } | |
6269 | ||
6270 | return vsi; | |
6271 | ||
6272 | err_rings: | |
6273 | i40e_vsi_free_q_vectors(vsi); | |
6274 | err_msix: | |
6275 | if (vsi->netdev_registered) { | |
6276 | vsi->netdev_registered = false; | |
6277 | unregister_netdev(vsi->netdev); | |
6278 | free_netdev(vsi->netdev); | |
6279 | vsi->netdev = NULL; | |
6280 | } | |
6281 | err_netdev: | |
6282 | i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); | |
6283 | err_vsi: | |
6284 | i40e_vsi_clear(vsi); | |
6285 | err_alloc: | |
6286 | return NULL; | |
6287 | } | |
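
/* A minimal usage sketch, not compiled into the driver: how a caller might
 * hang a VMDq VSI off the PF's main LAN VSI, relying on the uplink_seid
 * rules documented in i40e_vsi_setup() to reuse or create the VEB.  The
 * function name and error handling here are assumptions for illustration.
 */
#if 0
static struct i40e_vsi *i40e_example_add_vmdq_vsi(struct i40e_pf *pf)
{
	/* passing the main LAN VSI's seid lets i40e_vsi_setup() either reuse
	 * the VEB that VSI already owns or create one and make it the owner
	 */
	u16 uplink_seid = pf->vsi[pf->lan_vsi]->seid;
	struct i40e_vsi *vsi;

	vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, uplink_seid, 0);
	if (!vsi)
		dev_info(&pf->pdev->dev, "example VMDq VSI setup failed\n");
	return vsi;
}
#endif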
6288 | ||
6289 | /** | |
6290 | * i40e_veb_get_bw_info - Query VEB BW information | |
6291 | * @veb: the veb to query | |
6292 | * | |
6293 | * Query the Tx scheduler BW configuration data for given VEB | |
6294 | **/ | |
6295 | static int i40e_veb_get_bw_info(struct i40e_veb *veb) | |
6296 | { | |
6297 | struct i40e_aqc_query_switching_comp_ets_config_resp ets_data; | |
6298 | struct i40e_aqc_query_switching_comp_bw_config_resp bw_data; | |
6299 | struct i40e_pf *pf = veb->pf; | |
6300 | struct i40e_hw *hw = &pf->hw; | |
6301 | u32 tc_bw_max; | |
6302 | int ret = 0; | |
6303 | int i; | |
6304 | ||
6305 | ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, | |
6306 | &bw_data, NULL); | |
6307 | if (ret) { | |
6308 | dev_info(&pf->pdev->dev, | |
6309 | "query veb bw config failed, aq_err=%d\n", | |
6310 | hw->aq.asq_last_status); | |
6311 | goto out; | |
6312 | } | |
6313 | ||
6314 | ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, | |
6315 | &ets_data, NULL); | |
6316 | if (ret) { | |
6317 | dev_info(&pf->pdev->dev, | |
6318 | "query veb bw ets config failed, aq_err=%d\n", | |
6319 | hw->aq.asq_last_status); | |
6320 | goto out; | |
6321 | } | |
6322 | ||
6323 | veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit); | |
6324 | veb->bw_max_quanta = ets_data.tc_bw_max; | |
6325 | veb->is_abs_credits = bw_data.absolute_credits_enable; | |
6326 | tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) | | |
6327 | (le16_to_cpu(bw_data.tc_bw_max[1]) << 16); | |
6328 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { | |
6329 | veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i]; | |
6330 | veb->bw_tc_limit_credits[i] = | |
6331 | le16_to_cpu(bw_data.tc_bw_limits[i]); | |
6332 | veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7); | |
6333 | } | |
6334 | ||
6335 | out: | |
6336 | return ret; | |
6337 | } | |
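
/* Worked example of the tc_bw_max unpacking above, with invented values:
 * the two 16-bit words 0x4321 and 0x8765 combine into tc_bw_max =
 * 0x87654321, one 4-bit field per traffic class.  Shifting by i*4 and
 * masking with 0x7 yields max quanta 1, 2, 3, 4, 5, 6, 7 for TC0..TC6 and
 * (0x8 & 0x7) = 0 for TC7 -- only the low three bits of each nibble are
 * kept.
 */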
6338 | ||
6339 | /** | |
6340 | * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF | |
6341 | * @pf: board private structure | |
6342 | * | |
6343 | * On error: returns error code (negative) | |
6344 | * On success: returns veb index in PF (positive) | |
6345 | **/ | |
6346 | static int i40e_veb_mem_alloc(struct i40e_pf *pf) | |
6347 | { | |
6348 | int ret = -ENOENT; | |
6349 | struct i40e_veb *veb; | |
6350 | int i; | |
6351 | ||
6352 | /* Need to protect the allocation of switch elements at the PF level */ | |
6353 | mutex_lock(&pf->switch_mutex); | |
6354 | ||
6355 | /* VEB list may be fragmented if VEB creation/destruction has | |
6356 | * been happening. We can afford to do a quick scan to look | |
6357 | * for any free slots in the list. | |
6358 | * | |
6359 | * find next empty veb slot | |
6360 | */ | |
6361 | i = 0; | |
6362 | while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL)) | |
6363 | i++; | |
6364 | if (i >= I40E_MAX_VEB) { | |
6365 | ret = -ENOMEM; | |
6366 | goto err_alloc_veb; /* out of VEB slots! */ | |
6367 | } | |
6368 | ||
6369 | veb = kzalloc(sizeof(*veb), GFP_KERNEL); | |
6370 | if (!veb) { | |
6371 | ret = -ENOMEM; | |
6372 | goto err_alloc_veb; | |
6373 | } | |
6374 | veb->pf = pf; | |
6375 | veb->idx = i; | |
6376 | veb->enabled_tc = 1; | |
6377 | ||
6378 | pf->veb[i] = veb; | |
6379 | ret = i; | |
6380 | err_alloc_veb: | |
6381 | mutex_unlock(&pf->switch_mutex); | |
6382 | return ret; | |
6383 | } | |
6384 | ||
6385 | /** | |
6386 | * i40e_switch_branch_release - Delete a branch of the switch tree | |
6387 | * @branch: where to start deleting | |
6388 | * | |
6389 | * This uses recursion to find the tips of the branch to be | |
6390 | * removed, deleting until we get back to and can delete this VEB. | |
6391 | **/ | |
6392 | static void i40e_switch_branch_release(struct i40e_veb *branch) | |
6393 | { | |
6394 | struct i40e_pf *pf = branch->pf; | |
6395 | u16 branch_seid = branch->seid; | |
6396 | u16 veb_idx = branch->idx; | |
6397 | int i; | |
6398 | ||
6399 | /* release any VEBs on this VEB - RECURSION */ | |
6400 | for (i = 0; i < I40E_MAX_VEB; i++) { | |
6401 | if (!pf->veb[i]) | |
6402 | continue; | |
6403 | if (pf->veb[i]->uplink_seid == branch->seid) | |
6404 | i40e_switch_branch_release(pf->veb[i]); | |
6405 | } | |
6406 | ||
6407 | /* Release the VSIs on this VEB, but not the owner VSI. | |
6408 | * | |
6409 | * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing | |
6410 | * the VEB itself, so don't use (*branch) after this loop. | |
6411 | */ | |
6412 | for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { | |
6413 | if (!pf->vsi[i]) | |
6414 | continue; | |
6415 | if (pf->vsi[i]->uplink_seid == branch_seid && | |
6416 | (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { | |
6417 | i40e_vsi_release(pf->vsi[i]); | |
6418 | } | |
6419 | } | |
6420 | ||
6421 | /* There's one corner case where the VEB might not have been | |
6422 | * removed, so double check it here and remove it if needed. | |
6423 | * This case happens if the veb was created from the debugfs | |
6424 | * commands and no VSIs were added to it. | |
6425 | */ | |
6426 | if (pf->veb[veb_idx]) | |
6427 | i40e_veb_release(pf->veb[veb_idx]); | |
6428 | } | |
6429 | ||
6430 | /** | |
6431 | * i40e_veb_clear - remove veb struct | |
6432 | * @veb: the veb to remove | |
6433 | **/ | |
6434 | static void i40e_veb_clear(struct i40e_veb *veb) | |
6435 | { | |
6436 | if (!veb) | |
6437 | return; | |
6438 | ||
6439 | if (veb->pf) { | |
6440 | struct i40e_pf *pf = veb->pf; | |
6441 | ||
6442 | mutex_lock(&pf->switch_mutex); | |
6443 | if (pf->veb[veb->idx] == veb) | |
6444 | pf->veb[veb->idx] = NULL; | |
6445 | mutex_unlock(&pf->switch_mutex); | |
6446 | } | |
6447 | ||
6448 | kfree(veb); | |
6449 | } | |
6450 | ||
6451 | /** | |
6452 | * i40e_veb_release - Delete a VEB and free its resources | |
6453 | * @veb: the VEB being removed | |
6454 | **/ | |
6455 | void i40e_veb_release(struct i40e_veb *veb) | |
6456 | { | |
6457 | struct i40e_vsi *vsi = NULL; | |
6458 | struct i40e_pf *pf; | |
6459 | int i, n = 0; | |
6460 | ||
6461 | pf = veb->pf; | |
6462 | ||
6463 | /* find the remaining VSI and check for extras */ | |
6464 | for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { | |
6465 | if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { | |
6466 | n++; | |
6467 | vsi = pf->vsi[i]; | |
6468 | } | |
6469 | } | |
6470 | if (n != 1) { | |
6471 | dev_info(&pf->pdev->dev, | |
6472 | "can't remove VEB %d with %d VSIs left\n", | |
6473 | veb->seid, n); | |
6474 | return; | |
6475 | } | |
6476 | ||
6477 | /* move the remaining VSI to uplink veb */ | |
6478 | vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; | |
6479 | if (veb->uplink_seid) { | |
6480 | vsi->uplink_seid = veb->uplink_seid; | |
6481 | if (veb->uplink_seid == pf->mac_seid) | |
6482 | vsi->veb_idx = I40E_NO_VEB; | |
6483 | else | |
6484 | vsi->veb_idx = veb->veb_idx; | |
6485 | } else { | |
6486 | /* floating VEB */ | |
6487 | vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; | |
6488 | vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; | |
6489 | } | |
6490 | ||
6491 | i40e_aq_delete_element(&pf->hw, veb->seid, NULL); | |
6492 | i40e_veb_clear(veb); | |
6493 | ||
6494 | return; | |
6495 | } | |
6496 | ||
6497 | /** | |
6498 | * i40e_add_veb - create the VEB in the switch | |
6499 | * @veb: the VEB to be instantiated | |
6500 | * @vsi: the controlling VSI | |
6501 | **/ | |
6502 | static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) | |
6503 | { | |
6504 | bool is_default = (vsi->idx == vsi->back->lan_vsi); | |
6505 | int ret; | |
6506 | ||
6507 | /* get a VEB from the hardware */ | |
6508 | ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid, | |
6509 | veb->enabled_tc, is_default, &veb->seid, NULL); | |
6510 | if (ret) { | |
6511 | dev_info(&veb->pf->pdev->dev, | |
6512 | "couldn't add VEB, err %d, aq_err %d\n", | |
6513 | ret, veb->pf->hw.aq.asq_last_status); | |
6514 | return -EPERM; | |
6515 | } | |
6516 | ||
6517 | /* get statistics counter */ | |
6518 | ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL, | |
6519 | &veb->stats_idx, NULL, NULL, NULL); | |
6520 | if (ret) { | |
6521 | dev_info(&veb->pf->pdev->dev, | |
6522 | "couldn't get VEB statistics idx, err %d, aq_err %d\n", | |
6523 | ret, veb->pf->hw.aq.asq_last_status); | |
6524 | return -EPERM; | |
6525 | } | |
6526 | ret = i40e_veb_get_bw_info(veb); | |
6527 | if (ret) { | |
6528 | dev_info(&veb->pf->pdev->dev, | |
6529 | "couldn't get VEB bw info, err %d, aq_err %d\n", | |
6530 | ret, veb->pf->hw.aq.asq_last_status); | |
6531 | i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL); | |
6532 | return -ENOENT; | |
6533 | } | |
6534 | ||
6535 | vsi->uplink_seid = veb->seid; | |
6536 | vsi->veb_idx = veb->idx; | |
6537 | vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; | |
6538 | ||
6539 | return 0; | |
6540 | } | |
6541 | ||
6542 | /** | |
6543 | * i40e_veb_setup - Set up a VEB | |
6544 | * @pf: board private structure | |
6545 | * @flags: VEB setup flags | |
6546 | * @uplink_seid: the switch element to link to | |
6547 | * @vsi_seid: the initial VSI seid | |
6548 | * @enabled_tc: Enabled TC bit-map | |
6549 | * | |
6550 | * This allocates the sw VEB structure and links it into the switch. | |
6551 | * It is possible and legal for this to be a duplicate of an already | |
6552 | * existing VEB. It is also possible for both uplink and vsi seids | |
6553 | * to be zero, in order to create a floating VEB. | |
6554 | * | |
6555 | * Returns pointer to the successfully allocated VEB sw struct on | |
6556 | * success, otherwise returns NULL on failure. | |
6557 | **/ | |
6558 | struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, | |
6559 | u16 uplink_seid, u16 vsi_seid, | |
6560 | u8 enabled_tc) | |
6561 | { | |
6562 | struct i40e_veb *veb, *uplink_veb = NULL; | |
6563 | int vsi_idx, veb_idx; | |
6564 | int ret; | |
6565 | ||
6566 | /* if one seid is 0, the other must be 0 to create a floating relay */ | |
6567 | if ((uplink_seid == 0 || vsi_seid == 0) && | |
6568 | (uplink_seid + vsi_seid != 0)) { | |
6569 | dev_info(&pf->pdev->dev, | |
6570 | "one, not both seid's are 0: uplink=%d vsi=%d\n", | |
6571 | uplink_seid, vsi_seid); | |
6572 | return NULL; | |
6573 | } | |
6574 | ||
6575 | /* make sure there is such a vsi and uplink */ | |
6576 | for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++) | |
6577 | if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) | |
6578 | break; | |
6579 | if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) { | |
6580 | dev_info(&pf->pdev->dev, "vsi seid %d not found\n", | |
6581 | vsi_seid); | |
6582 | return NULL; | |
6583 | } | |
6584 | ||
6585 | if (uplink_seid && uplink_seid != pf->mac_seid) { | |
6586 | for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { | |
6587 | if (pf->veb[veb_idx] && | |
6588 | pf->veb[veb_idx]->seid == uplink_seid) { | |
6589 | uplink_veb = pf->veb[veb_idx]; | |
6590 | break; | |
6591 | } | |
6592 | } | |
6593 | if (!uplink_veb) { | |
6594 | dev_info(&pf->pdev->dev, | |
6595 | "uplink seid %d not found\n", uplink_seid); | |
6596 | return NULL; | |
6597 | } | |
6598 | } | |
6599 | ||
6600 | /* get veb sw struct */ | |
6601 | veb_idx = i40e_veb_mem_alloc(pf); | |
6602 | if (veb_idx < 0) | |
6603 | goto err_alloc; | |
6604 | veb = pf->veb[veb_idx]; | |
6605 | veb->flags = flags; | |
6606 | veb->uplink_seid = uplink_seid; | |
6607 | veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB); | |
6608 | veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1); | |
6609 | ||
6610 | /* create the VEB in the switch */ | |
6611 | ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); | |
6612 | if (ret) | |
6613 | goto err_veb; | |
6614 | ||
6615 | return veb; | |
6616 | ||
6617 | err_veb: | |
6618 | i40e_veb_clear(veb); | |
6619 | err_alloc: | |
6620 | return NULL; | |
6621 | } | |
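
/* A minimal usage sketch, not compiled into the driver: the most common
 * calling style for i40e_veb_setup(), mirroring what i40e_vsi_setup() does
 * when it needs a VEB under the MAC uplink.  The wrapper name is an
 * assumption for illustration.
 */
#if 0
static struct i40e_veb *i40e_example_veb_under_mac(struct i40e_pf *pf)
{
	struct i40e_vsi *main_vsi = pf->vsi[pf->lan_vsi];

	/* uplink to the MAC, with the main LAN VSI as the first (owner)
	 * VSI, inheriting that VSI's enabled TC map
	 */
	return i40e_veb_setup(pf, 0, pf->mac_seid, main_vsi->seid,
			      main_vsi->tc_config.enabled_tc);
}
#endif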
6622 | ||
6623 | /** | |
6624 | * i40e_setup_pf_switch_element - set pf vars based on switch type | |
6625 | * @pf: board private structure | |
6626 | * @ele: element we are building info from | |
6627 | * @num_reported: total number of elements | |
6628 | * @printconfig: should we print the contents | |
6629 | * | |
6630 | * Helper function to extract a few useful SEID values. | |
6631 | **/ | |
6632 | static void i40e_setup_pf_switch_element(struct i40e_pf *pf, | |
6633 | struct i40e_aqc_switch_config_element_resp *ele, | |
6634 | u16 num_reported, bool printconfig) | |
6635 | { | |
6636 | u16 downlink_seid = le16_to_cpu(ele->downlink_seid); | |
6637 | u16 uplink_seid = le16_to_cpu(ele->uplink_seid); | |
6638 | u8 element_type = ele->element_type; | |
6639 | u16 seid = le16_to_cpu(ele->seid); | |
6640 | ||
6641 | if (printconfig) | |
6642 | dev_info(&pf->pdev->dev, | |
6643 | "type=%d seid=%d uplink=%d downlink=%d\n", | |
6644 | element_type, seid, uplink_seid, downlink_seid); | |
6645 | ||
6646 | switch (element_type) { | |
6647 | case I40E_SWITCH_ELEMENT_TYPE_MAC: | |
6648 | pf->mac_seid = seid; | |
6649 | break; | |
6650 | case I40E_SWITCH_ELEMENT_TYPE_VEB: | |
6651 | /* Main VEB? */ | |
6652 | if (uplink_seid != pf->mac_seid) | |
6653 | break; | |
6654 | if (pf->lan_veb == I40E_NO_VEB) { | |
6655 | int v; | |
6656 | ||
6657 | /* find existing or else empty VEB */ | |
6658 | for (v = 0; v < I40E_MAX_VEB; v++) { | |
6659 | if (pf->veb[v] && (pf->veb[v]->seid == seid)) { | |
6660 | pf->lan_veb = v; | |
6661 | break; | |
6662 | } | |
6663 | } | |
6664 | if (pf->lan_veb == I40E_NO_VEB) { | |
6665 | v = i40e_veb_mem_alloc(pf); | |
6666 | if (v < 0) | |
6667 | break; | |
6668 | pf->lan_veb = v; | |
6669 | } | |
6670 | } | |
6671 | ||
6672 | pf->veb[pf->lan_veb]->seid = seid; | |
6673 | pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; | |
6674 | pf->veb[pf->lan_veb]->pf = pf; | |
6675 | pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; | |
6676 | break; | |
6677 | case I40E_SWITCH_ELEMENT_TYPE_VSI: | |
6678 | if (num_reported != 1) | |
6679 | break; | |
6680 | /* This is immediately after a reset so we can assume this is | |
6681 | * the PF's VSI | |
6682 | */ | |
6683 | pf->mac_seid = uplink_seid; | |
6684 | pf->pf_seid = downlink_seid; | |
6685 | pf->main_vsi_seid = seid; | |
6686 | if (printconfig) | |
6687 | dev_info(&pf->pdev->dev, | |
6688 | "pf_seid=%d main_vsi_seid=%d\n", | |
6689 | pf->pf_seid, pf->main_vsi_seid); | |
6690 | break; | |
6691 | case I40E_SWITCH_ELEMENT_TYPE_PF: | |
6692 | case I40E_SWITCH_ELEMENT_TYPE_VF: | |
6693 | case I40E_SWITCH_ELEMENT_TYPE_EMP: | |
6694 | case I40E_SWITCH_ELEMENT_TYPE_BMC: | |
6695 | case I40E_SWITCH_ELEMENT_TYPE_PE: | |
6696 | case I40E_SWITCH_ELEMENT_TYPE_PA: | |
6697 | /* ignore these for now */ | |
6698 | break; | |
6699 | default: | |
6700 | dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n", | |
6701 | element_type, seid); | |
6702 | break; | |
6703 | } | |
6704 | } | |
6705 | ||
6706 | /** | |
6707 | * i40e_fetch_switch_configuration - Get switch config from firmware | |
6708 | * @pf: board private structure | |
6709 | * @printconfig: should we print the contents | |
6710 | * | |
6711 | * Get the current switch configuration from the device and | |
6712 | * extract a few useful SEID values. | |
6713 | **/ | |
6714 | int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) | |
6715 | { | |
6716 | struct i40e_aqc_get_switch_config_resp *sw_config; | |
6717 | u16 next_seid = 0; | |
6718 | int ret = 0; | |
6719 | u8 *aq_buf; | |
6720 | int i; | |
6721 | ||
6722 | aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL); | |
6723 | if (!aq_buf) | |
6724 | return -ENOMEM; | |
6725 | ||
6726 | sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; | |
6727 | do { | |
6728 | u16 num_reported, num_total; | |
6729 | ||
6730 | ret = i40e_aq_get_switch_config(&pf->hw, sw_config, | |
6731 | I40E_AQ_LARGE_BUF, | |
6732 | &next_seid, NULL); | |
6733 | if (ret) { | |
6734 | dev_info(&pf->pdev->dev, | |
6735 | "get switch config failed %d aq_err=%x\n", | |
6736 | ret, pf->hw.aq.asq_last_status); | |
6737 | kfree(aq_buf); | |
6738 | return -ENOENT; | |
6739 | } | |
6740 | ||
6741 | num_reported = le16_to_cpu(sw_config->header.num_reported); | |
6742 | num_total = le16_to_cpu(sw_config->header.num_total); | |
6743 | ||
6744 | if (printconfig) | |
6745 | dev_info(&pf->pdev->dev, | |
6746 | "header: %d reported %d total\n", | |
6747 | num_reported, num_total); | |
6748 | ||
6749 | if (num_reported) { | |
6750 | int sz = sizeof(*sw_config) * num_reported; | |
6751 | ||
6752 | kfree(pf->sw_config); | |
6753 | pf->sw_config = kzalloc(sz, GFP_KERNEL); | |
6754 | if (pf->sw_config) | |
6755 | memcpy(pf->sw_config, sw_config, sz); | |
6756 | } | |
6757 | ||
6758 | for (i = 0; i < num_reported; i++) { | |
6759 | struct i40e_aqc_switch_config_element_resp *ele = | |
6760 | &sw_config->element[i]; | |
6761 | ||
6762 | i40e_setup_pf_switch_element(pf, ele, num_reported, | |
6763 | printconfig); | |
6764 | } | |
6765 | } while (next_seid != 0); | |
6766 | ||
6767 | kfree(aq_buf); | |
6768 | return ret; | |
6769 | } | |
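
/* A minimal usage sketch, not compiled into the driver: re-reading the
 * switch configuration with printconfig set so each element's type and
 * SEIDs are logged as they are parsed.  The wrapper name is an assumption
 * for illustration.
 */
#if 0
static void i40e_example_dump_switch_config(struct i40e_pf *pf)
{
	if (i40e_fetch_switch_configuration(pf, true))
		dev_info(&pf->pdev->dev,
			 "couldn't re-read the switch configuration\n");
}
#endif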
6770 | ||
6771 | /** | |
6772 | * i40e_setup_pf_switch - Setup the HW switch on startup or after reset | |
6773 | * @pf: board private structure | |
6774 | * | |
6775 | * Returns 0 on success, negative value on failure | |
6776 | **/ | |
6777 | static int i40e_setup_pf_switch(struct i40e_pf *pf) | |
6778 | { | |
6779 | int ret; | |
6780 | ||
6781 | /* find out what's out there already */ | |
6782 | ret = i40e_fetch_switch_configuration(pf, false); | |
6783 | if (ret) { | |
6784 | dev_info(&pf->pdev->dev, | |
6785 | "couldn't fetch switch config, err %d, aq_err %d\n", | |
6786 | ret, pf->hw.aq.asq_last_status); | |
6787 | return ret; | |
6788 | } | |
6789 | i40e_pf_reset_stats(pf); | |
6790 | ||
6791 | /* fdir VSI must happen first to be sure it gets queue 0, but only | |
6792 | * if there is enough room for the fdir VSI | |
6793 | */ | |
6794 | if (pf->num_lan_qps > 1) | |
6795 | i40e_fdir_setup(pf); | |
6796 | ||
6797 | /* first time setup */ | |
6798 | if (pf->lan_vsi == I40E_NO_VSI) { | |
6799 | struct i40e_vsi *vsi = NULL; | |
6800 | u16 uplink_seid; | |
6801 | ||
6802 | /* Set up the PF VSI associated with the PF's main VSI | |
6803 | * that is already in the HW switch | |
6804 | */ | |
6805 | if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) | |
6806 | uplink_seid = pf->veb[pf->lan_veb]->seid; | |
6807 | else | |
6808 | uplink_seid = pf->mac_seid; | |
6809 | ||
6810 | vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); | |
6811 | if (!vsi) { | |
6812 | dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); | |
6813 | i40e_fdir_teardown(pf); | |
6814 | return -EAGAIN; | |
6815 | } | |
6816 | /* accommodate kcompat by copying the main VSI queue count | |
6817 | * into the pf, since this newer code pushes the pf queue | |
6818 | * info down a level into a VSI | |
6819 | */ | |
6820 | pf->num_rx_queues = vsi->alloc_queue_pairs; | |
6821 | pf->num_tx_queues = vsi->alloc_queue_pairs; | |
6822 | } else { | |
6823 | /* force a reset of TC and queue layout configurations */ | |
6824 | u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; | |
6825 | pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; | |
6826 | pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; | |
6827 | i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); | |
6828 | } | |
6829 | i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); | |
6830 | ||
6831 | /* Setup static PF queue filter control settings */ | |
6832 | ret = i40e_setup_pf_filter_control(pf); | |
6833 | if (ret) { | |
6834 | dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n", | |
6835 | ret); | |
6836 | /* Failure here should not stop continuing other steps */ | |
6837 | } | |
6838 | ||
6839 | /* enable RSS in the HW, even for only one queue, as the stack can use | |
6840 | * the hash | |
6841 | */ | |
6842 | if ((pf->flags & I40E_FLAG_RSS_ENABLED)) | |
6843 | i40e_config_rss(pf); | |
6844 | ||
6845 | /* fill in link information and enable LSE reporting */ | |
6846 | i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); | |
6847 | i40e_link_event(pf); | |
6848 | ||
6849 | /* Initialize user-specific link properties */ | |
6850 | pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & | |
6851 | I40E_AQ_AN_COMPLETED) ? true : false); | |
6852 | pf->hw.fc.requested_mode = I40E_FC_DEFAULT; | |
6853 | if (pf->hw.phy.link_info.an_info & | |
6854 | (I40E_AQ_LINK_PAUSE_TX | I40E_AQ_LINK_PAUSE_RX)) | |
6855 | pf->hw.fc.current_mode = I40E_FC_FULL; | |
6856 | else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) | |
6857 | pf->hw.fc.current_mode = I40E_FC_TX_PAUSE; | |
6858 | else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) | |
6859 | pf->hw.fc.current_mode = I40E_FC_RX_PAUSE; | |
6860 | else | |
6861 | pf->hw.fc.current_mode = I40E_FC_DEFAULT; | |
6862 | ||
6863 | return ret; | |
6864 | } | |
6865 | ||
6866 | /** | |
6867 | * i40e_set_rss_size - helper to set rss_size | |
6868 | * @pf: board private structure | |
6869 | * @queues_left: how many queues are still available | |
6870 | **/ | |
6871 | static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left) | |
6872 | { | |
6873 | int num_tc0; | |
6874 | ||
6875 | num_tc0 = min_t(int, queues_left, pf->rss_size_max); | |
6876 | num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id())); | |
6877 | num_tc0 = rounddown_pow_of_two(num_tc0); | |
6878 | ||
6879 | return num_tc0; | |
6880 | } | |
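
/* Worked example for i40e_set_rss_size(), with invented numbers: given 60
 * queues left, rss_size_max = 64 and 12 CPUs on the local NUMA node, the
 * candidate is min(60, 64, 12) = 12, which rounddown_pow_of_two() trims to
 * 8 RSS queues.
 */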
6881 | ||
6882 | /** | |
6883 | * i40e_determine_queue_usage - Work out queue distribution | |
6884 | * @pf: board private structure | |
6885 | **/ | |
6886 | static void i40e_determine_queue_usage(struct i40e_pf *pf) | |
6887 | { | |
6888 | int accum_tc_size; | |
6889 | int queues_left; | |
6890 | ||
6891 | pf->num_lan_qps = 0; | |
6892 | pf->num_tc_qps = rounddown_pow_of_two(pf->num_tc_qps); | |
6893 | accum_tc_size = (I40E_MAX_TRAFFIC_CLASS - 1) * pf->num_tc_qps; | |
6894 | ||
6895 | /* Find the max queues to be put into basic use. We'll always be | |
6896 | * using TC0, whether or not DCB is running, and TC0 will get the | |
6897 | * big RSS set. | |
6898 | */ | |
6899 | queues_left = pf->hw.func_caps.num_tx_qp; | |
6900 | ||
6901 | if (!((pf->flags & I40E_FLAG_MSIX_ENABLED) && | |
6902 | (pf->flags & I40E_FLAG_MQ_ENABLED)) || | |
6903 | !(pf->flags & (I40E_FLAG_RSS_ENABLED | | |
6904 | I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) || | |
6905 | (queues_left == 1)) { | |
6906 | ||
6907 | /* one qp for PF, no queues for anything else */ | |
6908 | queues_left = 0; | |
6909 | pf->rss_size = pf->num_lan_qps = 1; | |
6910 | ||
6911 | /* make sure all the fancies are disabled */ | |
6912 | pf->flags &= ~(I40E_FLAG_RSS_ENABLED | | |
6913 | I40E_FLAG_MQ_ENABLED | | |
6914 | I40E_FLAG_FDIR_ENABLED | | |
6915 | I40E_FLAG_FDIR_ATR_ENABLED | | |
6916 | I40E_FLAG_DCB_ENABLED | | |
6917 | I40E_FLAG_SRIOV_ENABLED | | |
6918 | I40E_FLAG_VMDQ_ENABLED); | |
6919 | ||
6920 | } else if (pf->flags & I40E_FLAG_RSS_ENABLED && | |
6921 | !(pf->flags & I40E_FLAG_FDIR_ENABLED) && | |
6922 | !(pf->flags & I40E_FLAG_DCB_ENABLED)) { | |
6923 | ||
6924 | pf->rss_size = i40e_set_rss_size(pf, queues_left); | |
6925 | ||
6926 | queues_left -= pf->rss_size; | |
6927 | pf->num_lan_qps = pf->rss_size; | |
6928 | ||
6929 | } else if (pf->flags & I40E_FLAG_RSS_ENABLED && | |
6930 | !(pf->flags & I40E_FLAG_FDIR_ENABLED) && | |
6931 | (pf->flags & I40E_FLAG_DCB_ENABLED)) { | |
6932 | ||
6933 | /* save num_tc_qps queues for TCs 1 thru 7 and the rest | |
6934 | * are set up for RSS in TC0 | |
6935 | */ | |
6936 | queues_left -= accum_tc_size; | |
6937 | ||
6938 | pf->rss_size = i40e_set_rss_size(pf, queues_left); | |
6939 | ||
6940 | queues_left -= pf->rss_size; | |
6941 | if (queues_left < 0) { | |
6942 | dev_info(&pf->pdev->dev, "not enough queues for DCB\n"); | |
6943 | return; | |
6944 | } | |
6945 | ||
6946 | pf->num_lan_qps = pf->rss_size + accum_tc_size; | |
6947 | ||
6948 | } else if (pf->flags & I40E_FLAG_RSS_ENABLED && | |
6949 | (pf->flags & I40E_FLAG_FDIR_ENABLED) && | |
6950 | !(pf->flags & I40E_FLAG_DCB_ENABLED)) { | |
6951 | ||
6952 | queues_left -= 1; /* save 1 queue for FD */ | |
6953 | ||
6954 | pf->rss_size = i40e_set_rss_size(pf, queues_left); | |
6955 | ||
6956 | queues_left -= pf->rss_size; | |
6957 | if (queues_left < 0) { | |
6958 | dev_info(&pf->pdev->dev, "not enough queues for Flow Director\n"); | |
6959 | return; | |
6960 | } | |
6961 | ||
6962 | pf->num_lan_qps = pf->rss_size; | |
6963 | ||
6964 | } else if (pf->flags & I40E_FLAG_RSS_ENABLED && | |
6965 | (pf->flags & I40E_FLAG_FDIR_ENABLED) && | |
6966 | (pf->flags & I40E_FLAG_DCB_ENABLED)) { | |
6967 | ||
6968 | /* save num_tc_qps queues for each of TCs 1 thru 7, | |
6969 | * 1 queue for flow director, | |
6970 | * and the rest are set up for RSS in TC0 | |
6971 | */ | |
6972 | queues_left -= 1; | |
6973 | queues_left -= accum_tc_size; | |
6974 | ||
6975 | pf->rss_size = i40e_set_rss_size(pf, queues_left); | |
6976 | queues_left -= pf->rss_size; | |
6977 | if (queues_left < 0) { | |
6978 | dev_info(&pf->pdev->dev, "not enough queues for DCB and Flow Director\n"); | |
6979 | return; | |
6980 | } | |
6981 | ||
6982 | pf->num_lan_qps = pf->rss_size + accum_tc_size; | |
6983 | ||
6984 | } else { | |
6985 | dev_info(&pf->pdev->dev, | |
6986 | "Invalid configuration, flags=0x%08llx\n", pf->flags); | |
6987 | return; | |
6988 | } | |
6989 | ||
6990 | if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && | |
6991 | pf->num_vf_qps && pf->num_req_vfs && queues_left) { | |
6992 | pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left / | |
6993 | pf->num_vf_qps)); | |
6994 | queues_left -= (pf->num_req_vfs * pf->num_vf_qps); | |
6995 | } | |
6996 | ||
6997 | if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && | |
6998 | pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { | |
6999 | pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, | |
7000 | (queues_left / pf->num_vmdq_qps)); | |
7001 | queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps); | |
7002 | } | |
7003 | ||
7004 | return; | |
7005 | } | |
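
/* Worked example of the RSS + DCB + Flow Director branch above, with
 * invented numbers: for num_tx_qp = 128 and num_tc_qps = 8, accum_tc_size
 * is 7 * 8 = 56.  One queue is reserved for Flow Director and 56 for
 * TCs 1-7, leaving 71; if i40e_set_rss_size() then picks 16, num_lan_qps
 * becomes 16 + 56 = 72 and 55 queue pairs remain for the SR-IOV and VMDq
 * carve-outs at the end of the function.
 */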
7006 | ||
7007 | /** | |
7008 | * i40e_setup_pf_filter_control - Setup PF static filter control | |
7009 | * @pf: PF to be setup | |
7010 | * | |
7011 | * i40e_setup_pf_filter_control sets up a pf's initial filter control | |
7012 | * settings. If PE/FCoE are enabled then it will also set the per-PF | |
7013 | * filter sizes required for them. It also enables Flow Director, | |
7014 | * ethertype and macvlan type filter settings for the pf. | |
7015 | * | |
7016 | * Returns 0 on success, negative on failure | |
7017 | **/ | |
7018 | static int i40e_setup_pf_filter_control(struct i40e_pf *pf) | |
7019 | { | |
7020 | struct i40e_filter_control_settings *settings = &pf->filter_settings; | |
7021 | ||
7022 | settings->hash_lut_size = I40E_HASH_LUT_SIZE_128; | |
7023 | ||
7024 | /* Flow Director is enabled */ | |
7025 | if (pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED)) | |
7026 | settings->enable_fdir = true; | |
7027 | ||
7028 | /* Ethtype and MACVLAN filters enabled for PF */ | |
7029 | settings->enable_ethtype = true; | |
7030 | settings->enable_macvlan = true; | |
7031 | ||
7032 | if (i40e_set_filter_control(&pf->hw, settings)) | |
7033 | return -ENOENT; | |
7034 | ||
7035 | return 0; | |
7036 | } | |
7037 | ||
7038 | /** | |
7039 | * i40e_probe - Device initialization routine | |
7040 | * @pdev: PCI device information struct | |
7041 | * @ent: entry in i40e_pci_tbl | |
7042 | * | |
7043 | * i40e_probe initializes a pf identified by a pci_dev structure. | |
7044 | * The OS initialization, configuring of the pf private structure, | |
7045 | * and a hardware reset occur. | |
7046 | * | |
7047 | * Returns 0 on success, negative on failure | |
7048 | **/ | |
7049 | static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |
7050 | { | |
7051 | struct i40e_driver_version dv; | |
7052 | struct i40e_pf *pf; | |
7053 | struct i40e_hw *hw; | |
7054 | int err = 0; | |
7055 | u32 len; | |
7056 | ||
7057 | err = pci_enable_device_mem(pdev); | |
7058 | if (err) | |
7059 | return err; | |
7060 | ||
7061 | /* set up for high or low dma */ | |
7062 | if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { | |
7063 | /* coherent mask for the same size will always succeed if | |
7064 | * dma_set_mask does | |
7065 | */ | |
7066 | dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); | |
7067 | } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { | |
7068 | dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); | |
7069 | } else { | |
7070 | dev_err(&pdev->dev, "DMA configuration failed\n"); | |
7071 | err = -EIO; | |
7072 | goto err_dma; | |
7073 | } | |
7074 | ||
7075 | /* set up pci connections */ | |
7076 | err = pci_request_selected_regions(pdev, pci_select_bars(pdev, | |
7077 | IORESOURCE_MEM), i40e_driver_name); | |
7078 | if (err) { | |
7079 | dev_info(&pdev->dev, | |
7080 | "pci_request_selected_regions failed %d\n", err); | |
7081 | goto err_pci_reg; | |
7082 | } | |
7083 | ||
7084 | pci_enable_pcie_error_reporting(pdev); | |
7085 | pci_set_master(pdev); | |
7086 | ||
7087 | /* Now that we have a PCI connection, we need to do the | |
7088 | * low level device setup. This is primarily setting up | |
7089 | * the Admin Queue structures and then querying for the | |
7090 | * device's current profile information. | |
7091 | */ | |
7092 | pf = kzalloc(sizeof(*pf), GFP_KERNEL); | |
7093 | if (!pf) { | |
7094 | err = -ENOMEM; | |
7095 | goto err_pf_alloc; | |
7096 | } | |
7097 | pf->next_vsi = 0; | |
7098 | pf->pdev = pdev; | |
7099 | set_bit(__I40E_DOWN, &pf->state); | |
7100 | ||
7101 | hw = &pf->hw; | |
7102 | hw->back = pf; | |
7103 | hw->hw_addr = ioremap(pci_resource_start(pdev, 0), | |
7104 | pci_resource_len(pdev, 0)); | |
7105 | if (!hw->hw_addr) { | |
7106 | err = -EIO; | |
7107 | dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n", | |
7108 | (unsigned int)pci_resource_start(pdev, 0), | |
7109 | (unsigned int)pci_resource_len(pdev, 0), err); | |
7110 | goto err_ioremap; | |
7111 | } | |
7112 | hw->vendor_id = pdev->vendor; | |
7113 | hw->device_id = pdev->device; | |
7114 | pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); | |
7115 | hw->subsystem_vendor_id = pdev->subsystem_vendor; | |
7116 | hw->subsystem_device_id = pdev->subsystem_device; | |
7117 | hw->bus.device = PCI_SLOT(pdev->devfn); | |
7118 | hw->bus.func = PCI_FUNC(pdev->devfn); | |
7119 | ||
7120 | /* Reset here to make sure all is clean and to define PF 'n' */ | |
7121 | err = i40e_pf_reset(hw); | |
7122 | if (err) { | |
7123 | dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); | |
7124 | goto err_pf_reset; | |
7125 | } | |
7126 | pf->pfr_count++; | |
7127 | ||
7128 | hw->aq.num_arq_entries = I40E_AQ_LEN; | |
7129 | hw->aq.num_asq_entries = I40E_AQ_LEN; | |
7130 | hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; | |
7131 | hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; | |
7132 | pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; | |
7133 | snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1, | |
7134 | "%s-pf%d:misc", | |
7135 | dev_driver_string(&pf->pdev->dev), pf->hw.pf_id); | |
7136 | ||
7137 | err = i40e_init_shared_code(hw); | |
7138 | if (err) { | |
7139 | dev_info(&pdev->dev, "init_shared_code failed: %d\n", err); | |
7140 | goto err_pf_reset; | |
7141 | } | |
7142 | ||
7143 | err = i40e_init_adminq(hw); | |
7144 | dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw)); | |
fe310704 AS |
7145 | if (((hw->nvm.version & I40E_NVM_VERSION_HI_MASK) |
7146 | >> I40E_NVM_VERSION_HI_SHIFT) != I40E_CURRENT_NVM_VERSION_HI) { | |
7147 | dev_info(&pdev->dev, | |
7148 | "warning: NVM version not supported, supported version: %02x.%02x\n", | |
7149 | I40E_CURRENT_NVM_VERSION_HI, | |
7150 | I40E_CURRENT_NVM_VERSION_LO); | |
7151 | } | |
41c445ff JB |
7152 | if (err) { |
7153 | dev_info(&pdev->dev, | |
7154 | "init_adminq failed: %d expecting API %02x.%02x\n", | |
7155 | err, | |
7156 | I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR); | |
7157 | goto err_pf_reset; | |
7158 | } | |
7159 | ||
7160 | err = i40e_get_capabilities(pf); | |
7161 | if (err) | |
7162 | goto err_adminq_setup; | |
7163 | ||
7164 | err = i40e_sw_init(pf); | |
7165 | if (err) { | |
7166 | dev_info(&pdev->dev, "sw_init failed: %d\n", err); | |
7167 | goto err_sw_init; | |
7168 | } | |
7169 | ||
7170 | err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, | |
7171 | hw->func_caps.num_rx_qp, | |
7172 | pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); | |
7173 | if (err) { | |
7174 | dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err); | |
7175 | goto err_init_lan_hmc; | |
7176 | } | |
7177 | ||
7178 | err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); | |
7179 | if (err) { | |
7180 | dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err); | |
7181 | err = -ENOENT; | |
7182 | goto err_configure_lan_hmc; | |
7183 | } | |
7184 | ||
7185 | i40e_get_mac_addr(hw, hw->mac.addr); | |
7186 | if (i40e_validate_mac_addr(hw->mac.addr)) { | |
7187 | dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); | |
7188 | err = -EIO; | |
7189 | goto err_mac_addr; | |
7190 | } | |
7191 | dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr); | |
7192 | memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN); | |
7193 | ||
7194 | pci_set_drvdata(pdev, pf); | |
7195 | pci_save_state(pdev); | |
7196 | ||
7197 | /* set up periodic task facility */ | |
7198 | setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf); | |
7199 | pf->service_timer_period = HZ; | |
7200 | ||
7201 | INIT_WORK(&pf->service_task, i40e_service_task); | |
7202 | clear_bit(__I40E_SERVICE_SCHED, &pf->state); | |
7203 | pf->flags |= I40E_FLAG_NEED_LINK_UPDATE; | |
7204 | pf->link_check_timeout = jiffies; | |
7205 | ||
7206 | /* set up the main switch operations */ | |
7207 | i40e_determine_queue_usage(pf); | |
7208 | i40e_init_interrupt_scheme(pf); | |
7209 | ||
7210 | /* Set up the *vsi struct based on the number of VSIs in the HW, | |
7211 | * and set up our local tracking of the MAIN PF vsi. | |
7212 | */ | |
7213 | len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis; | |
7214 | pf->vsi = kzalloc(len, GFP_KERNEL); | |
ed87ac09 WY |
7215 | if (!pf->vsi) { |
7216 | err = -ENOMEM; | |
41c445ff | 7217 | goto err_switch_setup; |
ed87ac09 | 7218 | } |
41c445ff JB |
7219 | |
7220 | err = i40e_setup_pf_switch(pf); | |
7221 | if (err) { | |
7222 | dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); | |
7223 | goto err_vsis; | |
7224 | } | |
7225 | ||
7226 | /* The main driver is (mostly) up and happy. We need to set this state | |
7227 | * before setting up the misc vector or we get a race and the vector | |
7228 | * ends up disabled forever. | |
7229 | */ | |
7230 | clear_bit(__I40E_DOWN, &pf->state); | |
7231 | ||
7232 | /* In case of MSIX we are going to setup the misc vector right here | |
7233 | * to handle admin queue events etc. In case of legacy and MSI | |
7234 | * the misc functionality and queue processing is combined in | |
7235 | * the same vector and that gets setup at open. | |
7236 | */ | |
7237 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) { | |
7238 | err = i40e_setup_misc_vector(pf); | |
7239 | if (err) { | |
7240 | dev_info(&pdev->dev, | |
7241 | "setup of misc vector failed: %d\n", err); | |
7242 | goto err_vsis; | |
7243 | } | |
7244 | } | |
7245 | ||
7246 | /* prep for VF support */ | |
7247 | if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && | |
7248 | (pf->flags & I40E_FLAG_MSIX_ENABLED)) { | |
7249 | u32 val; | |
7250 | ||
7251 | /* disable link interrupts for VFs */ | |
7252 | val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM); | |
7253 | val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; | |
7254 | wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val); | |
7255 | i40e_flush(hw); | |
7256 | } | |
7257 | ||
7258 | i40e_dbg_pf_init(pf); | |
7259 | ||
7260 | /* tell the firmware that we're starting */ | |
7261 | dv.major_version = DRV_VERSION_MAJOR; | |
7262 | dv.minor_version = DRV_VERSION_MINOR; | |
7263 | dv.build_version = DRV_VERSION_BUILD; | |
7264 | dv.subbuild_version = 0; | |
7265 | i40e_aq_send_driver_version(&pf->hw, &dv, NULL); | |
7266 | ||
7267 | /* since everything's happy, start the service_task timer */ | |
7268 | mod_timer(&pf->service_timer, | |
7269 | round_jiffies(jiffies + pf->service_timer_period)); | |
7270 | ||
7271 | return 0; | |
7272 | ||
7273 | /* Unwind what we've done if something failed in the setup */ | |
7274 | err_vsis: | |
7275 | set_bit(__I40E_DOWN, &pf->state); | |
7276 | err_switch_setup: | |
7277 | i40e_clear_interrupt_scheme(pf); | |
7278 | kfree(pf->vsi); | |
7279 | del_timer_sync(&pf->service_timer); | |
7280 | err_mac_addr: | |
7281 | err_configure_lan_hmc: | |
7282 | (void)i40e_shutdown_lan_hmc(hw); | |
7283 | err_init_lan_hmc: | |
7284 | kfree(pf->qp_pile); | |
7285 | kfree(pf->irq_pile); | |
7286 | err_sw_init: | |
7287 | err_adminq_setup: | |
7288 | (void)i40e_shutdown_adminq(hw); | |
7289 | err_pf_reset: | |
7290 | iounmap(hw->hw_addr); | |
7291 | err_ioremap: | |
7292 | kfree(pf); | |
7293 | err_pf_alloc: | |
7294 | pci_disable_pcie_error_reporting(pdev); | |
7295 | pci_release_selected_regions(pdev, | |
7296 | pci_select_bars(pdev, IORESOURCE_MEM)); | |
7297 | err_pci_reg: | |
7298 | err_dma: | |
7299 | pci_disable_device(pdev); | |
7300 | return err; | |
7301 | } | |
7302 | ||
7303 | /** | |
7304 | * i40e_remove - Device removal routine | |
7305 | * @pdev: PCI device information struct | |
7306 | * | |
7307 | * i40e_remove is called by the PCI subsystem to alert the driver | |
7308 | * that it should release a PCI device. This could be caused by a | |
7309 | * Hot-Plug event, or because the driver is going to be removed from | |
7310 | * memory. | |
7311 | **/ | |
7312 | static void i40e_remove(struct pci_dev *pdev) | |
7313 | { | |
7314 | struct i40e_pf *pf = pci_get_drvdata(pdev); | |
7315 | i40e_status ret_code; | |
7316 | u32 reg; | |
7317 | int i; | |
7318 | ||
7319 | i40e_dbg_pf_exit(pf); | |
7320 | ||
7321 | if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { | |
7322 | i40e_free_vfs(pf); | |
7323 | pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; | |
7324 | } | |
7325 | ||
7326 | /* no more scheduling of any task */ | |
7327 | set_bit(__I40E_DOWN, &pf->state); | |
7328 | del_timer_sync(&pf->service_timer); | |
7329 | cancel_work_sync(&pf->service_task); | |
7330 | ||
7331 | i40e_fdir_teardown(pf); | |
7332 | ||
7333 | /* If there is a switch structure or any orphans, remove them. | |
7334 | * This will leave only the PF's VSI remaining. | |
7335 | */ | |
7336 | for (i = 0; i < I40E_MAX_VEB; i++) { | |
7337 | if (!pf->veb[i]) | |
7338 | continue; | |
7339 | ||
7340 | if (pf->veb[i]->uplink_seid == pf->mac_seid || | |
7341 | pf->veb[i]->uplink_seid == 0) | |
7342 | i40e_switch_branch_release(pf->veb[i]); | |
7343 | } | |
7344 | ||
7345 | /* Now we can shutdown the PF's VSI, just before we kill | |
7346 | * adminq and hmc. | |
7347 | */ | |
7348 | if (pf->vsi[pf->lan_vsi]) | |
7349 | i40e_vsi_release(pf->vsi[pf->lan_vsi]); | |
7350 | ||
7351 | i40e_stop_misc_vector(pf); | |
7352 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) { | |
7353 | synchronize_irq(pf->msix_entries[0].vector); | |
7354 | free_irq(pf->msix_entries[0].vector, pf); | |
7355 | } | |
7356 | ||
7357 | /* shutdown and destroy the HMC */ | |
7358 | ret_code = i40e_shutdown_lan_hmc(&pf->hw); | |
7359 | if (ret_code) | |
7360 | dev_warn(&pdev->dev, | |
7361 | "Failed to destroy the HMC resources: %d\n", ret_code); | |
7362 | ||
7363 | /* shutdown the adminq */ | |
7364 | i40e_aq_queue_shutdown(&pf->hw, true); | |
7365 | ret_code = i40e_shutdown_adminq(&pf->hw); | |
7366 | if (ret_code) | |
7367 | dev_warn(&pdev->dev, | |
7368 | "Failed to destroy the Admin Queue resources: %d\n", | |
7369 | ret_code); | |
7370 | ||
7371 | /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ | |
7372 | i40e_clear_interrupt_scheme(pf); | |
7373 | for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { | |
7374 | if (pf->vsi[i]) { | |
7375 | i40e_vsi_clear_rings(pf->vsi[i]); | |
7376 | i40e_vsi_clear(pf->vsi[i]); | |
7377 | pf->vsi[i] = NULL; | |
7378 | } | |
7379 | } | |
7380 | ||
7381 | for (i = 0; i < I40E_MAX_VEB; i++) { | |
7382 | kfree(pf->veb[i]); | |
7383 | pf->veb[i] = NULL; | |
7384 | } | |
7385 | ||
7386 | kfree(pf->qp_pile); | |
7387 | kfree(pf->irq_pile); | |
7388 | kfree(pf->sw_config); | |
7389 | kfree(pf->vsi); | |
7390 | ||
7391 | /* force a PF reset to clean anything leftover */ | |
7392 | reg = rd32(&pf->hw, I40E_PFGEN_CTRL); | |
7393 | wr32(&pf->hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); | |
7394 | i40e_flush(&pf->hw); | |
7395 | ||
7396 | iounmap(pf->hw.hw_addr); | |
7397 | kfree(pf); | |
7398 | pci_release_selected_regions(pdev, | |
7399 | pci_select_bars(pdev, IORESOURCE_MEM)); | |
7400 | ||
7401 | pci_disable_pcie_error_reporting(pdev); | |
7402 | pci_disable_device(pdev); | |
7403 | } | |
7404 | ||
7405 | /** | |
7406 | * i40e_pci_error_detected - warning that something funky happened in PCI land | |
7407 | * @pdev: PCI device information struct | |
7408 | * | |
7409 | * Called to warn that something happened and the error handling steps | |
7410 | * are in progress. Allows the driver to quiesce things, be ready for | |
7411 | * remediation. | |
7412 | **/ | |
7413 | static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev, | |
7414 | enum pci_channel_state error) | |
7415 | { | |
7416 | struct i40e_pf *pf = pci_get_drvdata(pdev); | |
7417 | ||
7418 | dev_info(&pdev->dev, "%s: error %d\n", __func__, error); | |
7419 | ||
7420 | /* shutdown all operations */ | |
7421 | i40e_pf_quiesce_all_vsi(pf); | |
7422 | ||
7423 | /* Request a slot reset */ | |
7424 | return PCI_ERS_RESULT_NEED_RESET; | |
7425 | } | |
7426 | ||
7427 | /** | |
7428 | * i40e_pci_error_slot_reset - a PCI slot reset just happened | |
7429 | * @pdev: PCI device information struct | |
7430 | * | |
7431 | * Called to find if the driver can work with the device now that | |
7432 | * the pci slot has been reset. If a basic connection seems good | |
7433 | * (registers are readable and have sane content) then return a | |
7434 | * happy little PCI_ERS_RESULT_xxx. | |
7435 | **/ | |
7436 | static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev) | |
7437 | { | |
7438 | struct i40e_pf *pf = pci_get_drvdata(pdev); | |
7439 | pci_ers_result_t result; | |
7440 | int err; | |
7441 | u32 reg; | |
7442 | ||
7443 | dev_info(&pdev->dev, "%s\n", __func__); | |
7444 | if (pci_enable_device_mem(pdev)) { | |
7445 | dev_info(&pdev->dev, | |
7446 | "Cannot re-enable PCI device after reset.\n"); | |
7447 | result = PCI_ERS_RESULT_DISCONNECT; | |
7448 | } else { | |
7449 | pci_set_master(pdev); | |
7450 | pci_restore_state(pdev); | |
7451 | pci_save_state(pdev); | |
7452 | pci_wake_from_d3(pdev, false); | |
7453 | ||
7454 | reg = rd32(&pf->hw, I40E_GLGEN_RTRIG); | |
7455 | if (reg == 0) | |
7456 | result = PCI_ERS_RESULT_RECOVERED; | |
7457 | else | |
7458 | result = PCI_ERS_RESULT_DISCONNECT; | |
7459 | } | |
7460 | ||
7461 | err = pci_cleanup_aer_uncorrect_error_status(pdev); | |
7462 | if (err) { | |
7463 | dev_info(&pdev->dev, | |
7464 | "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", | |
7465 | err); | |
7466 | /* non-fatal, continue */ | |
7467 | } | |
7468 | ||
7469 | return result; | |
7470 | } | |
7471 | ||
7472 | /** | |
7473 | * i40e_pci_error_resume - restart operations after PCI error recovery | |
7474 | * @pdev: PCI device information struct | |
7475 | * | |
7476 | * Called to allow the driver to bring things back up after PCI error | |
7477 | * and/or reset recovery has finished. | |
7478 | **/ | |
7479 | static void i40e_pci_error_resume(struct pci_dev *pdev) | |
7480 | { | |
7481 | struct i40e_pf *pf = pci_get_drvdata(pdev); | |
7482 | ||
7483 | dev_info(&pdev->dev, "%s\n", __func__); | |
7484 | i40e_handle_reset_warning(pf); | |
7485 | } | |
7486 | ||
7487 | static const struct pci_error_handlers i40e_err_handler = { | |
7488 | .error_detected = i40e_pci_error_detected, | |
7489 | .slot_reset = i40e_pci_error_slot_reset, | |
7490 | .resume = i40e_pci_error_resume, | |
7491 | }; | |
7492 | ||
7493 | static struct pci_driver i40e_driver = { | |
7494 | .name = i40e_driver_name, | |
7495 | .id_table = i40e_pci_tbl, | |
7496 | .probe = i40e_probe, | |
7497 | .remove = i40e_remove, | |
7498 | .err_handler = &i40e_err_handler, | |
7499 | .sriov_configure = i40e_pci_sriov_configure, | |
7500 | }; | |
7501 | ||
7502 | /** | |
7503 | * i40e_init_module - Driver registration routine | |
7504 | * | |
7505 | * i40e_init_module is the first routine called when the driver is | |
7506 | * loaded. All it does is register with the PCI subsystem. | |
7507 | **/ | |
7508 | static int __init i40e_init_module(void) | |
7509 | { | |
7510 | pr_info("%s: %s - version %s\n", i40e_driver_name, | |
7511 | i40e_driver_string, i40e_driver_version_str); | |
7512 | pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); | |
7513 | i40e_dbg_init(); | |
7514 | return pci_register_driver(&i40e_driver); | |
7515 | } | |
7516 | module_init(i40e_init_module); | |
7517 | ||
7518 | /** | |
7519 | * i40e_exit_module - Driver exit cleanup routine | |
7520 | * | |
7521 | * i40e_exit_module is called just before the driver is removed | |
7522 | * from memory. | |
7523 | **/ | |
7524 | static void __exit i40e_exit_module(void) | |
7525 | { | |
7526 | pci_unregister_driver(&i40e_driver); | |
7527 | i40e_dbg_exit(); | |
7528 | } | |
7529 | module_exit(i40e_exit_module); |