drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
1 /*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2015 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, see <http://www.gnu.org/licenses/>.
17
18 The full GNU General Public License is included in this distribution in
19 the file called "COPYING".
20
21 Contact Information:
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25 *******************************************************************************/
26
27 /******************************************************************************
28 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
29 ******************************************************************************/
30
31 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32
33 #include <linux/types.h>
34 #include <linux/bitops.h>
35 #include <linux/module.h>
36 #include <linux/pci.h>
37 #include <linux/netdevice.h>
38 #include <linux/vmalloc.h>
39 #include <linux/string.h>
40 #include <linux/in.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/sctp.h>
44 #include <linux/ipv6.h>
45 #include <linux/slab.h>
46 #include <net/checksum.h>
47 #include <net/ip6_checksum.h>
48 #include <linux/ethtool.h>
49 #include <linux/if.h>
50 #include <linux/if_vlan.h>
51 #include <linux/prefetch.h>
52
53 #include "ixgbevf.h"
54
55 const char ixgbevf_driver_name[] = "ixgbevf";
56 static const char ixgbevf_driver_string[] =
57 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
58
59 #define DRV_VERSION "3.2.2-k"
60 const char ixgbevf_driver_version[] = DRV_VERSION;
61 static char ixgbevf_copyright[] =
62 "Copyright (c) 2009 - 2015 Intel Corporation.";
63
64 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
65 [board_82599_vf] = &ixgbevf_82599_vf_info,
66 [board_82599_vf_hv] = &ixgbevf_82599_vf_hv_info,
67 [board_X540_vf] = &ixgbevf_X540_vf_info,
68 [board_X540_vf_hv] = &ixgbevf_X540_vf_hv_info,
69 [board_X550_vf] = &ixgbevf_X550_vf_info,
70 [board_X550_vf_hv] = &ixgbevf_X550_vf_hv_info,
71 [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
72 [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
73 [board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info,
74 };
75
76 /* ixgbevf_pci_tbl - PCI Device ID Table
77 *
78 * Wildcard entries (PCI_ANY_ID) should come last
79 * Last entry must be all 0s
80 *
81 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
82 * Class, Class Mask, private data (not used) }
83 */
84 static const struct pci_device_id ixgbevf_pci_tbl[] = {
85 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
86 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
87 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
88 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
89 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
90 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
91 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
92 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
93 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
94 /* required last entry */
95 {0, }
96 };
97 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
98
99 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
100 MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
101 MODULE_LICENSE("GPL");
102 MODULE_VERSION(DRV_VERSION);
103
104 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
105 static int debug = -1;
106 module_param(debug, int, 0);
107 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
108
109 static struct workqueue_struct *ixgbevf_wq;
110
111 static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
112 {
113 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
114 !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
115 !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
116 queue_work(ixgbevf_wq, &adapter->service_task);
117 }
118
119 static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
120 {
121 BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));
122
123 /* flush memory to make sure state is correct before next watchdog */
124 smp_mb__before_atomic();
125 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
126 }
127
128 /* forward decls */
129 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
130 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
131 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
132
133 static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
134 {
135 struct ixgbevf_adapter *adapter = hw->back;
136
137 if (!hw->hw_addr)
138 return;
139 hw->hw_addr = NULL;
140 dev_err(&adapter->pdev->dev, "Adapter removed\n");
141 if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
142 ixgbevf_service_event_schedule(adapter);
143 }
144
145 static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
146 {
147 u32 value;
148
149 /* The following check not only optimizes slightly by skipping the
150 * status register read when the register that just returned
151 * IXGBE_FAILED_READ_REG was the status register itself, it also
152 * blocks any potential recursion between this function and
153 * ixgbevf_read_reg().
154 */
155 if (reg == IXGBE_VFSTATUS) {
156 ixgbevf_remove_adapter(hw);
157 return;
158 }
159 value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
160 if (value == IXGBE_FAILED_READ_REG)
161 ixgbevf_remove_adapter(hw);
162 }
163
164 u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
165 {
166 u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
167 u32 value;
168
169 if (IXGBE_REMOVED(reg_addr))
170 return IXGBE_FAILED_READ_REG;
171 value = readl(reg_addr + reg);
172 if (unlikely(value == IXGBE_FAILED_READ_REG))
173 ixgbevf_check_remove(hw, reg);
174 return value;
175 }
176
177 /**
178 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
179 * @adapter: pointer to adapter struct
180 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
181 * @queue: queue to map the corresponding interrupt to
182 * @msix_vector: the vector to map to the corresponding queue
183 **/
184 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
185 u8 queue, u8 msix_vector)
186 {
187 u32 ivar, index;
188 struct ixgbe_hw *hw = &adapter->hw;
189
190 if (direction == -1) {
191 /* other causes */
192 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
193 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
194 ivar &= ~0xFF;
195 ivar |= msix_vector;
196 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
197 } else {
198 /* Tx or Rx causes */
199 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
200 index = ((16 * (queue & 1)) + (8 * direction));
201 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
202 ivar &= ~(0xFF << index);
203 ivar |= (msix_vector << index);
204 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
205 }
206 }
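/* Worked example (illustrative, not part of the original source): for Tx/Rx
 * causes the IVAR slot is selected by queue and direction as computed above.
 * Mapping Rx queue 3 (direction 0) to MSI-X vector 2 gives
 *
 *   index    = (16 * (3 & 1)) + (8 * 0) = 16
 *   register = IXGBE_VTIVAR(3 >> 1)     = IXGBE_VTIVAR(1)
 *
 * so bits 23:16 of VTIVAR(1) are written with (2 | IXGBE_IVAR_ALLOC_VAL).
 * Each VTIVAR register therefore packs four 8-bit entries, the Rx and Tx
 * causes of two adjacent queues.
 */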
207
208 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
209 struct ixgbevf_tx_buffer *tx_buffer)
210 {
211 if (tx_buffer->skb) {
212 dev_kfree_skb_any(tx_buffer->skb);
213 if (dma_unmap_len(tx_buffer, len))
214 dma_unmap_single(tx_ring->dev,
215 dma_unmap_addr(tx_buffer, dma),
216 dma_unmap_len(tx_buffer, len),
217 DMA_TO_DEVICE);
218 } else if (dma_unmap_len(tx_buffer, len)) {
219 dma_unmap_page(tx_ring->dev,
220 dma_unmap_addr(tx_buffer, dma),
221 dma_unmap_len(tx_buffer, len),
222 DMA_TO_DEVICE);
223 }
224 tx_buffer->next_to_watch = NULL;
225 tx_buffer->skb = NULL;
226 dma_unmap_len_set(tx_buffer, len, 0);
227 /* tx_buffer must be completely set up in the transmit path */
228 }
229
230 static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
231 {
232 return ring->stats.packets;
233 }
234
235 static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
236 {
237 struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
238 struct ixgbe_hw *hw = &adapter->hw;
239
240 u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
241 u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));
242
243 if (head != tail)
244 return (head < tail) ?
245 tail - head : (tail + ring->count - head);
246
247 return 0;
248 }
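/* Worked example (illustrative): with a 512-descriptor ring, head == 500 and
 * tail == 10 means the queue has wrapped, so the pending count is
 * tail + ring->count - head = 10 + 512 - 500 = 22; with head == 10 and
 * tail == 500 it is simply tail - head = 490.
 */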
249
250 static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
251 {
252 u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
253 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
254 u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);
255
256 clear_check_for_tx_hang(tx_ring);
257
258 /* Check for a hung queue, but be thorough. This verifies
259 * that no transmit has completed since the previous check
260 * AND that there is at least one packet pending. Only then
261 * is the ARMED bit set to indicate a potential hang.
262 */
263 if ((tx_done_old == tx_done) && tx_pending) {
264 /* make sure it is true for two checks in a row */
265 return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
266 &tx_ring->state);
267 }
268 /* reset the countdown */
269 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);
270
271 /* update completed stats and continue */
272 tx_ring->tx_stats.tx_done_old = tx_done;
273
274 return false;
275 }
276
277 static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
278 {
279 /* Do the reset outside of interrupt context */
280 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
281 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
282 ixgbevf_service_event_schedule(adapter);
283 }
284 }
285
286 /**
287 * ixgbevf_tx_timeout - Respond to a Tx Hang
288 * @netdev: network interface device structure
289 **/
290 static void ixgbevf_tx_timeout(struct net_device *netdev)
291 {
292 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
293
294 ixgbevf_tx_timeout_reset(adapter);
295 }
296
297 /**
298 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
299 * @q_vector: board private structure
300 * @tx_ring: tx ring to clean
301 * @napi_budget: Used to determine if we are in netpoll
302 **/
303 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
304 struct ixgbevf_ring *tx_ring, int napi_budget)
305 {
306 struct ixgbevf_adapter *adapter = q_vector->adapter;
307 struct ixgbevf_tx_buffer *tx_buffer;
308 union ixgbe_adv_tx_desc *tx_desc;
309 unsigned int total_bytes = 0, total_packets = 0;
310 unsigned int budget = tx_ring->count / 2;
311 unsigned int i = tx_ring->next_to_clean;
312
313 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
314 return true;
315
316 tx_buffer = &tx_ring->tx_buffer_info[i];
317 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
318 i -= tx_ring->count;
319
320 do {
321 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
322
323 /* if next_to_watch is not set then there is no work pending */
324 if (!eop_desc)
325 break;
326
327 /* prevent any other reads prior to eop_desc */
328 read_barrier_depends();
329
330 /* if DD is not set pending work has not been completed */
331 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
332 break;
333
334 /* clear next_to_watch to prevent false hangs */
335 tx_buffer->next_to_watch = NULL;
336
337 /* update the statistics for this packet */
338 total_bytes += tx_buffer->bytecount;
339 total_packets += tx_buffer->gso_segs;
340
341 /* free the skb */
342 napi_consume_skb(tx_buffer->skb, napi_budget);
343
344 /* unmap skb header data */
345 dma_unmap_single(tx_ring->dev,
346 dma_unmap_addr(tx_buffer, dma),
347 dma_unmap_len(tx_buffer, len),
348 DMA_TO_DEVICE);
349
350 /* clear tx_buffer data */
351 tx_buffer->skb = NULL;
352 dma_unmap_len_set(tx_buffer, len, 0);
353
354 /* unmap remaining buffers */
355 while (tx_desc != eop_desc) {
356 tx_buffer++;
357 tx_desc++;
358 i++;
359 if (unlikely(!i)) {
360 i -= tx_ring->count;
361 tx_buffer = tx_ring->tx_buffer_info;
362 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
363 }
364
365 /* unmap any remaining paged data */
366 if (dma_unmap_len(tx_buffer, len)) {
367 dma_unmap_page(tx_ring->dev,
368 dma_unmap_addr(tx_buffer, dma),
369 dma_unmap_len(tx_buffer, len),
370 DMA_TO_DEVICE);
371 dma_unmap_len_set(tx_buffer, len, 0);
372 }
373 }
374
375 /* move us one more past the eop_desc for start of next pkt */
376 tx_buffer++;
377 tx_desc++;
378 i++;
379 if (unlikely(!i)) {
380 i -= tx_ring->count;
381 tx_buffer = tx_ring->tx_buffer_info;
382 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
383 }
384
385 /* issue prefetch for next Tx descriptor */
386 prefetch(tx_desc);
387
388 /* update budget accounting */
389 budget--;
390 } while (likely(budget));
391
392 i += tx_ring->count;
393 tx_ring->next_to_clean = i;
394 u64_stats_update_begin(&tx_ring->syncp);
395 tx_ring->stats.bytes += total_bytes;
396 tx_ring->stats.packets += total_packets;
397 u64_stats_update_end(&tx_ring->syncp);
398 q_vector->tx.total_bytes += total_bytes;
399 q_vector->tx.total_packets += total_packets;
400
401 if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
402 struct ixgbe_hw *hw = &adapter->hw;
403 union ixgbe_adv_tx_desc *eop_desc;
404
405 eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;
406
407 pr_err("Detected Tx Unit Hang\n"
408 " Tx Queue <%d>\n"
409 " TDH, TDT <%x>, <%x>\n"
410 " next_to_use <%x>\n"
411 " next_to_clean <%x>\n"
412 "tx_buffer_info[next_to_clean]\n"
413 " next_to_watch <%p>\n"
414 " eop_desc->wb.status <%x>\n"
415 " time_stamp <%lx>\n"
416 " jiffies <%lx>\n",
417 tx_ring->queue_index,
418 IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
419 IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
420 tx_ring->next_to_use, i,
421 eop_desc, (eop_desc ? eop_desc->wb.status : 0),
422 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
423
424 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
425
426 /* schedule immediate reset if we believe we hung */
427 ixgbevf_tx_timeout_reset(adapter);
428
429 return true;
430 }
431
432 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
433 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
434 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
435 /* Make sure that anybody stopping the queue after this
436 * sees the new next_to_clean.
437 */
438 smp_mb();
439
440 if (__netif_subqueue_stopped(tx_ring->netdev,
441 tx_ring->queue_index) &&
442 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
443 netif_wake_subqueue(tx_ring->netdev,
444 tx_ring->queue_index);
445 ++tx_ring->tx_stats.restart_queue;
446 }
447 }
448
449 return !!budget;
450 }
451
452 /**
453 * ixgbevf_rx_skb - Helper function to determine proper Rx method
454 * @q_vector: structure containing interrupt and ring information
455 * @skb: packet to send up
456 **/
457 static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
458 struct sk_buff *skb)
459 {
460 #ifdef CONFIG_NET_RX_BUSY_POLL
461 skb_mark_napi_id(skb, &q_vector->napi);
462
463 if (ixgbevf_qv_busy_polling(q_vector)) {
464 netif_receive_skb(skb);
465 /* exit early if we busy polled */
466 return;
467 }
468 #endif /* CONFIG_NET_RX_BUSY_POLL */
469
470 napi_gro_receive(&q_vector->napi, skb);
471 }
472
473 #define IXGBE_RSS_L4_TYPES_MASK \
474 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
475 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
476 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
477 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
478
479 static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
480 union ixgbe_adv_rx_desc *rx_desc,
481 struct sk_buff *skb)
482 {
483 u16 rss_type;
484
485 if (!(ring->netdev->features & NETIF_F_RXHASH))
486 return;
487
488 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
489 IXGBE_RXDADV_RSSTYPE_MASK;
490
491 if (!rss_type)
492 return;
493
494 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
495 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
496 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
497 }
498
499 /**
500 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
501 * @ring: structure containing ring specific data
502 * @rx_desc: current Rx descriptor being processed
503 * @skb: skb currently being received and modified
504 **/
505 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
506 union ixgbe_adv_rx_desc *rx_desc,
507 struct sk_buff *skb)
508 {
509 skb_checksum_none_assert(skb);
510
511 /* Rx csum disabled */
512 if (!(ring->netdev->features & NETIF_F_RXCSUM))
513 return;
514
515 /* if IP and error */
516 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
517 ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
518 ring->rx_stats.csum_err++;
519 return;
520 }
521
522 if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
523 return;
524
525 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
526 ring->rx_stats.csum_err++;
527 return;
528 }
529
530 /* It must be a TCP or UDP packet with a valid checksum */
531 skb->ip_summed = CHECKSUM_UNNECESSARY;
532 }
533
534 /**
535 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
536 * @rx_ring: rx descriptor ring packet is being transacted on
537 * @rx_desc: pointer to the EOP Rx descriptor
538 * @skb: pointer to current skb being populated
539 *
540 * This function checks the ring, descriptor, and packet information in
541 * order to populate the checksum, VLAN, protocol, and other fields within
542 * the skb.
543 **/
544 static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
545 union ixgbe_adv_rx_desc *rx_desc,
546 struct sk_buff *skb)
547 {
548 ixgbevf_rx_hash(rx_ring, rx_desc, skb);
549 ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
550
551 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
552 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
553 unsigned long *active_vlans = netdev_priv(rx_ring->netdev);
554
555 if (test_bit(vid & VLAN_VID_MASK, active_vlans))
556 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
557 }
558
559 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
560 }
561
562 /**
563 * ixgbevf_is_non_eop - process handling of non-EOP buffers
564 * @rx_ring: Rx ring being processed
565 * @rx_desc: Rx descriptor for current buffer
566 * @skb: current socket buffer containing buffer in progress
567 *
568 * This function updates next to clean. If the buffer is an EOP buffer
569 * this function exits returning false, otherwise it will place the
570 * sk_buff in the next buffer to be chained and return true indicating
571 * that this is in fact a non-EOP buffer.
572 **/
573 static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
574 union ixgbe_adv_rx_desc *rx_desc)
575 {
576 u32 ntc = rx_ring->next_to_clean + 1;
577
578 /* fetch, update, and store next to clean */
579 ntc = (ntc < rx_ring->count) ? ntc : 0;
580 rx_ring->next_to_clean = ntc;
581
582 prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));
583
584 if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
585 return false;
586
587 return true;
588 }
589
590 static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
591 struct ixgbevf_rx_buffer *bi)
592 {
593 struct page *page = bi->page;
594 dma_addr_t dma = bi->dma;
595
596 /* since we are recycling buffers we should seldom need to alloc */
597 if (likely(page))
598 return true;
599
600 /* alloc new page for storage */
601 page = dev_alloc_page();
602 if (unlikely(!page)) {
603 rx_ring->rx_stats.alloc_rx_page_failed++;
604 return false;
605 }
606
607 /* map page for use */
608 dma = dma_map_page(rx_ring->dev, page, 0,
609 PAGE_SIZE, DMA_FROM_DEVICE);
610
611 /* if mapping failed free memory back to system since
612 * there isn't much point in holding memory we can't use
613 */
614 if (dma_mapping_error(rx_ring->dev, dma)) {
615 __free_page(page);
616
617 rx_ring->rx_stats.alloc_rx_buff_failed++;
618 return false;
619 }
620
621 bi->dma = dma;
622 bi->page = page;
623 bi->page_offset = 0;
624
625 return true;
626 }
627
628 /**
629 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
630 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
631 * @cleaned_count: number of buffers to replace
632 **/
633 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
634 u16 cleaned_count)
635 {
636 union ixgbe_adv_rx_desc *rx_desc;
637 struct ixgbevf_rx_buffer *bi;
638 unsigned int i = rx_ring->next_to_use;
639
640 /* nothing to do or no valid netdev defined */
641 if (!cleaned_count || !rx_ring->netdev)
642 return;
643
644 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
645 bi = &rx_ring->rx_buffer_info[i];
646 i -= rx_ring->count;
647
648 do {
649 if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
650 break;
651
652 /* Refresh the desc even if pkt_addr didn't change
653 * because each write-back erases this info.
654 */
655 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
656
657 rx_desc++;
658 bi++;
659 i++;
660 if (unlikely(!i)) {
661 rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
662 bi = rx_ring->rx_buffer_info;
663 i -= rx_ring->count;
664 }
665
666 /* clear the hdr_addr for the next_to_use descriptor */
667 rx_desc->read.hdr_addr = 0;
668
669 cleaned_count--;
670 } while (cleaned_count);
671
672 i += rx_ring->count;
673
674 if (rx_ring->next_to_use != i) {
675 /* record the next descriptor to use */
676 rx_ring->next_to_use = i;
677
678 /* update next to alloc since we have filled the ring */
679 rx_ring->next_to_alloc = i;
680
681 /* Force memory writes to complete before letting h/w
682 * know there are new descriptors to fetch. (Only
683 * applicable for weak-ordered memory model archs,
684 * such as IA-64).
685 */
686 wmb();
687 ixgbevf_write_tail(rx_ring, i);
688 }
689 }
690
691 /**
692 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
693 * @rx_ring: rx descriptor ring packet is being transacted on
694 * @rx_desc: pointer to the EOP Rx descriptor
695 * @skb: pointer to current skb being fixed
696 *
697 * Check for corrupted packet headers caused by senders on the local L2
698 * embedded NIC switch not setting up their Tx Descriptors right. These
699 * should be very rare.
700 *
701 * Also address the case where we are pulling data in on pages only
702 * and as such no data is present in the skb header.
703 *
704 * In addition if skb is not at least 60 bytes we need to pad it so that
705 * it is large enough to qualify as a valid Ethernet frame.
706 *
707 * Returns true if an error was encountered and skb was freed.
708 **/
709 static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
710 union ixgbe_adv_rx_desc *rx_desc,
711 struct sk_buff *skb)
712 {
713 /* verify that the packet does not have any known errors */
714 if (unlikely(ixgbevf_test_staterr(rx_desc,
715 IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
716 struct net_device *netdev = rx_ring->netdev;
717
718 if (!(netdev->features & NETIF_F_RXALL)) {
719 dev_kfree_skb_any(skb);
720 return true;
721 }
722 }
723
724 /* if eth_skb_pad returns an error the skb was freed */
725 if (eth_skb_pad(skb))
726 return true;
727
728 return false;
729 }
730
731 /**
732 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
733 * @rx_ring: rx descriptor ring to store buffers on
734 * @old_buff: donor buffer to have page reused
735 *
736 * Synchronizes page for reuse by the adapter
737 **/
738 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
739 struct ixgbevf_rx_buffer *old_buff)
740 {
741 struct ixgbevf_rx_buffer *new_buff;
742 u16 nta = rx_ring->next_to_alloc;
743
744 new_buff = &rx_ring->rx_buffer_info[nta];
745
746 /* update, and store next to alloc */
747 nta++;
748 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
749
750 /* transfer page from old buffer to new buffer */
751 new_buff->page = old_buff->page;
752 new_buff->dma = old_buff->dma;
753 new_buff->page_offset = old_buff->page_offset;
754
755 /* sync the buffer for use by the device */
756 dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
757 new_buff->page_offset,
758 IXGBEVF_RX_BUFSZ,
759 DMA_FROM_DEVICE);
760 }
761
762 static inline bool ixgbevf_page_is_reserved(struct page *page)
763 {
764 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
765 }
766
767 /**
768 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
769 * @rx_ring: rx descriptor ring to transact packets on
770 * @rx_buffer: buffer containing page to add
771 * @rx_desc: descriptor containing length of buffer written by hardware
772 * @skb: sk_buff to place the data into
773 *
774 * This function will add the data contained in rx_buffer->page to the skb.
775 * This is done either through a direct copy if the data in the buffer is
776 * less than the skb header size, otherwise it will just attach the page as
777 * a frag to the skb.
778 *
779 * The function will then update the page offset if necessary and return
780 * true if the buffer can be reused by the adapter.
781 **/
782 static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
783 struct ixgbevf_rx_buffer *rx_buffer,
784 union ixgbe_adv_rx_desc *rx_desc,
785 struct sk_buff *skb)
786 {
787 struct page *page = rx_buffer->page;
788 unsigned char *va = page_address(page) + rx_buffer->page_offset;
789 unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
790 #if (PAGE_SIZE < 8192)
791 unsigned int truesize = IXGBEVF_RX_BUFSZ;
792 #else
793 unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
794 #endif
795 unsigned int pull_len;
796
797 if (unlikely(skb_is_nonlinear(skb)))
798 goto add_tail_frag;
799
800 if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
801 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
802
803 /* page is not reserved, we can reuse buffer as is */
804 if (likely(!ixgbevf_page_is_reserved(page)))
805 return true;
806
807 /* this page cannot be reused so discard it */
808 put_page(page);
809 return false;
810 }
811
812 /* we need the header to contain at least ETH_HLEN bytes, or 60 bytes
813 * when the frame is shorter than 60, so that skb_pad() can work on it.
814 */
815 pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
816
817 /* align pull length to size of long to optimize memcpy performance */
818 memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
819
820 /* update all of the pointers */
821 va += pull_len;
822 size -= pull_len;
823
824 add_tail_frag:
825 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
826 (unsigned long)va & ~PAGE_MASK, size, truesize);
827
828 /* avoid re-using remote pages */
829 if (unlikely(ixgbevf_page_is_reserved(page)))
830 return false;
831
832 #if (PAGE_SIZE < 8192)
833 /* if we are only owner of page we can reuse it */
834 if (unlikely(page_count(page) != 1))
835 return false;
836
837 /* flip page offset to other buffer */
838 rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
839
840 #else
841 /* move offset up to the next cache line */
842 rx_buffer->page_offset += truesize;
843
844 if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
845 return false;
846
847 #endif
848 /* Even if we own the page, we are not allowed to use atomic_set()
849 * This would break get_page_unless_zero() users.
850 */
851 page_ref_inc(page);
852
853 return true;
854 }
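/* Illustration (assumes the common 4 KiB PAGE_SIZE with a 2 KiB
 * IXGBEVF_RX_BUFSZ; neither value is taken from this file): the reuse path
 * above just XORs rx_buffer->page_offset with the buffer size, toggling it
 * between 0x000 and 0x800, so each page alternately backs two receive
 * buffers for as long as the driver remains the page's only owner.
 */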
855
856 static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
857 union ixgbe_adv_rx_desc *rx_desc,
858 struct sk_buff *skb)
859 {
860 struct ixgbevf_rx_buffer *rx_buffer;
861 struct page *page;
862
863 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
864 page = rx_buffer->page;
865 prefetchw(page);
866
867 if (likely(!skb)) {
868 void *page_addr = page_address(page) +
869 rx_buffer->page_offset;
870
871 /* prefetch first cache line of first page */
872 prefetch(page_addr);
873 #if L1_CACHE_BYTES < 128
874 prefetch(page_addr + L1_CACHE_BYTES);
875 #endif
876
877 /* allocate a skb to store the frags */
878 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
879 IXGBEVF_RX_HDR_SIZE);
880 if (unlikely(!skb)) {
881 rx_ring->rx_stats.alloc_rx_buff_failed++;
882 return NULL;
883 }
884
885 /* we will be copying header into skb->data in
886 * pskb_may_pull so it is in our interest to prefetch
887 * it now to avoid a possible cache miss
888 */
889 prefetchw(skb->data);
890 }
891
892 /* we are reusing so sync this buffer for CPU use */
893 dma_sync_single_range_for_cpu(rx_ring->dev,
894 rx_buffer->dma,
895 rx_buffer->page_offset,
896 IXGBEVF_RX_BUFSZ,
897 DMA_FROM_DEVICE);
898
899 /* pull page into skb */
900 if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
901 /* hand second half of page back to the ring */
902 ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
903 } else {
904 /* we are not reusing the buffer so unmap it */
905 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
906 PAGE_SIZE, DMA_FROM_DEVICE);
907 }
908
909 /* clear contents of buffer_info */
910 rx_buffer->dma = 0;
911 rx_buffer->page = NULL;
912
913 return skb;
914 }
915
916 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
917 u32 qmask)
918 {
919 struct ixgbe_hw *hw = &adapter->hw;
920
921 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
922 }
923
924 static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
925 struct ixgbevf_ring *rx_ring,
926 int budget)
927 {
928 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
929 u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
930 struct sk_buff *skb = rx_ring->skb;
931
932 while (likely(total_rx_packets < budget)) {
933 union ixgbe_adv_rx_desc *rx_desc;
934
935 /* return some buffers to hardware, one at a time is too slow */
936 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
937 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
938 cleaned_count = 0;
939 }
940
941 rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
942
943 if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
944 break;
945
946 /* This memory barrier is needed to keep us from reading
947 * any other fields out of the rx_desc until we know the
948 * RXD_STAT_DD bit is set
949 */
950 rmb();
951
952 /* retrieve a buffer from the ring */
953 skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);
954
955 /* exit if we failed to retrieve a buffer */
956 if (!skb)
957 break;
958
959 cleaned_count++;
960
961 /* fetch next buffer in frame if non-eop */
962 if (ixgbevf_is_non_eop(rx_ring, rx_desc))
963 continue;
964
965 /* verify the packet layout is correct */
966 if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
967 skb = NULL;
968 continue;
969 }
970
971 /* probably a little skewed due to removing CRC */
972 total_rx_bytes += skb->len;
973
974 /* Workaround hardware that can't do proper VEPA multicast
975 * source pruning.
976 */
977 if ((skb->pkt_type == PACKET_BROADCAST ||
978 skb->pkt_type == PACKET_MULTICAST) &&
979 ether_addr_equal(rx_ring->netdev->dev_addr,
980 eth_hdr(skb)->h_source)) {
981 dev_kfree_skb_irq(skb);
982 continue;
983 }
984
985 /* populate checksum, VLAN, and protocol */
986 ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);
987
988 ixgbevf_rx_skb(q_vector, skb);
989
990 /* reset skb pointer */
991 skb = NULL;
992
993 /* update budget accounting */
994 total_rx_packets++;
995 }
996
997 /* place incomplete frames back on ring for completion */
998 rx_ring->skb = skb;
999
1000 u64_stats_update_begin(&rx_ring->syncp);
1001 rx_ring->stats.packets += total_rx_packets;
1002 rx_ring->stats.bytes += total_rx_bytes;
1003 u64_stats_update_end(&rx_ring->syncp);
1004 q_vector->rx.total_packets += total_rx_packets;
1005 q_vector->rx.total_bytes += total_rx_bytes;
1006
1007 return total_rx_packets;
1008 }
1009
1010 /**
1011 * ixgbevf_poll - NAPI polling callback
1012 * @napi: napi struct with our devices info in it
1013 * @budget: amount of work driver is allowed to do this pass, in packets
1014 *
1015 * This function will clean one or more rings associated with a
1016 * q_vector.
1017 **/
1018 static int ixgbevf_poll(struct napi_struct *napi, int budget)
1019 {
1020 struct ixgbevf_q_vector *q_vector =
1021 container_of(napi, struct ixgbevf_q_vector, napi);
1022 struct ixgbevf_adapter *adapter = q_vector->adapter;
1023 struct ixgbevf_ring *ring;
1024 int per_ring_budget, work_done = 0;
1025 bool clean_complete = true;
1026
1027 ixgbevf_for_each_ring(ring, q_vector->tx) {
1028 if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
1029 clean_complete = false;
1030 }
1031
1032 if (budget <= 0)
1033 return budget;
1034 #ifdef CONFIG_NET_RX_BUSY_POLL
1035 if (!ixgbevf_qv_lock_napi(q_vector))
1036 return budget;
1037 #endif
1038
1039 /* attempt to distribute budget to each queue fairly, but don't allow
1040 * the budget to go below 1 because we'll exit polling
1041 */
1042 if (q_vector->rx.count > 1)
1043 per_ring_budget = max(budget/q_vector->rx.count, 1);
1044 else
1045 per_ring_budget = budget;
1046
1047 ixgbevf_for_each_ring(ring, q_vector->rx) {
1048 int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
1049 per_ring_budget);
1050 work_done += cleaned;
1051 if (cleaned >= per_ring_budget)
1052 clean_complete = false;
1053 }
1054
1055 #ifdef CONFIG_NET_RX_BUSY_POLL
1056 ixgbevf_qv_unlock_napi(q_vector);
1057 #endif
1058
1059 /* If all work not completed, return budget and keep polling */
1060 if (!clean_complete)
1061 return budget;
1062 /* all work done, exit the polling mode */
1063 napi_complete_done(napi, work_done);
1064 if (adapter->rx_itr_setting == 1)
1065 ixgbevf_set_itr(q_vector);
1066 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
1067 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
1068 ixgbevf_irq_enable_queues(adapter,
1069 BIT(q_vector->v_idx));
1070
1071 return 0;
1072 }
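/* Worked example (illustrative): with the usual NAPI budget of 64 and a
 * q_vector carrying three Rx rings, per_ring_budget = max(64 / 3, 1) = 21,
 * so each ring may clean at most 21 packets per poll.  If any ring consumes
 * its full share, clean_complete stays false and the whole budget is
 * returned so that NAPI keeps polling.
 */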
1073
1074 /**
1075 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
1076 * @q_vector: structure containing interrupt and ring information
1077 **/
1078 void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
1079 {
1080 struct ixgbevf_adapter *adapter = q_vector->adapter;
1081 struct ixgbe_hw *hw = &adapter->hw;
1082 int v_idx = q_vector->v_idx;
1083 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
1084
1085 /* set the WDIS bit to not clear the timer bits and cause an
1086 * immediate assertion of the interrupt
1087 */
1088 itr_reg |= IXGBE_EITR_CNT_WDIS;
1089
1090 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
1091 }
1092
1093 #ifdef CONFIG_NET_RX_BUSY_POLL
1094 /* must be called with local_bh_disable()d */
1095 static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
1096 {
1097 struct ixgbevf_q_vector *q_vector =
1098 container_of(napi, struct ixgbevf_q_vector, napi);
1099 struct ixgbevf_adapter *adapter = q_vector->adapter;
1100 struct ixgbevf_ring *ring;
1101 int found = 0;
1102
1103 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
1104 return LL_FLUSH_FAILED;
1105
1106 if (!ixgbevf_qv_lock_poll(q_vector))
1107 return LL_FLUSH_BUSY;
1108
1109 ixgbevf_for_each_ring(ring, q_vector->rx) {
1110 found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
1111 #ifdef BP_EXTENDED_STATS
1112 if (found)
1113 ring->stats.cleaned += found;
1114 else
1115 ring->stats.misses++;
1116 #endif
1117 if (found)
1118 break;
1119 }
1120
1121 ixgbevf_qv_unlock_poll(q_vector);
1122
1123 return found;
1124 }
1125 #endif /* CONFIG_NET_RX_BUSY_POLL */
1126
1127 /**
1128 * ixgbevf_configure_msix - Configure MSI-X hardware
1129 * @adapter: board private structure
1130 *
1131 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
1132 * interrupts.
1133 **/
1134 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
1135 {
1136 struct ixgbevf_q_vector *q_vector;
1137 int q_vectors, v_idx;
1138
1139 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1140 adapter->eims_enable_mask = 0;
1141
1142 /* Populate the IVAR table and set the ITR values to the
1143 * corresponding register.
1144 */
1145 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
1146 struct ixgbevf_ring *ring;
1147
1148 q_vector = adapter->q_vector[v_idx];
1149
1150 ixgbevf_for_each_ring(ring, q_vector->rx)
1151 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
1152
1153 ixgbevf_for_each_ring(ring, q_vector->tx)
1154 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
1155
1156 if (q_vector->tx.ring && !q_vector->rx.ring) {
1157 /* Tx only vector */
1158 if (adapter->tx_itr_setting == 1)
1159 q_vector->itr = IXGBE_12K_ITR;
1160 else
1161 q_vector->itr = adapter->tx_itr_setting;
1162 } else {
1163 /* Rx or Rx/Tx vector */
1164 if (adapter->rx_itr_setting == 1)
1165 q_vector->itr = IXGBE_20K_ITR;
1166 else
1167 q_vector->itr = adapter->rx_itr_setting;
1168 }
1169
1170 /* add q_vector eims value to global eims_enable_mask */
1171 adapter->eims_enable_mask |= BIT(v_idx);
1172
1173 ixgbevf_write_eitr(q_vector);
1174 }
1175
1176 ixgbevf_set_ivar(adapter, -1, 1, v_idx);
1177 /* setup eims_other and add value to global eims_enable_mask */
1178 adapter->eims_other = BIT(v_idx);
1179 adapter->eims_enable_mask |= adapter->eims_other;
1180 }
1181
1182 enum latency_range {
1183 lowest_latency = 0,
1184 low_latency = 1,
1185 bulk_latency = 2,
1186 latency_invalid = 255
1187 };
1188
1189 /**
1190 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
1191 * @q_vector: structure containing interrupt and ring information
1192 * @ring_container: structure containing ring performance data
1193 *
1194 * Stores a new ITR value based on packets and byte
1195 * counts during the last interrupt. The advantage of per interrupt
1196 * computation is faster updates and more accurate ITR for the current
1197 * traffic pattern. Constants in this function were computed
1198 * based on theoretical maximum wire speed and thresholds were set based
1199 * on testing data as well as attempting to minimize response time
1200 * while increasing bulk throughput.
1201 **/
1202 static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
1203 struct ixgbevf_ring_container *ring_container)
1204 {
1205 int bytes = ring_container->total_bytes;
1206 int packets = ring_container->total_packets;
1207 u32 timepassed_us;
1208 u64 bytes_perint;
1209 u8 itr_setting = ring_container->itr;
1210
1211 if (packets == 0)
1212 return;
1213
1214 /* simple throttle rate management
1215 * 0-20MB/s lowest (100000 ints/s)
1216 * 20-100MB/s low (20000 ints/s)
1217 * 100-1249MB/s bulk (12000 ints/s)
1218 */
1219 /* what was last interrupt timeslice? */
1220 timepassed_us = q_vector->itr >> 2;
1221 bytes_perint = bytes / timepassed_us; /* bytes/usec */
1222
1223 switch (itr_setting) {
1224 case lowest_latency:
1225 if (bytes_perint > 10)
1226 itr_setting = low_latency;
1227 break;
1228 case low_latency:
1229 if (bytes_perint > 20)
1230 itr_setting = bulk_latency;
1231 else if (bytes_perint <= 10)
1232 itr_setting = lowest_latency;
1233 break;
1234 case bulk_latency:
1235 if (bytes_perint <= 20)
1236 itr_setting = low_latency;
1237 break;
1238 }
1239
1240 /* clear work counters since we have the values we need */
1241 ring_container->total_bytes = 0;
1242 ring_container->total_packets = 0;
1243
1244 /* write updated itr to ring container */
1245 ring_container->itr = itr_setting;
1246 }
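/* Worked example (illustrative): if the last interrupt timeslice was 50 usec
 * (q_vector->itr >> 2 == 50) and 100000 bytes were handled during it, then
 * bytes_perint = 100000 / 50 = 2000, which is above the "> 20" threshold, so
 * a ring in low_latency is promoted to bulk_latency.  A quiet ring that
 * handled only 250 bytes in the same window (bytes_perint == 5) would drop
 * back to lowest_latency instead.
 */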
1247
1248 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
1249 {
1250 u32 new_itr = q_vector->itr;
1251 u8 current_itr;
1252
1253 ixgbevf_update_itr(q_vector, &q_vector->tx);
1254 ixgbevf_update_itr(q_vector, &q_vector->rx);
1255
1256 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
1257
1258 switch (current_itr) {
1259 /* counts and packets in update_itr are dependent on these numbers */
1260 case lowest_latency:
1261 new_itr = IXGBE_100K_ITR;
1262 break;
1263 case low_latency:
1264 new_itr = IXGBE_20K_ITR;
1265 break;
1266 case bulk_latency:
1267 new_itr = IXGBE_12K_ITR;
1268 break;
1269 default:
1270 break;
1271 }
1272
1273 if (new_itr != q_vector->itr) {
1274 /* do an exponential smoothing */
1275 new_itr = (10 * new_itr * q_vector->itr) /
1276 ((9 * new_itr) + q_vector->itr);
1277
1278 /* save the algorithm value here */
1279 q_vector->itr = new_itr;
1280
1281 ixgbevf_write_eitr(q_vector);
1282 }
1283 }
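/* Worked example (illustrative, the numbers are made up): the exponential
 * smoothing above moves the programmed interval only part of the way toward
 * the new target each pass.  With q_vector->itr == 1000 and a new target of
 * 200:
 *
 *   (10 * 200 * 1000) / ((9 * 200) + 1000) = 2000000 / 2800 ~= 714
 *
 * so the effective interrupt rate ramps toward the target gradually instead
 * of jumping there in one step.
 */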
1284
1285 static irqreturn_t ixgbevf_msix_other(int irq, void *data)
1286 {
1287 struct ixgbevf_adapter *adapter = data;
1288 struct ixgbe_hw *hw = &adapter->hw;
1289
1290 hw->mac.get_link_status = 1;
1291
1292 ixgbevf_service_event_schedule(adapter);
1293
1294 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
1295
1296 return IRQ_HANDLED;
1297 }
1298
1299 /**
1300 * ixgbevf_msix_clean_rings - MSI-X queue vector interrupt handler
1301 * @irq: unused
1302 * @data: pointer to our q_vector struct for this interrupt vector
1303 **/
1304 static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
1305 {
1306 struct ixgbevf_q_vector *q_vector = data;
1307
1308 /* EIAM disabled interrupts (on this vector) for us */
1309 if (q_vector->rx.ring || q_vector->tx.ring)
1310 napi_schedule_irqoff(&q_vector->napi);
1311
1312 return IRQ_HANDLED;
1313 }
1314
1315 static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
1316 int r_idx)
1317 {
1318 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1319
1320 a->rx_ring[r_idx]->next = q_vector->rx.ring;
1321 q_vector->rx.ring = a->rx_ring[r_idx];
1322 q_vector->rx.count++;
1323 }
1324
1325 static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
1326 int t_idx)
1327 {
1328 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1329
1330 a->tx_ring[t_idx]->next = q_vector->tx.ring;
1331 q_vector->tx.ring = a->tx_ring[t_idx];
1332 q_vector->tx.count++;
1333 }
1334
1335 /**
1336 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
1337 * @adapter: board private structure to initialize
1338 *
1339 * This function maps descriptor rings to the queue-specific vectors
1340 * we were allotted through the MSI-X enabling code. Ideally, we'd have
1341 * one vector per ring/queue, but on a constrained vector budget, we
1342 * group the rings as "efficiently" as possible. You would add new
1343 * mapping configurations in here.
1344 **/
1345 static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
1346 {
1347 int q_vectors;
1348 int v_start = 0;
1349 int rxr_idx = 0, txr_idx = 0;
1350 int rxr_remaining = adapter->num_rx_queues;
1351 int txr_remaining = adapter->num_tx_queues;
1352 int i, j;
1353 int rqpv, tqpv;
1354
1355 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1356
1357 /* The ideal configuration...
1358 * We have enough vectors to map one per queue.
1359 */
1360 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
1361 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
1362 map_vector_to_rxq(adapter, v_start, rxr_idx);
1363
1364 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
1365 map_vector_to_txq(adapter, v_start, txr_idx);
1366 return 0;
1367 }
1368
1369 /* If we don't have enough vectors for a 1-to-1
1370 * mapping, we'll have to group them so there are
1371 * multiple queues per vector.
1372 */
1373 /* Re-adjusting *qpv takes care of the remainder. */
1374 for (i = v_start; i < q_vectors; i++) {
1375 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
1376 for (j = 0; j < rqpv; j++) {
1377 map_vector_to_rxq(adapter, i, rxr_idx);
1378 rxr_idx++;
1379 rxr_remaining--;
1380 }
1381 }
1382 for (i = v_start; i < q_vectors; i++) {
1383 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
1384 for (j = 0; j < tqpv; j++) {
1385 map_vector_to_txq(adapter, i, txr_idx);
1386 txr_idx++;
1387 txr_remaining--;
1388 }
1389 }
1390
1391 return 0;
1392 }
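/* Worked example (illustrative): with 3 Rx queues, 3 Tx queues and only two
 * queue vectors, the DIV_ROUND_UP() spreading above yields
 *
 *   vector 0: Rx 0, Rx 1, Tx 0, Tx 1
 *   vector 1: Rx 2, Tx 2
 *
 * i.e. ceil(remaining / vectors_left) queues are taken per vector, so any
 * remainder lands on the lower-numbered vectors.
 */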
1393
1394 /**
1395 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
1396 * @adapter: board private structure
1397 *
1398 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
1399 * interrupts from the kernel.
1400 **/
1401 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1402 {
1403 struct net_device *netdev = adapter->netdev;
1404 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1405 int vector, err;
1406 int ri = 0, ti = 0;
1407
1408 for (vector = 0; vector < q_vectors; vector++) {
1409 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
1410 struct msix_entry *entry = &adapter->msix_entries[vector];
1411
1412 if (q_vector->tx.ring && q_vector->rx.ring) {
1413 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1414 "%s-%s-%d", netdev->name, "TxRx", ri++);
1415 ti++;
1416 } else if (q_vector->rx.ring) {
1417 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1418 "%s-%s-%d", netdev->name, "rx", ri++);
1419 } else if (q_vector->tx.ring) {
1420 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1421 "%s-%s-%d", netdev->name, "tx", ti++);
1422 } else {
1423 /* skip this unused q_vector */
1424 continue;
1425 }
1426 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
1427 q_vector->name, q_vector);
1428 if (err) {
1429 hw_dbg(&adapter->hw,
1430 "request_irq failed for MSIX interrupt Error: %d\n",
1431 err);
1432 goto free_queue_irqs;
1433 }
1434 }
1435
1436 err = request_irq(adapter->msix_entries[vector].vector,
1437 &ixgbevf_msix_other, 0, netdev->name, adapter);
1438 if (err) {
1439 hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
1440 err);
1441 goto free_queue_irqs;
1442 }
1443
1444 return 0;
1445
1446 free_queue_irqs:
1447 while (vector) {
1448 vector--;
1449 free_irq(adapter->msix_entries[vector].vector,
1450 adapter->q_vector[vector]);
1451 }
1452 /* This failure is non-recoverable - it indicates the system is
1453 * out of MSIX vector resources and the VF driver cannot run
1454 * without them. Set the number of msix vectors to zero
1455 * indicating that not enough can be allocated. The error
1456 * will be returned to the user indicating device open failed.
1457 * Any further attempts to force the driver to open will also
1458 * fail. The only way to recover is to unload the driver and
1459 * reload it again. If the system has recovered some MSIX
1460 * vectors then it may succeed.
1461 */
1462 adapter->num_msix_vectors = 0;
1463 return err;
1464 }
1465
1466 static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
1467 {
1468 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1469
1470 for (i = 0; i < q_vectors; i++) {
1471 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
1472
1473 q_vector->rx.ring = NULL;
1474 q_vector->tx.ring = NULL;
1475 q_vector->rx.count = 0;
1476 q_vector->tx.count = 0;
1477 }
1478 }
1479
1480 /**
1481 * ixgbevf_request_irq - initialize interrupts
1482 * @adapter: board private structure
1483 *
1484 * Attempts to configure interrupts using the best available
1485 * capabilities of the hardware and kernel.
1486 **/
1487 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1488 {
1489 int err = ixgbevf_request_msix_irqs(adapter);
1490
1491 if (err)
1492 hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);
1493
1494 return err;
1495 }
1496
1497 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1498 {
1499 int i, q_vectors;
1500
1501 q_vectors = adapter->num_msix_vectors;
1502 i = q_vectors - 1;
1503
1504 free_irq(adapter->msix_entries[i].vector, adapter);
1505 i--;
1506
1507 for (; i >= 0; i--) {
1508 /* free only the irqs that were actually requested */
1509 if (!adapter->q_vector[i]->rx.ring &&
1510 !adapter->q_vector[i]->tx.ring)
1511 continue;
1512
1513 free_irq(adapter->msix_entries[i].vector,
1514 adapter->q_vector[i]);
1515 }
1516
1517 ixgbevf_reset_q_vectors(adapter);
1518 }
1519
1520 /**
1521 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1522 * @adapter: board private structure
1523 **/
1524 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1525 {
1526 struct ixgbe_hw *hw = &adapter->hw;
1527 int i;
1528
1529 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1530 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1531 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1532
1533 IXGBE_WRITE_FLUSH(hw);
1534
1535 for (i = 0; i < adapter->num_msix_vectors; i++)
1536 synchronize_irq(adapter->msix_entries[i].vector);
1537 }
1538
1539 /**
1540 * ixgbevf_irq_enable - Enable default interrupt generation settings
1541 * @adapter: board private structure
1542 **/
1543 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1544 {
1545 struct ixgbe_hw *hw = &adapter->hw;
1546
1547 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1548 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1549 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1550 }
1551
1552 /**
1553 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
1554 * @adapter: board private structure
1555 * @ring: structure containing ring specific data
1556 *
1557 * Configure the Tx descriptor ring after a reset.
1558 **/
1559 static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1560 struct ixgbevf_ring *ring)
1561 {
1562 struct ixgbe_hw *hw = &adapter->hw;
1563 u64 tdba = ring->dma;
1564 int wait_loop = 10;
1565 u32 txdctl = IXGBE_TXDCTL_ENABLE;
1566 u8 reg_idx = ring->reg_idx;
1567
1568 /* disable queue to avoid issues while updating state */
1569 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1570 IXGBE_WRITE_FLUSH(hw);
1571
1572 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1573 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1574 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1575 ring->count * sizeof(union ixgbe_adv_tx_desc));
1576
1577 /* disable head writeback */
1578 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1579 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1580
1581 /* enable relaxed ordering */
1582 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1583 (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1584 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1585
1586 /* reset head and tail pointers */
1587 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1588 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
1589 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
1590
1591 /* reset ntu and ntc to place SW in sync with hardware */
1592 ring->next_to_clean = 0;
1593 ring->next_to_use = 0;
1594
1595 /* In order to avoid issues, WTHRESH + PTHRESH should always be less
1596 * than or equal to the number of on-chip descriptors, which is
1597 * currently 40.
1598 */
1599 txdctl |= (8 << 16); /* WTHRESH = 8 */
1600
1601 /* Setting PTHRESH to 32 improves performance */
1602 txdctl |= (1u << 8) | /* HTHRESH = 1 */
1603 32; /* PTHRESH = 32 */
1604
1605 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
1606
1607 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1608
1609 /* poll to verify queue is enabled */
1610 do {
1611 usleep_range(1000, 2000);
1612 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1613 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1614 if (!wait_loop)
1615 pr_err("Could not enable Tx Queue %d\n", reg_idx);
1616 }
1617
1618 /**
1619 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1620 * @adapter: board private structure
1621 *
1622 * Configure the Tx unit of the MAC after a reset.
1623 **/
1624 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1625 {
1626 u32 i;
1627
1628 /* Setup the HW Tx Head and Tail descriptor pointers */
1629 for (i = 0; i < adapter->num_tx_queues; i++)
1630 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
1631 }
1632
1633 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1634
1635 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1636 {
1637 struct ixgbe_hw *hw = &adapter->hw;
1638 u32 srrctl;
1639
1640 srrctl = IXGBE_SRRCTL_DROP_EN;
1641
1642 srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
1643 srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1644 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1645
1646 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1647 }
1648
1649 static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1650 {
1651 struct ixgbe_hw *hw = &adapter->hw;
1652
1653 /* PSRTYPE must be initialized in 82599 */
1654 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1655 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1656 IXGBE_PSRTYPE_L2HDR;
1657
1658 if (adapter->num_rx_queues > 1)
1659 psrtype |= BIT(29);
1660
1661 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1662 }
1663
1664 #define IXGBEVF_MAX_RX_DESC_POLL 10
1665 static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1666 struct ixgbevf_ring *ring)
1667 {
1668 struct ixgbe_hw *hw = &adapter->hw;
1669 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1670 u32 rxdctl;
1671 u8 reg_idx = ring->reg_idx;
1672
1673 if (IXGBE_REMOVED(hw->hw_addr))
1674 return;
1675 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1676 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1677
1678 /* write value back with RXDCTL.ENABLE bit cleared */
1679 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1680
1681 /* the hardware may take up to 100us to really disable the Rx queue */
1682 do {
1683 udelay(10);
1684 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1685 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1686
1687 if (!wait_loop)
1688 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
1689 reg_idx);
1690 }
1691
1692 static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1693 struct ixgbevf_ring *ring)
1694 {
1695 struct ixgbe_hw *hw = &adapter->hw;
1696 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1697 u32 rxdctl;
1698 u8 reg_idx = ring->reg_idx;
1699
1700 if (IXGBE_REMOVED(hw->hw_addr))
1701 return;
1702 do {
1703 usleep_range(1000, 2000);
1704 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1705 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1706
1707 if (!wait_loop)
1708 pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
1709 reg_idx);
1710 }
1711
1712 static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
1713 {
1714 struct ixgbe_hw *hw = &adapter->hw;
1715 u32 vfmrqc = 0, vfreta = 0;
1716 u16 rss_i = adapter->num_rx_queues;
1717 u8 i, j;
1718
1719 /* Fill out hash function seeds */
1720 netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
1721 for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
1722 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]);
1723
1724 for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
1725 if (j == rss_i)
1726 j = 0;
1727
1728 adapter->rss_indir_tbl[i] = j;
1729
1730 vfreta |= j << (i & 0x3) * 8;
1731 if ((i & 3) == 3) {
1732 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
1733 vfreta = 0;
1734 }
1735 }
1736
1737 /* Perform hash on these packet types */
1738 vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
1739 IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
1740 IXGBE_VFMRQC_RSS_FIELD_IPV6 |
1741 IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
1742
1743 vfmrqc |= IXGBE_VFMRQC_RSSEN;
1744
1745 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
1746 }
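/* Worked example (illustrative): each 32-bit VFRETA register holds four
 * one-byte redirection entries.  With num_rx_queues == 2 the table
 * alternates 0,1,0,1,..., so the first register written above is
 *
 *   (0 << 0) | (1 << 8) | (0 << 16) | (1 << 24) = 0x01000100
 *
 * and the same pattern repeats across all IXGBEVF_X550_VFRETA_SIZE entries.
 */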
1747
1748 static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1749 struct ixgbevf_ring *ring)
1750 {
1751 struct ixgbe_hw *hw = &adapter->hw;
1752 u64 rdba = ring->dma;
1753 u32 rxdctl;
1754 u8 reg_idx = ring->reg_idx;
1755
1756 /* disable queue to avoid issues while updating state */
1757 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1758 ixgbevf_disable_rx_queue(adapter, ring);
1759
1760 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1761 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1762 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1763 ring->count * sizeof(union ixgbe_adv_rx_desc));
1764
1765 #ifndef CONFIG_SPARC
1766 /* enable relaxed ordering */
1767 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1768 IXGBE_DCA_RXCTRL_DESC_RRO_EN);
1769 #else
1770 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1771 IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1772 IXGBE_DCA_RXCTRL_DATA_WRO_EN);
1773 #endif
1774
1775 /* reset head and tail pointers */
1776 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1777 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
1778 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
1779
1780 /* reset ntu and ntc to place SW in sync with hardware */
1781 ring->next_to_clean = 0;
1782 ring->next_to_use = 0;
1783 ring->next_to_alloc = 0;
1784
1785 ixgbevf_configure_srrctl(adapter, reg_idx);
1786
1787 /* allow any size packet since we can handle overflow */
1788 rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
1789
1790 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1791 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1792
1793 ixgbevf_rx_desc_queue_enable(adapter, ring);
1794 ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
1795 }
1796
1797 /**
1798 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1799 * @adapter: board private structure
1800 *
1801 * Configure the Rx unit of the MAC after a reset.
1802 **/
1803 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1804 {
1805 struct ixgbe_hw *hw = &adapter->hw;
1806 struct net_device *netdev = adapter->netdev;
1807 int i, ret;
1808
1809 ixgbevf_setup_psrtype(adapter);
1810 if (hw->mac.type >= ixgbe_mac_X550_vf)
1811 ixgbevf_setup_vfmrqc(adapter);
1812
1813 /* notify the PF of our intent to use this size of frame */
1814 ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
1815 if (ret)
1816 dev_err(&adapter->pdev->dev,
1817 "Failed to set MTU at %d\n", netdev->mtu);
1818
1819 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1820 * the Base and Length of the Rx Descriptor Ring
1821 */
1822 for (i = 0; i < adapter->num_rx_queues; i++)
1823 ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
1824 }
1825
1826 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
1827 __be16 proto, u16 vid)
1828 {
1829 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1830 struct ixgbe_hw *hw = &adapter->hw;
1831 int err;
1832
1833 spin_lock_bh(&adapter->mbx_lock);
1834
1835 /* add VID to filter table */
1836 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
1837
1838 spin_unlock_bh(&adapter->mbx_lock);
1839
1840 /* translate error return types so error makes sense */
1841 if (err == IXGBE_ERR_MBX)
1842 return -EIO;
1843
1844 if (err == IXGBE_ERR_INVALID_ARGUMENT)
1845 return -EACCES;
1846
1847 set_bit(vid, adapter->active_vlans);
1848
1849 return err;
1850 }
1851
1852 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
1853 __be16 proto, u16 vid)
1854 {
1855 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1856 struct ixgbe_hw *hw = &adapter->hw;
1857 int err;
1858
1859 spin_lock_bh(&adapter->mbx_lock);
1860
1861 /* remove VID from filter table */
1862 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
1863
1864 spin_unlock_bh(&adapter->mbx_lock);
1865
1866 clear_bit(vid, adapter->active_vlans);
1867
1868 return err;
1869 }
1870
1871 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1872 {
1873 u16 vid;
1874
1875 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1876 ixgbevf_vlan_rx_add_vid(adapter->netdev,
1877 htons(ETH_P_8021Q), vid);
1878 }
1879
1880 static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1881 {
1882 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1883 struct ixgbe_hw *hw = &adapter->hw;
1884 int count = 0;
1885
1886 if ((netdev_uc_count(netdev)) > 10) {
1887 pr_err("Too many unicast filters - No Space\n");
1888 return -ENOSPC;
1889 }
1890
1891 if (!netdev_uc_empty(netdev)) {
1892 struct netdev_hw_addr *ha;
1893
1894 netdev_for_each_uc_addr(ha, netdev) {
1895 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1896 udelay(200);
1897 }
1898 } else {
1899 /* If the list is empty then send message to PF driver to
1900 * clear all MAC VLANs on this VF.
1901 */
1902 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1903 }
1904
1905 return count;
1906 }
1907
1908 /**
1909 * ixgbevf_set_rx_mode - Multicast and unicast set
1910 * @netdev: network interface device structure
1911 *
1912  * The set_rx_mode entry point is called whenever the multicast address
1913 * list, unicast address list or the network interface flags are updated.
1914 * This routine is responsible for configuring the hardware for proper
1915 * multicast mode and configuring requested unicast filters.
1916 **/
1917 static void ixgbevf_set_rx_mode(struct net_device *netdev)
1918 {
1919 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1920 struct ixgbe_hw *hw = &adapter->hw;
1921 unsigned int flags = netdev->flags;
1922 int xcast_mode;
1923
1924 xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI :
1925 (flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
1926 IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;
1927
1928 spin_lock_bh(&adapter->mbx_lock);
1929
1930 hw->mac.ops.update_xcast_mode(hw, xcast_mode);
1931
1932 /* reprogram multicast list */
1933 hw->mac.ops.update_mc_addr_list(hw, netdev);
1934
1935 ixgbevf_write_uc_addr_list(netdev);
1936
1937 spin_unlock_bh(&adapter->mbx_lock);
1938 }
1939
1940 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1941 {
1942 int q_idx;
1943 struct ixgbevf_q_vector *q_vector;
1944 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1945
1946 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1947 q_vector = adapter->q_vector[q_idx];
1948 #ifdef CONFIG_NET_RX_BUSY_POLL
1949 ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
1950 #endif
1951 napi_enable(&q_vector->napi);
1952 }
1953 }
1954
1955 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1956 {
1957 int q_idx;
1958 struct ixgbevf_q_vector *q_vector;
1959 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1960
1961 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1962 q_vector = adapter->q_vector[q_idx];
1963 napi_disable(&q_vector->napi);
1964 #ifdef CONFIG_NET_RX_BUSY_POLL
1965 while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
1966 pr_info("QV %d locked\n", q_idx);
1967 usleep_range(1000, 20000);
1968 }
1969 #endif /* CONFIG_NET_RX_BUSY_POLL */
1970 }
1971 }
1972
1973 static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1974 {
1975 struct ixgbe_hw *hw = &adapter->hw;
1976 unsigned int def_q = 0;
1977 unsigned int num_tcs = 0;
1978 unsigned int num_rx_queues = adapter->num_rx_queues;
1979 unsigned int num_tx_queues = adapter->num_tx_queues;
1980 int err;
1981
1982 spin_lock_bh(&adapter->mbx_lock);
1983
1984 /* fetch queue configuration from the PF */
1985 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1986
1987 spin_unlock_bh(&adapter->mbx_lock);
1988
1989 if (err)
1990 return err;
1991
1992 if (num_tcs > 1) {
1993 /* we need only one Tx queue */
1994 num_tx_queues = 1;
1995
1996 /* update default Tx ring register index */
1997 adapter->tx_ring[0]->reg_idx = def_q;
1998
1999 /* we need as many queues as traffic classes */
2000 num_rx_queues = num_tcs;
2001 }
2002
2003 /* if we have a bad config abort request queue reset */
2004 if ((adapter->num_rx_queues != num_rx_queues) ||
2005 (adapter->num_tx_queues != num_tx_queues)) {
2006 /* force mailbox timeout to prevent further messages */
2007 hw->mbx.timeout = 0;
2008
2009 /* wait for watchdog to come around and bail us out */
2010 set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
2011 }
2012
2013 return 0;
2014 }
2015
2016 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
2017 {
2018 ixgbevf_configure_dcb(adapter);
2019
2020 ixgbevf_set_rx_mode(adapter->netdev);
2021
2022 ixgbevf_restore_vlan(adapter);
2023
2024 ixgbevf_configure_tx(adapter);
2025 ixgbevf_configure_rx(adapter);
2026 }
2027
2028 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
2029 {
2030 /* Only save pre-reset stats if there are some */
2031 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
2032 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
2033 adapter->stats.base_vfgprc;
2034 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
2035 adapter->stats.base_vfgptc;
2036 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
2037 adapter->stats.base_vfgorc;
2038 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
2039 adapter->stats.base_vfgotc;
2040 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
2041 adapter->stats.base_vfmprc;
2042 }
2043 }
2044
2045 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
2046 {
2047 struct ixgbe_hw *hw = &adapter->hw;
2048
2049 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2050 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2051 adapter->stats.last_vfgorc |=
2052 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2053 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2054 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2055 adapter->stats.last_vfgotc |=
2056 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2057 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2058
2059 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
2060 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
2061 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
2062 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
2063 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
2064 }
2065
2066 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
2067 {
2068 struct ixgbe_hw *hw = &adapter->hw;
2069 int api[] = { ixgbe_mbox_api_12,
2070 ixgbe_mbox_api_11,
2071 ixgbe_mbox_api_10,
2072 ixgbe_mbox_api_unknown };
2073 int err, idx = 0;
2074
2075 spin_lock_bh(&adapter->mbx_lock);
2076
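/* Try mailbox API versions from newest to oldest and stop at the first
 * one the PF accepts.
 */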
2077 while (api[idx] != ixgbe_mbox_api_unknown) {
2078 err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
2079 if (!err)
2080 break;
2081 idx++;
2082 }
2083
2084 spin_unlock_bh(&adapter->mbx_lock);
2085 }
2086
2087 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
2088 {
2089 struct net_device *netdev = adapter->netdev;
2090 struct ixgbe_hw *hw = &adapter->hw;
2091
2092 ixgbevf_configure_msix(adapter);
2093
2094 spin_lock_bh(&adapter->mbx_lock);
2095
2096 if (is_valid_ether_addr(hw->mac.addr))
2097 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2098 else
2099 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
2100
2101 spin_unlock_bh(&adapter->mbx_lock);
2102
2103 smp_mb__before_atomic();
2104 clear_bit(__IXGBEVF_DOWN, &adapter->state);
2105 ixgbevf_napi_enable_all(adapter);
2106
2107 /* clear any pending interrupts, may auto mask */
2108 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2109 ixgbevf_irq_enable(adapter);
2110
2111 /* enable transmits */
2112 netif_tx_start_all_queues(netdev);
2113
2114 ixgbevf_save_reset_stats(adapter);
2115 ixgbevf_init_last_counter_stats(adapter);
2116
2117 hw->mac.get_link_status = 1;
2118 mod_timer(&adapter->service_timer, jiffies);
2119 }
2120
2121 void ixgbevf_up(struct ixgbevf_adapter *adapter)
2122 {
2123 ixgbevf_configure(adapter);
2124
2125 ixgbevf_up_complete(adapter);
2126 }
2127
2128 /**
2129 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
2130 * @rx_ring: ring to free buffers from
2131 **/
2132 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
2133 {
2134 struct device *dev = rx_ring->dev;
2135 unsigned long size;
2136 unsigned int i;
2137
2138 /* Free Rx ring sk_buff */
2139 if (rx_ring->skb) {
2140 dev_kfree_skb(rx_ring->skb);
2141 rx_ring->skb = NULL;
2142 }
2143
2144 /* ring already cleared, nothing to do */
2145 if (!rx_ring->rx_buffer_info)
2146 return;
2147
2148 /* Free all the Rx ring pages */
2149 for (i = 0; i < rx_ring->count; i++) {
2150 struct ixgbevf_rx_buffer *rx_buffer;
2151
2152 rx_buffer = &rx_ring->rx_buffer_info[i];
2153 if (rx_buffer->dma)
2154 dma_unmap_page(dev, rx_buffer->dma,
2155 PAGE_SIZE, DMA_FROM_DEVICE);
2156 rx_buffer->dma = 0;
2157 if (rx_buffer->page)
2158 __free_page(rx_buffer->page);
2159 rx_buffer->page = NULL;
2160 }
2161
2162 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2163 memset(rx_ring->rx_buffer_info, 0, size);
2164
2165 /* Zero out the descriptor ring */
2166 memset(rx_ring->desc, 0, rx_ring->size);
2167 }
2168
2169 /**
2170 * ixgbevf_clean_tx_ring - Free Tx Buffers
2171 * @tx_ring: ring to be cleaned
2172 **/
2173 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
2174 {
2175 struct ixgbevf_tx_buffer *tx_buffer_info;
2176 unsigned long size;
2177 unsigned int i;
2178
2179 if (!tx_ring->tx_buffer_info)
2180 return;
2181
2182 /* Free all the Tx ring sk_buffs */
2183 for (i = 0; i < tx_ring->count; i++) {
2184 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2185 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
2186 }
2187
2188 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2189 memset(tx_ring->tx_buffer_info, 0, size);
2190
2191 memset(tx_ring->desc, 0, tx_ring->size);
2192 }
2193
2194 /**
2195 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
2196 * @adapter: board private structure
2197 **/
2198 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
2199 {
2200 int i;
2201
2202 for (i = 0; i < adapter->num_rx_queues; i++)
2203 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
2204 }
2205
2206 /**
2207 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
2208 * @adapter: board private structure
2209 **/
2210 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
2211 {
2212 int i;
2213
2214 for (i = 0; i < adapter->num_tx_queues; i++)
2215 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
2216 }
2217
2218 void ixgbevf_down(struct ixgbevf_adapter *adapter)
2219 {
2220 struct net_device *netdev = adapter->netdev;
2221 struct ixgbe_hw *hw = &adapter->hw;
2222 int i;
2223
2224 /* signal that we are down to the interrupt handler */
2225 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
2226 return; /* do nothing if already down */
2227
2228 /* disable all enabled Rx queues */
2229 for (i = 0; i < adapter->num_rx_queues; i++)
2230 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
2231
2232 usleep_range(10000, 20000);
2233
2234 netif_tx_stop_all_queues(netdev);
2235
2236 /* call carrier off first to avoid false dev_watchdog timeouts */
2237 netif_carrier_off(netdev);
2238 netif_tx_disable(netdev);
2239
2240 ixgbevf_irq_disable(adapter);
2241
2242 ixgbevf_napi_disable_all(adapter);
2243
2244 del_timer_sync(&adapter->service_timer);
2245
2246 /* disable transmits in the hardware now that interrupts are off */
2247 for (i = 0; i < adapter->num_tx_queues; i++) {
2248 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
2249
2250 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2251 IXGBE_TXDCTL_SWFLSH);
2252 }
2253
2254 if (!pci_channel_offline(adapter->pdev))
2255 ixgbevf_reset(adapter);
2256
2257 ixgbevf_clean_all_tx_rings(adapter);
2258 ixgbevf_clean_all_rx_rings(adapter);
2259 }
2260
2261 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2262 {
2263 WARN_ON(in_interrupt());
2264
2265 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2266 msleep(1);
2267
2268 ixgbevf_down(adapter);
2269 ixgbevf_up(adapter);
2270
2271 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
2272 }
2273
2274 void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2275 {
2276 struct ixgbe_hw *hw = &adapter->hw;
2277 struct net_device *netdev = adapter->netdev;
2278
2279 if (hw->mac.ops.reset_hw(hw)) {
2280 hw_dbg(hw, "PF still resetting\n");
2281 } else {
2282 hw->mac.ops.init_hw(hw);
2283 ixgbevf_negotiate_api(adapter);
2284 }
2285
2286 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
2287 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
2288 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2289 }
2290
2291 adapter->last_reset = jiffies;
2292 }
2293
2294 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
2295 int vectors)
2296 {
2297 int vector_threshold;
2298
2299 /* We'll want at least 2 (vector_threshold):
2300 * 1) TxQ[0] + RxQ[0] handler
2301 * 2) Other (Link Status Change, etc.)
2302 */
2303 vector_threshold = MIN_MSIX_COUNT;
2304
2305 /* The more we get, the more we will assign to Tx/Rx Cleanup
2306 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2307 * Right now, we simply care about how many we'll get; we'll
2308 * set them up later while requesting irq's.
2309 */
2310 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2311 vector_threshold, vectors);
2312
2313 if (vectors < 0) {
2314 dev_err(&adapter->pdev->dev,
2315 "Unable to allocate MSI-X interrupts\n");
2316 kfree(adapter->msix_entries);
2317 adapter->msix_entries = NULL;
2318 return vectors;
2319 }
2320
2321 /* Adjust for only the vectors we'll use, which is minimum
2322 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
2323 * vectors we were allocated.
2324 */
2325 adapter->num_msix_vectors = vectors;
2326
2327 return 0;
2328 }
2329
2330 /**
2331 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
2332 * @adapter: board private structure to initialize
2333 *
2334 * This is the top level queue allocation routine. The order here is very
2335 * important, starting with the "most" number of features turned on at once,
2336 * and ending with the smallest set of features. This way large combinations
2337 * can be allocated if they're turned on, and smaller combinations are the
2338 * fallthrough conditions.
2339 *
2340 **/
2341 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2342 {
2343 struct ixgbe_hw *hw = &adapter->hw;
2344 unsigned int def_q = 0;
2345 unsigned int num_tcs = 0;
2346 int err;
2347
2348 /* Start with base case */
2349 adapter->num_rx_queues = 1;
2350 adapter->num_tx_queues = 1;
2351
2352 spin_lock_bh(&adapter->mbx_lock);
2353
2354 /* fetch queue configuration from the PF */
2355 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2356
2357 spin_unlock_bh(&adapter->mbx_lock);
2358
2359 if (err)
2360 return;
2361
2362 /* we need as many queues as traffic classes */
2363 if (num_tcs > 1) {
2364 adapter->num_rx_queues = num_tcs;
2365 } else {
2366 u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
2367
2368 switch (hw->api_version) {
2369 case ixgbe_mbox_api_11:
2370 case ixgbe_mbox_api_12:
2371 adapter->num_rx_queues = rss;
2372 adapter->num_tx_queues = rss;
2373 default:
2374 break;
2375 }
2376 }
2377 }
2378
2379 /**
2380 * ixgbevf_alloc_queues - Allocate memory for all rings
2381 * @adapter: board private structure to initialize
2382 *
2383 * We allocate one ring per queue at run-time since we don't know the
2384  * number of queues at compile-time. Ring counts are taken from the
2385  * adapter's tx_ring_count and rx_ring_count settings.
2386 **/
2387 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
2388 {
2389 struct ixgbevf_ring *ring;
2390 int rx = 0, tx = 0;
2391
2392 for (; tx < adapter->num_tx_queues; tx++) {
2393 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2394 if (!ring)
2395 goto err_allocation;
2396
2397 ring->dev = &adapter->pdev->dev;
2398 ring->netdev = adapter->netdev;
2399 ring->count = adapter->tx_ring_count;
2400 ring->queue_index = tx;
2401 ring->reg_idx = tx;
2402
2403 adapter->tx_ring[tx] = ring;
2404 }
2405
2406 for (; rx < adapter->num_rx_queues; rx++) {
2407 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2408 if (!ring)
2409 goto err_allocation;
2410
2411 ring->dev = &adapter->pdev->dev;
2412 ring->netdev = adapter->netdev;
2413
2414 ring->count = adapter->rx_ring_count;
2415 ring->queue_index = rx;
2416 ring->reg_idx = rx;
2417
2418 adapter->rx_ring[rx] = ring;
2419 }
2420
2421 return 0;
2422
2423 err_allocation:
2424 while (tx) {
2425 kfree(adapter->tx_ring[--tx]);
2426 adapter->tx_ring[tx] = NULL;
2427 }
2428
2429 while (rx) {
2430 kfree(adapter->rx_ring[--rx]);
2431 adapter->rx_ring[rx] = NULL;
2432 }
2433 return -ENOMEM;
2434 }
2435
2436 /**
2437 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2438 * @adapter: board private structure to initialize
2439 *
2440 * Attempt to configure the interrupts using the best available
2441 * capabilities of the hardware and the kernel.
2442 **/
2443 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2444 {
2445 struct net_device *netdev = adapter->netdev;
2446 int err;
2447 int vector, v_budget;
2448
2449 /* It's easy to be greedy for MSI-X vectors, but it really
2450 * doesn't do us much good if we have a lot more vectors
2451 * than CPU's. So let's be conservative and only ask for
2452 * (roughly) the same number of vectors as there are CPU's.
2453 * The default is to use pairs of vectors.
2454 */
2455 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2456 v_budget = min_t(int, v_budget, num_online_cpus());
2457 v_budget += NON_Q_VECTORS;
2458
2459 /* A failure in MSI-X entry allocation isn't fatal, but it does
2460 * mean we disable MSI-X capabilities of the adapter.
2461 */
2462 adapter->msix_entries = kcalloc(v_budget,
2463 sizeof(struct msix_entry), GFP_KERNEL);
2464 if (!adapter->msix_entries)
2465 return -ENOMEM;
2466
2467 for (vector = 0; vector < v_budget; vector++)
2468 adapter->msix_entries[vector].entry = vector;
2469
2470 err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
2471 if (err)
2472 return err;
2473
2474 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
2475 if (err)
2476 return err;
2477
2478 return netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
2479 }
2480
2481 /**
2482 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2483 * @adapter: board private structure to initialize
2484 *
2485 * We allocate one q_vector per queue interrupt. If allocation fails we
2486 * return -ENOMEM.
2487 **/
2488 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2489 {
2490 int q_idx, num_q_vectors;
2491 struct ixgbevf_q_vector *q_vector;
2492
2493 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2494
2495 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2496 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
2497 if (!q_vector)
2498 goto err_out;
2499 q_vector->adapter = adapter;
2500 q_vector->v_idx = q_idx;
2501 netif_napi_add(adapter->netdev, &q_vector->napi,
2502 ixgbevf_poll, 64);
2503 adapter->q_vector[q_idx] = q_vector;
2504 }
2505
2506 return 0;
2507
2508 err_out:
2509 while (q_idx) {
2510 q_idx--;
2511 q_vector = adapter->q_vector[q_idx];
2512 #ifdef CONFIG_NET_RX_BUSY_POLL
2513 napi_hash_del(&q_vector->napi);
2514 #endif
2515 netif_napi_del(&q_vector->napi);
2516 kfree(q_vector);
2517 adapter->q_vector[q_idx] = NULL;
2518 }
2519 return -ENOMEM;
2520 }
2521
2522 /**
2523 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2524 * @adapter: board private structure to initialize
2525 *
2526 * This function frees the memory allocated to the q_vectors. In addition if
2527 * NAPI is enabled it will delete any references to the NAPI struct prior
2528 * to freeing the q_vector.
2529 **/
2530 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2531 {
2532 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2533
2534 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2535 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2536
2537 adapter->q_vector[q_idx] = NULL;
2538 #ifdef CONFIG_NET_RX_BUSY_POLL
2539 napi_hash_del(&q_vector->napi);
2540 #endif
2541 netif_napi_del(&q_vector->napi);
2542 kfree(q_vector);
2543 }
2544 }
2545
2546 /**
2547 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2548 * @adapter: board private structure
2549 *
2550 **/
2551 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2552 {
2553 pci_disable_msix(adapter->pdev);
2554 kfree(adapter->msix_entries);
2555 adapter->msix_entries = NULL;
2556 }
2557
2558 /**
2559 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2560 * @adapter: board private structure to initialize
2561 *
2562 **/
2563 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2564 {
2565 int err;
2566
2567 /* Number of supported queues */
2568 ixgbevf_set_num_queues(adapter);
2569
2570 err = ixgbevf_set_interrupt_capability(adapter);
2571 if (err) {
2572 hw_dbg(&adapter->hw,
2573 "Unable to setup interrupt capabilities\n");
2574 goto err_set_interrupt;
2575 }
2576
2577 err = ixgbevf_alloc_q_vectors(adapter);
2578 if (err) {
2579 hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
2580 goto err_alloc_q_vectors;
2581 }
2582
2583 err = ixgbevf_alloc_queues(adapter);
2584 if (err) {
2585 pr_err("Unable to allocate memory for queues\n");
2586 goto err_alloc_queues;
2587 }
2588
2589 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
2590 (adapter->num_rx_queues > 1) ? "Enabled" :
2591 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2592
2593 set_bit(__IXGBEVF_DOWN, &adapter->state);
2594
2595 return 0;
2596 err_alloc_queues:
2597 ixgbevf_free_q_vectors(adapter);
2598 err_alloc_q_vectors:
2599 ixgbevf_reset_interrupt_capability(adapter);
2600 err_set_interrupt:
2601 return err;
2602 }
2603
2604 /**
2605 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2606 * @adapter: board private structure to clear interrupt scheme on
2607 *
2608 * We go through and clear interrupt specific resources and reset the structure
2609 * to pre-load conditions
2610 **/
2611 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2612 {
2613 int i;
2614
2615 for (i = 0; i < adapter->num_tx_queues; i++) {
2616 kfree(adapter->tx_ring[i]);
2617 adapter->tx_ring[i] = NULL;
2618 }
2619 for (i = 0; i < adapter->num_rx_queues; i++) {
2620 kfree(adapter->rx_ring[i]);
2621 adapter->rx_ring[i] = NULL;
2622 }
2623
2624 adapter->num_tx_queues = 0;
2625 adapter->num_rx_queues = 0;
2626
2627 ixgbevf_free_q_vectors(adapter);
2628 ixgbevf_reset_interrupt_capability(adapter);
2629 }
2630
2631 /**
2632 * ixgbevf_sw_init - Initialize general software structures
2633 * @adapter: board private structure to initialize
2634 *
2635 * ixgbevf_sw_init initializes the Adapter private data structure.
2636 * Fields are initialized based on PCI device information and
2637 * OS network device settings (MTU size).
2638 **/
2639 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2640 {
2641 struct ixgbe_hw *hw = &adapter->hw;
2642 struct pci_dev *pdev = adapter->pdev;
2643 struct net_device *netdev = adapter->netdev;
2644 int err;
2645
2646 /* PCI config space info */
2647 hw->vendor_id = pdev->vendor;
2648 hw->device_id = pdev->device;
2649 hw->revision_id = pdev->revision;
2650 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2651 hw->subsystem_device_id = pdev->subsystem_device;
2652
2653 hw->mbx.ops.init_params(hw);
2654
2655 /* assume legacy case in which PF would only give VF 2 queues */
2656 hw->mac.max_tx_queues = 2;
2657 hw->mac.max_rx_queues = 2;
2658
2659 /* lock to protect mailbox accesses */
2660 spin_lock_init(&adapter->mbx_lock);
2661
2662 err = hw->mac.ops.reset_hw(hw);
2663 if (err) {
2664 dev_info(&pdev->dev,
2665 "PF still in reset state. Is the PF interface up?\n");
2666 } else {
2667 err = hw->mac.ops.init_hw(hw);
2668 if (err) {
2669 pr_err("init_shared_code failed: %d\n", err);
2670 goto out;
2671 }
2672 ixgbevf_negotiate_api(adapter);
2673 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2674 if (err)
2675 dev_info(&pdev->dev, "Error reading MAC address\n");
2676 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2677 dev_info(&pdev->dev,
2678 "MAC address not assigned by administrator.\n");
2679 ether_addr_copy(netdev->dev_addr, hw->mac.addr);
2680 }
2681
2682 if (!is_valid_ether_addr(netdev->dev_addr)) {
2683 dev_info(&pdev->dev, "Assigning random MAC address\n");
2684 eth_hw_addr_random(netdev);
2685 ether_addr_copy(hw->mac.addr, netdev->dev_addr);
2686 ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr);
2687 }
2688
2689 /* Enable dynamic interrupt throttling rates */
2690 adapter->rx_itr_setting = 1;
2691 adapter->tx_itr_setting = 1;
2692
2693 /* set default ring sizes */
2694 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2695 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2696
2697 set_bit(__IXGBEVF_DOWN, &adapter->state);
2698 return 0;
2699
2700 out:
2701 return err;
2702 }
2703
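/* The VF statistics registers are read-only running counters that wrap at
 * 32 or 36 bits. These helpers detect wrap-around against the last sampled
 * value and accumulate the reading into a 64-bit software counter.
 */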
2704 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
2705 { \
2706 u32 current_counter = IXGBE_READ_REG(hw, reg); \
2707 if (current_counter < last_counter) \
2708 counter += 0x100000000LL; \
2709 last_counter = current_counter; \
2710 counter &= 0xFFFFFFFF00000000LL; \
2711 counter |= current_counter; \
2712 }
2713
2714 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2715 { \
2716 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2717 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2718 u64 current_counter = (current_counter_msb << 32) | \
2719 current_counter_lsb; \
2720 if (current_counter < last_counter) \
2721 counter += 0x1000000000LL; \
2722 last_counter = current_counter; \
2723 counter &= 0xFFFFFFF000000000LL; \
2724 counter |= current_counter; \
2725 }
2726 /**
2727 * ixgbevf_update_stats - Update the board statistics counters.
2728 * @adapter: board private structure
2729 **/
2730 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2731 {
2732 struct ixgbe_hw *hw = &adapter->hw;
2733 int i;
2734
2735 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2736 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2737 return;
2738
2739 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2740 adapter->stats.vfgprc);
2741 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2742 adapter->stats.vfgptc);
2743 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2744 adapter->stats.last_vfgorc,
2745 adapter->stats.vfgorc);
2746 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2747 adapter->stats.last_vfgotc,
2748 adapter->stats.vfgotc);
2749 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2750 adapter->stats.vfmprc);
2751
2752 for (i = 0; i < adapter->num_rx_queues; i++) {
2753 adapter->hw_csum_rx_error +=
2754 adapter->rx_ring[i]->hw_csum_rx_error;
2755 adapter->rx_ring[i]->hw_csum_rx_error = 0;
2756 }
2757 }
2758
2759 /**
2760 * ixgbevf_service_timer - Timer Call-back
2761 * @data: pointer to adapter cast into an unsigned long
2762 **/
2763 static void ixgbevf_service_timer(unsigned long data)
2764 {
2765 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2766
2767 /* Reset the timer */
2768 mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
2769
2770 ixgbevf_service_event_schedule(adapter);
2771 }
2772
2773 static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
2774 {
2775 if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
2776 return;
2777
2778 /* If we're already down or resetting, just bail */
2779 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2780 test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
2781 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2782 return;
2783
2784 adapter->tx_timeout_count++;
2785
2786 rtnl_lock();
2787 ixgbevf_reinit_locked(adapter);
2788 rtnl_unlock();
2789 }
2790
2791 /**
2792 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
2793 * @adapter: pointer to the device adapter structure
2794 *
2795 * This function serves two purposes. First it strobes the interrupt lines
2796 * in order to make certain interrupts are occurring. Secondly it sets the
2797 * bits needed to check for TX hangs. As a result we should immediately
2798 * determine if a hang has occurred.
2799 **/
2800 static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
2801 {
2802 struct ixgbe_hw *hw = &adapter->hw;
2803 u32 eics = 0;
2804 int i;
2805
2806 /* If we're down or resetting, just bail */
2807 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2808 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2809 return;
2810
2811 /* Force detection of hung controller */
2812 if (netif_carrier_ok(adapter->netdev)) {
2813 for (i = 0; i < adapter->num_tx_queues; i++)
2814 set_check_for_tx_hang(adapter->tx_ring[i]);
2815 }
2816
2817 /* get one bit for every active Tx/Rx interrupt vector */
2818 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2819 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2820
2821 if (qv->rx.ring || qv->tx.ring)
2822 eics |= BIT(i);
2823 }
2824
2825 /* Cause software interrupt to ensure rings are cleaned */
2826 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2827 }
2828
2829 /**
2830 * ixgbevf_watchdog_update_link - update the link status
2831 * @adapter: pointer to the device adapter structure
2832 **/
2833 static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
2834 {
2835 struct ixgbe_hw *hw = &adapter->hw;
2836 u32 link_speed = adapter->link_speed;
2837 bool link_up = adapter->link_up;
2838 s32 err;
2839
2840 spin_lock_bh(&adapter->mbx_lock);
2841
2842 err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2843
2844 spin_unlock_bh(&adapter->mbx_lock);
2845
2846 /* if check for link returns error we will need to reset */
2847 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
2848 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
2849 link_up = false;
2850 }
2851
2852 adapter->link_up = link_up;
2853 adapter->link_speed = link_speed;
2854 }
2855
2856 /**
2857 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
2858 * print link up message
2859 * @adapter: pointer to the device adapter structure
2860 **/
2861 static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
2862 {
2863 struct net_device *netdev = adapter->netdev;
2864
2865 /* only continue if link was previously down */
2866 if (netif_carrier_ok(netdev))
2867 return;
2868
2869 dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
2870 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2871 "10 Gbps" :
2872 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
2873 "1 Gbps" :
2874 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
2875 "100 Mbps" :
2876 "unknown speed");
2877
2878 netif_carrier_on(netdev);
2879 }
2880
2881 /**
2882 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
2883 * print link down message
2884 * @adapter: pointer to the adapter structure
2885 **/
2886 static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
2887 {
2888 struct net_device *netdev = adapter->netdev;
2889
2890 adapter->link_speed = 0;
2891
2892 /* only continue if link was up previously */
2893 if (!netif_carrier_ok(netdev))
2894 return;
2895
2896 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2897
2898 netif_carrier_off(netdev);
2899 }
2900
2901 /**
2902 * ixgbevf_watchdog_subtask - worker thread to bring link up
2903  * @adapter: pointer to the device adapter structure
2904 **/
2905 static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
2906 {
2907 /* if interface is down do nothing */
2908 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2909 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2910 return;
2911
2912 ixgbevf_watchdog_update_link(adapter);
2913
2914 if (adapter->link_up)
2915 ixgbevf_watchdog_link_is_up(adapter);
2916 else
2917 ixgbevf_watchdog_link_is_down(adapter);
2918
2919 ixgbevf_update_stats(adapter);
2920 }
2921
2922 /**
2923 * ixgbevf_service_task - manages and runs subtasks
2924 * @work: pointer to work_struct containing our data
2925 **/
2926 static void ixgbevf_service_task(struct work_struct *work)
2927 {
2928 struct ixgbevf_adapter *adapter = container_of(work,
2929 struct ixgbevf_adapter,
2930 service_task);
2931 struct ixgbe_hw *hw = &adapter->hw;
2932
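	/* Scheduled from the service timer roughly every two seconds; if the
	 * device has been removed, take the interface down and stop here.
	 */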
2933 if (IXGBE_REMOVED(hw->hw_addr)) {
2934 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
2935 rtnl_lock();
2936 ixgbevf_down(adapter);
2937 rtnl_unlock();
2938 }
2939 return;
2940 }
2941
2942 ixgbevf_queue_reset_subtask(adapter);
2943 ixgbevf_reset_subtask(adapter);
2944 ixgbevf_watchdog_subtask(adapter);
2945 ixgbevf_check_hang_subtask(adapter);
2946
2947 ixgbevf_service_event_complete(adapter);
2948 }
2949
2950 /**
2951 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2952 * @tx_ring: Tx descriptor ring for a specific queue
2953 *
2954 * Free all transmit software resources
2955 **/
2956 void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
2957 {
2958 ixgbevf_clean_tx_ring(tx_ring);
2959
2960 vfree(tx_ring->tx_buffer_info);
2961 tx_ring->tx_buffer_info = NULL;
2962
2963 /* if not set, then don't free */
2964 if (!tx_ring->desc)
2965 return;
2966
2967 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
2968 tx_ring->dma);
2969
2970 tx_ring->desc = NULL;
2971 }
2972
2973 /**
2974 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2975 * @adapter: board private structure
2976 *
2977 * Free all transmit software resources
2978 **/
2979 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2980 {
2981 int i;
2982
2983 for (i = 0; i < adapter->num_tx_queues; i++)
2984 if (adapter->tx_ring[i]->desc)
2985 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
2986 }
2987
2988 /**
2989 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2990 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
2991 *
2992 * Return 0 on success, negative on failure
2993 **/
2994 int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
2995 {
2996 int size;
2997
2998 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2999 tx_ring->tx_buffer_info = vzalloc(size);
3000 if (!tx_ring->tx_buffer_info)
3001 goto err;
3002
3003 /* round up to nearest 4K */
3004 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
3005 tx_ring->size = ALIGN(tx_ring->size, 4096);
3006
3007 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
3008 &tx_ring->dma, GFP_KERNEL);
3009 if (!tx_ring->desc)
3010 goto err;
3011
3012 return 0;
3013
3014 err:
3015 vfree(tx_ring->tx_buffer_info);
3016 tx_ring->tx_buffer_info = NULL;
3017 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
3018 return -ENOMEM;
3019 }
3020
3021 /**
3022 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
3023 * @adapter: board private structure
3024 *
3025 * If this function returns with an error, then it's possible one or
3026 * more of the rings is populated (while the rest are not). It is the
3027  * caller's duty to clean those orphaned rings.
3028 *
3029 * Return 0 on success, negative on failure
3030 **/
3031 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
3032 {
3033 int i, err = 0;
3034
3035 for (i = 0; i < adapter->num_tx_queues; i++) {
3036 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
3037 if (!err)
3038 continue;
3039 hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
3040 break;
3041 }
3042
3043 return err;
3044 }
3045
3046 /**
3047 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
3048 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
3049 *
3050 * Returns 0 on success, negative on failure
3051 **/
3052 int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
3053 {
3054 int size;
3055
3056 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
3057 rx_ring->rx_buffer_info = vzalloc(size);
3058 if (!rx_ring->rx_buffer_info)
3059 goto err;
3060
3061 /* Round up to nearest 4K */
3062 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
3063 rx_ring->size = ALIGN(rx_ring->size, 4096);
3064
3065 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
3066 &rx_ring->dma, GFP_KERNEL);
3067
3068 if (!rx_ring->desc)
3069 goto err;
3070
3071 return 0;
3072 err:
3073 vfree(rx_ring->rx_buffer_info);
3074 rx_ring->rx_buffer_info = NULL;
3075 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
3076 return -ENOMEM;
3077 }
3078
3079 /**
3080 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
3081 * @adapter: board private structure
3082 *
3083 * If this function returns with an error, then it's possible one or
3084 * more of the rings is populated (while the rest are not). It is the
3085  * caller's duty to clean those orphaned rings.
3086 *
3087 * Return 0 on success, negative on failure
3088 **/
3089 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
3090 {
3091 int i, err = 0;
3092
3093 for (i = 0; i < adapter->num_rx_queues; i++) {
3094 err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
3095 if (!err)
3096 continue;
3097 hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
3098 break;
3099 }
3100 return err;
3101 }
3102
3103 /**
3104 * ixgbevf_free_rx_resources - Free Rx Resources
3105 * @rx_ring: ring to clean the resources from
3106 *
3107 * Free all receive software resources
3108 **/
3109 void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
3110 {
3111 ixgbevf_clean_rx_ring(rx_ring);
3112
3113 vfree(rx_ring->rx_buffer_info);
3114 rx_ring->rx_buffer_info = NULL;
3115
3116 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
3117 rx_ring->dma);
3118
3119 rx_ring->desc = NULL;
3120 }
3121
3122 /**
3123 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
3124 * @adapter: board private structure
3125 *
3126 * Free all receive software resources
3127 **/
3128 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
3129 {
3130 int i;
3131
3132 for (i = 0; i < adapter->num_rx_queues; i++)
3133 if (adapter->rx_ring[i]->desc)
3134 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3135 }
3136
3137 /**
3138 * ixgbevf_open - Called when a network interface is made active
3139 * @netdev: network interface device structure
3140 *
3141 * Returns 0 on success, negative value on failure
3142 *
3143 * The open entry point is called when a network interface is made
3144 * active by the system (IFF_UP). At this point all resources needed
3145 * for transmit and receive operations are allocated, the interrupt
3146 * handler is registered with the OS, the watchdog timer is started,
3147 * and the stack is notified that the interface is ready.
3148 **/
3149 int ixgbevf_open(struct net_device *netdev)
3150 {
3151 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3152 struct ixgbe_hw *hw = &adapter->hw;
3153 int err;
3154
3155 /* A previous failure to open the device because of a lack of
3156 * available MSIX vector resources may have reset the number
3157 * of msix vectors variable to zero. The only way to recover
3158 * is to unload/reload the driver and hope that the system has
3159 * been able to recover some MSIX vector resources.
3160 */
3161 if (!adapter->num_msix_vectors)
3162 return -ENOMEM;
3163
3164 if (hw->adapter_stopped) {
3165 ixgbevf_reset(adapter);
3166 /* if adapter is still stopped then PF isn't up and
3167 * the VF can't start.
3168 */
3169 if (hw->adapter_stopped) {
3170 err = IXGBE_ERR_MBX;
3171 pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
3172 goto err_setup_reset;
3173 }
3174 }
3175
3176 /* disallow open during test */
3177 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
3178 return -EBUSY;
3179
3180 netif_carrier_off(netdev);
3181
3182 /* allocate transmit descriptors */
3183 err = ixgbevf_setup_all_tx_resources(adapter);
3184 if (err)
3185 goto err_setup_tx;
3186
3187 /* allocate receive descriptors */
3188 err = ixgbevf_setup_all_rx_resources(adapter);
3189 if (err)
3190 goto err_setup_rx;
3191
3192 ixgbevf_configure(adapter);
3193
3194 /* Map the Tx/Rx rings to the vectors we were allotted.
3195 	 * Since request_irq is called in this function, the rings must be
3196 	 * mapped to their vectors *before* up_complete.
3197 */
3198 ixgbevf_map_rings_to_vectors(adapter);
3199
3200 err = ixgbevf_request_irq(adapter);
3201 if (err)
3202 goto err_req_irq;
3203
3204 ixgbevf_up_complete(adapter);
3205
3206 return 0;
3207
3208 err_req_irq:
3209 ixgbevf_down(adapter);
3210 err_setup_rx:
3211 ixgbevf_free_all_rx_resources(adapter);
3212 err_setup_tx:
3213 ixgbevf_free_all_tx_resources(adapter);
3214 ixgbevf_reset(adapter);
3215
3216 err_setup_reset:
3217
3218 return err;
3219 }
3220
3221 /**
3222 * ixgbevf_close - Disables a network interface
3223 * @netdev: network interface device structure
3224 *
3225 * Returns 0, this is not allowed to fail
3226 *
3227 * The close entry point is called when an interface is de-activated
3228 * by the OS. The hardware is still under the drivers control, but
3229 * needs to be disabled. A global MAC reset is issued to stop the
3230 * hardware, and all transmit and receive resources are freed.
3231 **/
3232 int ixgbevf_close(struct net_device *netdev)
3233 {
3234 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3235
3236 ixgbevf_down(adapter);
3237 ixgbevf_free_irq(adapter);
3238
3239 ixgbevf_free_all_tx_resources(adapter);
3240 ixgbevf_free_all_rx_resources(adapter);
3241
3242 return 0;
3243 }
3244
3245 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
3246 {
3247 struct net_device *dev = adapter->netdev;
3248
3249 if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
3250 &adapter->state))
3251 return;
3252
3253 /* if interface is down do nothing */
3254 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3255 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3256 return;
3257
3258 /* Hardware has to reinitialize queues and interrupts to
3259 * match packet buffer alignment. Unfortunately, the
3260 * hardware is not flexible enough to do this dynamically.
3261 */
3262 if (netif_running(dev))
3263 ixgbevf_close(dev);
3264
3265 ixgbevf_clear_interrupt_scheme(adapter);
3266 ixgbevf_init_interrupt_scheme(adapter);
3267
3268 if (netif_running(dev))
3269 ixgbevf_open(dev);
3270 }
3271
3272 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
3273 u32 vlan_macip_lens, u32 type_tucmd,
3274 u32 mss_l4len_idx)
3275 {
3276 struct ixgbe_adv_tx_context_desc *context_desc;
3277 u16 i = tx_ring->next_to_use;
3278
3279 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
3280
3281 i++;
3282 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3283
3284 /* set bits to identify this as an advanced context descriptor */
3285 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3286
3287 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3288 context_desc->seqnum_seed = 0;
3289 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
3290 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3291 }
3292
3293 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3294 struct ixgbevf_tx_buffer *first,
3295 u8 *hdr_len)
3296 {
3297 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
3298 struct sk_buff *skb = first->skb;
3299 union {
3300 struct iphdr *v4;
3301 struct ipv6hdr *v6;
3302 unsigned char *hdr;
3303 } ip;
3304 union {
3305 struct tcphdr *tcp;
3306 unsigned char *hdr;
3307 } l4;
3308 u32 paylen, l4_offset;
3309 int err;
3310
3311 if (skb->ip_summed != CHECKSUM_PARTIAL)
3312 return 0;
3313
3314 if (!skb_is_gso(skb))
3315 return 0;
3316
3317 err = skb_cow_head(skb, 0);
3318 if (err < 0)
3319 return err;
3320
3321 ip.hdr = skb_network_header(skb);
3322 l4.hdr = skb_checksum_start(skb);
3323
3324 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3325 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3326
3327 /* initialize outer IP header fields */
3328 if (ip.v4->version == 4) {
3329 /* IP header will have to cancel out any data that
3330 * is not a part of the outer IP header
3331 */
3332 ip.v4->check = csum_fold(csum_add(lco_csum(skb),
3333 csum_unfold(l4.tcp->check)));
3334 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3335
3336 ip.v4->tot_len = 0;
3337 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3338 IXGBE_TX_FLAGS_CSUM |
3339 IXGBE_TX_FLAGS_IPV4;
3340 } else {
3341 ip.v6->payload_len = 0;
3342 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3343 IXGBE_TX_FLAGS_CSUM;
3344 }
3345
3346 /* determine offset of inner transport header */
3347 l4_offset = l4.hdr - skb->data;
3348
3349 /* compute length of segmentation header */
3350 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
3351
3352 /* remove payload length from inner checksum */
3353 paylen = skb->len - l4_offset;
3354 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
3355
3356 /* update gso size and bytecount with header size */
3357 first->gso_segs = skb_shinfo(skb)->gso_segs;
3358 first->bytecount += (first->gso_segs - 1) * *hdr_len;
3359
3360 /* mss_l4len_id: use 1 as index for TSO */
3361 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
3362 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
3363 mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
3364
3365 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
3366 vlan_macip_lens = l4.hdr - ip.hdr;
3367 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
3368 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3369
3370 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3371 type_tucmd, mss_l4len_idx);
3372
3373 return 1;
3374 }
3375
3376 static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
3377 {
3378 unsigned int offset = 0;
3379
3380 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
3381
3382 return offset == skb_checksum_start_offset(skb);
3383 }
3384
3385 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3386 struct ixgbevf_tx_buffer *first)
3387 {
3388 struct sk_buff *skb = first->skb;
3389 u32 vlan_macip_lens = 0;
3390 u32 type_tucmd = 0;
3391
3392 if (skb->ip_summed != CHECKSUM_PARTIAL)
3393 goto no_csum;
3394
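	/* Infer the L4 protocol from the checksum field offset; anything other
	 * than TCP, UDP or SCTP falls back to a software checksum.
	 */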
3395 switch (skb->csum_offset) {
3396 case offsetof(struct tcphdr, check):
3397 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3398 /* fall through */
3399 case offsetof(struct udphdr, check):
3400 break;
3401 case offsetof(struct sctphdr, checksum):
3402 /* validate that this is actually an SCTP request */
3403 if (((first->protocol == htons(ETH_P_IP)) &&
3404 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
3405 ((first->protocol == htons(ETH_P_IPV6)) &&
3406 ixgbevf_ipv6_csum_is_sctp(skb))) {
3407 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3408 break;
3409 }
3410 /* fall through */
3411 default:
3412 skb_checksum_help(skb);
3413 goto no_csum;
3414 }
3415 /* update TX checksum flag */
3416 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
3417 vlan_macip_lens = skb_checksum_start_offset(skb) -
3418 skb_network_offset(skb);
3419 no_csum:
3420 /* vlan_macip_lens: MACLEN, VLAN tag */
3421 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3422 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3423
3424 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
3425 }
3426
3427 static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
3428 {
3429 /* set type for advanced descriptor with frame checksum insertion */
3430 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
3431 IXGBE_ADVTXD_DCMD_IFCS |
3432 IXGBE_ADVTXD_DCMD_DEXT);
3433
3434 /* set HW VLAN bit if VLAN is present */
3435 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3436 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
3437
3438 /* set segmentation enable bits for TSO/FSO */
3439 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3440 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
3441
3442 return cmd_type;
3443 }
3444
3445 static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
3446 u32 tx_flags, unsigned int paylen)
3447 {
3448 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
3449
3450 /* enable L4 checksum for TSO and TX checksum offload */
3451 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3452 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
3453
3454 	/* enable IPv4 checksum for TSO */
3455 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3456 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
3457
3458 /* use index 1 context for TSO/FSO/FCOE */
3459 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3460 olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
3461
3462 /* Check Context must be set if Tx switch is enabled, which it
3463 	 * always is in the case where virtual functions are running
3464 */
3465 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
3466
3467 tx_desc->read.olinfo_status = olinfo_status;
3468 }
3469
3470 static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
3471 struct ixgbevf_tx_buffer *first,
3472 const u8 hdr_len)
3473 {
3474 dma_addr_t dma;
3475 struct sk_buff *skb = first->skb;
3476 struct ixgbevf_tx_buffer *tx_buffer;
3477 union ixgbe_adv_tx_desc *tx_desc;
3478 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
3479 unsigned int data_len = skb->data_len;
3480 unsigned int size = skb_headlen(skb);
3481 unsigned int paylen = skb->len - hdr_len;
3482 u32 tx_flags = first->tx_flags;
3483 __le32 cmd_type;
3484 u16 i = tx_ring->next_to_use;
3485
3486 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3487
3488 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
3489 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
3490
3491 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3492 if (dma_mapping_error(tx_ring->dev, dma))
3493 goto dma_error;
3494
3495 /* record length, and DMA address */
3496 dma_unmap_len_set(first, len, size);
3497 dma_unmap_addr_set(first, dma, dma);
3498
3499 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3500
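	/* Walk the linear data and then each paged fragment; buffers larger
	 * than IXGBE_MAX_DATA_PER_TXD are split across multiple descriptors.
	 */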
3501 for (;;) {
3502 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
3503 tx_desc->read.cmd_type_len =
3504 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
3505
3506 i++;
3507 tx_desc++;
3508 if (i == tx_ring->count) {
3509 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3510 i = 0;
3511 }
3512
3513 dma += IXGBE_MAX_DATA_PER_TXD;
3514 size -= IXGBE_MAX_DATA_PER_TXD;
3515
3516 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3517 tx_desc->read.olinfo_status = 0;
3518 }
3519
3520 if (likely(!data_len))
3521 break;
3522
3523 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
3524
3525 i++;
3526 tx_desc++;
3527 if (i == tx_ring->count) {
3528 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3529 i = 0;
3530 }
3531
3532 size = skb_frag_size(frag);
3533 data_len -= size;
3534
3535 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3536 DMA_TO_DEVICE);
3537 if (dma_mapping_error(tx_ring->dev, dma))
3538 goto dma_error;
3539
3540 tx_buffer = &tx_ring->tx_buffer_info[i];
3541 dma_unmap_len_set(tx_buffer, len, size);
3542 dma_unmap_addr_set(tx_buffer, dma, dma);
3543
3544 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3545 tx_desc->read.olinfo_status = 0;
3546
3547 frag++;
3548 }
3549
3550 /* write last descriptor with RS and EOP bits */
3551 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
3552 tx_desc->read.cmd_type_len = cmd_type;
3553
3554 /* set the timestamp */
3555 first->time_stamp = jiffies;
3556
3557 /* Force memory writes to complete before letting h/w know there
3558 * are new descriptors to fetch. (Only applicable for weak-ordered
3559 * memory model archs, such as IA-64).
3560 *
3561 * We also need this memory barrier (wmb) to make certain all of the
3562 * status bits have been updated before next_to_watch is written.
3563 */
3564 wmb();
3565
3566 /* set next_to_watch value indicating a packet is present */
3567 first->next_to_watch = tx_desc;
3568
3569 i++;
3570 if (i == tx_ring->count)
3571 i = 0;
3572
3573 tx_ring->next_to_use = i;
3574
3575 /* notify HW of packet */
3576 ixgbevf_write_tail(tx_ring, i);
3577
3578 return;
3579 dma_error:
3580 dev_err(tx_ring->dev, "TX DMA map failed\n");
3581
3582 /* clear dma mappings for failed tx_buffer_info map */
3583 for (;;) {
3584 tx_buffer = &tx_ring->tx_buffer_info[i];
3585 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
3586 if (tx_buffer == first)
3587 break;
3588 if (i == 0)
3589 i = tx_ring->count;
3590 i--;
3591 }
3592
3593 tx_ring->next_to_use = i;
3594 }
3595
3596 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3597 {
3598 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3599 /* Herbert's original patch had:
3600 * smp_mb__after_netif_stop_queue();
3601 * but since that doesn't exist yet, just open code it.
3602 */
3603 smp_mb();
3604
3605 /* We need to check again in a case another CPU has just
3606 * made room available.
3607 */
3608 if (likely(ixgbevf_desc_unused(tx_ring) < size))
3609 return -EBUSY;
3610
3611 /* A reprieve! - use start_queue because it doesn't call schedule */
3612 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3613 ++tx_ring->tx_stats.restart_queue;
3614
3615 return 0;
3616 }
3617
3618 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3619 {
3620 if (likely(ixgbevf_desc_unused(tx_ring) >= size))
3621 return 0;
3622 return __ixgbevf_maybe_stop_tx(tx_ring, size);
3623 }
3624
3625 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3626 {
3627 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3628 struct ixgbevf_tx_buffer *first;
3629 struct ixgbevf_ring *tx_ring;
3630 int tso;
3631 u32 tx_flags = 0;
3632 u16 count = TXD_USE_COUNT(skb_headlen(skb));
3633 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3634 unsigned short f;
3635 #endif
3636 u8 hdr_len = 0;
3637 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
3638
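	/* Silently drop frames addressed to link-local Ethernet multicast
	 * addresses (01:80:C2:00:00:0x) instead of handing them to hardware.
	 */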
3639 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
3640 dev_kfree_skb_any(skb);
3641 return NETDEV_TX_OK;
3642 }
3643
3644 tx_ring = adapter->tx_ring[skb->queue_mapping];
3645
3646 /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
3647 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
3648 * + 2 desc gap to keep tail from touching head,
3649 * + 1 desc for context descriptor,
3650 * otherwise try next time
3651 */
3652 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3653 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3654 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3655 #else
3656 count += skb_shinfo(skb)->nr_frags;
3657 #endif
3658 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
3659 tx_ring->tx_stats.tx_busy++;
3660 return NETDEV_TX_BUSY;
3661 }
3662
3663 /* record the location of the first descriptor for this packet */
3664 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
3665 first->skb = skb;
3666 first->bytecount = skb->len;
3667 first->gso_segs = 1;
3668
3669 if (skb_vlan_tag_present(skb)) {
3670 tx_flags |= skb_vlan_tag_get(skb);
3671 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3672 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3673 }
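/* For example, a tag carrying VLAN ID 100 with priority 0 (TCI 0x0064)
 * becomes tx_flags = 0x00640000 | IXGBE_TX_FLAGS_VLAN, assuming the
 * 16-bit IXGBE_TX_FLAGS_VLAN_SHIFT from ixgbevf.h, i.e. the tag sits in
 * the upper half of tx_flags above the flag bits.
 */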
3674
3675 /* record initial flags and protocol */
3676 first->tx_flags = tx_flags;
3677 first->protocol = vlan_get_protocol(skb);
3678
3679 tso = ixgbevf_tso(tx_ring, first, &hdr_len);
3680 if (tso < 0)
3681 goto out_drop;
3682 else if (!tso)
3683 ixgbevf_tx_csum(tx_ring, first);
3684
3685 ixgbevf_tx_map(tx_ring, first, hdr_len);
3686
3687 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
3688
3689 return NETDEV_TX_OK;
3690
3691 out_drop:
3692 dev_kfree_skb_any(first->skb);
3693 first->skb = NULL;
3694
3695 return NETDEV_TX_OK;
3696 }
3697
3698 /**
3699 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3700 * @netdev: network interface device structure
3701 * @p: pointer to an address structure
3702 *
3703 * Returns 0 on success, negative on failure
3704 **/
3705 static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3706 {
3707 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3708 struct ixgbe_hw *hw = &adapter->hw;
3709 struct sockaddr *addr = p;
3710 int err;
3711
3712 if (!is_valid_ether_addr(addr->sa_data))
3713 return -EADDRNOTAVAIL;
3714
3715 spin_lock_bh(&adapter->mbx_lock);
3716
3717 err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
3718
3719 spin_unlock_bh(&adapter->mbx_lock);
3720
3721 if (err)
3722 return -EPERM;
3723
3724 ether_addr_copy(hw->mac.addr, addr->sa_data);
3725 ether_addr_copy(netdev->dev_addr, addr->sa_data);
3726
3727 return 0;
3728 }
3729
3730 /**
3731 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3732 * @netdev: network interface device structure
3733 * @new_mtu: new value for maximum frame size
3734 *
3735 * Returns 0 on success, negative on failure
3736 **/
3737 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3738 {
3739 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3740 struct ixgbe_hw *hw = &adapter->hw;
3741 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3742 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
3743 int ret;
3744
3745 switch (adapter->hw.api_version) {
3746 case ixgbe_mbox_api_11:
3747 case ixgbe_mbox_api_12:
3748 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3749 break;
3750 default:
3751 if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
3752 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3753 break;
3754 }
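/* Example: with ETH_HLEN (14) and ETH_FCS_LEN (4) the default 1500-byte
 * MTU gives a 1518-byte max_frame, which fits either limit; a 9000-byte
 * MTU is only accepted when the jumbo-capable limit was selected above.
 */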
3755
3756 /* MTU < 68 is an error and causes problems on some kernels */
3757 if ((new_mtu < 68) || (max_frame > max_possible_frame))
3758 return -EINVAL;
3759
3760 /* notify the PF of our intent to use this size of frame */
3761 ret = hw->mac.ops.set_rlpml(hw, max_frame);
3762 if (ret)
3763 return -EINVAL;
3764
3765 hw_dbg(hw, "changing MTU from %d to %d\n",
3766 netdev->mtu, new_mtu);
3767
3768 /* must set new MTU before calling down or up */
3769 netdev->mtu = new_mtu;
3770
3771 return 0;
3772 }
3773
3774 #ifdef CONFIG_NET_POLL_CONTROLLER
3775 /* Polling 'interrupt' - used by things like netconsole to send skbs
3776 * without having to re-enable interrupts. It's not called while
3777 * the interrupt routine is executing.
3778 */
3779 static void ixgbevf_netpoll(struct net_device *netdev)
3780 {
3781 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3782 int i;
3783
3784 /* if interface is down do nothing */
3785 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
3786 return;
3787 for (i = 0; i < adapter->num_rx_queues; i++)
3788 ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
3789 }
3790 #endif /* CONFIG_NET_POLL_CONTROLLER */
3791
3792 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
3793 {
3794 struct net_device *netdev = pci_get_drvdata(pdev);
3795 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3796 #ifdef CONFIG_PM
3797 int retval = 0;
3798 #endif
3799
3800 netif_device_detach(netdev);
3801
3802 if (netif_running(netdev)) {
3803 rtnl_lock();
3804 ixgbevf_down(adapter);
3805 ixgbevf_free_irq(adapter);
3806 ixgbevf_free_all_tx_resources(adapter);
3807 ixgbevf_free_all_rx_resources(adapter);
3808 rtnl_unlock();
3809 }
3810
3811 ixgbevf_clear_interrupt_scheme(adapter);
3812
3813 #ifdef CONFIG_PM
3814 retval = pci_save_state(pdev);
3815 if (retval)
3816 return retval;
3817
3818 #endif
3819 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
3820 pci_disable_device(pdev);
3821
3822 return 0;
3823 }
3824
3825 #ifdef CONFIG_PM
3826 static int ixgbevf_resume(struct pci_dev *pdev)
3827 {
3828 struct net_device *netdev = pci_get_drvdata(pdev);
3829 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3830 u32 err;
3831
3832 pci_restore_state(pdev);
3833 /* pci_restore_state clears dev->state_saved so call
3834 * pci_save_state to restore it.
3835 */
3836 pci_save_state(pdev);
3837
3838 err = pci_enable_device_mem(pdev);
3839 if (err) {
3840 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3841 return err;
3842 }
3843 smp_mb__before_atomic();
3844 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
3845 pci_set_master(pdev);
3846
3847 ixgbevf_reset(adapter);
3848
3849 rtnl_lock();
3850 err = ixgbevf_init_interrupt_scheme(adapter);
3851 rtnl_unlock();
3852 if (err) {
3853 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3854 return err;
3855 }
3856
3857 if (netif_running(netdev)) {
3858 err = ixgbevf_open(netdev);
3859 if (err)
3860 return err;
3861 }
3862
3863 netif_device_attach(netdev);
3864
3865 return err;
3866 }
3867
3868 #endif /* CONFIG_PM */
3869 static void ixgbevf_shutdown(struct pci_dev *pdev)
3870 {
3871 ixgbevf_suspend(pdev, PMSG_SUSPEND);
3872 }
3873
3874 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3875 struct rtnl_link_stats64 *stats)
3876 {
3877 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3878 unsigned int start;
3879 u64 bytes, packets;
3880 const struct ixgbevf_ring *ring;
3881 int i;
3882
3883 ixgbevf_update_stats(adapter);
3884
3885 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3886
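/* Each ring's byte/packet pair is sampled under a u64_stats retry loop so
 * a concurrent update from the datapath cannot yield a torn 64-bit value
 * on 32-bit builds; the loop re-reads if the sequence count changed.
 */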
3887 for (i = 0; i < adapter->num_rx_queues; i++) {
3888 ring = adapter->rx_ring[i];
3889 do {
3890 start = u64_stats_fetch_begin_irq(&ring->syncp);
3891 bytes = ring->stats.bytes;
3892 packets = ring->stats.packets;
3893 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
3894 stats->rx_bytes += bytes;
3895 stats->rx_packets += packets;
3896 }
3897
3898 for (i = 0; i < adapter->num_tx_queues; i++) {
3899 ring = adapter->tx_ring[i];
3900 do {
3901 start = u64_stats_fetch_begin_irq(&ring->syncp);
3902 bytes = ring->stats.bytes;
3903 packets = ring->stats.packets;
3904 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
3905 stats->tx_bytes += bytes;
3906 stats->tx_packets += packets;
3907 }
3908
3909 return stats;
3910 }
3911
3912 #define IXGBEVF_MAX_MAC_HDR_LEN 127
3913 #define IXGBEVF_MAX_NETWORK_HDR_LEN 511
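/* These caps match what a single advanced context descriptor can encode:
 * a 7-bit MACLEN field (up to 127 bytes of L2 header) and a 9-bit IPLEN
 * field (up to 511 bytes of network header), per the descriptor layout
 * assumed from the ixgbe/ixgbevf definitions.
 */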
3914
3915 static netdev_features_t
3916 ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
3917 netdev_features_t features)
3918 {
3919 unsigned int network_hdr_len, mac_hdr_len;
3920
3921 /* Make certain the headers can be described by a context descriptor */
3922 mac_hdr_len = skb_network_header(skb) - skb->data;
3923 if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
3924 return features & ~(NETIF_F_HW_CSUM |
3925 NETIF_F_SCTP_CRC |
3926 NETIF_F_HW_VLAN_CTAG_TX |
3927 NETIF_F_TSO |
3928 NETIF_F_TSO6);
3929
3930 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
3931 if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
3932 return features & ~(NETIF_F_HW_CSUM |
3933 NETIF_F_SCTP_CRC |
3934 NETIF_F_TSO |
3935 NETIF_F_TSO6);
3936
3937 /* We can only support IPV4 TSO in tunnels if we can mangle the
3938 * inner IP ID field, so strip TSO if MANGLEID is not supported.
3939 */
3940 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
3941 features &= ~NETIF_F_TSO;
3942
3943 return features;
3944 }
3945
3946 static const struct net_device_ops ixgbevf_netdev_ops = {
3947 .ndo_open = ixgbevf_open,
3948 .ndo_stop = ixgbevf_close,
3949 .ndo_start_xmit = ixgbevf_xmit_frame,
3950 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
3951 .ndo_get_stats64 = ixgbevf_get_stats,
3952 .ndo_validate_addr = eth_validate_addr,
3953 .ndo_set_mac_address = ixgbevf_set_mac,
3954 .ndo_change_mtu = ixgbevf_change_mtu,
3955 .ndo_tx_timeout = ixgbevf_tx_timeout,
3956 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
3957 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
3958 #ifdef CONFIG_NET_RX_BUSY_POLL
3959 .ndo_busy_poll = ixgbevf_busy_poll_recv,
3960 #endif
3961 #ifdef CONFIG_NET_POLL_CONTROLLER
3962 .ndo_poll_controller = ixgbevf_netpoll,
3963 #endif
3964 .ndo_features_check = ixgbevf_features_check,
3965 };
3966
3967 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3968 {
3969 dev->netdev_ops = &ixgbevf_netdev_ops;
3970 ixgbevf_set_ethtool_ops(dev);
3971 dev->watchdog_timeo = 5 * HZ;
3972 }
3973
3974 /**
3975 * ixgbevf_probe - Device Initialization Routine
3976 * @pdev: PCI device information struct
3977 * @ent: entry in ixgbevf_pci_tbl
3978 *
3979 * Returns 0 on success, negative on failure
3980 *
3981 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3982 * The OS initialization, configuring of the adapter private structure,
3983 * and a hardware reset occur.
3984 **/
3985 static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3986 {
3987 struct net_device *netdev;
3988 struct ixgbevf_adapter *adapter = NULL;
3989 struct ixgbe_hw *hw = NULL;
3990 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3991 int err, pci_using_dac;
3992 bool disable_dev = false;
3993
3994 err = pci_enable_device(pdev);
3995 if (err)
3996 return err;
3997
3998 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3999 pci_using_dac = 1;
4000 } else {
4001 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4002 if (err) {
4003 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
4004 goto err_dma;
4005 }
4006 pci_using_dac = 0;
4007 }
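/* A 64-bit DMA mask is preferred so rings and buffers may sit anywhere in
 * memory; the driver falls back to 32-bit addressing (and later skips
 * NETIF_F_HIGHDMA) only when the platform cannot provide it.
 */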
4008
4009 err = pci_request_regions(pdev, ixgbevf_driver_name);
4010 if (err) {
4011 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
4012 goto err_pci_reg;
4013 }
4014
4015 pci_set_master(pdev);
4016
4017 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
4018 MAX_TX_QUEUES);
4019 if (!netdev) {
4020 err = -ENOMEM;
4021 goto err_alloc_etherdev;
4022 }
4023
4024 SET_NETDEV_DEV(netdev, &pdev->dev);
4025
4026 adapter = netdev_priv(netdev);
4027
4028 adapter->netdev = netdev;
4029 adapter->pdev = pdev;
4030 hw = &adapter->hw;
4031 hw->back = adapter;
4032 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
4033
4034 /* call save state here in standalone driver because it relies on
4035 * adapter struct to exist, and needs to call netdev_priv
4036 */
4037 pci_save_state(pdev);
4038
4039 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4040 pci_resource_len(pdev, 0));
4041 adapter->io_addr = hw->hw_addr;
4042 if (!hw->hw_addr) {
4043 err = -EIO;
4044 goto err_ioremap;
4045 }
4046
4047 ixgbevf_assign_netdev_ops(netdev);
4048
4049 /* Setup HW API */
4050 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
4051 hw->mac.type = ii->mac;
4052
4053 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
4054 sizeof(struct ixgbe_mbx_operations));
4055
4056 /* setup the private structure */
4057 err = ixgbevf_sw_init(adapter);
4058 if (err)
4059 goto err_sw_init;
4060
4061 /* The HW MAC address was set and/or determined in sw_init */
4062 if (!is_valid_ether_addr(netdev->dev_addr)) {
4063 pr_err("invalid MAC address\n");
4064 err = -EIO;
4065 goto err_sw_init;
4066 }
4067
4068 netdev->hw_features = NETIF_F_SG |
4069 NETIF_F_TSO |
4070 NETIF_F_TSO6 |
4071 NETIF_F_RXCSUM |
4072 NETIF_F_HW_CSUM |
4073 NETIF_F_SCTP_CRC;
4074
4075 #define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
4076 NETIF_F_GSO_GRE_CSUM | \
4077 NETIF_F_GSO_IPXIP4 | \
4078 NETIF_F_GSO_IPXIP6 | \
4079 NETIF_F_GSO_UDP_TUNNEL | \
4080 NETIF_F_GSO_UDP_TUNNEL_CSUM)
4081
4082 netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
4083 netdev->hw_features |= NETIF_F_GSO_PARTIAL |
4084 IXGBEVF_GSO_PARTIAL_FEATURES;
4085
4086 netdev->features = netdev->hw_features;
4087
4088 if (pci_using_dac)
4089 netdev->features |= NETIF_F_HIGHDMA;
4090
4091 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
4092 netdev->mpls_features |= NETIF_F_HW_CSUM;
4093 netdev->hw_enc_features |= netdev->vlan_features;
4094
4095 /* set this bit last since it cannot be part of vlan_features */
4096 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4097 NETIF_F_HW_VLAN_CTAG_RX |
4098 NETIF_F_HW_VLAN_CTAG_TX;
4099
4100 netdev->priv_flags |= IFF_UNICAST_FLT;
4101
4102 if (IXGBE_REMOVED(hw->hw_addr)) {
4103 err = -EIO;
4104 goto err_sw_init;
4105 }
4106
4107 setup_timer(&adapter->service_timer, &ixgbevf_service_timer,
4108 (unsigned long)adapter);
4109
4110 INIT_WORK(&adapter->service_task, ixgbevf_service_task);
4111 set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
4112 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
4113
4114 err = ixgbevf_init_interrupt_scheme(adapter);
4115 if (err)
4116 goto err_sw_init;
4117
4118 strcpy(netdev->name, "eth%d");
4119
4120 err = register_netdev(netdev);
4121 if (err)
4122 goto err_register;
4123
4124 pci_set_drvdata(pdev, netdev);
4125 netif_carrier_off(netdev);
4126
4127 ixgbevf_init_last_counter_stats(adapter);
4128
4129 /* print the VF info */
4130 dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
4131 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
4132
4133 switch (hw->mac.type) {
4134 case ixgbe_mac_X550_vf:
4135 dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
4136 break;
4137 case ixgbe_mac_X540_vf:
4138 dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
4139 break;
4140 case ixgbe_mac_82599_vf:
4141 default:
4142 dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
4143 break;
4144 }
4145
4146 return 0;
4147
4148 err_register:
4149 ixgbevf_clear_interrupt_scheme(adapter);
4150 err_sw_init:
4151 ixgbevf_reset_interrupt_capability(adapter);
4152 iounmap(adapter->io_addr);
4153 err_ioremap:
4154 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4155 free_netdev(netdev);
4156 err_alloc_etherdev:
4157 pci_release_regions(pdev);
4158 err_pci_reg:
4159 err_dma:
4160 if (!adapter || disable_dev)
4161 pci_disable_device(pdev);
4162 return err;
4163 }
4164
4165 /**
4166 * ixgbevf_remove - Device Removal Routine
4167 * @pdev: PCI device information struct
4168 *
4169 * ixgbevf_remove is called by the PCI subsystem to alert the driver
4170 * that it should release a PCI device. This could be caused by a
4171 * Hot-Plug event, or because the driver is going to be removed from
4172 * memory.
4173 **/
4174 static void ixgbevf_remove(struct pci_dev *pdev)
4175 {
4176 struct net_device *netdev = pci_get_drvdata(pdev);
4177 struct ixgbevf_adapter *adapter;
4178 bool disable_dev;
4179
4180 if (!netdev)
4181 return;
4182
4183 adapter = netdev_priv(netdev);
4184
4185 set_bit(__IXGBEVF_REMOVING, &adapter->state);
4186 cancel_work_sync(&adapter->service_task);
4187
4188 if (netdev->reg_state == NETREG_REGISTERED)
4189 unregister_netdev(netdev);
4190
4191 ixgbevf_clear_interrupt_scheme(adapter);
4192 ixgbevf_reset_interrupt_capability(adapter);
4193
4194 iounmap(adapter->io_addr);
4195 pci_release_regions(pdev);
4196
4197 hw_dbg(&adapter->hw, "Remove complete\n");
4198
4199 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4200 free_netdev(netdev);
4201
4202 if (disable_dev)
4203 pci_disable_device(pdev);
4204 }
4205
4206 /**
4207 * ixgbevf_io_error_detected - called when PCI error is detected
4208 * @pdev: Pointer to PCI device
4209 * @state: The current pci connection state
4210 *
4211 * This function is called after a PCI bus error affecting
4212 * this device has been detected.
4213 **/
4214 static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
4215 pci_channel_state_t state)
4216 {
4217 struct net_device *netdev = pci_get_drvdata(pdev);
4218 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4219
4220 if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
4221 return PCI_ERS_RESULT_DISCONNECT;
4222
4223 rtnl_lock();
4224 netif_device_detach(netdev);
4225
4226 if (state == pci_channel_io_perm_failure) {
4227 rtnl_unlock();
4228 return PCI_ERS_RESULT_DISCONNECT;
4229 }
4230
4231 if (netif_running(netdev))
4232 ixgbevf_down(adapter);
4233
4234 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
4235 pci_disable_device(pdev);
4236 rtnl_unlock();
4237
4238 /* Request a slot reset. */
4239 return PCI_ERS_RESULT_NEED_RESET;
4240 }
4241
4242 /**
4243 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
4244 * @pdev: Pointer to PCI device
4245 *
4246 * Restart the card from scratch, as if from a cold-boot. Implementation
4247 * resembles the first-half of the ixgbevf_resume routine.
4248 **/
4249 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
4250 {
4251 struct net_device *netdev = pci_get_drvdata(pdev);
4252 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4253
4254 if (pci_enable_device_mem(pdev)) {
4255 dev_err(&pdev->dev,
4256 "Cannot re-enable PCI device after reset.\n");
4257 return PCI_ERS_RESULT_DISCONNECT;
4258 }
4259
4260 smp_mb__before_atomic();
4261 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
4262 pci_set_master(pdev);
4263
4264 ixgbevf_reset(adapter);
4265
4266 return PCI_ERS_RESULT_RECOVERED;
4267 }
4268
4269 /**
4270 * ixgbevf_io_resume - called when traffic can start flowing again.
4271 * @pdev: Pointer to PCI device
4272 *
4273 * This callback is called when the error recovery driver tells us that
4274 * it's OK to resume normal operation. Implementation resembles the
4275 * second-half of the ixgbevf_resume routine.
4276 **/
4277 static void ixgbevf_io_resume(struct pci_dev *pdev)
4278 {
4279 struct net_device *netdev = pci_get_drvdata(pdev);
4280 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4281
4282 if (netif_running(netdev))
4283 ixgbevf_up(adapter);
4284
4285 netif_device_attach(netdev);
4286 }
4287
4288 /* PCI Error Recovery (ERS) */
4289 static const struct pci_error_handlers ixgbevf_err_handler = {
4290 .error_detected = ixgbevf_io_error_detected,
4291 .slot_reset = ixgbevf_io_slot_reset,
4292 .resume = ixgbevf_io_resume,
4293 };
4294
4295 static struct pci_driver ixgbevf_driver = {
4296 .name = ixgbevf_driver_name,
4297 .id_table = ixgbevf_pci_tbl,
4298 .probe = ixgbevf_probe,
4299 .remove = ixgbevf_remove,
4300 #ifdef CONFIG_PM
4301 /* Power Management Hooks */
4302 .suspend = ixgbevf_suspend,
4303 .resume = ixgbevf_resume,
4304 #endif
4305 .shutdown = ixgbevf_shutdown,
4306 .err_handler = &ixgbevf_err_handler
4307 };
4308
4309 /**
4310 * ixgbevf_init_module - Driver Registration Routine
4311 *
4312 * ixgbevf_init_module is the first routine called when the driver is
4313 * loaded. All it does is register with the PCI subsystem.
4314 **/
4315 static int __init ixgbevf_init_module(void)
4316 {
4317 pr_info("%s - version %s\n", ixgbevf_driver_string,
4318 ixgbevf_driver_version);
4319
4320 pr_info("%s\n", ixgbevf_copyright);
4321 ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
4322 if (!ixgbevf_wq) {
4323 pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name);
4324 return -ENOMEM;
4325 }
4326
4327 return pci_register_driver(&ixgbevf_driver);
4328 }
4329
4330 module_init(ixgbevf_init_module);
4331
4332 /**
4333 * ixgbevf_exit_module - Driver Exit Cleanup Routine
4334 *
4335 * ixgbevf_exit_module is called just before the driver is removed
4336 * from memory.
4337 **/
4338 static void __exit ixgbevf_exit_module(void)
4339 {
4340 pci_unregister_driver(&ixgbevf_driver);
4341 if (ixgbevf_wq) {
4342 destroy_workqueue(ixgbevf_wq);
4343 ixgbevf_wq = NULL;
4344 }
4345 }
4346
4347 #ifdef DEBUG
4348 /**
4349 * ixgbevf_get_hw_dev_name - return device name string
4350 * used by hardware layer to print debugging information
4351 **/
4352 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
4353 {
4354 struct ixgbevf_adapter *adapter = hw->back;
4355
4356 return adapter->netdev->name;
4357 }
4358
4359 #endif
4360 module_exit(ixgbevf_exit_module);
4361
4362 /* ixgbevf_main.c */