/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2014 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
        "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.12.1-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
        "Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
        [board_82599_vf] = &ixgbevf_82599_vf_info,
        [board_X540_vf] = &ixgbevf_X540_vf_info,
        [board_X550_vf] = &ixgbevf_X550_vf_info,
        [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbevf_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
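
/* Usage note (illustrative, not part of the original source): "debug"
 * selects the netif_msg verbosity, e.g. "modprobe ixgbevf debug=16" enables
 * all message types, while the default of -1 makes the driver fall back to
 * DEFAULT_MSG_ENABLE.  This follows the standard netif_msg_init()
 * convention used by the Intel Ethernet drivers (an assumption; the probe
 * code that consumes it is outside this section).
 */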

/* forward decls */
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);

static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
        struct ixgbevf_adapter *adapter = hw->back;

        if (!hw->hw_addr)
                return;
        hw->hw_addr = NULL;
        dev_err(&adapter->pdev->dev, "Adapter removed\n");
        if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
                schedule_work(&adapter->watchdog_task);
}

static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
        u32 value;

        /* The following check not only optimizes a bit by not
         * performing a read on the status register when the
         * register just read was a status register read that
         * returned IXGBE_FAILED_READ_REG. It also blocks any
         * potential recursion.
         */
        if (reg == IXGBE_VFSTATUS) {
                ixgbevf_remove_adapter(hw);
                return;
        }
        value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
        if (value == IXGBE_FAILED_READ_REG)
                ixgbevf_remove_adapter(hw);
}

u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
        u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
        u32 value;

        if (IXGBE_REMOVED(reg_addr))
                return IXGBE_FAILED_READ_REG;
        value = readl(reg_addr + reg);
        if (unlikely(value == IXGBE_FAILED_READ_REG))
                ixgbevf_check_remove(hw, reg);
        return value;
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
                             u8 queue, u8 msix_vector)
{
        u32 ivar, index;
        struct ixgbe_hw *hw = &adapter->hw;

        if (direction == -1) {
                /* other causes */
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
                ivar &= ~0xFF;
                ivar |= msix_vector;
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
        } else {
                /* tx or rx causes */
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                index = ((16 * (queue & 1)) + (8 * direction));
                ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
                ivar &= ~(0xFF << index);
                ivar |= (msix_vector << index);
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
        }
}
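
/* Worked example (illustrative): each VTIVAR register holds four 8-bit
 * allocation fields, two queues per register with one Rx and one Tx byte
 * each.  For Rx (direction 0) on queue 3, index = 16 * (3 & 1) + 8 * 0 = 16,
 * so the vector lands in bits 23:16 of VTIVAR(3 >> 1) = VTIVAR(1); the Tx
 * cause of the same queue (direction 1) uses index 24, i.e. bits 31:24.
 */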

static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
                                               struct ixgbevf_tx_buffer *tx_buffer)
{
        if (tx_buffer->skb) {
                dev_kfree_skb_any(tx_buffer->skb);
                if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(tx_ring->dev,
                                         dma_unmap_addr(tx_buffer, dma),
                                         dma_unmap_len(tx_buffer, len),
                                         DMA_TO_DEVICE);
        } else if (dma_unmap_len(tx_buffer, len)) {
                dma_unmap_page(tx_ring->dev,
                               dma_unmap_addr(tx_buffer, dma),
                               dma_unmap_len(tx_buffer, len),
                               DMA_TO_DEVICE);
        }
        tx_buffer->next_to_watch = NULL;
        tx_buffer->skb = NULL;
        dma_unmap_len_set(tx_buffer, len, 0);
        /* tx_buffer must be completely set up in the transmit path */
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
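
/* Worked example (illustrative): IXGBE_MAX_DATA_PER_TXD is 2^14 = 16384
 * bytes, so a 40000-byte buffer costs TXD_USE_COUNT(40000) =
 * DIV_ROUND_UP(40000, 16384) = 3 descriptors.  DESC_NEEDED is worst-case
 * headroom for one frame: one descriptor per possible frag plus 4 extra,
 * which is assumed here to cover the skb head, a context descriptor, and
 * split/alignment overhead (the exact breakdown is not spelled out in this
 * file).
 */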

static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
                                 struct ixgbevf_ring *tx_ring)
{
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_tx_buffer *tx_buffer;
        union ixgbe_adv_tx_desc *tx_desc;
        unsigned int total_bytes = 0, total_packets = 0;
        unsigned int budget = tx_ring->count / 2;
        unsigned int i = tx_ring->next_to_clean;

        if (test_bit(__IXGBEVF_DOWN, &adapter->state))
                return true;

        tx_buffer = &tx_ring->tx_buffer_info[i];
        tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
        i -= tx_ring->count;

        do {
                union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

                /* if next_to_watch is not set then there is no work pending */
                if (!eop_desc)
                        break;

                /* prevent any other reads prior to eop_desc */
                read_barrier_depends();

                /* if DD is not set pending work has not been completed */
                if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
                        break;

                /* clear next_to_watch to prevent false hangs */
                tx_buffer->next_to_watch = NULL;

                /* update the statistics for this packet */
                total_bytes += tx_buffer->bytecount;
                total_packets += tx_buffer->gso_segs;

                /* free the skb */
                dev_kfree_skb_any(tx_buffer->skb);

                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
                                 dma_unmap_addr(tx_buffer, dma),
                                 dma_unmap_len(tx_buffer, len),
                                 DMA_TO_DEVICE);

                /* clear tx_buffer data */
                tx_buffer->skb = NULL;
                dma_unmap_len_set(tx_buffer, len, 0);

                /* unmap remaining buffers */
                while (tx_desc != eop_desc) {
                        tx_buffer++;
                        tx_desc++;
                        i++;
                        if (unlikely(!i)) {
                                i -= tx_ring->count;
                                tx_buffer = tx_ring->tx_buffer_info;
                                tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
                        }

                        /* unmap any remaining paged data */
                        if (dma_unmap_len(tx_buffer, len)) {
                                dma_unmap_page(tx_ring->dev,
                                               dma_unmap_addr(tx_buffer, dma),
                                               dma_unmap_len(tx_buffer, len),
                                               DMA_TO_DEVICE);
                                dma_unmap_len_set(tx_buffer, len, 0);
                        }
                }

                /* move us one more past the eop_desc for start of next pkt */
                tx_buffer++;
                tx_desc++;
                i++;
                if (unlikely(!i)) {
                        i -= tx_ring->count;
                        tx_buffer = tx_ring->tx_buffer_info;
                        tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
                }

                /* issue prefetch for next Tx descriptor */
                prefetch(tx_desc);

                /* update budget accounting */
                budget--;
        } while (likely(budget));

        i += tx_ring->count;
        tx_ring->next_to_clean = i;
        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->stats.bytes += total_bytes;
        tx_ring->stats.packets += total_packets;
        u64_stats_update_end(&tx_ring->syncp);
        q_vector->tx.total_bytes += total_bytes;
        q_vector->tx.total_packets += total_packets;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
                     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();

                if (__netif_subqueue_stopped(tx_ring->netdev,
                                             tx_ring->queue_index) &&
                    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
                        netif_wake_subqueue(tx_ring->netdev,
                                            tx_ring->queue_index);
                        ++tx_ring->tx_stats.restart_queue;
                }
        }

        return !!budget;
}
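
/* Implementation note (illustrative): the cleanup loop biases the ring index
 * by -tx_ring->count so that it runs from a negative value up toward zero;
 * wraparound then becomes the single test "if (unlikely(!i))" instead of a
 * compare against the ring size.  For a 512-entry ring with
 * next_to_clean = 500, i starts at -12, and the final "i += tx_ring->count"
 * converts it back to a real ring position.
 */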

/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
                           struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
        skb_mark_napi_id(skb, &q_vector->napi);

        if (ixgbevf_qv_busy_polling(q_vector)) {
                netif_receive_skb(skb);
                /* exit early if we busy polled */
                return;
        }
#endif /* CONFIG_NET_RX_BUSY_POLL */

        napi_gro_receive(&q_vector->napi, skb);
}

/* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 */
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
                                       union ixgbe_adv_rx_desc *rx_desc,
                                       struct sk_buff *skb)
{
        skb_checksum_none_assert(skb);

        /* Rx csum disabled */
        if (!(ring->netdev->features & NETIF_F_RXCSUM))
                return;

        /* if IP and error */
        if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
            ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
                ring->rx_stats.csum_err++;
                return;
        }

        if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
                return;

        if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
                ring->rx_stats.csum_err++;
                return;
        }

        /* It must be a TCP or UDP packet with a valid checksum */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/* ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 */
static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
                                       union ixgbe_adv_rx_desc *rx_desc,
                                       struct sk_buff *skb)
{
        ixgbevf_rx_checksum(rx_ring, rx_desc, skb);

        if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
                u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
                unsigned long *active_vlans = netdev_priv(rx_ring->netdev);

                if (test_bit(vid & VLAN_VID_MASK, active_vlans))
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
        }

        skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

/**
 * ixgbevf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
                               union ixgbe_adv_rx_desc *rx_desc)
{
        u32 ntc = rx_ring->next_to_clean + 1;

        /* fetch, update, and store next to clean */
        ntc = (ntc < rx_ring->count) ? ntc : 0;
        rx_ring->next_to_clean = ntc;

        prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));

        if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
                return false;

        return true;
}

static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
                                      struct ixgbevf_rx_buffer *bi)
{
        struct page *page = bi->page;
        dma_addr_t dma = bi->dma;

        /* since we are recycling buffers we should seldom need to alloc */
        if (likely(page))
                return true;

        /* alloc new page for storage */
        page = dev_alloc_page();
        if (unlikely(!page)) {
                rx_ring->rx_stats.alloc_rx_page_failed++;
                return false;
        }

        /* map page for use */
        dma = dma_map_page(rx_ring->dev, page, 0,
                           PAGE_SIZE, DMA_FROM_DEVICE);

        /* if mapping failed free memory back to system since
         * there isn't much point in holding memory we can't use
         */
        if (dma_mapping_error(rx_ring->dev, dma)) {
                __free_page(page);

                rx_ring->rx_stats.alloc_rx_buff_failed++;
                return false;
        }

        bi->dma = dma;
        bi->page = page;
        bi->page_offset = 0;

        return true;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
                                     u16 cleaned_count)
{
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbevf_rx_buffer *bi;
        unsigned int i = rx_ring->next_to_use;

        /* nothing to do or no valid netdev defined */
        if (!cleaned_count || !rx_ring->netdev)
                return;

        rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
        bi = &rx_ring->rx_buffer_info[i];
        i -= rx_ring->count;

        do {
                if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
                        break;

                /* Refresh the desc even if pkt_addr didn't change
                 * because each write-back erases this info.
                 */
                rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

                rx_desc++;
                bi++;
                i++;
                if (unlikely(!i)) {
                        rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
                        bi = rx_ring->rx_buffer_info;
                        i -= rx_ring->count;
                }

                /* clear the hdr_addr for the next_to_use descriptor */
                rx_desc->read.hdr_addr = 0;

                cleaned_count--;
        } while (cleaned_count);

        i += rx_ring->count;

        if (rx_ring->next_to_use != i) {
                /* record the next descriptor to use */
                rx_ring->next_to_use = i;

                /* update next to alloc since we have filled the ring */
                rx_ring->next_to_alloc = i;

                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch. (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
                 */
                wmb();
                ixgbevf_write_tail(rx_ring, i);
        }
}

/* ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ixgbevf specific version of __pskb_pull_tail. The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */
static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
                              struct sk_buff *skb)
{
        struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
        unsigned char *va;
        unsigned int pull_len;

        /* it is valid to use page_address instead of kmap since we are
         * working with pages allocated out of the lowmem pool per
         * alloc_page(GFP_ATOMIC)
         */
        va = skb_frag_address(frag);

        /* we need the header to contain the greater of either ETH_HLEN or
         * 60 bytes if the skb->len is less than 60 for skb_pad.
         */
        pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);

        /* align pull length to size of long to optimize memcpy performance */
        skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

        /* update all of the pointers */
        skb_frag_size_sub(frag, pull_len);
        frag->page_offset += pull_len;
        skb->data_len -= pull_len;
        skb->tail += pull_len;
}

/* ixgbevf_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right. These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
                                    union ixgbe_adv_rx_desc *rx_desc,
                                    struct sk_buff *skb)
{
        /* verify that the packet does not have any known errors */
        if (unlikely(ixgbevf_test_staterr(rx_desc,
                                          IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
                struct net_device *netdev = rx_ring->netdev;

                if (!(netdev->features & NETIF_F_RXALL)) {
                        dev_kfree_skb_any(skb);
                        return true;
                }
        }

        /* place header in linear portion of buffer */
        if (skb_is_nonlinear(skb))
                ixgbevf_pull_tail(rx_ring, skb);

        /* if eth_skb_pad returns an error the skb was freed */
        if (eth_skb_pad(skb))
                return true;

        return false;
}

/* ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
                                  struct ixgbevf_rx_buffer *old_buff)
{
        struct ixgbevf_rx_buffer *new_buff;
        u16 nta = rx_ring->next_to_alloc;

        new_buff = &rx_ring->rx_buffer_info[nta];

        /* update, and store next to alloc */
        nta++;
        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

        /* transfer page from old buffer to new buffer */
        new_buff->page = old_buff->page;
        new_buff->dma = old_buff->dma;
        new_buff->page_offset = old_buff->page_offset;

        /* sync the buffer for use by the device */
        dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
                                         new_buff->page_offset,
                                         IXGBEVF_RX_BUFSZ,
                                         DMA_FROM_DEVICE);
}

static inline bool ixgbevf_page_is_reserved(struct page *page)
{
        return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
}

/* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 */
static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
                                struct ixgbevf_rx_buffer *rx_buffer,
                                union ixgbe_adv_rx_desc *rx_desc,
                                struct sk_buff *skb)
{
        struct page *page = rx_buffer->page;
        unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
        unsigned int truesize = IXGBEVF_RX_BUFSZ;
#else
        unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
#endif

        if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
                unsigned char *va = page_address(page) + rx_buffer->page_offset;

                memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

                /* page is not reserved, we can reuse buffer as is */
                if (likely(!ixgbevf_page_is_reserved(page)))
                        return true;

                /* this page cannot be reused so discard it */
                put_page(page);
                return false;
        }

        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                        rx_buffer->page_offset, size, truesize);

        /* avoid re-using remote pages */
        if (unlikely(ixgbevf_page_is_reserved(page)))
                return false;

#if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
        if (unlikely(page_count(page) != 1))
                return false;

        /* flip page offset to other buffer */
        rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;

#else
        /* move offset up to the next cache line */
        rx_buffer->page_offset += truesize;

        if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
                return false;

#endif
        /* Even if we own the page, we are not allowed to use atomic_set()
         * This would break get_page_unless_zero() users.
         */
        atomic_inc(&page->_count);

        return true;
}
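
/* Worked example (illustrative, assuming IXGBEVF_RX_BUFSZ is 2 KiB): with
 * 4 KiB pages each page is split into two half-page buffers, and XORing
 * page_offset with IXGBEVF_RX_BUFSZ toggles 0 <-> 2048.  While the hardware
 * DMAs into one half, the stack can still hold a reference to the other,
 * and the page is only recycled when the driver is the sole owner
 * (page_count == 1) and the page is local and not pfmemalloc-reserved.
 */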

static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
                                               union ixgbe_adv_rx_desc *rx_desc,
                                               struct sk_buff *skb)
{
        struct ixgbevf_rx_buffer *rx_buffer;
        struct page *page;

        rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
        page = rx_buffer->page;
        prefetchw(page);

        if (likely(!skb)) {
                void *page_addr = page_address(page) +
                                  rx_buffer->page_offset;

                /* prefetch first cache line of first page */
                prefetch(page_addr);
#if L1_CACHE_BYTES < 128
                prefetch(page_addr + L1_CACHE_BYTES);
#endif

                /* allocate a skb to store the frags */
                skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
                                                IXGBEVF_RX_HDR_SIZE);
                if (unlikely(!skb)) {
                        rx_ring->rx_stats.alloc_rx_buff_failed++;
                        return NULL;
                }

                /* we will be copying header into skb->data in
                 * pskb_may_pull so it is in our interest to prefetch
                 * it now to avoid a possible cache miss
                 */
                prefetchw(skb->data);
        }

        /* we are reusing so sync this buffer for CPU use */
        dma_sync_single_range_for_cpu(rx_ring->dev,
                                      rx_buffer->dma,
                                      rx_buffer->page_offset,
                                      IXGBEVF_RX_BUFSZ,
                                      DMA_FROM_DEVICE);

        /* pull page into skb */
        if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
                /* hand second half of page back to the ring */
                ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
        } else {
                /* we are not reusing the buffer so unmap it */
                dma_unmap_page(rx_ring->dev, rx_buffer->dma,
                               PAGE_SIZE, DMA_FROM_DEVICE);
        }

        /* clear contents of buffer_info */
        rx_buffer->dma = 0;
        rx_buffer->page = NULL;

        return skb;
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
                                             u32 qmask)
{
        struct ixgbe_hw *hw = &adapter->hw;

        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                                struct ixgbevf_ring *rx_ring,
                                int budget)
{
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
        struct sk_buff *skb = rx_ring->skb;

        while (likely(total_rx_packets < budget)) {
                union ixgbe_adv_rx_desc *rx_desc;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
                        ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);

                if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
                        break;

                /* This memory barrier is needed to keep us from reading
                 * any other fields out of the rx_desc until we know the
                 * RXD_STAT_DD bit is set
                 */
                rmb();

                /* retrieve a buffer from the ring */
                skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);

                /* exit if we failed to retrieve a buffer */
                if (!skb)
                        break;

                cleaned_count++;

                /* fetch next buffer in frame if non-eop */
                if (ixgbevf_is_non_eop(rx_ring, rx_desc))
                        continue;

                /* verify the packet layout is correct */
                if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
                        skb = NULL;
                        continue;
                }

                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;

                /* Workaround hardware that can't do proper VEPA multicast
                 * source pruning.
                 */
                if ((skb->pkt_type == PACKET_BROADCAST ||
                     skb->pkt_type == PACKET_MULTICAST) &&
                    ether_addr_equal(rx_ring->netdev->dev_addr,
                                     eth_hdr(skb)->h_source)) {
                        dev_kfree_skb_irq(skb);
                        continue;
                }

                /* populate checksum, VLAN, and protocol */
                ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);

                ixgbevf_rx_skb(q_vector, skb);

                /* reset skb pointer */
                skb = NULL;

                /* update budget accounting */
                total_rx_packets++;
        }

        /* place incomplete frames back on ring for completion */
        rx_ring->skb = skb;

        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->stats.packets += total_rx_packets;
        rx_ring->stats.bytes += total_rx_bytes;
        u64_stats_update_end(&rx_ring->syncp);
        q_vector->rx.total_packets += total_rx_packets;
        q_vector->rx.total_bytes += total_rx_bytes;

        return total_rx_packets;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a
 * q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
        struct ixgbevf_q_vector *q_vector =
                container_of(napi, struct ixgbevf_q_vector, napi);
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_ring *ring;
        int per_ring_budget;
        bool clean_complete = true;

        ixgbevf_for_each_ring(ring, q_vector->tx)
                clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

#ifdef CONFIG_NET_RX_BUSY_POLL
        if (!ixgbevf_qv_lock_napi(q_vector))
                return budget;
#endif

        /* attempt to distribute budget to each queue fairly, but don't allow
         * the budget to go below 1 because we'll exit polling
         */
        if (q_vector->rx.count > 1)
                per_ring_budget = max(budget/q_vector->rx.count, 1);
        else
                per_ring_budget = budget;

        ixgbevf_for_each_ring(ring, q_vector->rx)
                clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
                                                        per_ring_budget)
                                   < per_ring_budget);

#ifdef CONFIG_NET_RX_BUSY_POLL
        ixgbevf_qv_unlock_napi(q_vector);
#endif

        /* If all work not completed, return budget and keep polling */
        if (!clean_complete)
                return budget;
        /* all work done, exit the polling mode */
        napi_complete(napi);
        if (adapter->rx_itr_setting & 1)
                ixgbevf_set_itr(q_vector);
        if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
            !test_bit(__IXGBEVF_REMOVING, &adapter->state))
                ixgbevf_irq_enable_queues(adapter,
                                          1 << q_vector->v_idx);

        return 0;
}
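
/* Worked example (illustrative, assuming the typical NAPI weight of 64):
 * with three Rx rings on one vector, per_ring_budget = max(64 / 3, 1) = 21.
 * ixgbevf_clean_rx_irq() returns the number of packets cleaned, so a ring
 * that consumes its full 21-packet share fails the "< per_ring_budget" test,
 * clean_complete goes false, and the vector stays in polling mode.
 */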

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
        int v_idx = q_vector->v_idx;
        u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

        /* set the WDIS bit to not clear the timer bits and cause an
         * immediate assertion of the interrupt
         */
        itr_reg |= IXGBE_EITR_CNT_WDIS;

        IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
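
/* Units note (illustrative): q_vector->itr is kept in the EITR register
 * encoding, where shifting right by 2 yields the interrupt interval in
 * usecs.  E.g. an itr value of 200 means 50 usecs between interrupts,
 * roughly 20000 interrupts per second; the ITR algorithm below relies on
 * this via "timepassed_us = q_vector->itr >> 2".
 */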

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
{
        struct ixgbevf_q_vector *q_vector =
                container_of(napi, struct ixgbevf_q_vector, napi);
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_ring *ring;
        int found = 0;

        if (test_bit(__IXGBEVF_DOWN, &adapter->state))
                return LL_FLUSH_FAILED;

        if (!ixgbevf_qv_lock_poll(q_vector))
                return LL_FLUSH_BUSY;

        ixgbevf_for_each_ring(ring, q_vector->rx) {
                found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
                if (found)
                        ring->stats.cleaned += found;
                else
                        ring->stats.misses++;
#endif
                if (found)
                        break;
        }

        ixgbevf_qv_unlock_poll(q_vector);

        return found;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
        struct ixgbevf_q_vector *q_vector;
        int q_vectors, v_idx;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
        adapter->eims_enable_mask = 0;

        /* Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                struct ixgbevf_ring *ring;

                q_vector = adapter->q_vector[v_idx];

                ixgbevf_for_each_ring(ring, q_vector->rx)
                        ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

                ixgbevf_for_each_ring(ring, q_vector->tx)
                        ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

                if (q_vector->tx.ring && !q_vector->rx.ring) {
                        /* tx only vector */
                        if (adapter->tx_itr_setting == 1)
                                q_vector->itr = IXGBE_10K_ITR;
                        else
                                q_vector->itr = adapter->tx_itr_setting;
                } else {
                        /* rx or rx/tx vector */
                        if (adapter->rx_itr_setting == 1)
                                q_vector->itr = IXGBE_20K_ITR;
                        else
                                q_vector->itr = adapter->rx_itr_setting;
                }

                /* add q_vector eims value to global eims_enable_mask */
                adapter->eims_enable_mask |= 1 << v_idx;

                ixgbevf_write_eitr(q_vector);
        }

        ixgbevf_set_ivar(adapter, -1, 1, v_idx);
        /* setup eims_other and add value to global eims_enable_mask */
        adapter->eims_other = 1 << v_idx;
        adapter->eims_enable_mask |= adapter->eims_other;
}

enum latency_range {
        lowest_latency = 0,
        low_latency = 1,
        bulk_latency = 2,
        latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
                               struct ixgbevf_ring_container *ring_container)
{
        int bytes = ring_container->total_bytes;
        int packets = ring_container->total_packets;
        u32 timepassed_us;
        u64 bytes_perint;
        u8 itr_setting = ring_container->itr;

        if (packets == 0)
                return;

        /* simple throttlerate management
         *    0-20MB/s lowest (100000 ints/s)
         *   20-100MB/s low   (20000 ints/s)
         *  100-1249MB/s bulk (8000 ints/s)
         */
        /* what was last interrupt timeslice? */
        timepassed_us = q_vector->itr >> 2;
        bytes_perint = bytes / timepassed_us; /* bytes/usec */

        switch (itr_setting) {
        case lowest_latency:
                if (bytes_perint > 10)
                        itr_setting = low_latency;
                break;
        case low_latency:
                if (bytes_perint > 20)
                        itr_setting = bulk_latency;
                else if (bytes_perint <= 10)
                        itr_setting = lowest_latency;
                break;
        case bulk_latency:
                if (bytes_perint <= 20)
                        itr_setting = low_latency;
                break;
        }

        /* clear work counters since we have the values we need */
        ring_container->total_bytes = 0;
        ring_container->total_packets = 0;

        /* write updated itr to ring container */
        ring_container->itr = itr_setting;
}

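/* Worked example (illustrative, assuming IXGBE_20K_ITR is 200): at 20000
 * ints/s the last timeslice is timepassed_us = 200 >> 2 = 50 usecs.  If
 * 3000 bytes arrived in that window, bytes_perint = 3000 / 50 = 60
 * bytes/usec (~60 MB/s), so a ring sitting at low_latency crosses the ">20"
 * threshold and is promoted to bulk_latency (8000 ints/s); at 400 bytes
 * (8 bytes/usec) it would drop back to lowest_latency instead.
 */
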
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
        u32 new_itr = q_vector->itr;
        u8 current_itr;

        ixgbevf_update_itr(q_vector, &q_vector->tx);
        ixgbevf_update_itr(q_vector, &q_vector->rx);

        current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

        switch (current_itr) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
                new_itr = IXGBE_100K_ITR;
                break;
        case low_latency:
                new_itr = IXGBE_20K_ITR;
                break;
        case bulk_latency:
        default:
                new_itr = IXGBE_8K_ITR;
                break;
        }

        if (new_itr != q_vector->itr) {
                /* do an exponential smoothing */
                new_itr = (10 * new_itr * q_vector->itr) /
                          ((9 * new_itr) + q_vector->itr);

                /* save the algorithm value here */
                q_vector->itr = new_itr;

                ixgbevf_write_eitr(q_vector);
        }
}
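
/* Worked example (illustrative, assuming the conventional encodings
 * IXGBE_8K_ITR = 500 and IXGBE_100K_ITR = 40): moving from 8K toward 100K
 * ints/s, the smoothing step computes
 * (10 * 40 * 500) / ((9 * 40) + 500) = 200000 / 860 = 232 (integer divide),
 * so the interval shrinks gradually over several passes rather than
 * jumping to the target in one step.
 */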

static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
        struct ixgbevf_adapter *adapter = data;
        struct ixgbe_hw *hw = &adapter->hw;

        hw->mac.get_link_status = 1;

        if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
            !test_bit(__IXGBEVF_REMOVING, &adapter->state))
                mod_timer(&adapter->watchdog_timer, jiffies);

        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

        return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
        struct ixgbevf_q_vector *q_vector = data;

        /* EIAM disabled interrupts (on this vector) for us */
        if (q_vector->rx.ring || q_vector->tx.ring)
                napi_schedule(&q_vector->napi);

        return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
                                     int r_idx)
{
        struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

        a->rx_ring[r_idx]->next = q_vector->rx.ring;
        q_vector->rx.ring = a->rx_ring[r_idx];
        q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
                                     int t_idx)
{
        struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

        a->tx_ring[t_idx]->next = q_vector->tx.ring;
        q_vector->tx.ring = a->tx_ring[t_idx];
        q_vector->tx.count++;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
        int q_vectors;
        int v_start = 0;
        int rxr_idx = 0, txr_idx = 0;
        int rxr_remaining = adapter->num_rx_queues;
        int txr_remaining = adapter->num_tx_queues;
        int i, j;
        int rqpv, tqpv;
        int err = 0;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /* The ideal configuration...
         * We have enough vectors to map one per queue.
         */
        if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
                for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
                        map_vector_to_rxq(adapter, v_start, rxr_idx);

                for (; txr_idx < txr_remaining; v_start++, txr_idx++)
                        map_vector_to_txq(adapter, v_start, txr_idx);
                goto out;
        }

        /* If we don't have enough vectors for a 1-to-1
         * mapping, we'll have to group them so there are
         * multiple queues per vector.
         */
        /* Re-adjusting *qpv takes care of the remainder. */
        for (i = v_start; i < q_vectors; i++) {
                rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
                for (j = 0; j < rqpv; j++) {
                        map_vector_to_rxq(adapter, i, rxr_idx);
                        rxr_idx++;
                        rxr_remaining--;
                }
        }
        for (i = v_start; i < q_vectors; i++) {
                tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
                for (j = 0; j < tqpv; j++) {
                        map_vector_to_txq(adapter, i, txr_idx);
                        txr_idx++;
                        txr_remaining--;
                }
        }

out:
        return err;
}
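
/* Worked example (illustrative): with 4 Rx and 4 Tx queues but only 2
 * q_vectors, the grouped path runs.  For Rx, vector 0 takes
 * DIV_ROUND_UP(4, 2) = 2 rings and vector 1 takes DIV_ROUND_UP(2, 1) = 2;
 * Tx is distributed the same way, so each vector ends up polling 2 Rx and
 * 2 Tx rings.
 */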

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
        int vector, err;
        int ri = 0, ti = 0;

        for (vector = 0; vector < q_vectors; vector++) {
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
                struct msix_entry *entry = &adapter->msix_entries[vector];

                if (q_vector->tx.ring && q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", netdev->name, "TxRx", ri++);
                        ti++;
                } else if (q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", netdev->name, "rx", ri++);
                } else if (q_vector->tx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", netdev->name, "tx", ti++);
                } else {
                        /* skip this unused q_vector */
                        continue;
                }
                err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
                                  q_vector->name, q_vector);
                if (err) {
                        hw_dbg(&adapter->hw,
                               "request_irq failed for MSIX interrupt Error: %d\n",
                               err);
                        goto free_queue_irqs;
                }
        }

        err = request_irq(adapter->msix_entries[vector].vector,
                          &ixgbevf_msix_other, 0, netdev->name, adapter);
        if (err) {
                hw_dbg(&adapter->hw,
                       "request_irq for msix_other failed: %d\n", err);
                goto free_queue_irqs;
        }

        return 0;

free_queue_irqs:
        while (vector) {
                vector--;
                free_irq(adapter->msix_entries[vector].vector,
                         adapter->q_vector[vector]);
        }
        /* This failure is non-recoverable - it indicates the system is
         * out of MSIX vector resources and the VF driver cannot run
         * without them.  Set the number of msix vectors to zero
         * indicating that not enough can be allocated.  The error
         * will be returned to the user indicating device open failed.
         * Any further attempts to force the driver to open will also
         * fail.  The only way to recover is to unload the driver and
         * reload it again.  If the system has recovered some MSIX
         * vectors then it may succeed.
         */
        adapter->num_msix_vectors = 0;
        return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
        int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (i = 0; i < q_vectors; i++) {
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];

                q_vector->rx.ring = NULL;
                q_vector->tx.ring = NULL;
                q_vector->rx.count = 0;
                q_vector->tx.count = 0;
        }
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
        int err = 0;

        err = ixgbevf_request_msix_irqs(adapter);

        if (err)
                hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);

        return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
        int i, q_vectors;

        q_vectors = adapter->num_msix_vectors;
        i = q_vectors - 1;

        free_irq(adapter->msix_entries[i].vector, adapter);
        i--;

        for (; i >= 0; i--) {
                /* free only the irqs that were actually requested */
                if (!adapter->q_vector[i]->rx.ring &&
                    !adapter->q_vector[i]->tx.ring)
                        continue;

                free_irq(adapter->msix_entries[i].vector,
                         adapter->q_vector[i]);
        }

        ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int i;

        IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

        IXGBE_WRITE_FLUSH(hw);

        for (i = 0; i < adapter->num_msix_vectors; i++)
                synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

/**
 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
                                      struct ixgbevf_ring *ring)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u64 tdba = ring->dma;
        int wait_loop = 10;
        u32 txdctl = IXGBE_TXDCTL_ENABLE;
        u8 reg_idx = ring->reg_idx;

        /* disable queue to avoid issues while updating state */
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
        IXGBE_WRITE_FLUSH(hw);

        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
        IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
                        ring->count * sizeof(union ixgbe_adv_tx_desc));

        /* disable head writeback */
        IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);

        /* enable relaxed ordering */
        IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
                        (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
                         IXGBE_DCA_TXCTRL_DATA_RRO_EN));

        /* reset head and tail pointers */
        IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
        ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);

        /* reset ntu and ntc to place SW in sync with hardware */
        ring->next_to_clean = 0;
        ring->next_to_use = 0;

        /* In order to avoid issues WTHRESH + PTHRESH should always be equal
         * to or less than the number of on chip descriptors, which is
         * currently 40.
         */
        txdctl |= (8 << 16);    /* WTHRESH = 8 */

        /* Setting PTHRESH to 32 improves performance */
        txdctl |= (1 << 8) |    /* HTHRESH = 1 */
                  32;           /* PTHRESH = 32 */

        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);

        /* poll to verify queue is enabled */
        do {
                usleep_range(1000, 2000);
                txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
        } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
        if (!wait_loop)
                pr_err("Could not enable Tx Queue %d\n", reg_idx);
}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
        u32 i;

        /* Setup the HW Tx Head and Tail descriptor pointers */
        for (i = 0; i < adapter->num_tx_queues; i++)
                ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 srrctl;

        srrctl = IXGBE_SRRCTL_DROP_EN;

        srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
        srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

        IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        /* PSRTYPE must be initialized in 82599 */
        u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
                      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
                      IXGBE_PSRTYPE_L2HDR;

        if (adapter->num_rx_queues > 1)
                psrtype |= 1 << 29;

        IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}

#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
                                     struct ixgbevf_ring *ring)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
        u32 rxdctl;
        u8 reg_idx = ring->reg_idx;

        if (IXGBE_REMOVED(hw->hw_addr))
                return;
        rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
        rxdctl &= ~IXGBE_RXDCTL_ENABLE;

        /* write value back with RXDCTL.ENABLE bit cleared */
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

        /* the hardware may take up to 100us to really disable the rx queue */
        do {
                udelay(10);
                rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
        } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

        if (!wait_loop)
                pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
                       reg_idx);
}

static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
                                         struct ixgbevf_ring *ring)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
        u32 rxdctl;
        u8 reg_idx = ring->reg_idx;

        if (IXGBE_REMOVED(hw->hw_addr))
                return;
        do {
                usleep_range(1000, 2000);
                rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
        } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

        if (!wait_loop)
                pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
                       reg_idx);
}

static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
                                      struct ixgbevf_ring *ring)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u64 rdba = ring->dma;
        u32 rxdctl;
        u8 reg_idx = ring->reg_idx;

        /* disable queue to avoid issues while updating state */
        rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
        ixgbevf_disable_rx_queue(adapter, ring);

        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
        IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
                        ring->count * sizeof(union ixgbe_adv_rx_desc));

        /* enable relaxed ordering */
        IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
                        IXGBE_DCA_RXCTRL_DESC_RRO_EN);

        /* reset head and tail pointers */
        IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
        ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);

        /* reset ntu and ntc to place SW in sync with hardware */
        ring->next_to_clean = 0;
        ring->next_to_use = 0;
        ring->next_to_alloc = 0;

        ixgbevf_configure_srrctl(adapter, reg_idx);

        /* allow any size packet since we can handle overflow */
        rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;

        rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

        ixgbevf_rx_desc_queue_enable(adapter, ring);
        ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
}

/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
        int i;
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;

        ixgbevf_setup_psrtype(adapter);

        /* notify the PF of our intent to use this size of frame */
        ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);

        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring
         */
        for (i = 0; i < adapter->num_rx_queues; i++)
                ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
}
1652
80d5c368
PM
1653static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
1654 __be16 proto, u16 vid)
1655{
1656 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1657 struct ixgbe_hw *hw = &adapter->hw;
1658 int err;
1659
 1660 spin_lock_bh(&adapter->mbx_lock);
 1661
 1662 /* add VID to filter table */
 1663 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
 1664
 1665 spin_unlock_bh(&adapter->mbx_lock);
 1666
1667 /* translate error return types so error makes sense */
1668 if (err == IXGBE_ERR_MBX)
1669 return -EIO;
1670
1671 if (err == IXGBE_ERR_INVALID_ARGUMENT)
1672 return -EACCES;
1673
 1674 set_bit(vid, adapter->active_vlans);
 1675
 1676 return err;
1677}
1678
1679static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
1680 __be16 proto, u16 vid)
1681{
1682 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1683 struct ixgbe_hw *hw = &adapter->hw;
 1684 int err = -EOPNOTSUPP;
 1685
 1686 spin_lock_bh(&adapter->mbx_lock);
 1687
 1688 /* remove VID from filter table */
 1689 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
 1690
 1691 spin_unlock_bh(&adapter->mbx_lock);
 1692
 1693 clear_bit(vid, adapter->active_vlans);
 1694
 1695 return err;
1696}
1697
1698static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1699{
 1700 u16 vid;
 1701
 1702 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
 1703 ixgbevf_vlan_rx_add_vid(adapter->netdev,
 1704 htons(ETH_P_8021Q), vid);
1705}
1706
1707static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1708{
1709 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1710 struct ixgbe_hw *hw = &adapter->hw;
1711 int count = 0;
1712
 1713 if (netdev_uc_count(netdev) > 10) {
 1714 pr_err("Too many unicast filters - No Space\n");
 1715 return -ENOSPC;
 1716 }
1717
1718 if (!netdev_uc_empty(netdev)) {
1719 struct netdev_hw_addr *ha;
1720 netdev_for_each_uc_addr(ha, netdev) {
1721 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1722 udelay(200);
1723 }
1724 } else {
1725 /*
1726 * If the list is empty then send message to PF driver to
1727 * clear all macvlans on this VF.
1728 */
1729 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1730 }
1731
1732 return count;
1733}
1734
 1735/**
 1736 * ixgbevf_set_rx_mode - Multicast and unicast set
1737 * @netdev: network interface device structure
1738 *
1739 * The set_rx_method entry point is called whenever the multicast address
1740 * list, unicast address list or the network interface flags are updated.
1741 * This routine is responsible for configuring the hardware for proper
1742 * multicast mode and configuring requested unicast filters.
1743 **/
1744static void ixgbevf_set_rx_mode(struct net_device *netdev)
1745{
1746 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1747 struct ixgbe_hw *hw = &adapter->hw;
 1748
 1749 spin_lock_bh(&adapter->mbx_lock);
 1750
 1751 /* reprogram multicast list */
 1752 hw->mac.ops.update_mc_addr_list(hw, netdev);
1753
1754 ixgbevf_write_uc_addr_list(netdev);
 1755
 1756 spin_unlock_bh(&adapter->mbx_lock);
1757}
1758
1759static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1760{
1761 int q_idx;
1762 struct ixgbevf_q_vector *q_vector;
1763 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1764
1765 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
 1766 q_vector = adapter->q_vector[q_idx];
1767#ifdef CONFIG_NET_RX_BUSY_POLL
1768 ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
1769#endif
 1770 napi_enable(&q_vector->napi);
1771 }
1772}
1773
1774static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1775{
1776 int q_idx;
1777 struct ixgbevf_q_vector *q_vector;
1778 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1779
1780 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1781 q_vector = adapter->q_vector[q_idx];
 1782 napi_disable(&q_vector->napi);
1783#ifdef CONFIG_NET_RX_BUSY_POLL
1784 while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
1785 pr_info("QV %d locked\n", q_idx);
1786 usleep_range(1000, 20000);
1787 }
1788#endif /* CONFIG_NET_RX_BUSY_POLL */
1789 }
1790}
1791
1792static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1793{
1794 struct ixgbe_hw *hw = &adapter->hw;
1795 unsigned int def_q = 0;
1796 unsigned int num_tcs = 0;
1797 unsigned int num_rx_queues = 1;
1798 int err;
1799
1800 spin_lock_bh(&adapter->mbx_lock);
1801
1802 /* fetch queue configuration from the PF */
1803 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1804
1805 spin_unlock_bh(&adapter->mbx_lock);
1806
1807 if (err)
1808 return err;
1809
1810 if (num_tcs > 1) {
1811 /* update default Tx ring register index */
 1812 adapter->tx_ring[0]->reg_idx = def_q;
1813
1814 /* we need as many queues as traffic classes */
1815 num_rx_queues = num_tcs;
1816 }
1817
1818 /* if we have a bad config abort request queue reset */
1819 if (adapter->num_rx_queues != num_rx_queues) {
1820 /* force mailbox timeout to prevent further messages */
1821 hw->mbx.timeout = 0;
1822
1823 /* wait for watchdog to come around and bail us out */
1824 adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
1825 }
1826
1827 return 0;
1828}
1829
1830static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1831{
1832 ixgbevf_configure_dcb(adapter);
1833
 1834 ixgbevf_set_rx_mode(adapter->netdev);
1835
1836 ixgbevf_restore_vlan(adapter);
1837
1838 ixgbevf_configure_tx(adapter);
1839 ixgbevf_configure_rx(adapter);
1840}
1841
1842static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1843{
1844 /* Only save pre-reset stats if there are some */
1845 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1846 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1847 adapter->stats.base_vfgprc;
1848 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
1849 adapter->stats.base_vfgptc;
1850 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
1851 adapter->stats.base_vfgorc;
1852 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
1853 adapter->stats.base_vfgotc;
1854 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
1855 adapter->stats.base_vfmprc;
1856 }
1857}
1858
1859static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1860{
1861 struct ixgbe_hw *hw = &adapter->hw;
1862
1863 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1864 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1865 adapter->stats.last_vfgorc |=
1866 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1867 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1868 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1869 adapter->stats.last_vfgotc |=
1870 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1871 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1872
1873 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
1874 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
1875 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
1876 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
1877 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1878}
1879
1880static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
1881{
1882 struct ixgbe_hw *hw = &adapter->hw;
1883 int api[] = { ixgbe_mbox_api_11,
1884 ixgbe_mbox_api_10,
1885 ixgbe_mbox_api_unknown };
1886 int err = 0, idx = 0;
1887
 1888 spin_lock_bh(&adapter->mbx_lock);
1889
1890 while (api[idx] != ixgbe_mbox_api_unknown) {
1891 err = ixgbevf_negotiate_api_version(hw, api[idx]);
1892 if (!err)
1893 break;
1894 idx++;
1895 }
1896
 1897 spin_unlock_bh(&adapter->mbx_lock);
1898}
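/* The api[] array above is ordered newest-first, so the loop settles on
 * the highest mailbox API version the PF acknowledges (1.1, then 1.0)
 * and only reaches ixgbe_mbox_api_unknown if neither is accepted.
 */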
1899
 1900 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1901{
1902 struct net_device *netdev = adapter->netdev;
1903 struct ixgbe_hw *hw = &adapter->hw;
1904
1905 ixgbevf_configure_msix(adapter);
1906
 1907 spin_lock_bh(&adapter->mbx_lock);
 1908
1909 if (is_valid_ether_addr(hw->mac.addr))
1910 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
1911 else
1912 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
 1913
 1914 spin_unlock_bh(&adapter->mbx_lock);
 1915
 1916 smp_mb__before_atomic();
1917 clear_bit(__IXGBEVF_DOWN, &adapter->state);
1918 ixgbevf_napi_enable_all(adapter);
1919
1920 /* enable transmits */
1921 netif_tx_start_all_queues(netdev);
1922
1923 ixgbevf_save_reset_stats(adapter);
1924 ixgbevf_init_last_counter_stats(adapter);
1925
 1926 hw->mac.get_link_status = 1;
 1927 mod_timer(&adapter->watchdog_timer, jiffies);
1928}
1929
 1930 void ixgbevf_up(struct ixgbevf_adapter *adapter)
 1931{
1932 struct ixgbe_hw *hw = &adapter->hw;
1933
1934 ixgbevf_configure(adapter);
1935
 1936 ixgbevf_up_complete(adapter);
1937
1938 /* clear any pending interrupts, may auto mask */
1939 IXGBE_READ_REG(hw, IXGBE_VTEICR);
1940
 1941 ixgbevf_irq_enable(adapter);
1942}
1943
1944/**
1945 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
1946 * @rx_ring: ring to free buffers from
1947 **/
 1948 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
 1949{
 1950 struct device *dev = rx_ring->dev;
1951 unsigned long size;
1952 unsigned int i;
1953
1954 /* Free Rx ring sk_buff */
1955 if (rx_ring->skb) {
1956 dev_kfree_skb(rx_ring->skb);
1957 rx_ring->skb = NULL;
1958 }
1959
1960 /* ring already cleared, nothing to do */
1961 if (!rx_ring->rx_buffer_info)
1962 return;
 1963
 1964 /* Free all the Rx ring pages */
 1965 for (i = 0; i < rx_ring->count; i++) {
 1966 struct ixgbevf_rx_buffer *rx_buffer;
 1967
1968 rx_buffer = &rx_ring->rx_buffer_info[i];
1969 if (rx_buffer->dma)
1970 dma_unmap_page(dev, rx_buffer->dma,
1971 PAGE_SIZE, DMA_FROM_DEVICE);
1972 rx_buffer->dma = 0;
1973 if (rx_buffer->page)
1974 __free_page(rx_buffer->page);
1975 rx_buffer->page = NULL;
1976 }
1977
1978 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1979 memset(rx_ring->rx_buffer_info, 0, size);
1980
1981 /* Zero out the descriptor ring */
1982 memset(rx_ring->desc, 0, rx_ring->size);
1983}
1984
1985/**
1986 * ixgbevf_clean_tx_ring - Free Tx Buffers
1987 * @tx_ring: ring to be cleaned
1988 **/
 1989 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
1990{
1991 struct ixgbevf_tx_buffer *tx_buffer_info;
1992 unsigned long size;
1993 unsigned int i;
1994
1995 if (!tx_ring->tx_buffer_info)
1996 return;
1997
 1998 /* Free all the Tx ring sk_buffs */
1999 for (i = 0; i < tx_ring->count; i++) {
2000 tx_buffer_info = &tx_ring->tx_buffer_info[i];
 2001 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
2002 }
2003
2004 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2005 memset(tx_ring->tx_buffer_info, 0, size);
2006
2007 memset(tx_ring->desc, 0, tx_ring->size);
2008}
2009
2010/**
2011 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
2012 * @adapter: board private structure
2013 **/
2014static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
2015{
2016 int i;
2017
2018 for (i = 0; i < adapter->num_rx_queues; i++)
 2019 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
2020}
2021
2022/**
2023 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
2024 * @adapter: board private structure
2025 **/
2026static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
2027{
2028 int i;
2029
2030 for (i = 0; i < adapter->num_tx_queues; i++)
 2031 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
2032}
2033
2034void ixgbevf_down(struct ixgbevf_adapter *adapter)
2035{
2036 struct net_device *netdev = adapter->netdev;
2037 struct ixgbe_hw *hw = &adapter->hw;
 2038 int i;
2039
2040 /* signal that we are down to the interrupt handler */
2041 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
2042 return; /* do nothing if already down */
2043
2044 /* disable all enabled rx queues */
2045 for (i = 0; i < adapter->num_rx_queues; i++)
 2046 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
2047
2048 netif_tx_disable(netdev);
2049
2050 msleep(10);
2051
2052 netif_tx_stop_all_queues(netdev);
2053
2054 ixgbevf_irq_disable(adapter);
2055
2056 ixgbevf_napi_disable_all(adapter);
2057
2058 del_timer_sync(&adapter->watchdog_timer);
2059 /* can't call flush scheduled work here because it can deadlock
2060 * if linkwatch_event tries to acquire the rtnl_lock which we are
2061 * holding */
2062 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
2063 msleep(1);
2064
2065 /* disable transmits in the hardware now that interrupts are off */
2066 for (i = 0; i < adapter->num_tx_queues; i++) {
2067 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
2068
2069 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2070 IXGBE_TXDCTL_SWFLSH);
2071 }
2072
2073 netif_carrier_off(netdev);
2074
2075 if (!pci_channel_offline(adapter->pdev))
2076 ixgbevf_reset(adapter);
2077
2078 ixgbevf_clean_all_tx_rings(adapter);
2079 ixgbevf_clean_all_rx_rings(adapter);
2080}
2081
2082void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2083{
2084 WARN_ON(in_interrupt());
 2085
2086 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2087 msleep(1);
2088
2089 ixgbevf_down(adapter);
2090 ixgbevf_up(adapter);
2091
2092 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
2093}
2094
2095void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2096{
2097 struct ixgbe_hw *hw = &adapter->hw;
2098 struct net_device *netdev = adapter->netdev;
2099
 2100 if (hw->mac.ops.reset_hw(hw)) {
 2101 hw_dbg(hw, "PF still resetting\n");
 2102 } else {
 2103 hw->mac.ops.init_hw(hw);
2104 ixgbevf_negotiate_api(adapter);
2105 }
2106
2107 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
2108 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
2109 netdev->addr_len);
2110 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
2111 netdev->addr_len);
2112 }
2113}
2114
2115static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
2116 int vectors)
 2117{
 2118 int vector_threshold;
 2119
2120 /* We'll want at least 2 (vector_threshold):
2121 * 1) TxQ[0] + RxQ[0] handler
2122 * 2) Other (Link Status Change, etc.)
2123 */
2124 vector_threshold = MIN_MSIX_COUNT;
2125
2126 /* The more we get, the more we will assign to Tx/Rx Cleanup
2127 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2128 * Right now, we simply care about how many we'll get; we'll
2129 * set them up later while requesting irq's.
2130 */
2131 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2132 vector_threshold, vectors);
 2133
 2134 if (vectors < 0) {
2135 dev_err(&adapter->pdev->dev,
2136 "Unable to allocate MSI-X interrupts\n");
2137 kfree(adapter->msix_entries);
2138 adapter->msix_entries = NULL;
 2139 return vectors;
 2140 }
 2141
2142 /* Adjust for only the vectors we'll use, which is minimum
2143 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
2144 * vectors we were allocated.
2145 */
2146 adapter->num_msix_vectors = vectors;
2147
2148 return 0;
2149}
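/* Unlike the older pci_enable_msix(), pci_enable_msix_range() either
 * returns a vector count somewhere in [vector_threshold, vectors] or a
 * negative errno, so the caller no longer has to retry with a smaller
 * request on partial success.  Illustrative call (values assumed):
 *
 *	ret = pci_enable_msix_range(pdev, entries, 2, 4);
 *	if (ret < 0)
 *		return ret;	(not even 2 vectors were available)
 *	(otherwise ret is anywhere from 2 to 4)
 */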
2150
2151/**
2152 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
2153 * @adapter: board private structure to initialize
2154 *
2155 * This is the top level queue allocation routine. The order here is very
2156 * important, starting with the "most" number of features turned on at once,
2157 * and ending with the smallest set of features. This way large combinations
2158 * can be allocated if they're turned on, and smaller combinations are the
2159 * fallthrough conditions.
2160 *
2161 **/
2162static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2163{
2164 struct ixgbe_hw *hw = &adapter->hw;
2165 unsigned int def_q = 0;
2166 unsigned int num_tcs = 0;
2167 int err;
2168
2169 /* Start with base case */
2170 adapter->num_rx_queues = 1;
2171 adapter->num_tx_queues = 1;
2172
2173 spin_lock_bh(&adapter->mbx_lock);
2174
2175 /* fetch queue configuration from the PF */
2176 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2177
2178 spin_unlock_bh(&adapter->mbx_lock);
2179
2180 if (err)
2181 return;
2182
2183 /* we need as many queues as traffic classes */
2184 if (num_tcs > 1)
2185 adapter->num_rx_queues = num_tcs;
2186}
2187
2188/**
2189 * ixgbevf_alloc_queues - Allocate memory for all rings
2190 * @adapter: board private structure to initialize
2191 *
2192 * We allocate one ring per queue at run-time since we don't know the
2193 * number of queues at compile-time. The polling_netdev array is
2194 * intended for Multiqueue, but should work fine with a single queue.
2195 **/
2196static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
2197{
2198 struct ixgbevf_ring *ring;
2199 int rx = 0, tx = 0;
 2200
2201 for (; tx < adapter->num_tx_queues; tx++) {
2202 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2203 if (!ring)
2204 goto err_allocation;
 2205
2206 ring->dev = &adapter->pdev->dev;
2207 ring->netdev = adapter->netdev;
2208 ring->count = adapter->tx_ring_count;
2209 ring->queue_index = tx;
2210 ring->reg_idx = tx;
 2211
 2212 adapter->tx_ring[tx] = ring;
2213 }
2214
2215 for (; rx < adapter->num_rx_queues; rx++) {
2216 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2217 if (!ring)
2218 goto err_allocation;
2219
2220 ring->dev = &adapter->pdev->dev;
2221 ring->netdev = adapter->netdev;
2222
2223 ring->count = adapter->rx_ring_count;
2224 ring->queue_index = rx;
2225 ring->reg_idx = rx;
2226
2227 adapter->rx_ring[rx] = ring;
2228 }
2229
2230 return 0;
2231
2232err_allocation:
2233 while (tx) {
2234 kfree(adapter->tx_ring[--tx]);
2235 adapter->tx_ring[tx] = NULL;
2236 }
2237
2238 while (rx) {
2239 kfree(adapter->rx_ring[--rx]);
2240 adapter->rx_ring[rx] = NULL;
2241 }
2242 return -ENOMEM;
2243}
2244
2245/**
2246 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2247 * @adapter: board private structure to initialize
2248 *
2249 * Attempt to configure the interrupts using the best available
2250 * capabilities of the hardware and the kernel.
2251 **/
2252static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2253{
 2254 struct net_device *netdev = adapter->netdev;
2255 int err = 0;
2256 int vector, v_budget;
2257
2258 /*
2259 * It's easy to be greedy for MSI-X vectors, but it really
2260 * doesn't do us much good if we have a lot more vectors
2261 * than CPU's. So let's be conservative and only ask for
2262 * (roughly) the same number of vectors as there are CPU's.
2263 * The default is to use pairs of vectors.
92915f71 2264 */
2265 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2266 v_budget = min_t(int, v_budget, num_online_cpus());
2267 v_budget += NON_Q_VECTORS;
2268
2269 /* A failure in MSI-X entry allocation isn't fatal, but it does
2270 * mean we disable MSI-X capabilities of the adapter. */
2271 adapter->msix_entries = kcalloc(v_budget,
2272 sizeof(struct msix_entry), GFP_KERNEL);
2273 if (!adapter->msix_entries) {
2274 err = -ENOMEM;
2275 goto out;
2276 }
2277
2278 for (vector = 0; vector < v_budget; vector++)
2279 adapter->msix_entries[vector].entry = vector;
2280
2281 err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
2282 if (err)
2283 goto out;
 2284
2285 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
2286 if (err)
2287 goto out;
2288
2289 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
2290
2291out:
2292 return err;
2293}
2294
2295/**
2296 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2297 * @adapter: board private structure to initialize
2298 *
2299 * We allocate one q_vector per queue interrupt. If allocation fails we
2300 * return -ENOMEM.
2301 **/
2302static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2303{
2304 int q_idx, num_q_vectors;
2305 struct ixgbevf_q_vector *q_vector;
2306
2307 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2308
2309 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2310 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
2311 if (!q_vector)
2312 goto err_out;
2313 q_vector->adapter = adapter;
2314 q_vector->v_idx = q_idx;
2315 netif_napi_add(adapter->netdev, &q_vector->napi,
2316 ixgbevf_poll, 64);
2317#ifdef CONFIG_NET_RX_BUSY_POLL
2318 napi_hash_add(&q_vector->napi);
2319#endif
2320 adapter->q_vector[q_idx] = q_vector;
2321 }
2322
2323 return 0;
2324
2325err_out:
2326 while (q_idx) {
2327 q_idx--;
2328 q_vector = adapter->q_vector[q_idx];
2329#ifdef CONFIG_NET_RX_BUSY_POLL
2330 napi_hash_del(&q_vector->napi);
2331#endif
2332 netif_napi_del(&q_vector->napi);
2333 kfree(q_vector);
2334 adapter->q_vector[q_idx] = NULL;
2335 }
2336 return -ENOMEM;
2337}
2338
2339/**
2340 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2341 * @adapter: board private structure to initialize
2342 *
2343 * This function frees the memory allocated to the q_vectors. In addition if
2344 * NAPI is enabled it will delete any references to the NAPI struct prior
2345 * to freeing the q_vector.
2346 **/
2347static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2348{
 2349 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2350
2351 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2352 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2353
2354 adapter->q_vector[q_idx] = NULL;
2355#ifdef CONFIG_NET_RX_BUSY_POLL
2356 napi_hash_del(&q_vector->napi);
2357#endif
 2358 netif_napi_del(&q_vector->napi);
2359 kfree(q_vector);
2360 }
2361}
2362
2363/**
2364 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2365 * @adapter: board private structure
2366 *
2367 **/
2368static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2369{
2370 pci_disable_msix(adapter->pdev);
2371 kfree(adapter->msix_entries);
2372 adapter->msix_entries = NULL;
2373}
2374
2375/**
2376 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2377 * @adapter: board private structure to initialize
2378 *
2379 **/
2380static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2381{
2382 int err;
2383
2384 /* Number of supported queues */
2385 ixgbevf_set_num_queues(adapter);
2386
2387 err = ixgbevf_set_interrupt_capability(adapter);
2388 if (err) {
2389 hw_dbg(&adapter->hw,
2390 "Unable to setup interrupt capabilities\n");
2391 goto err_set_interrupt;
2392 }
2393
2394 err = ixgbevf_alloc_q_vectors(adapter);
2395 if (err) {
2396 hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
2397 "vectors\n");
2398 goto err_alloc_q_vectors;
2399 }
2400
2401 err = ixgbevf_alloc_queues(adapter);
2402 if (err) {
 2403 pr_err("Unable to allocate memory for queues\n");
2404 goto err_alloc_queues;
2405 }
2406
2407 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
2408 "Tx Queue count = %u\n",
2409 (adapter->num_rx_queues > 1) ? "Enabled" :
2410 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2411
2412 set_bit(__IXGBEVF_DOWN, &adapter->state);
2413
2414 return 0;
2415err_alloc_queues:
2416 ixgbevf_free_q_vectors(adapter);
2417err_alloc_q_vectors:
2418 ixgbevf_reset_interrupt_capability(adapter);
2419err_set_interrupt:
2420 return err;
2421}
2422
2423/**
2424 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2425 * @adapter: board private structure to clear interrupt scheme on
2426 *
2427 * We go through and clear interrupt specific resources and reset the structure
2428 * to pre-load conditions
2429 **/
2430static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2431{
2432 int i;
2433
2434 for (i = 0; i < adapter->num_tx_queues; i++) {
2435 kfree(adapter->tx_ring[i]);
2436 adapter->tx_ring[i] = NULL;
2437 }
2438 for (i = 0; i < adapter->num_rx_queues; i++) {
2439 kfree(adapter->rx_ring[i]);
2440 adapter->rx_ring[i] = NULL;
2441 }
2442
2443 adapter->num_tx_queues = 0;
2444 adapter->num_rx_queues = 0;
2445
2446 ixgbevf_free_q_vectors(adapter);
2447 ixgbevf_reset_interrupt_capability(adapter);
2448}
2449
2450/**
2451 * ixgbevf_sw_init - Initialize general software structures
2452 * (struct ixgbevf_adapter)
2453 * @adapter: board private structure to initialize
2454 *
2455 * ixgbevf_sw_init initializes the Adapter private data structure.
2456 * Fields are initialized based on PCI device information and
2457 * OS network device settings (MTU size).
2458 **/
 2459 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2460{
2461 struct ixgbe_hw *hw = &adapter->hw;
2462 struct pci_dev *pdev = adapter->pdev;
 2463 struct net_device *netdev = adapter->netdev;
2464 int err;
2465
2466 /* PCI config space info */
2467
2468 hw->vendor_id = pdev->vendor;
2469 hw->device_id = pdev->device;
 2470 hw->revision_id = pdev->revision;
2471 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2472 hw->subsystem_device_id = pdev->subsystem_device;
2473
2474 hw->mbx.ops.init_params(hw);
2475
2476 /* assume legacy case in which PF would only give VF 2 queues */
2477 hw->mac.max_tx_queues = 2;
2478 hw->mac.max_rx_queues = 2;
2479
2480 /* lock to protect mailbox accesses */
2481 spin_lock_init(&adapter->mbx_lock);
2482
2483 err = hw->mac.ops.reset_hw(hw);
2484 if (err) {
2485 dev_info(&pdev->dev,
 2486 "PF still in reset state. Is the PF interface up?\n");
2487 } else {
2488 err = hw->mac.ops.init_hw(hw);
2489 if (err) {
 2490 pr_err("init_hw failed: %d\n", err);
2491 goto out;
2492 }
 2493 ixgbevf_negotiate_api(adapter);
2494 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2495 if (err)
2496 dev_info(&pdev->dev, "Error reading MAC address\n");
2497 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2498 dev_info(&pdev->dev,
2499 "MAC address not assigned by administrator.\n");
2500 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2501 }
2502
2503 if (!is_valid_ether_addr(netdev->dev_addr)) {
2504 dev_info(&pdev->dev, "Assigning random MAC address\n");
2505 eth_hw_addr_random(netdev);
2506 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
2507 }
2508
2509 /* Enable dynamic interrupt throttling rates */
2510 adapter->rx_itr_setting = 1;
2511 adapter->tx_itr_setting = 1;
 2512
2513 /* set default ring sizes */
2514 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2515 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2516
 2517 set_bit(__IXGBEVF_DOWN, &adapter->state);
 2518 return 0;
2519
2520out:
2521 return err;
2522}
2523
2524#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
2525 { \
2526 u32 current_counter = IXGBE_READ_REG(hw, reg); \
2527 if (current_counter < last_counter) \
2528 counter += 0x100000000LL; \
2529 last_counter = current_counter; \
2530 counter &= 0xFFFFFFFF00000000LL; \
2531 counter |= current_counter; \
2532 }
2533
2534#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2535 { \
2536 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2537 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2538 u64 current_counter = (current_counter_msb << 32) | \
2539 current_counter_lsb; \
2540 if (current_counter < last_counter) \
2541 counter += 0x1000000000LL; \
2542 last_counter = current_counter; \
2543 counter &= 0xFFFFFFF000000000LL; \
2544 counter |= current_counter; \
2545 }
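/* Worked example for the 32-bit macro above: if last_counter was
 * 0xFFFFFFF0 and the register now reads 0x00000010, the new reading is
 * smaller, so 0x100000000 is added to the 64-bit counter before its low
 * 32 bits are replaced -- the 0x20 events that crossed the wrap are kept.
 * A minimal sketch of the same logic as a function (hypothetical name,
 * for illustration only):
 */
static u64 ixgbevf_extend_counter32(u32 current_counter, u32 *last_counter,
				    u64 counter)
{
	if (current_counter < *last_counter)
		counter += 0x100000000ULL;	/* account for one 32-bit wrap */
	*last_counter = current_counter;
	counter &= 0xFFFFFFFF00000000ULL;	/* keep the accumulated upper half */
	counter |= current_counter;		/* splice in the new reading */
	return counter;
}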
2546/**
2547 * ixgbevf_update_stats - Update the board statistics counters.
2548 * @adapter: board private structure
2549 **/
2550void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2551{
2552 struct ixgbe_hw *hw = &adapter->hw;
 2553 int i;
 2554
2555 if (!adapter->link_up)
2556 return;
2557
2558 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2559 adapter->stats.vfgprc);
2560 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2561 adapter->stats.vfgptc);
2562 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2563 adapter->stats.last_vfgorc,
2564 adapter->stats.vfgorc);
2565 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2566 adapter->stats.last_vfgotc,
2567 adapter->stats.vfgotc);
2568 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2569 adapter->stats.vfmprc);
2570
2571 for (i = 0; i < adapter->num_rx_queues; i++) {
2572 adapter->hw_csum_rx_error +=
 2573 adapter->rx_ring[i]->hw_csum_rx_error;
 2574 adapter->rx_ring[i]->hw_csum_rx_error = 0;
 2575 }
2576}
2577
2578/**
2579 * ixgbevf_watchdog - Timer Call-back
2580 * @data: pointer to adapter cast into an unsigned long
2581 **/
2582static void ixgbevf_watchdog(unsigned long data)
2583{
2584 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2585 struct ixgbe_hw *hw = &adapter->hw;
 2586 u32 eics = 0;
2587 int i;
2588
2589 /*
2590 * Do the watchdog outside of interrupt context due to the lovely
2591 * delays that some of the newer hardware requires
2592 */
2593
2594 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2595 goto watchdog_short_circuit;
2596
2597 /* get one bit for every active tx/rx interrupt vector */
2598 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2599 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
 2600 if (qv->rx.ring || qv->tx.ring)
 2601 eics |= 1 << i;
2602 }
2603
 2604 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2605
2606watchdog_short_circuit:
2607 schedule_work(&adapter->watchdog_task);
2608}
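/* Writing the accumulated mask to VTEICS sets those interrupt-cause
 * bits in hardware, which fires every vector that has at least one ring
 * attached (e.g. eics == 0x7 when three q_vectors are active), so each
 * handler runs periodically even though the VF gets no LSC interrupt.
 */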
2609
2610/**
2611 * ixgbevf_tx_timeout - Respond to a Tx Hang
2612 * @netdev: network interface device structure
2613 **/
2614static void ixgbevf_tx_timeout(struct net_device *netdev)
2615{
2616 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2617
2618 /* Do the reset outside of interrupt context */
2619 schedule_work(&adapter->reset_task);
2620}
2621
2622static void ixgbevf_reset_task(struct work_struct *work)
2623{
2624 struct ixgbevf_adapter *adapter;
2625 adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2626
2627 /* If we're already down or resetting, just bail */
2628 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
 2629 test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
2630 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2631 return;
2632
2633 adapter->tx_timeout_count++;
2634
2635 ixgbevf_reinit_locked(adapter);
2636}
2637
2638/**
2639 * ixgbevf_watchdog_task - worker thread to bring link up
2640 * @work: pointer to work_struct containing our data
2641 **/
2642static void ixgbevf_watchdog_task(struct work_struct *work)
2643{
2644 struct ixgbevf_adapter *adapter = container_of(work,
2645 struct ixgbevf_adapter,
2646 watchdog_task);
2647 struct net_device *netdev = adapter->netdev;
2648 struct ixgbe_hw *hw = &adapter->hw;
2649 u32 link_speed = adapter->link_speed;
2650 bool link_up = adapter->link_up;
 2651 s32 need_reset;
 2652
2653 if (IXGBE_REMOVED(hw->hw_addr)) {
2654 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
2655 rtnl_lock();
2656 ixgbevf_down(adapter);
2657 rtnl_unlock();
2658 }
2659 return;
2660 }
2661 ixgbevf_queue_reset_subtask(adapter);
2662
2663 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2664
2665 /*
2666 * Always check the link on the watchdog because we have
2667 * no LSC interrupt
2668 */
 2669 spin_lock_bh(&adapter->mbx_lock);
 2670
 2671 need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
 2672
 2673 spin_unlock_bh(&adapter->mbx_lock);
 2674
2675 if (need_reset) {
2676 adapter->link_up = link_up;
2677 adapter->link_speed = link_speed;
2678 netif_carrier_off(netdev);
2679 netif_tx_stop_all_queues(netdev);
2680 schedule_work(&adapter->reset_task);
2681 goto pf_has_reset;
2682 }
2683 adapter->link_up = link_up;
2684 adapter->link_speed = link_speed;
2685
2686 if (link_up) {
2687 if (!netif_carrier_ok(netdev)) {
2688 char *link_speed_string;
2689 switch (link_speed) {
2690 case IXGBE_LINK_SPEED_10GB_FULL:
2691 link_speed_string = "10 Gbps";
2692 break;
2693 case IXGBE_LINK_SPEED_1GB_FULL:
2694 link_speed_string = "1 Gbps";
2695 break;
2696 case IXGBE_LINK_SPEED_100_FULL:
2697 link_speed_string = "100 Mbps";
2698 break;
2699 default:
2700 link_speed_string = "unknown speed";
2701 break;
2702 }
 2703 dev_info(&adapter->pdev->dev,
 2704 "NIC Link is Up, %s\n", link_speed_string);
2705 netif_carrier_on(netdev);
2706 netif_tx_wake_all_queues(netdev);
2707 }
2708 } else {
2709 adapter->link_up = false;
2710 adapter->link_speed = 0;
2711 if (netif_carrier_ok(netdev)) {
 2712 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2713 netif_carrier_off(netdev);
2714 netif_tx_stop_all_queues(netdev);
2715 }
2716 }
2717
2718 ixgbevf_update_stats(adapter);
2719
 2720pf_has_reset:
 2721 /* Reset the timer */
2722 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
2723 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
2724 mod_timer(&adapter->watchdog_timer,
2725 round_jiffies(jiffies + (2 * HZ)));
2726
2727 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2728}
2729
2730/**
2731 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2732 * @tx_ring: Tx descriptor ring for a specific queue
2733 *
2734 * Free all transmit software resources
2735 **/
 2736 void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
 2737{
 2738 ixgbevf_clean_tx_ring(tx_ring);
2739
2740 vfree(tx_ring->tx_buffer_info);
2741 tx_ring->tx_buffer_info = NULL;
2742
2743 /* if not set, then don't free */
2744 if (!tx_ring->desc)
2745 return;
2746
 2747 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
 2748 tx_ring->dma);
2749
2750 tx_ring->desc = NULL;
2751}
2752
2753/**
2754 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2755 * @adapter: board private structure
2756 *
2757 * Free all transmit software resources
2758 **/
2759static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2760{
2761 int i;
2762
2763 for (i = 0; i < adapter->num_tx_queues; i++)
 2764 if (adapter->tx_ring[i]->desc)
 2765 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
2766}
2767
2768/**
2769 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2770 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2771 *
2772 * Return 0 on success, negative on failure
2773 **/
 2774 int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
 2775{
2776 int size;
2777
2778 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
 2779 tx_ring->tx_buffer_info = vzalloc(size);
2780 if (!tx_ring->tx_buffer_info)
2781 goto err;
2782
2783 /* round up to nearest 4K */
2784 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2785 tx_ring->size = ALIGN(tx_ring->size, 4096);
2786
 2787 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
 2788 &tx_ring->dma, GFP_KERNEL);
2789 if (!tx_ring->desc)
2790 goto err;
2791
2792 return 0;
2793
2794err:
2795 vfree(tx_ring->tx_buffer_info);
2796 tx_ring->tx_buffer_info = NULL;
 2797 dev_err(tx_ring->dev,
 2798 "Unable to allocate memory for the Tx descriptor ring\n");
2799 return -ENOMEM;
2800}
2801
2802/**
2803 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2804 * @adapter: board private structure
2805 *
2806 * If this function returns with an error, then it's possible one or
2807 * more of the rings is populated (while the rest are not). It is the
2808 * callers duty to clean those orphaned rings.
2809 *
2810 * Return 0 on success, negative on failure
2811 **/
2812static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2813{
2814 int i, err = 0;
2815
2816 for (i = 0; i < adapter->num_tx_queues; i++) {
 2817 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
2818 if (!err)
2819 continue;
2820 hw_dbg(&adapter->hw,
2821 "Allocation for Tx Queue %u failed\n", i);
2822 break;
2823 }
2824
2825 return err;
2826}
2827
2828/**
2829 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2830 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2831 *
2832 * Returns 0 on success, negative on failure
2833 **/
 2834 int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
 2835{
2836 int size;
2837
2838 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
 2839 rx_ring->rx_buffer_info = vzalloc(size);
 2840 if (!rx_ring->rx_buffer_info)
 2841 goto err;
2842
2843 /* Round up to nearest 4K */
2844 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2845 rx_ring->size = ALIGN(rx_ring->size, 4096);
2846
 2847 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
 2848 &rx_ring->dma, GFP_KERNEL);
 2849
2850 if (!rx_ring->desc)
2851 goto err;
 2852
 2853 return 0;
2854err:
2855 vfree(rx_ring->rx_buffer_info);
2856 rx_ring->rx_buffer_info = NULL;
2857 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
2858 return -ENOMEM;
2859}
2860
2861/**
2862 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2863 * @adapter: board private structure
2864 *
2865 * If this function returns with an error, then it's possible one or
2866 * more of the rings is populated (while the rest are not). It is the
2867 * callers duty to clean those orphaned rings.
2868 *
2869 * Return 0 on success, negative on failure
2870 **/
2871static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2872{
2873 int i, err = 0;
2874
2875 for (i = 0; i < adapter->num_rx_queues; i++) {
 2876 err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
2877 if (!err)
2878 continue;
2879 hw_dbg(&adapter->hw,
2880 "Allocation for Rx Queue %u failed\n", i);
2881 break;
2882 }
2883 return err;
2884}
2885
2886/**
2887 * ixgbevf_free_rx_resources - Free Rx Resources
2888 * @rx_ring: ring to clean the resources from
2889 *
2890 * Free all receive software resources
2891 **/
 2892 void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
 2893{
 2894 ixgbevf_clean_rx_ring(rx_ring);
2895
2896 vfree(rx_ring->rx_buffer_info);
2897 rx_ring->rx_buffer_info = NULL;
2898
 2899 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
 2900 rx_ring->dma);
2901
2902 rx_ring->desc = NULL;
2903}
2904
2905/**
2906 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2907 * @adapter: board private structure
2908 *
2909 * Free all receive software resources
2910 **/
2911static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2912{
2913 int i;
2914
2915 for (i = 0; i < adapter->num_rx_queues; i++)
 2916 if (adapter->rx_ring[i]->desc)
 2917 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
2918}
2919
2920/**
2921 * ixgbevf_open - Called when a network interface is made active
2922 * @netdev: network interface device structure
2923 *
2924 * Returns 0 on success, negative value on failure
2925 *
2926 * The open entry point is called when a network interface is made
2927 * active by the system (IFF_UP). At this point all resources needed
2928 * for transmit and receive operations are allocated, the interrupt
2929 * handler is registered with the OS, the watchdog timer is started,
2930 * and the stack is notified that the interface is ready.
2931 **/
2932static int ixgbevf_open(struct net_device *netdev)
2933{
2934 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2935 struct ixgbe_hw *hw = &adapter->hw;
2936 int err;
2937
 2938 /* A previous failure to open the device because of a lack of
2939 * available MSIX vector resources may have reset the number
2940 * of msix vectors variable to zero. The only way to recover
2941 * is to unload/reload the driver and hope that the system has
2942 * been able to recover some MSIX vector resources.
2943 */
2944 if (!adapter->num_msix_vectors)
2945 return -ENOMEM;
2946
2947 /* disallow open during test */
2948 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2949 return -EBUSY;
2950
2951 if (hw->adapter_stopped) {
2952 ixgbevf_reset(adapter);
2953 /* if adapter is still stopped then PF isn't up and
2954 * the vf can't start. */
2955 if (hw->adapter_stopped) {
2956 err = IXGBE_ERR_MBX;
2957 pr_err("Unable to start - perhaps the PF Driver isn't "
2958 "up yet\n");
2959 goto err_setup_reset;
2960 }
2961 }
2962
2963 /* allocate transmit descriptors */
2964 err = ixgbevf_setup_all_tx_resources(adapter);
2965 if (err)
2966 goto err_setup_tx;
2967
2968 /* allocate receive descriptors */
2969 err = ixgbevf_setup_all_rx_resources(adapter);
2970 if (err)
2971 goto err_setup_rx;
2972
2973 ixgbevf_configure(adapter);
2974
2975 /*
2976 * Map the Tx/Rx rings to the vectors we were allotted.
2977 * if request_irq will be called in this function map_rings
2978 * must be called *before* up_complete
2979 */
2980 ixgbevf_map_rings_to_vectors(adapter);
2981
 2982 ixgbevf_up_complete(adapter);
2983
2984 /* clear any pending interrupts, may auto mask */
2985 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2986 err = ixgbevf_request_irq(adapter);
2987 if (err)
2988 goto err_req_irq;
2989
 2990 ixgbevf_irq_enable(adapter);
2991
2992 return 0;
2993
2994err_req_irq:
2995 ixgbevf_down(adapter);
2996err_setup_rx:
2997 ixgbevf_free_all_rx_resources(adapter);
2998err_setup_tx:
2999 ixgbevf_free_all_tx_resources(adapter);
3000 ixgbevf_reset(adapter);
3001
3002err_setup_reset:
3003
3004 return err;
3005}
3006
3007/**
3008 * ixgbevf_close - Disables a network interface
3009 * @netdev: network interface device structure
3010 *
3011 * Returns 0, this is not allowed to fail
3012 *
3013 * The close entry point is called when an interface is de-activated
3014 * by the OS. The hardware is still under the drivers control, but
3015 * needs to be disabled. A global MAC reset is issued to stop the
3016 * hardware, and all transmit and receive resources are freed.
3017 **/
3018static int ixgbevf_close(struct net_device *netdev)
3019{
3020 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3021
3022 ixgbevf_down(adapter);
3023 ixgbevf_free_irq(adapter);
3024
3025 ixgbevf_free_all_tx_resources(adapter);
3026 ixgbevf_free_all_rx_resources(adapter);
3027
3028 return 0;
3029}
3030
3031static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
3032{
3033 struct net_device *dev = adapter->netdev;
3034
3035 if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
3036 return;
3037
3038 adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
3039
3040 /* if interface is down do nothing */
3041 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3042 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3043 return;
3044
3045 /* Hardware has to reinitialize queues and interrupts to
3046 * match packet buffer alignment. Unfortunately, the
3047 * hardware is not flexible enough to do this dynamically.
3048 */
3049 if (netif_running(dev))
3050 ixgbevf_close(dev);
3051
3052 ixgbevf_clear_interrupt_scheme(adapter);
3053 ixgbevf_init_interrupt_scheme(adapter);
3054
3055 if (netif_running(dev))
3056 ixgbevf_open(dev);
3057}
3058
3059static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
3060 u32 vlan_macip_lens, u32 type_tucmd,
3061 u32 mss_l4len_idx)
3062{
3063 struct ixgbe_adv_tx_context_desc *context_desc;
 3064 u16 i = tx_ring->next_to_use;
 3065
 3066 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
 3067
3068 i++;
3069 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
 3070
3071 /* set bits to identify this as an advanced context descriptor */
3072 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
 3073
3074 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3075 context_desc->seqnum_seed = 0;
3076 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
3077 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3078}
3079
3080static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3081 struct ixgbevf_tx_buffer *first,
3082 u8 *hdr_len)
 3083{
 3084 struct sk_buff *skb = first->skb;
3085 u32 vlan_macip_lens, type_tucmd;
3086 u32 mss_l4len_idx, l4len;
 3087 int err;
 3088
3089 if (skb->ip_summed != CHECKSUM_PARTIAL)
3090 return 0;
3091
3092 if (!skb_is_gso(skb))
3093 return 0;
 3094
3095 err = skb_cow_head(skb, 0);
3096 if (err < 0)
3097 return err;
 3098
3099 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3100 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3101
3102 if (skb->protocol == htons(ETH_P_IP)) {
3103 struct iphdr *iph = ip_hdr(skb);
3104 iph->tot_len = 0;
3105 iph->check = 0;
3106 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3107 iph->daddr, 0,
3108 IPPROTO_TCP,
3109 0);
3110 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3111 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3112 IXGBE_TX_FLAGS_CSUM |
3113 IXGBE_TX_FLAGS_IPV4;
3114 } else if (skb_is_gso_v6(skb)) {
3115 ipv6_hdr(skb)->payload_len = 0;
3116 tcp_hdr(skb)->check =
3117 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3118 &ipv6_hdr(skb)->daddr,
3119 0, IPPROTO_TCP, 0);
3120 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3121 IXGBE_TX_FLAGS_CSUM;
3122 }
3123
3124 /* compute header lengths */
3125 l4len = tcp_hdrlen(skb);
 3126
3127 *hdr_len = skb_transport_offset(skb) + l4len;
3128
3129 /* update gso size and bytecount with header size */
3130 first->gso_segs = skb_shinfo(skb)->gso_segs;
3131 first->bytecount += (first->gso_segs - 1) * *hdr_len;
3132
3133 /* mss_l4len_id: use 1 as index for TSO */
3134 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
3135 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
3136 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
3137
3138 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
3139 vlan_macip_lens = skb_network_header_len(skb);
3140 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
 3141 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3142
3143 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3144 type_tucmd, mss_l4len_idx);
3145
3146 return 1;
3147}
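/* Example of the mss_l4len_idx packing above: for a 20-byte TCP header
 * and an MSS of 1448, the word is (20 << IXGBE_ADVTXD_L4LEN_SHIFT) |
 * (1448 << IXGBE_ADVTXD_MSS_SHIFT) | (1 << IXGBE_ADVTXD_IDX_SHIFT),
 * i.e. L4 length, segment size and context index 1 packed into the one
 * 32-bit descriptor field consumed by ixgbevf_tx_ctxtdesc().
 */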
3148
3149static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3150 struct ixgbevf_tx_buffer *first)
 3151{
 3152 struct sk_buff *skb = first->skb;
3153 u32 vlan_macip_lens = 0;
3154 u32 mss_l4len_idx = 0;
3155 u32 type_tucmd = 0;
 3156
3157 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3158 u8 l4_hdr = 0;
3159 switch (skb->protocol) {
 3160 case htons(ETH_P_IP):
3161 vlan_macip_lens |= skb_network_header_len(skb);
3162 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3163 l4_hdr = ip_hdr(skb)->protocol;
3164 break;
 3165 case htons(ETH_P_IPV6):
3166 vlan_macip_lens |= skb_network_header_len(skb);
3167 l4_hdr = ipv6_hdr(skb)->nexthdr;
3168 break;
3169 default:
3170 if (unlikely(net_ratelimit())) {
3171 dev_warn(tx_ring->dev,
3172 "partial checksum but proto=%x!\n",
 3173 first->protocol);
3174 }
3175 break;
3176 }
 3177
3178 switch (l4_hdr) {
3179 case IPPROTO_TCP:
3180 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3181 mss_l4len_idx = tcp_hdrlen(skb) <<
3182 IXGBE_ADVTXD_L4LEN_SHIFT;
3183 break;
3184 case IPPROTO_SCTP:
3185 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3186 mss_l4len_idx = sizeof(struct sctphdr) <<
3187 IXGBE_ADVTXD_L4LEN_SHIFT;
3188 break;
3189 case IPPROTO_UDP:
3190 mss_l4len_idx = sizeof(struct udphdr) <<
3191 IXGBE_ADVTXD_L4LEN_SHIFT;
3192 break;
3193 default:
3194 if (unlikely(net_ratelimit())) {
3195 dev_warn(tx_ring->dev,
3196 "partial checksum but l4 proto=%x!\n",
3197 l4_hdr);
3198 }
3199 break;
3200 }
3201
3202 /* update TX checksum flag */
3203 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
3204 }
3205
3206 /* vlan_macip_lens: MACLEN, VLAN tag */
3207 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
 3208 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3209
3210 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3211 type_tucmd, mss_l4len_idx);
3212}
3213
 3214 static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
 3215{
3216 /* set type for advanced descriptor with frame checksum insertion */
3217 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
3218 IXGBE_ADVTXD_DCMD_IFCS |
3219 IXGBE_ADVTXD_DCMD_DEXT);
 3220
3221 /* set HW vlan bit if vlan is present */
3222 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3223 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
 3224
3225 /* set segmentation enable bits for TSO/FSO */
3226 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3227 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
 3228
3229 return cmd_type;
3230}
 3231
3232static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
3233 u32 tx_flags, unsigned int paylen)
3234{
3235 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
 3236
3237 /* enable L4 checksum for TSO and TX checksum offload */
3238 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3239 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
 3240
 3241 /* enable IPv4 checksum for TSO */
3242 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3243 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
 3244
3245 /* use index 1 context for TSO/FSO/FCOE */
3246 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3247 olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
 3248
3249 /* Check Context must be set if Tx switch is enabled, which it
3250 * always is for case where virtual functions are running
3251 */
3252 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
 3253
3254 tx_desc->read.olinfo_status = olinfo_status;
3255}
 3256
3257static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
3258 struct ixgbevf_tx_buffer *first,
3259 const u8 hdr_len)
3260{
3261 dma_addr_t dma;
3262 struct sk_buff *skb = first->skb;
3263 struct ixgbevf_tx_buffer *tx_buffer;
3264 union ixgbe_adv_tx_desc *tx_desc;
3265 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
3266 unsigned int data_len = skb->data_len;
3267 unsigned int size = skb_headlen(skb);
3268 unsigned int paylen = skb->len - hdr_len;
3269 u32 tx_flags = first->tx_flags;
3270 __le32 cmd_type;
3271 u16 i = tx_ring->next_to_use;
 3272
 3273 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
 3274
3275 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
3276 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
 3277
3278 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3279 if (dma_mapping_error(tx_ring->dev, dma))
3280 goto dma_error;
 3281
3282 /* record length, and DMA address */
3283 dma_unmap_len_set(first, len, size);
3284 dma_unmap_addr_set(first, dma, dma);
 3285
 3286 tx_desc->read.buffer_addr = cpu_to_le64(dma);
 3287
3288 for (;;) {
3289 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
3290 tx_desc->read.cmd_type_len =
3291 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
 3292
3293 i++;
3294 tx_desc++;
3295 if (i == tx_ring->count) {
3296 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3297 i = 0;
3298 }
 3299
3300 dma += IXGBE_MAX_DATA_PER_TXD;
3301 size -= IXGBE_MAX_DATA_PER_TXD;
 3302
3303 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3304 tx_desc->read.olinfo_status = 0;
3305 }
 3306
3307 if (likely(!data_len))
3308 break;
 3309
 3310 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
 3311
3312 i++;
3313 tx_desc++;
3314 if (i == tx_ring->count) {
3315 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3316 i = 0;
3317 }
 3318
3319 size = skb_frag_size(frag);
3320 data_len -= size;
 3321
3322 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3323 DMA_TO_DEVICE);
3324 if (dma_mapping_error(tx_ring->dev, dma))
3325 goto dma_error;
 3326
3327 tx_buffer = &tx_ring->tx_buffer_info[i];
3328 dma_unmap_len_set(tx_buffer, len, size);
3329 dma_unmap_addr_set(tx_buffer, dma, dma);
 3330
3331 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3332 tx_desc->read.olinfo_status = 0;
3333
3334 frag++;
 3335 }
 3336
3337 /* write last descriptor with RS and EOP bits */
3338 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
3339 tx_desc->read.cmd_type_len = cmd_type;
3340
3341 /* set the timestamp */
3342 first->time_stamp = jiffies;
3343
3344 /* Force memory writes to complete before letting h/w know there
3345 * are new descriptors to fetch. (Only applicable for weak-ordered
3346 * memory model archs, such as IA-64).
3347 *
3348 * We also need this memory barrier (wmb) to make certain all of the
3349 * status bits have been updated before next_to_watch is written.
 3350 */
 3351 wmb();
 3352
3353 /* set next_to_watch value indicating a packet is present */
3354 first->next_to_watch = tx_desc;
 3355
3356 i++;
3357 if (i == tx_ring->count)
3358 i = 0;
 3359
 3360 tx_ring->next_to_use = i;
 3361
 3362 /* notify HW of packet */
 3363 ixgbevf_write_tail(tx_ring, i);
3364
3365 return;
3366dma_error:
3367 dev_err(tx_ring->dev, "TX DMA map failed\n");
3368
3369 /* clear dma mappings for failed tx_buffer_info map */
3370 for (;;) {
3371 tx_buffer = &tx_ring->tx_buffer_info[i];
3372 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
3373 if (tx_buffer == first)
3374 break;
3375 if (i == 0)
3376 i = tx_ring->count;
3377 i--;
3378 }
 3379
 3380 tx_ring->next_to_use = i;
3381}
3382
 3383 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 3384{
 3385 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3386 /* Herbert's original patch had:
3387 * smp_mb__after_netif_stop_queue();
3388 * but since that doesn't exist yet, just open code it. */
3389 smp_mb();
3390
 3391 /* We need to check again in case another CPU has just
3392 * made room available. */
f880d07b 3393 if (likely(ixgbevf_desc_unused(tx_ring) < size))
92915f71
GR
3394 return -EBUSY;
3395
3396 /* A reprieve! - use start_queue because it doesn't call schedule */
fb40195c 3397 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
095e2617
ET
3398 ++tx_ring->tx_stats.restart_queue;
3399
92915f71
GR
3400 return 0;
3401}
3402
fb40195c 3403static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
92915f71 3404{
f880d07b 3405 if (likely(ixgbevf_desc_unused(tx_ring) >= size))
92915f71 3406 return 0;
fb40195c 3407 return __ixgbevf_maybe_stop_tx(tx_ring, size);
92915f71
GR
3408}
3409
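/* Editor's note: the two helpers above implement the usual lock-free TX
 * stop/restart handshake with the cleanup path: stop the subqueue first,
 * issue a full barrier, then re-check free descriptors in case the
 * cleanup path freed entries after the first check but before the queue
 * was marked stopped. Condensed ordering, assuming the same
 * ixgbevf_desc_unused() semantics as above:
 */
#if 0 /* illustrative sketch only, not driver code */
	netif_stop_subqueue(netdev, qid);	/* 1: publish stopped state  */
	smp_mb();				/* 2: order vs. cleanup path */
	if (ixgbevf_desc_unused(tx_ring) >= size)	/* 3: re-check       */
		netif_start_subqueue(netdev, qid);	/* reprieve          */
#endif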
3410static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3411{
3412 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
7ad1a093 3413 struct ixgbevf_tx_buffer *first;
92915f71 3414 struct ixgbevf_ring *tx_ring;
7ad1a093
ET
3415 int tso;
3416 u32 tx_flags = 0;
3595990a
AD
3417 u16 count = TXD_USE_COUNT(skb_headlen(skb));
3418#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3419 unsigned short f;
3420#endif
7ad1a093 3421 u8 hdr_len = 0;
f9d08f16 3422 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
7ad1a093 3423
46acc460 3424 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
f9d08f16
GR
3425 dev_kfree_skb(skb);
3426 return NETDEV_TX_OK;
3427 }
92915f71 3428
7ad1a093 3429 tx_ring = adapter->tx_ring[skb->queue_mapping];
92915f71 3430
3595990a
AD
3431 /*
3432 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
3433 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
3434 * + 2 desc gap to keep tail from touching head,
3435 * + 1 desc for context descriptor,
3436 * otherwise try next time
3437 */
3438#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3439 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3440 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3441#else
3442 count += skb_shinfo(skb)->nr_frags;
3443#endif
fb40195c 3444 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
095e2617 3445 tx_ring->tx_stats.tx_busy++;
3595990a
AD
3446 return NETDEV_TX_BUSY;
3447 }
3448
7ad1a093
ET
3449 /* record the location of the first descriptor for this packet */
3450 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
3451 first->skb = skb;
3452 first->bytecount = skb->len;
3453 first->gso_segs = 1;
3454
eab6d18d 3455 if (vlan_tx_tag_present(skb)) {
92915f71
GR
3456 tx_flags |= vlan_tx_tag_get(skb);
3457 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3458 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3459 }
3460
7ad1a093
ET
3461 /* record initial flags and protocol */
3462 first->tx_flags = tx_flags;
3463 first->protocol = vlan_get_protocol(skb);
92915f71 3464
7ad1a093
ET
3465 tso = ixgbevf_tso(tx_ring, first, &hdr_len);
3466 if (tso < 0)
3467 goto out_drop;
b5d217f3 3468 else if (!tso)
7ad1a093 3469 ixgbevf_tx_csum(tx_ring, first);
92915f71 3470
29d37fa1 3471 ixgbevf_tx_map(tx_ring, first, hdr_len);
70a10e25 3472
fb40195c 3473 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
92915f71 3474
7ad1a093
ET
3475 return NETDEV_TX_OK;
3476
3477out_drop:
3478 dev_kfree_skb_any(first->skb);
3479 first->skb = NULL;
3480
92915f71
GR
3481 return NETDEV_TX_OK;
3482}
3483
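/* Editor's note: the count + 3 passed to ixgbevf_maybe_stop_tx() in
 * ixgbevf_xmit_frame() above reserves count data descriptors plus one
 * context descriptor plus the two-descriptor gap that keeps tail from
 * touching head. Worked example (sizes assumed for illustration): an skb
 * with a small linear area and two page frags needs count = 1 + 2 = 3
 * data descriptors, so at least 3 + 3 = 6 ring entries must be free or
 * the queue is stopped and NETDEV_TX_BUSY is returned.
 */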
92915f71
GR
3484/**
3485 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3486 * @netdev: network interface device structure
3487 * @p: pointer to an address structure
3488 *
3489 * Returns 0 on success, negative on failure
3490 **/
3491static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3492{
3493 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3494 struct ixgbe_hw *hw = &adapter->hw;
3495 struct sockaddr *addr = p;
3496
3497 if (!is_valid_ether_addr(addr->sa_data))
3498 return -EADDRNOTAVAIL;
3499
3500 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3501 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3502
55fdd45b 3503 spin_lock_bh(&adapter->mbx_lock);
1c55ed76 3504
92fe0bf7 3505 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
92915f71 3506
55fdd45b 3507 spin_unlock_bh(&adapter->mbx_lock);
1c55ed76 3508
92915f71
GR
3509 return 0;
3510}
3511
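/* Editor's note: set_rar() above is a mailbox operation shared with the
 * PF, so it is serialized by mbx_lock; the BH-safe spin_lock_bh() variant
 * is used, which suggests the mailbox can also be reached from softirq
 * context. That reading is an editorial inference, not stated in this
 * file.
 */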
3512/**
3513 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3514 * @netdev: network interface device structure
3515 * @new_mtu: new value for maximum frame size
3516 *
3517 * Returns 0 on success, negative on failure
3518 **/
3519static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3520{
3521 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
bad17234 3522 struct ixgbe_hw *hw = &adapter->hw;
92915f71 3523 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
69bfbec4 3524 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
69bfbec4 3525
56e94095
AD
3526 switch (adapter->hw.api_version) {
3527 case ixgbe_mbox_api_11:
69bfbec4 3528 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
56e94095
AD
3529 break;
3530 default:
47068b0d 3531 if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
56e94095
AD
3532 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3533 break;
3534 }
92915f71
GR
3535
3536 /* MTU < 68 is an error and causes problems on some kernels */
69bfbec4 3537 if ((new_mtu < 68) || (max_frame > max_possible_frame))
92915f71
GR
3538 return -EINVAL;
3539
bad17234 3540 hw_dbg(hw, "changing MTU from %d to %d\n",
92915f71
GR
3541 netdev->mtu, new_mtu);
3542 /* must set new MTU before calling down or up */
3543 netdev->mtu = new_mtu;
3544
bad17234
ET
3545 /* notify the PF of our intent to use this size of frame */
3546 ixgbevf_rlpml_set_vf(hw, max_frame);
92915f71
GR
3547
3548 return 0;
3549}
3550
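/* Editor's note: a worked example of the max_frame arithmetic in
 * ixgbevf_change_mtu() above: new_mtu = 1500 gives
 * max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 bytes.
 * Per the switch statement, jumbo sizes up to IXGBE_MAX_JUMBO_FRAME_SIZE
 * are only accepted with mailbox API 1.1 or on non-82599 VFs; otherwise
 * the limit is MAXIMUM_ETHERNET_VLAN_SIZE.
 */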
688ff32d
ET
3551#ifdef CONFIG_NET_POLL_CONTROLLER
3552/* Polling 'interrupt' - used by things like netconsole to send skbs
3553 * without having to re-enable interrupts. It's not called while
3554 * the interrupt routine is executing.
3555 */
3556static void ixgbevf_netpoll(struct net_device *netdev)
3557{
3558 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3559 int i;
3560
3561 /* if interface is down do nothing */
3562 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
3563 return;
3564 for (i = 0; i < adapter->num_rx_queues; i++)
3565 ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
3566}
3567#endif /* CONFIG_NET_POLL_CONTROLLER */
3568
0ac1e8ce 3569static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
92915f71
GR
3570{
3571 struct net_device *netdev = pci_get_drvdata(pdev);
3572 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
0ac1e8ce
AD
3573#ifdef CONFIG_PM
3574 int retval = 0;
3575#endif
92915f71
GR
3576
3577 netif_device_detach(netdev);
3578
3579 if (netif_running(netdev)) {
0ac1e8ce 3580 rtnl_lock();
92915f71
GR
3581 ixgbevf_down(adapter);
3582 ixgbevf_free_irq(adapter);
3583 ixgbevf_free_all_tx_resources(adapter);
3584 ixgbevf_free_all_rx_resources(adapter);
0ac1e8ce 3585 rtnl_unlock();
92915f71
GR
3586 }
3587
0ac1e8ce 3588 ixgbevf_clear_interrupt_scheme(adapter);
92915f71 3589
0ac1e8ce
AD
3590#ifdef CONFIG_PM
3591 retval = pci_save_state(pdev);
3592 if (retval)
3593 return retval;
92915f71 3594
0ac1e8ce 3595#endif
bc0c7151
MR
3596 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
3597 pci_disable_device(pdev);
0ac1e8ce
AD
3598
3599 return 0;
3600}
3601
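/* Editor's note: suspend, shutdown, remove and the AER error path can all
 * end up calling pci_disable_device(); the __IXGBEVF_DISABLED bit makes
 * that call one-shot across those paths. Condensed form of the guard used
 * throughout this file:
 */
#if 0 /* illustrative sketch only, not driver code */
	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);	/* first caller wins */
#endif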
3602#ifdef CONFIG_PM
3603static int ixgbevf_resume(struct pci_dev *pdev)
3604{
27ae2967
WY
3605 struct net_device *netdev = pci_get_drvdata(pdev);
3606 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
0ac1e8ce
AD
 3607 int err;
3608
0ac1e8ce
AD
3609 pci_restore_state(pdev);
3610 /*
3611 * pci_restore_state clears dev->state_saved so call
3612 * pci_save_state to restore it.
3613 */
3614 pci_save_state(pdev);
3615
3616 err = pci_enable_device_mem(pdev);
3617 if (err) {
3618 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3619 return err;
3620 }
4e857c58 3621 smp_mb__before_atomic();
bc0c7151 3622 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
0ac1e8ce
AD
3623 pci_set_master(pdev);
3624
798e381a
DS
3625 ixgbevf_reset(adapter);
3626
0ac1e8ce
AD
3627 rtnl_lock();
3628 err = ixgbevf_init_interrupt_scheme(adapter);
3629 rtnl_unlock();
3630 if (err) {
3631 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3632 return err;
3633 }
3634
0ac1e8ce
AD
3635 if (netif_running(netdev)) {
3636 err = ixgbevf_open(netdev);
3637 if (err)
3638 return err;
3639 }
3640
3641 netif_device_attach(netdev);
3642
3643 return err;
3644}
3645
3646#endif /* CONFIG_PM */
3647static void ixgbevf_shutdown(struct pci_dev *pdev)
3648{
3649 ixgbevf_suspend(pdev, PMSG_SUSPEND);
92915f71
GR
3650}
3651
4197aa7b
ED
3652static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3653 struct rtnl_link_stats64 *stats)
3654{
3655 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3656 unsigned int start;
3657 u64 bytes, packets;
3658 const struct ixgbevf_ring *ring;
3659 int i;
3660
3661 ixgbevf_update_stats(adapter);
3662
3663 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3664
3665 for (i = 0; i < adapter->num_rx_queues; i++) {
87e70ab9 3666 ring = adapter->rx_ring[i];
4197aa7b 3667 do {
57a7744e 3668 start = u64_stats_fetch_begin_irq(&ring->syncp);
095e2617
ET
3669 bytes = ring->stats.bytes;
3670 packets = ring->stats.packets;
57a7744e 3671 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
4197aa7b
ED
3672 stats->rx_bytes += bytes;
3673 stats->rx_packets += packets;
3674 }
3675
3676 for (i = 0; i < adapter->num_tx_queues; i++) {
87e70ab9 3677 ring = adapter->tx_ring[i];
4197aa7b 3678 do {
57a7744e 3679 start = u64_stats_fetch_begin_irq(&ring->syncp);
095e2617
ET
3680 bytes = ring->stats.bytes;
3681 packets = ring->stats.packets;
57a7744e 3682 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
4197aa7b
ED
3683 stats->tx_bytes += bytes;
3684 stats->tx_packets += packets;
3685 }
3686
3687 return stats;
3688}
3689
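/* Editor's note: the fetch_begin/fetch_retry pairs above are the standard
 * u64_stats_sync reader pattern, which retries the snapshot if a writer
 * updated the 64-bit counters mid-read (needed on 32-bit hosts, where
 * such reads are not atomic). Reader side in isolation, assuming a ring
 * with the same ->syncp and ->stats layout:
 */
#if 0 /* illustrative sketch only, not driver code */
	unsigned int start;
	u64 bytes;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		bytes = ring->stats.bytes;	/* snapshot inside the window */
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
#endif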
0ac1e8ce 3690static const struct net_device_ops ixgbevf_netdev_ops = {
c12db769
SH
3691 .ndo_open = ixgbevf_open,
3692 .ndo_stop = ixgbevf_close,
3693 .ndo_start_xmit = ixgbevf_xmit_frame,
3694 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
4197aa7b 3695 .ndo_get_stats64 = ixgbevf_get_stats,
92915f71 3696 .ndo_validate_addr = eth_validate_addr,
c12db769
SH
3697 .ndo_set_mac_address = ixgbevf_set_mac,
3698 .ndo_change_mtu = ixgbevf_change_mtu,
3699 .ndo_tx_timeout = ixgbevf_tx_timeout,
c12db769
SH
3700 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
3701 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
c777cdfa
JK
3702#ifdef CONFIG_NET_RX_BUSY_POLL
3703 .ndo_busy_poll = ixgbevf_busy_poll_recv,
3704#endif
688ff32d
ET
3705#ifdef CONFIG_NET_POLL_CONTROLLER
3706 .ndo_poll_controller = ixgbevf_netpoll,
3707#endif
92915f71 3708};
92915f71
GR
3709
3710static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3711{
0ac1e8ce 3712 dev->netdev_ops = &ixgbevf_netdev_ops;
92915f71
GR
3713 ixgbevf_set_ethtool_ops(dev);
3714 dev->watchdog_timeo = 5 * HZ;
3715}
3716
3717/**
3718 * ixgbevf_probe - Device Initialization Routine
3719 * @pdev: PCI device information struct
3720 * @ent: entry in ixgbevf_pci_tbl
3721 *
3722 * Returns 0 on success, negative on failure
3723 *
3724 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3725 * The OS initialization, configuring of the adapter private structure,
3726 * and a hardware reset occur.
3727 **/
1dd06ae8 3728static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
92915f71
GR
3729{
3730 struct net_device *netdev;
3731 struct ixgbevf_adapter *adapter = NULL;
3732 struct ixgbe_hw *hw = NULL;
3733 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
92915f71 3734 int err, pci_using_dac;
0333464f 3735 bool disable_dev = false;
92915f71
GR
3736
3737 err = pci_enable_device(pdev);
3738 if (err)
3739 return err;
3740
53567aa4 3741 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
92915f71
GR
3742 pci_using_dac = 1;
3743 } else {
53567aa4 3744 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
92915f71 3745 if (err) {
53567aa4
RK
 3746 dev_err(&pdev->dev,
 3747 "No usable DMA configuration, aborting\n");
3748 goto err_dma;
92915f71
GR
3749 }
3750 pci_using_dac = 0;
3751 }
3752
3753 err = pci_request_regions(pdev, ixgbevf_driver_name);
3754 if (err) {
3755 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3756 goto err_pci_reg;
3757 }
3758
3759 pci_set_master(pdev);
3760
92915f71
GR
3761 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3762 MAX_TX_QUEUES);
92915f71
GR
3763 if (!netdev) {
3764 err = -ENOMEM;
3765 goto err_alloc_etherdev;
3766 }
3767
3768 SET_NETDEV_DEV(netdev, &pdev->dev);
3769
92915f71
GR
3770 adapter = netdev_priv(netdev);
3771
3772 adapter->netdev = netdev;
3773 adapter->pdev = pdev;
3774 hw = &adapter->hw;
3775 hw->back = adapter;
b3f4d599 3776 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
92915f71
GR
3777
3778 /*
 3779 * call save state here in the standalone driver because it relies on
 3780 * the adapter struct to exist, and needs to call netdev_priv
3781 */
3782 pci_save_state(pdev);
3783
3784 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3785 pci_resource_len(pdev, 0));
dbf8b0d8 3786 adapter->io_addr = hw->hw_addr;
92915f71
GR
3787 if (!hw->hw_addr) {
3788 err = -EIO;
3789 goto err_ioremap;
3790 }
3791
3792 ixgbevf_assign_netdev_ops(netdev);
3793
92915f71
GR
3794 /* Setup hw api */
3795 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3796 hw->mac.type = ii->mac;
3797
3798 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
f416dfc0 3799 sizeof(struct ixgbe_mbx_operations));
92915f71 3800
92915f71
GR
3801 /* setup the private structure */
3802 err = ixgbevf_sw_init(adapter);
1a0d6ae5
DK
3803 if (err)
3804 goto err_sw_init;
3805
3806 /* The HW MAC address was set and/or determined in sw_init */
1a0d6ae5
DK
3807 if (!is_valid_ether_addr(netdev->dev_addr)) {
3808 pr_err("invalid MAC address\n");
3809 err = -EIO;
3810 goto err_sw_init;
3811 }
92915f71 3812
471a76de 3813 netdev->hw_features = NETIF_F_SG |
92915f71 3814 NETIF_F_IP_CSUM |
471a76de
MM
3815 NETIF_F_IPV6_CSUM |
3816 NETIF_F_TSO |
3817 NETIF_F_TSO6 |
3818 NETIF_F_RXCSUM;
3819
3820 netdev->features = netdev->hw_features |
f646968f
PM
3821 NETIF_F_HW_VLAN_CTAG_TX |
3822 NETIF_F_HW_VLAN_CTAG_RX |
3823 NETIF_F_HW_VLAN_CTAG_FILTER;
92915f71 3824
92915f71
GR
3825 netdev->vlan_features |= NETIF_F_TSO;
3826 netdev->vlan_features |= NETIF_F_TSO6;
3827 netdev->vlan_features |= NETIF_F_IP_CSUM;
3bfacf96 3828 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
92915f71
GR
3829 netdev->vlan_features |= NETIF_F_SG;
3830
3831 if (pci_using_dac)
3832 netdev->features |= NETIF_F_HIGHDMA;
3833
01789349
JP
3834 netdev->priv_flags |= IFF_UNICAST_FLT;
3835
92915f71 3836 init_timer(&adapter->watchdog_timer);
c061b18d 3837 adapter->watchdog_timer.function = ixgbevf_watchdog;
92915f71
GR
3838 adapter->watchdog_timer.data = (unsigned long)adapter;
3839
ea699569
MR
3840 if (IXGBE_REMOVED(hw->hw_addr)) {
3841 err = -EIO;
3842 goto err_sw_init;
3843 }
92915f71
GR
3844 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3845 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
ea699569 3846 set_bit(__IXGBEVF_WORK_INIT, &adapter->state);
92915f71
GR
3847
3848 err = ixgbevf_init_interrupt_scheme(adapter);
3849 if (err)
3850 goto err_sw_init;
3851
92915f71
GR
3852 strcpy(netdev->name, "eth%d");
3853
3854 err = register_netdev(netdev);
3855 if (err)
3856 goto err_register;
3857
0333464f 3858 pci_set_drvdata(pdev, netdev);
5d426ad1
GR
3859 netif_carrier_off(netdev);
3860
33bd9f60
GR
3861 ixgbevf_init_last_counter_stats(adapter);
3862
47068b0d
ET
3863 /* print the VF info */
3864 dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
3865 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
92915f71 3866
47068b0d
ET
3867 switch (hw->mac.type) {
3868 case ixgbe_mac_X550_vf:
3869 dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
3870 break;
3871 case ixgbe_mac_X540_vf:
3872 dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
3873 break;
3874 case ixgbe_mac_82599_vf:
3875 default:
3876 dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
3877 break;
3878 }
92915f71 3879
92915f71
GR
3880 return 0;
3881
3882err_register:
0ac1e8ce 3883 ixgbevf_clear_interrupt_scheme(adapter);
92915f71
GR
3884err_sw_init:
3885 ixgbevf_reset_interrupt_capability(adapter);
dbf8b0d8 3886 iounmap(adapter->io_addr);
92915f71 3887err_ioremap:
0333464f 3888 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
92915f71
GR
3889 free_netdev(netdev);
3890err_alloc_etherdev:
3891 pci_release_regions(pdev);
3892err_pci_reg:
3893err_dma:
0333464f 3894 if (!adapter || disable_dev)
bc0c7151 3895 pci_disable_device(pdev);
92915f71
GR
3896 return err;
3897}
3898
3899/**
3900 * ixgbevf_remove - Device Removal Routine
3901 * @pdev: PCI device information struct
3902 *
3903 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 3904 * that it should release a PCI device. This could be caused by a
3905 * Hot-Plug event, or because the driver is going to be removed from
3906 * memory.
3907 **/
9f9a12f8 3908static void ixgbevf_remove(struct pci_dev *pdev)
92915f71
GR
3909{
3910 struct net_device *netdev = pci_get_drvdata(pdev);
0333464f
ET
3911 struct ixgbevf_adapter *adapter;
3912 bool disable_dev;
3913
3914 if (!netdev)
3915 return;
3916
3917 adapter = netdev_priv(netdev);
92915f71 3918
2e7cfbdd 3919 set_bit(__IXGBEVF_REMOVING, &adapter->state);
92915f71
GR
3920
3921 del_timer_sync(&adapter->watchdog_timer);
3922
23f333a2 3923 cancel_work_sync(&adapter->reset_task);
92915f71
GR
3924 cancel_work_sync(&adapter->watchdog_task);
3925
fd13a9ab 3926 if (netdev->reg_state == NETREG_REGISTERED)
92915f71 3927 unregister_netdev(netdev);
92915f71 3928
0ac1e8ce 3929 ixgbevf_clear_interrupt_scheme(adapter);
92915f71
GR
3930 ixgbevf_reset_interrupt_capability(adapter);
3931
dbf8b0d8 3932 iounmap(adapter->io_addr);
92915f71
GR
3933 pci_release_regions(pdev);
3934
3935 hw_dbg(&adapter->hw, "Remove complete\n");
3936
0333464f 3937 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
92915f71
GR
3938 free_netdev(netdev);
3939
0333464f 3940 if (disable_dev)
bc0c7151 3941 pci_disable_device(pdev);
92915f71
GR
3942}
3943
9f19f31d
AD
3944/**
3945 * ixgbevf_io_error_detected - called when PCI error is detected
3946 * @pdev: Pointer to PCI device
3947 * @state: The current pci connection state
3948 *
3949 * This function is called after a PCI bus error affecting
3950 * this device has been detected.
3951 */
3952static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3953 pci_channel_state_t state)
3954{
3955 struct net_device *netdev = pci_get_drvdata(pdev);
3956 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3957
ea699569
MR
3958 if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
3959 return PCI_ERS_RESULT_DISCONNECT;
3960
bc0c7151 3961 rtnl_lock();
9f19f31d
AD
3962 netif_device_detach(netdev);
3963
bc0c7151
MR
3964 if (state == pci_channel_io_perm_failure) {
3965 rtnl_unlock();
9f19f31d 3966 return PCI_ERS_RESULT_DISCONNECT;
bc0c7151 3967 }
9f19f31d
AD
3968
3969 if (netif_running(netdev))
3970 ixgbevf_down(adapter);
3971
bc0c7151
MR
3972 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
3973 pci_disable_device(pdev);
3974 rtnl_unlock();
9f19f31d
AD
3975
 3976 /* Request a slot reset. */
3977 return PCI_ERS_RESULT_NEED_RESET;
3978}
3979
3980/**
3981 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
3982 * @pdev: Pointer to PCI device
3983 *
 3984 * Restart the card from scratch, as if from a cold boot. Implementation
 3985 * resembles the first half of the ixgbevf_resume routine.
3986 */
3987static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
3988{
3989 struct net_device *netdev = pci_get_drvdata(pdev);
3990 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3991
3992 if (pci_enable_device_mem(pdev)) {
3993 dev_err(&pdev->dev,
3994 "Cannot re-enable PCI device after reset.\n");
3995 return PCI_ERS_RESULT_DISCONNECT;
3996 }
3997
4e857c58 3998 smp_mb__before_atomic();
bc0c7151 3999 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
9f19f31d
AD
4000 pci_set_master(pdev);
4001
4002 ixgbevf_reset(adapter);
4003
4004 return PCI_ERS_RESULT_RECOVERED;
4005}
4006
4007/**
4008 * ixgbevf_io_resume - called when traffic can start flowing again.
4009 * @pdev: Pointer to PCI device
4010 *
4011 * This callback is called when the error recovery driver tells us that
 4012 * it's OK to resume normal operation. Implementation resembles the
 4013 * second half of the ixgbevf_resume routine.
4014 */
4015static void ixgbevf_io_resume(struct pci_dev *pdev)
4016{
4017 struct net_device *netdev = pci_get_drvdata(pdev);
4018 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4019
4020 if (netif_running(netdev))
4021 ixgbevf_up(adapter);
4022
4023 netif_device_attach(netdev);
4024}
4025
4026/* PCI Error Recovery (ERS) */
3646f0e5 4027static const struct pci_error_handlers ixgbevf_err_handler = {
9f19f31d
AD
4028 .error_detected = ixgbevf_io_error_detected,
4029 .slot_reset = ixgbevf_io_slot_reset,
4030 .resume = ixgbevf_io_resume,
4031};
4032
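/* Editor's note: the PCI AER core invokes the callbacks above in a fixed
 * order: error_detected() (detach and quiesce, then request a reset),
 * slot_reset() (re-enable and reset the function after the bus reset),
 * and finally resume() (bring traffic back up). Returning
 * PCI_ERS_RESULT_DISCONNECT from any stage abandons recovery.
 */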
92915f71
GR
4033static struct pci_driver ixgbevf_driver = {
4034 .name = ixgbevf_driver_name,
4035 .id_table = ixgbevf_pci_tbl,
4036 .probe = ixgbevf_probe,
9f9a12f8 4037 .remove = ixgbevf_remove,
0ac1e8ce
AD
4038#ifdef CONFIG_PM
4039 /* Power Management Hooks */
4040 .suspend = ixgbevf_suspend,
4041 .resume = ixgbevf_resume,
4042#endif
92915f71 4043 .shutdown = ixgbevf_shutdown,
9f19f31d 4044 .err_handler = &ixgbevf_err_handler
92915f71
GR
4045};
4046
4047/**
65d676c8 4048 * ixgbevf_init_module - Driver Registration Routine
92915f71 4049 *
65d676c8 4050 * ixgbevf_init_module is the first routine called when the driver is
92915f71
GR
4051 * loaded. All it does is register with the PCI subsystem.
4052 **/
4053static int __init ixgbevf_init_module(void)
4054{
4055 int ret;
dbd9636e
JK
4056 pr_info("%s - version %s\n", ixgbevf_driver_string,
4057 ixgbevf_driver_version);
92915f71 4058
dbd9636e 4059 pr_info("%s\n", ixgbevf_copyright);
92915f71
GR
4060
4061 ret = pci_register_driver(&ixgbevf_driver);
4062 return ret;
4063}
4064
4065module_init(ixgbevf_init_module);
4066
4067/**
65d676c8 4068 * ixgbevf_exit_module - Driver Exit Cleanup Routine
92915f71 4069 *
65d676c8 4070 * ixgbevf_exit_module is called just before the driver is removed
92915f71
GR
4071 * from memory.
4072 **/
4073static void __exit ixgbevf_exit_module(void)
4074{
4075 pci_unregister_driver(&ixgbevf_driver);
4076}
4077
4078#ifdef DEBUG
4079/**
65d676c8 4080 * ixgbevf_get_hw_dev_name - return device name string
92915f71
GR
4081 * used by hardware layer to print debugging information
4082 **/
4083char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
4084{
4085 struct ixgbevf_adapter *adapter = hw->back;
4086 return adapter->netdev->name;
4087}
4088
4089#endif
4090module_exit(ixgbevf_exit_module);
4091
4092/* ixgbevf_main.c */