i40e: make functions static and remove dead code
[deliverable/linux.git] / drivers / net / ethernet / intel / ixgbevf / ixgbevf_main.c
CommitLineData
92915f71
GR
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
5c47a2b6 4 Copyright(c) 1999 - 2012 Intel Corporation.
92915f71
GR
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28
29/******************************************************************************
30 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
31******************************************************************************/
dbd9636e
JK
32
33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
92915f71 35#include <linux/types.h>
dadcd65f 36#include <linux/bitops.h>
92915f71
GR
37#include <linux/module.h>
38#include <linux/pci.h>
39#include <linux/netdevice.h>
40#include <linux/vmalloc.h>
41#include <linux/string.h>
42#include <linux/in.h>
43#include <linux/ip.h>
44#include <linux/tcp.h>
70a10e25 45#include <linux/sctp.h>
92915f71 46#include <linux/ipv6.h>
5a0e3ad6 47#include <linux/slab.h>
92915f71
GR
48#include <net/checksum.h>
49#include <net/ip6_checksum.h>
50#include <linux/ethtool.h>
01789349 51#include <linux/if.h>
92915f71 52#include <linux/if_vlan.h>
70c71606 53#include <linux/prefetch.h>
92915f71
GR
54
55#include "ixgbevf.h"
56
3d8fe98f 57const char ixgbevf_driver_name[] = "ixgbevf";
92915f71 58static const char ixgbevf_driver_string[] =
422e05d1 59 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
92915f71 60
9e6fcae7 61#define DRV_VERSION "2.11.3-k"
92915f71 62const char ixgbevf_driver_version[] = DRV_VERSION;
66c87bd5 63static char ixgbevf_copyright[] =
5c47a2b6 64 "Copyright (c) 2009 - 2012 Intel Corporation.";
92915f71
GR
65
66static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
2316aa2a
GR
67 [board_82599_vf] = &ixgbevf_82599_vf_info,
68 [board_X540_vf] = &ixgbevf_X540_vf_info,
92915f71
GR
69};
70
71/* ixgbevf_pci_tbl - PCI Device ID Table
72 *
73 * Wildcard entries (PCI_ANY_ID) should come last
74 * Last entry must be all 0s
75 *
76 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
77 * Class, Class Mask, private data (not used) }
78 */
39ba22b4
SH
79static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
80 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
81 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
92915f71
GR
82 /* required last entry */
83 {0, }
84};
85MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
86
87MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
88MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
89MODULE_LICENSE("GPL");
90MODULE_VERSION(DRV_VERSION);
91
b3f4d599 92#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
93static int debug = -1;
94module_param(debug, int, 0);
95MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
92915f71
GR
96
97/* forward decls */
fa71ae27 98static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
56e94095 99static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
92915f71 100
5cdab2f6 101static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
92915f71
GR
102 u32 val)
103{
5cdab2f6
DS
104 rx_ring->next_to_use = val;
105
92915f71
GR
106 /*
107 * Force memory writes to complete before letting h/w
108 * know there are new descriptors to fetch. (Only
109 * applicable for weak-ordered memory model archs,
110 * such as IA-64).
111 */
112 wmb();
5cdab2f6 113 writel(val, rx_ring->tail);
92915f71
GR
114}
115
49ce9c2c 116/**
65d676c8 117 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
92915f71
GR
118 * @adapter: pointer to adapter struct
119 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
120 * @queue: queue to map the corresponding interrupt to
121 * @msix_vector: the vector to map to the corresponding queue
92915f71
GR
122 */
123static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
124 u8 queue, u8 msix_vector)
125{
126 u32 ivar, index;
127 struct ixgbe_hw *hw = &adapter->hw;
128 if (direction == -1) {
129 /* other causes */
130 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
131 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
132 ivar &= ~0xFF;
133 ivar |= msix_vector;
134 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
135 } else {
136 /* tx or rx causes */
137 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
138 index = ((16 * (queue & 1)) + (8 * direction));
139 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
140 ivar &= ~(0xFF << index);
141 ivar |= (msix_vector << index);
142 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
143 }
144}
145
70a10e25 146static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
92915f71
GR
147 struct ixgbevf_tx_buffer
148 *tx_buffer_info)
149{
150 if (tx_buffer_info->dma) {
151 if (tx_buffer_info->mapped_as_page)
70a10e25 152 dma_unmap_page(tx_ring->dev,
92915f71
GR
153 tx_buffer_info->dma,
154 tx_buffer_info->length,
2a1f8794 155 DMA_TO_DEVICE);
92915f71 156 else
70a10e25 157 dma_unmap_single(tx_ring->dev,
92915f71
GR
158 tx_buffer_info->dma,
159 tx_buffer_info->length,
2a1f8794 160 DMA_TO_DEVICE);
92915f71
GR
161 tx_buffer_info->dma = 0;
162 }
163 if (tx_buffer_info->skb) {
164 dev_kfree_skb_any(tx_buffer_info->skb);
165 tx_buffer_info->skb = NULL;
166 }
167 tx_buffer_info->time_stamp = 0;
168 /* tx_buffer_info must be completely set up in the transmit path */
169}
170
92915f71
GR
171#define IXGBE_MAX_TXD_PWR 14
172#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
173
174/* Tx Descriptors needed, worst case */
3595990a
AD
175#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
176#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
92915f71
GR
177
178static void ixgbevf_tx_timeout(struct net_device *netdev);
179
180/**
181 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
fa71ae27 182 * @q_vector: board private structure
92915f71
GR
183 * @tx_ring: tx ring to clean
184 **/
fa71ae27 185static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
92915f71
GR
186 struct ixgbevf_ring *tx_ring)
187{
fa71ae27 188 struct ixgbevf_adapter *adapter = q_vector->adapter;
92915f71
GR
189 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
190 struct ixgbevf_tx_buffer *tx_buffer_info;
e757e3e1 191 unsigned int i, count = 0;
92915f71
GR
192 unsigned int total_bytes = 0, total_packets = 0;
193
10cc1bdd
AD
194 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
195 return true;
196
92915f71 197 i = tx_ring->next_to_clean;
e757e3e1
AD
198 tx_buffer_info = &tx_ring->tx_buffer_info[i];
199 eop_desc = tx_buffer_info->next_to_watch;
92915f71 200
e757e3e1 201 do {
92915f71 202 bool cleaned = false;
e757e3e1
AD
203
204 /* if next_to_watch is not set then there is no work pending */
205 if (!eop_desc)
206 break;
207
208 /* prevent any other reads prior to eop_desc */
209 read_barrier_depends();
210
211 /* if DD is not set pending work has not been completed */
212 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
213 break;
214
215 /* clear next_to_watch to prevent false hangs */
216 tx_buffer_info->next_to_watch = NULL;
217
92915f71
GR
218 for ( ; !cleaned; count++) {
219 struct sk_buff *skb;
908421f6 220 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
e757e3e1 221 cleaned = (tx_desc == eop_desc);
92915f71
GR
222 skb = tx_buffer_info->skb;
223
224 if (cleaned && skb) {
225 unsigned int segs, bytecount;
226
227 /* gso_segs is currently only valid for tcp */
228 segs = skb_shinfo(skb)->gso_segs ?: 1;
229 /* multiply data chunks by size of headers */
230 bytecount = ((segs - 1) * skb_headlen(skb)) +
231 skb->len;
232 total_packets += segs;
233 total_bytes += bytecount;
234 }
235
70a10e25 236 ixgbevf_unmap_and_free_tx_resource(tx_ring,
92915f71
GR
237 tx_buffer_info);
238
239 tx_desc->wb.status = 0;
240
241 i++;
242 if (i == tx_ring->count)
243 i = 0;
e757e3e1
AD
244
245 tx_buffer_info = &tx_ring->tx_buffer_info[i];
92915f71
GR
246 }
247
e757e3e1
AD
248 eop_desc = tx_buffer_info->next_to_watch;
249 } while (count < tx_ring->count);
92915f71
GR
250
251 tx_ring->next_to_clean = i;
252
253#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
fb40195c 254 if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
f880d07b 255 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
92915f71
GR
256 /* Make sure that anybody stopping the queue after this
257 * sees the new next_to_clean.
258 */
259 smp_mb();
fb40195c
AD
260 if (__netif_subqueue_stopped(tx_ring->netdev,
261 tx_ring->queue_index) &&
92915f71 262 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
fb40195c
AD
263 netif_wake_subqueue(tx_ring->netdev,
264 tx_ring->queue_index);
92915f71
GR
265 ++adapter->restart_queue;
266 }
92915f71
GR
267 }
268
4197aa7b 269 u64_stats_update_begin(&tx_ring->syncp);
92915f71
GR
270 tx_ring->total_bytes += total_bytes;
271 tx_ring->total_packets += total_packets;
4197aa7b 272 u64_stats_update_end(&tx_ring->syncp);
ac6ed8f0
GR
273 q_vector->tx.total_bytes += total_bytes;
274 q_vector->tx.total_packets += total_packets;
92915f71 275
fa71ae27 276 return count < tx_ring->count;
92915f71
GR
277}
278
279/**
280 * ixgbevf_receive_skb - Send a completed packet up the stack
281 * @q_vector: structure containing interrupt and ring information
282 * @skb: packet to send up
283 * @status: hardware indication of status of receive
92915f71
GR
284 * @rx_desc: rx descriptor
285 **/
286static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
287 struct sk_buff *skb, u8 status,
92915f71
GR
288 union ixgbe_adv_rx_desc *rx_desc)
289{
290 struct ixgbevf_adapter *adapter = q_vector->adapter;
291 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
dd1ed3b7 292 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
92915f71 293
5d9a533b 294 if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
86a9bad3 295 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
dadcd65f 296
366c1099
GR
297 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
298 napi_gro_receive(&q_vector->napi, skb);
299 else
300 netif_rx(skb);
92915f71
GR
301}
302
08681618
JK
303/**
304 * ixgbevf_rx_skb - Helper function to determine proper Rx method
305 * @q_vector: structure containing interrupt and ring information
306 * @skb: packet to send up
307 * @status: hardware indication of status of receive
308 * @rx_desc: rx descriptor
309 **/
310static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
311 struct sk_buff *skb, u8 status,
312 union ixgbe_adv_rx_desc *rx_desc)
313{
c777cdfa
JK
314#ifdef CONFIG_NET_RX_BUSY_POLL
315 skb_mark_napi_id(skb, &q_vector->napi);
316
317 if (ixgbevf_qv_busy_polling(q_vector)) {
318 netif_receive_skb(skb);
319 /* exit early if we busy polled */
320 return;
321 }
322#endif /* CONFIG_NET_RX_BUSY_POLL */
323
08681618
JK
324 ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
325}
326
92915f71
GR
327/**
328 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
55fb277c 329 * @ring: pointer to Rx descriptor ring structure
92915f71
GR
330 * @status_err: hardware indication of status of receive
331 * @skb: skb currently being received and modified
332 **/
55fb277c 333static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
92915f71
GR
334 u32 status_err, struct sk_buff *skb)
335{
bc8acf2c 336 skb_checksum_none_assert(skb);
92915f71
GR
337
338 /* Rx csum disabled */
fb40195c 339 if (!(ring->netdev->features & NETIF_F_RXCSUM))
92915f71
GR
340 return;
341
342 /* if IP and error */
343 if ((status_err & IXGBE_RXD_STAT_IPCS) &&
344 (status_err & IXGBE_RXDADV_ERR_IPE)) {
55fb277c 345 ring->hw_csum_rx_error++;
92915f71
GR
346 return;
347 }
348
349 if (!(status_err & IXGBE_RXD_STAT_L4CS))
350 return;
351
352 if (status_err & IXGBE_RXDADV_ERR_TCPE) {
55fb277c 353 ring->hw_csum_rx_error++;
92915f71
GR
354 return;
355 }
356
357 /* It must be a TCP or UDP packet with a valid checksum */
358 skb->ip_summed = CHECKSUM_UNNECESSARY;
55fb277c 359 ring->hw_csum_rx_good++;
92915f71
GR
360}
361
362/**
363 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
364 * @adapter: address of board private structure
365 **/
366static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
367 struct ixgbevf_ring *rx_ring,
368 int cleaned_count)
369{
370 struct pci_dev *pdev = adapter->pdev;
371 union ixgbe_adv_rx_desc *rx_desc;
372 struct ixgbevf_rx_buffer *bi;
fb40195c 373 unsigned int i = rx_ring->next_to_use;
92915f71 374
92915f71
GR
375 bi = &rx_ring->rx_buffer_info[i];
376
377 while (cleaned_count--) {
908421f6 378 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
b9dd245b
GR
379
380 if (!bi->skb) {
381 struct sk_buff *skb;
382
fb40195c
AD
383 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
384 rx_ring->rx_buf_len);
92915f71
GR
385 if (!skb) {
386 adapter->alloc_rx_buff_failed++;
387 goto no_buffers;
388 }
92915f71 389 bi->skb = skb;
b9dd245b 390
2a1f8794 391 bi->dma = dma_map_single(&pdev->dev, skb->data,
92915f71 392 rx_ring->rx_buf_len,
2a1f8794 393 DMA_FROM_DEVICE);
6132ee8a
GR
394 if (dma_mapping_error(&pdev->dev, bi->dma)) {
395 dev_kfree_skb(skb);
396 bi->skb = NULL;
397 dev_err(&pdev->dev, "RX DMA map failed\n");
398 break;
399 }
92915f71 400 }
77d5dfca 401 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
92915f71
GR
402
403 i++;
404 if (i == rx_ring->count)
405 i = 0;
406 bi = &rx_ring->rx_buffer_info[i];
407 }
408
409no_buffers:
5cdab2f6
DS
410 if (rx_ring->next_to_use != i)
411 ixgbevf_release_rx_desc(rx_ring, i);
92915f71
GR
412}
413
414static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
5f3600eb 415 u32 qmask)
92915f71 416{
92915f71
GR
417 struct ixgbe_hw *hw = &adapter->hw;
418
5f3600eb 419 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
92915f71
GR
420}
421
08e50a20
JK
422static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
423 struct ixgbevf_ring *rx_ring,
424 int budget)
92915f71
GR
425{
426 struct ixgbevf_adapter *adapter = q_vector->adapter;
427 struct pci_dev *pdev = adapter->pdev;
428 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
429 struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
430 struct sk_buff *skb;
431 unsigned int i;
432 u32 len, staterr;
92915f71
GR
433 int cleaned_count = 0;
434 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
435
436 i = rx_ring->next_to_clean;
908421f6 437 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
92915f71
GR
438 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
439 rx_buffer_info = &rx_ring->rx_buffer_info[i];
440
441 while (staterr & IXGBE_RXD_STAT_DD) {
fa71ae27 442 if (!budget)
92915f71 443 break;
fa71ae27 444 budget--;
92915f71 445
2d0bb1c1 446 rmb(); /* read descriptor and rx_buffer_info after status DD */
77d5dfca 447 len = le16_to_cpu(rx_desc->wb.upper.length);
92915f71
GR
448 skb = rx_buffer_info->skb;
449 prefetch(skb->data - NET_IP_ALIGN);
450 rx_buffer_info->skb = NULL;
451
452 if (rx_buffer_info->dma) {
2a1f8794 453 dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
92915f71 454 rx_ring->rx_buf_len,
2a1f8794 455 DMA_FROM_DEVICE);
92915f71
GR
456 rx_buffer_info->dma = 0;
457 skb_put(skb, len);
458 }
459
92915f71
GR
460 i++;
461 if (i == rx_ring->count)
462 i = 0;
463
908421f6 464 next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
92915f71
GR
465 prefetch(next_rxd);
466 cleaned_count++;
467
468 next_buffer = &rx_ring->rx_buffer_info[i];
469
470 if (!(staterr & IXGBE_RXD_STAT_EOP)) {
77d5dfca 471 skb->next = next_buffer->skb;
5c60f81a 472 IXGBE_CB(skb->next)->prev = skb;
92915f71
GR
473 adapter->non_eop_descs++;
474 goto next_desc;
475 }
476
5c60f81a
AD
477 /* we should not be chaining buffers, if we did drop the skb */
478 if (IXGBE_CB(skb)->prev) {
479 do {
480 struct sk_buff *this = skb;
481 skb = IXGBE_CB(skb)->prev;
482 dev_kfree_skb(this);
483 } while (skb);
484 goto next_desc;
485 }
486
92915f71
GR
487 /* ERR_MASK will only have valid bits if EOP set */
488 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
489 dev_kfree_skb_irq(skb);
490 goto next_desc;
491 }
492
55fb277c 493 ixgbevf_rx_checksum(rx_ring, staterr, skb);
92915f71
GR
494
495 /* probably a little skewed due to removing CRC */
496 total_rx_bytes += skb->len;
497 total_rx_packets++;
498
fb40195c 499 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
92915f71 500
815cccbf
JF
501 /* Workaround hardware that can't do proper VEPA multicast
502 * source pruning.
503 */
504 if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
7367d0b5
JP
505 ether_addr_equal(adapter->netdev->dev_addr,
506 eth_hdr(skb)->h_source)) {
815cccbf
JF
507 dev_kfree_skb_irq(skb);
508 goto next_desc;
509 }
510
08681618 511 ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);
92915f71
GR
512
513next_desc:
514 rx_desc->wb.upper.status_error = 0;
515
516 /* return some buffers to hardware, one at a time is too slow */
517 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
518 ixgbevf_alloc_rx_buffers(adapter, rx_ring,
519 cleaned_count);
520 cleaned_count = 0;
521 }
522
523 /* use prefetched values */
524 rx_desc = next_rxd;
525 rx_buffer_info = &rx_ring->rx_buffer_info[i];
526
527 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
528 }
529
530 rx_ring->next_to_clean = i;
f880d07b 531 cleaned_count = ixgbevf_desc_unused(rx_ring);
92915f71
GR
532
533 if (cleaned_count)
534 ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
535
4197aa7b 536 u64_stats_update_begin(&rx_ring->syncp);
92915f71
GR
537 rx_ring->total_packets += total_rx_packets;
538 rx_ring->total_bytes += total_rx_bytes;
4197aa7b 539 u64_stats_update_end(&rx_ring->syncp);
ac6ed8f0
GR
540 q_vector->rx.total_packets += total_rx_packets;
541 q_vector->rx.total_bytes += total_rx_bytes;
92915f71 542
08e50a20 543 return total_rx_packets;
92915f71
GR
544}
545
546/**
fa71ae27 547 * ixgbevf_poll - NAPI polling calback
92915f71
GR
548 * @napi: napi struct with our devices info in it
549 * @budget: amount of work driver is allowed to do this pass, in packets
550 *
fa71ae27 551 * This function will clean more than one or more rings associated with a
92915f71
GR
552 * q_vector.
553 **/
fa71ae27 554static int ixgbevf_poll(struct napi_struct *napi, int budget)
92915f71
GR
555{
556 struct ixgbevf_q_vector *q_vector =
557 container_of(napi, struct ixgbevf_q_vector, napi);
558 struct ixgbevf_adapter *adapter = q_vector->adapter;
fa71ae27
AD
559 struct ixgbevf_ring *ring;
560 int per_ring_budget;
561 bool clean_complete = true;
562
563 ixgbevf_for_each_ring(ring, q_vector->tx)
564 clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
92915f71 565
c777cdfa
JK
566#ifdef CONFIG_NET_RX_BUSY_POLL
567 if (!ixgbevf_qv_lock_napi(q_vector))
568 return budget;
569#endif
570
92915f71
GR
571 /* attempt to distribute budget to each queue fairly, but don't allow
572 * the budget to go below 1 because we'll exit polling */
fa71ae27
AD
573 if (q_vector->rx.count > 1)
574 per_ring_budget = max(budget/q_vector->rx.count, 1);
575 else
576 per_ring_budget = budget;
577
366c1099 578 adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
fa71ae27 579 ixgbevf_for_each_ring(ring, q_vector->rx)
08e50a20
JK
580 clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
581 per_ring_budget)
582 < per_ring_budget);
366c1099 583 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
fa71ae27 584
c777cdfa
JK
585#ifdef CONFIG_NET_RX_BUSY_POLL
586 ixgbevf_qv_unlock_napi(q_vector);
587#endif
588
fa71ae27
AD
589 /* If all work not completed, return budget and keep polling */
590 if (!clean_complete)
591 return budget;
592 /* all work done, exit the polling mode */
593 napi_complete(napi);
594 if (adapter->rx_itr_setting & 1)
595 ixgbevf_set_itr(q_vector);
596 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
597 ixgbevf_irq_enable_queues(adapter,
598 1 << q_vector->v_idx);
92915f71 599
fa71ae27 600 return 0;
92915f71
GR
601}
602
ce422606
GR
603/**
604 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
605 * @q_vector: structure containing interrupt and ring information
606 */
3849623e 607void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
ce422606
GR
608{
609 struct ixgbevf_adapter *adapter = q_vector->adapter;
610 struct ixgbe_hw *hw = &adapter->hw;
611 int v_idx = q_vector->v_idx;
612 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
613
614 /*
615 * set the WDIS bit to not clear the timer bits and cause an
616 * immediate assertion of the interrupt
617 */
618 itr_reg |= IXGBE_EITR_CNT_WDIS;
619
620 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
621}
92915f71 622
c777cdfa
JK
623#ifdef CONFIG_NET_RX_BUSY_POLL
624/* must be called with local_bh_disable()d */
625static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
626{
627 struct ixgbevf_q_vector *q_vector =
628 container_of(napi, struct ixgbevf_q_vector, napi);
629 struct ixgbevf_adapter *adapter = q_vector->adapter;
630 struct ixgbevf_ring *ring;
631 int found = 0;
632
633 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
634 return LL_FLUSH_FAILED;
635
636 if (!ixgbevf_qv_lock_poll(q_vector))
637 return LL_FLUSH_BUSY;
638
639 ixgbevf_for_each_ring(ring, q_vector->rx) {
640 found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
3b5dca26
JK
641#ifdef BP_EXTENDED_STATS
642 if (found)
643 ring->bp_cleaned += found;
644 else
645 ring->bp_misses++;
646#endif
c777cdfa
JK
647 if (found)
648 break;
649 }
650
651 ixgbevf_qv_unlock_poll(q_vector);
652
653 return found;
654}
655#endif /* CONFIG_NET_RX_BUSY_POLL */
656
92915f71
GR
657/**
658 * ixgbevf_configure_msix - Configure MSI-X hardware
659 * @adapter: board private structure
660 *
661 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
662 * interrupts.
663 **/
664static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
665{
666 struct ixgbevf_q_vector *q_vector;
6b43c446 667 int q_vectors, v_idx;
92915f71
GR
668
669 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
5f3600eb 670 adapter->eims_enable_mask = 0;
92915f71
GR
671
672 /*
673 * Populate the IVAR table and set the ITR values to the
674 * corresponding register.
675 */
676 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
6b43c446 677 struct ixgbevf_ring *ring;
92915f71 678 q_vector = adapter->q_vector[v_idx];
6b43c446
AD
679
680 ixgbevf_for_each_ring(ring, q_vector->rx)
681 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
682
683 ixgbevf_for_each_ring(ring, q_vector->tx)
684 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
92915f71 685
5f3600eb
AD
686 if (q_vector->tx.ring && !q_vector->rx.ring) {
687 /* tx only vector */
688 if (adapter->tx_itr_setting == 1)
689 q_vector->itr = IXGBE_10K_ITR;
690 else
691 q_vector->itr = adapter->tx_itr_setting;
692 } else {
693 /* rx or rx/tx vector */
694 if (adapter->rx_itr_setting == 1)
695 q_vector->itr = IXGBE_20K_ITR;
696 else
697 q_vector->itr = adapter->rx_itr_setting;
698 }
699
700 /* add q_vector eims value to global eims_enable_mask */
701 adapter->eims_enable_mask |= 1 << v_idx;
92915f71 702
5f3600eb 703 ixgbevf_write_eitr(q_vector);
92915f71
GR
704 }
705
706 ixgbevf_set_ivar(adapter, -1, 1, v_idx);
5f3600eb
AD
707 /* setup eims_other and add value to global eims_enable_mask */
708 adapter->eims_other = 1 << v_idx;
709 adapter->eims_enable_mask |= adapter->eims_other;
92915f71
GR
710}
711
712enum latency_range {
713 lowest_latency = 0,
714 low_latency = 1,
715 bulk_latency = 2,
716 latency_invalid = 255
717};
718
719/**
720 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
5f3600eb
AD
721 * @q_vector: structure containing interrupt and ring information
722 * @ring_container: structure containing ring performance data
92915f71
GR
723 *
724 * Stores a new ITR value based on packets and byte
725 * counts during the last interrupt. The advantage of per interrupt
726 * computation is faster updates and more accurate ITR for the current
727 * traffic pattern. Constants in this function were computed
728 * based on theoretical maximum wire speed and thresholds were set based
729 * on testing data as well as attempting to minimize response time
730 * while increasing bulk throughput.
731 **/
5f3600eb
AD
732static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
733 struct ixgbevf_ring_container *ring_container)
92915f71 734{
5f3600eb
AD
735 int bytes = ring_container->total_bytes;
736 int packets = ring_container->total_packets;
92915f71
GR
737 u32 timepassed_us;
738 u64 bytes_perint;
5f3600eb 739 u8 itr_setting = ring_container->itr;
92915f71
GR
740
741 if (packets == 0)
5f3600eb 742 return;
92915f71
GR
743
744 /* simple throttlerate management
745 * 0-20MB/s lowest (100000 ints/s)
746 * 20-100MB/s low (20000 ints/s)
747 * 100-1249MB/s bulk (8000 ints/s)
748 */
749 /* what was last interrupt timeslice? */
5f3600eb 750 timepassed_us = q_vector->itr >> 2;
92915f71
GR
751 bytes_perint = bytes / timepassed_us; /* bytes/usec */
752
753 switch (itr_setting) {
754 case lowest_latency:
e2c28ce7 755 if (bytes_perint > 10)
5f3600eb 756 itr_setting = low_latency;
92915f71
GR
757 break;
758 case low_latency:
e2c28ce7 759 if (bytes_perint > 20)
5f3600eb 760 itr_setting = bulk_latency;
e2c28ce7 761 else if (bytes_perint <= 10)
5f3600eb 762 itr_setting = lowest_latency;
92915f71
GR
763 break;
764 case bulk_latency:
e2c28ce7 765 if (bytes_perint <= 20)
5f3600eb 766 itr_setting = low_latency;
92915f71
GR
767 break;
768 }
769
5f3600eb
AD
770 /* clear work counters since we have the values we need */
771 ring_container->total_bytes = 0;
772 ring_container->total_packets = 0;
773
774 /* write updated itr to ring container */
775 ring_container->itr = itr_setting;
92915f71
GR
776}
777
fa71ae27 778static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
92915f71 779{
5f3600eb
AD
780 u32 new_itr = q_vector->itr;
781 u8 current_itr;
92915f71 782
5f3600eb
AD
783 ixgbevf_update_itr(q_vector, &q_vector->tx);
784 ixgbevf_update_itr(q_vector, &q_vector->rx);
92915f71 785
6b43c446 786 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
92915f71
GR
787
788 switch (current_itr) {
789 /* counts and packets in update_itr are dependent on these numbers */
790 case lowest_latency:
5f3600eb 791 new_itr = IXGBE_100K_ITR;
92915f71
GR
792 break;
793 case low_latency:
5f3600eb 794 new_itr = IXGBE_20K_ITR;
92915f71
GR
795 break;
796 case bulk_latency:
797 default:
5f3600eb 798 new_itr = IXGBE_8K_ITR;
92915f71
GR
799 break;
800 }
801
5f3600eb 802 if (new_itr != q_vector->itr) {
92915f71 803 /* do an exponential smoothing */
5f3600eb
AD
804 new_itr = (10 * new_itr * q_vector->itr) /
805 ((9 * new_itr) + q_vector->itr);
806
807 /* save the algorithm value here */
808 q_vector->itr = new_itr;
809
810 ixgbevf_write_eitr(q_vector);
92915f71 811 }
92915f71
GR
812}
813
4b2cd27f 814static irqreturn_t ixgbevf_msix_other(int irq, void *data)
92915f71 815{
fa71ae27 816 struct ixgbevf_adapter *adapter = data;
92915f71 817 struct ixgbe_hw *hw = &adapter->hw;
08259594 818
4b2cd27f 819 hw->mac.get_link_status = 1;
1e72bfc3 820
c7bb417d
DS
821 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
822 mod_timer(&adapter->watchdog_timer, jiffies);
3a2c4033 823
5f3600eb
AD
824 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
825
92915f71
GR
826 return IRQ_HANDLED;
827}
828
92915f71 829/**
fa71ae27 830 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
92915f71
GR
831 * @irq: unused
832 * @data: pointer to our q_vector struct for this interrupt vector
833 **/
fa71ae27 834static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
92915f71
GR
835{
836 struct ixgbevf_q_vector *q_vector = data;
92915f71 837
5f3600eb 838 /* EIAM disabled interrupts (on this vector) for us */
fa71ae27
AD
839 if (q_vector->rx.ring || q_vector->tx.ring)
840 napi_schedule(&q_vector->napi);
92915f71
GR
841
842 return IRQ_HANDLED;
843}
844
845static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
846 int r_idx)
847{
848 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
849
6b43c446
AD
850 a->rx_ring[r_idx].next = q_vector->rx.ring;
851 q_vector->rx.ring = &a->rx_ring[r_idx];
852 q_vector->rx.count++;
92915f71
GR
853}
854
855static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
856 int t_idx)
857{
858 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
859
6b43c446
AD
860 a->tx_ring[t_idx].next = q_vector->tx.ring;
861 q_vector->tx.ring = &a->tx_ring[t_idx];
862 q_vector->tx.count++;
92915f71
GR
863}
864
865/**
866 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
867 * @adapter: board private structure to initialize
868 *
869 * This function maps descriptor rings to the queue-specific vectors
870 * we were allotted through the MSI-X enabling code. Ideally, we'd have
871 * one vector per ring/queue, but on a constrained vector budget, we
872 * group the rings as "efficiently" as possible. You would add new
873 * mapping configurations in here.
874 **/
875static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
876{
877 int q_vectors;
878 int v_start = 0;
879 int rxr_idx = 0, txr_idx = 0;
880 int rxr_remaining = adapter->num_rx_queues;
881 int txr_remaining = adapter->num_tx_queues;
882 int i, j;
883 int rqpv, tqpv;
884 int err = 0;
885
886 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
887
888 /*
889 * The ideal configuration...
890 * We have enough vectors to map one per queue.
891 */
892 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
893 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
894 map_vector_to_rxq(adapter, v_start, rxr_idx);
895
896 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
897 map_vector_to_txq(adapter, v_start, txr_idx);
898 goto out;
899 }
900
901 /*
902 * If we don't have enough vectors for a 1-to-1
903 * mapping, we'll have to group them so there are
904 * multiple queues per vector.
905 */
906 /* Re-adjusting *qpv takes care of the remainder. */
907 for (i = v_start; i < q_vectors; i++) {
908 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
909 for (j = 0; j < rqpv; j++) {
910 map_vector_to_rxq(adapter, i, rxr_idx);
911 rxr_idx++;
912 rxr_remaining--;
913 }
914 }
915 for (i = v_start; i < q_vectors; i++) {
916 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
917 for (j = 0; j < tqpv; j++) {
918 map_vector_to_txq(adapter, i, txr_idx);
919 txr_idx++;
920 txr_remaining--;
921 }
922 }
923
924out:
925 return err;
926}
927
928/**
929 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
930 * @adapter: board private structure
931 *
932 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
933 * interrupts from the kernel.
934 **/
935static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
936{
937 struct net_device *netdev = adapter->netdev;
fa71ae27
AD
938 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
939 int vector, err;
92915f71
GR
940 int ri = 0, ti = 0;
941
92915f71 942 for (vector = 0; vector < q_vectors; vector++) {
fa71ae27
AD
943 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
944 struct msix_entry *entry = &adapter->msix_entries[vector];
945
946 if (q_vector->tx.ring && q_vector->rx.ring) {
947 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
948 "%s-%s-%d", netdev->name, "TxRx", ri++);
949 ti++;
950 } else if (q_vector->rx.ring) {
951 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
952 "%s-%s-%d", netdev->name, "rx", ri++);
953 } else if (q_vector->tx.ring) {
954 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
955 "%s-%s-%d", netdev->name, "tx", ti++);
92915f71
GR
956 } else {
957 /* skip this unused q_vector */
958 continue;
959 }
fa71ae27
AD
960 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
961 q_vector->name, q_vector);
92915f71
GR
962 if (err) {
963 hw_dbg(&adapter->hw,
964 "request_irq failed for MSIX interrupt "
965 "Error: %d\n", err);
966 goto free_queue_irqs;
967 }
968 }
969
92915f71 970 err = request_irq(adapter->msix_entries[vector].vector,
4b2cd27f 971 &ixgbevf_msix_other, 0, netdev->name, adapter);
92915f71
GR
972 if (err) {
973 hw_dbg(&adapter->hw,
4b2cd27f 974 "request_irq for msix_other failed: %d\n", err);
92915f71
GR
975 goto free_queue_irqs;
976 }
977
978 return 0;
979
980free_queue_irqs:
fa71ae27
AD
981 while (vector) {
982 vector--;
983 free_irq(adapter->msix_entries[vector].vector,
984 adapter->q_vector[vector]);
985 }
a1f6c6b1 986 /* This failure is non-recoverable - it indicates the system is
987 * out of MSIX vector resources and the VF driver cannot run
988 * without them. Set the number of msix vectors to zero
989 * indicating that not enough can be allocated. The error
990 * will be returned to the user indicating device open failed.
991 * Any further attempts to force the driver to open will also
992 * fail. The only way to recover is to unload the driver and
993 * reload it again. If the system has recovered some MSIX
994 * vectors then it may succeed.
995 */
996 adapter->num_msix_vectors = 0;
92915f71
GR
997 return err;
998}
999
1000static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
1001{
1002 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1003
1004 for (i = 0; i < q_vectors; i++) {
1005 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
6b43c446
AD
1006 q_vector->rx.ring = NULL;
1007 q_vector->tx.ring = NULL;
1008 q_vector->rx.count = 0;
1009 q_vector->tx.count = 0;
92915f71
GR
1010 }
1011}
1012
1013/**
1014 * ixgbevf_request_irq - initialize interrupts
1015 * @adapter: board private structure
1016 *
1017 * Attempts to configure interrupts using the best available
1018 * capabilities of the hardware and kernel.
1019 **/
1020static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1021{
1022 int err = 0;
1023
1024 err = ixgbevf_request_msix_irqs(adapter);
1025
1026 if (err)
1027 hw_dbg(&adapter->hw,
1028 "request_irq failed, Error %d\n", err);
1029
1030 return err;
1031}
1032
1033static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1034{
92915f71
GR
1035 int i, q_vectors;
1036
1037 q_vectors = adapter->num_msix_vectors;
92915f71
GR
1038 i = q_vectors - 1;
1039
fa71ae27 1040 free_irq(adapter->msix_entries[i].vector, adapter);
92915f71
GR
1041 i--;
1042
1043 for (; i >= 0; i--) {
fa71ae27
AD
1044 /* free only the irqs that were actually requested */
1045 if (!adapter->q_vector[i]->rx.ring &&
1046 !adapter->q_vector[i]->tx.ring)
1047 continue;
1048
92915f71
GR
1049 free_irq(adapter->msix_entries[i].vector,
1050 adapter->q_vector[i]);
1051 }
1052
1053 ixgbevf_reset_q_vectors(adapter);
1054}
1055
1056/**
1057 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1058 * @adapter: board private structure
1059 **/
1060static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1061{
92915f71 1062 struct ixgbe_hw *hw = &adapter->hw;
5f3600eb 1063 int i;
92915f71 1064
5f3600eb 1065 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
92915f71 1066 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
5f3600eb 1067 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
92915f71
GR
1068
1069 IXGBE_WRITE_FLUSH(hw);
1070
1071 for (i = 0; i < adapter->num_msix_vectors; i++)
1072 synchronize_irq(adapter->msix_entries[i].vector);
1073}
1074
1075/**
1076 * ixgbevf_irq_enable - Enable default interrupt generation settings
1077 * @adapter: board private structure
1078 **/
5f3600eb 1079static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
92915f71
GR
1080{
1081 struct ixgbe_hw *hw = &adapter->hw;
92915f71 1082
5f3600eb
AD
1083 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1084 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1085 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
92915f71
GR
1086}
1087
1088/**
1089 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1090 * @adapter: board private structure
1091 *
1092 * Configure the Tx unit of the MAC after a reset.
1093 **/
1094static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1095{
1096 u64 tdba;
1097 struct ixgbe_hw *hw = &adapter->hw;
1098 u32 i, j, tdlen, txctrl;
1099
1100 /* Setup the HW Tx Head and Tail descriptor pointers */
1101 for (i = 0; i < adapter->num_tx_queues; i++) {
1102 struct ixgbevf_ring *ring = &adapter->tx_ring[i];
1103 j = ring->reg_idx;
1104 tdba = ring->dma;
1105 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
1106 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1107 (tdba & DMA_BIT_MASK(32)));
1108 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1109 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
1110 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
1111 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
5cdab2f6
DS
1112 ring->tail = hw->hw_addr + IXGBE_VFTDT(j);
1113 ring->next_to_clean = 0;
1114 ring->next_to_use = 0;
92915f71
GR
1115 /* Disable Tx Head Writeback RO bit, since this hoses
1116 * bookkeeping if things aren't delivered in order.
1117 */
1118 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1119 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1120 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1121 }
1122}
1123
1124#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1125
1126static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1127{
1128 struct ixgbevf_ring *rx_ring;
1129 struct ixgbe_hw *hw = &adapter->hw;
1130 u32 srrctl;
1131
1132 rx_ring = &adapter->rx_ring[index];
1133
1134 srrctl = IXGBE_SRRCTL_DROP_EN;
1135
77d5dfca 1136 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
92915f71 1137
dd1fe113
AD
1138 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
1139 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1140
92915f71
GR
1141 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1142}
1143
1bb9c639
DS
1144static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1145{
1146 struct ixgbe_hw *hw = &adapter->hw;
1147
1148 /* PSRTYPE must be initialized in 82599 */
1149 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1150 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1151 IXGBE_PSRTYPE_L2HDR;
1152
1153 if (adapter->num_rx_queues > 1)
1154 psrtype |= 1 << 29;
1155
1156 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1157}
1158
dd1fe113
AD
1159static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
1160{
1161 struct ixgbe_hw *hw = &adapter->hw;
1162 struct net_device *netdev = adapter->netdev;
1163 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1164 int i;
1165 u16 rx_buf_len;
1166
1167 /* notify the PF of our intent to use this size of frame */
1168 ixgbevf_rlpml_set_vf(hw, max_frame);
1169
1170 /* PF will allow an extra 4 bytes past for vlan tagged frames */
1171 max_frame += VLAN_HLEN;
1172
1173 /*
85624caf
GR
1174 * Allocate buffer sizes that fit well into 32K and
1175 * take into account max frame size of 9.5K
dd1fe113
AD
1176 */
1177 if ((hw->mac.type == ixgbe_mac_X540_vf) &&
1178 (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
1179 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
85624caf
GR
1180 else if (max_frame <= IXGBEVF_RXBUFFER_2K)
1181 rx_buf_len = IXGBEVF_RXBUFFER_2K;
1182 else if (max_frame <= IXGBEVF_RXBUFFER_4K)
1183 rx_buf_len = IXGBEVF_RXBUFFER_4K;
1184 else if (max_frame <= IXGBEVF_RXBUFFER_8K)
1185 rx_buf_len = IXGBEVF_RXBUFFER_8K;
dd1fe113 1186 else
85624caf 1187 rx_buf_len = IXGBEVF_RXBUFFER_10K;
dd1fe113
AD
1188
1189 for (i = 0; i < adapter->num_rx_queues; i++)
1190 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1191}
1192
92915f71
GR
1193/**
1194 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1195 * @adapter: board private structure
1196 *
1197 * Configure the Rx unit of the MAC after a reset.
1198 **/
1199static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1200{
1201 u64 rdba;
1202 struct ixgbe_hw *hw = &adapter->hw;
92915f71
GR
1203 int i, j;
1204 u32 rdlen;
92915f71 1205
1bb9c639 1206 ixgbevf_setup_psrtype(adapter);
dd1fe113
AD
1207
1208 /* set_rx_buffer_len must be called before ring initialization */
1209 ixgbevf_set_rx_buffer_len(adapter);
92915f71 1210
92915f71
GR
1211 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1212 * the Base and Length of the Rx Descriptor Ring */
1213 for (i = 0; i < adapter->num_rx_queues; i++) {
5cdab2f6
DS
1214 struct ixgbevf_ring *ring = &adapter->rx_ring[i];
1215 rdba = ring->dma;
1216 j = ring->reg_idx;
1217 rdlen = ring->count * sizeof(union ixgbe_adv_rx_desc);
92915f71
GR
1218 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1219 (rdba & DMA_BIT_MASK(32)));
1220 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1221 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
1222 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
1223 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
5cdab2f6
DS
1224 ring->tail = hw->hw_addr + IXGBE_VFRDT(j);
1225 ring->next_to_clean = 0;
1226 ring->next_to_use = 0;
92915f71
GR
1227
1228 ixgbevf_configure_srrctl(adapter, j);
1229 }
1230}
1231
80d5c368
PM
1232static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
1233 __be16 proto, u16 vid)
92915f71
GR
1234{
1235 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1236 struct ixgbe_hw *hw = &adapter->hw;
2ddc7fe1
AD
1237 int err;
1238
55fdd45b 1239 spin_lock_bh(&adapter->mbx_lock);
1c55ed76 1240
92915f71 1241 /* add VID to filter table */
2ddc7fe1 1242 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
1c55ed76 1243
55fdd45b 1244 spin_unlock_bh(&adapter->mbx_lock);
1c55ed76 1245
2ddc7fe1
AD
1246 /* translate error return types so error makes sense */
1247 if (err == IXGBE_ERR_MBX)
1248 return -EIO;
1249
1250 if (err == IXGBE_ERR_INVALID_ARGUMENT)
1251 return -EACCES;
1252
dadcd65f 1253 set_bit(vid, adapter->active_vlans);
8e586137 1254
2ddc7fe1 1255 return err;
92915f71
GR
1256}
1257
80d5c368
PM
1258static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
1259 __be16 proto, u16 vid)
92915f71
GR
1260{
1261 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1262 struct ixgbe_hw *hw = &adapter->hw;
2ddc7fe1 1263 int err = -EOPNOTSUPP;
92915f71 1264
55fdd45b 1265 spin_lock_bh(&adapter->mbx_lock);
1c55ed76 1266
92915f71 1267 /* remove VID from filter table */
92fe0bf7 1268 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
1c55ed76 1269
55fdd45b 1270 spin_unlock_bh(&adapter->mbx_lock);
1c55ed76 1271
dadcd65f 1272 clear_bit(vid, adapter->active_vlans);
8e586137 1273
2ddc7fe1 1274 return err;
92915f71
GR
1275}
1276
1277static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1278{
dadcd65f 1279 u16 vid;
92915f71 1280
dadcd65f 1281 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
80d5c368
PM
1282 ixgbevf_vlan_rx_add_vid(adapter->netdev,
1283 htons(ETH_P_8021Q), vid);
92915f71
GR
1284}
1285
46ec20ff
GR
1286static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1287{
1288 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1289 struct ixgbe_hw *hw = &adapter->hw;
1290 int count = 0;
1291
1292 if ((netdev_uc_count(netdev)) > 10) {
dbd9636e 1293 pr_err("Too many unicast filters - No Space\n");
46ec20ff
GR
1294 return -ENOSPC;
1295 }
1296
1297 if (!netdev_uc_empty(netdev)) {
1298 struct netdev_hw_addr *ha;
1299 netdev_for_each_uc_addr(ha, netdev) {
1300 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1301 udelay(200);
1302 }
1303 } else {
1304 /*
1305 * If the list is empty then send message to PF driver to
1306 * clear all macvlans on this VF.
1307 */
1308 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1309 }
1310
1311 return count;
1312}
1313
92915f71 1314/**
dee847f5 1315 * ixgbevf_set_rx_mode - Multicast and unicast set
92915f71
GR
1316 * @netdev: network interface device structure
1317 *
1318 * The set_rx_method entry point is called whenever the multicast address
dee847f5
GR
1319 * list, unicast address list or the network interface flags are updated.
1320 * This routine is responsible for configuring the hardware for proper
1321 * multicast mode and configuring requested unicast filters.
92915f71
GR
1322 **/
1323static void ixgbevf_set_rx_mode(struct net_device *netdev)
1324{
1325 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1326 struct ixgbe_hw *hw = &adapter->hw;
92915f71 1327
55fdd45b 1328 spin_lock_bh(&adapter->mbx_lock);
1c55ed76 1329
92915f71 1330 /* reprogram multicast list */
92fe0bf7 1331 hw->mac.ops.update_mc_addr_list(hw, netdev);
46ec20ff
GR
1332
1333 ixgbevf_write_uc_addr_list(netdev);
1c55ed76 1334
55fdd45b 1335 spin_unlock_bh(&adapter->mbx_lock);
92915f71
GR
1336}
1337
1338static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1339{
1340 int q_idx;
1341 struct ixgbevf_q_vector *q_vector;
1342 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1343
1344 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
92915f71 1345 q_vector = adapter->q_vector[q_idx];
c777cdfa
JK
1346#ifdef CONFIG_NET_RX_BUSY_POLL
1347 ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
1348#endif
fa71ae27 1349 napi_enable(&q_vector->napi);
92915f71
GR
1350 }
1351}
1352
1353static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1354{
1355 int q_idx;
1356 struct ixgbevf_q_vector *q_vector;
1357 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1358
1359 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1360 q_vector = adapter->q_vector[q_idx];
92915f71 1361 napi_disable(&q_vector->napi);
c777cdfa
JK
1362#ifdef CONFIG_NET_RX_BUSY_POLL
1363 while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
1364 pr_info("QV %d locked\n", q_idx);
1365 usleep_range(1000, 20000);
1366 }
1367#endif /* CONFIG_NET_RX_BUSY_POLL */
92915f71
GR
1368 }
1369}
1370
1371static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1372{
1373 struct net_device *netdev = adapter->netdev;
1374 int i;
1375
1376 ixgbevf_set_rx_mode(netdev);
1377
1378 ixgbevf_restore_vlan(adapter);
1379
1380 ixgbevf_configure_tx(adapter);
1381 ixgbevf_configure_rx(adapter);
1382 for (i = 0; i < adapter->num_rx_queues; i++) {
1383 struct ixgbevf_ring *ring = &adapter->rx_ring[i];
18c63089 1384 ixgbevf_alloc_rx_buffers(adapter, ring,
f880d07b 1385 ixgbevf_desc_unused(ring));
92915f71
GR
1386 }
1387}
1388
858c3dda
DS
1389#define IXGBEVF_MAX_RX_DESC_POLL 10
1390static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1391 int rxr)
92915f71
GR
1392{
1393 struct ixgbe_hw *hw = &adapter->hw;
858c3dda
DS
1394 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1395 u32 rxdctl;
92915f71 1396 int j = adapter->rx_ring[rxr].reg_idx;
92915f71 1397
858c3dda
DS
1398 do {
1399 usleep_range(1000, 2000);
1400 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1401 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1402
1403 if (!wait_loop)
1404 hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
1405 rxr);
1406
5cdab2f6 1407 ixgbevf_release_rx_desc(&adapter->rx_ring[rxr],
858c3dda
DS
1408 (adapter->rx_ring[rxr].count - 1));
1409}
92915f71 1410
858c3dda
DS
1411static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1412 struct ixgbevf_ring *ring)
1413{
1414 struct ixgbe_hw *hw = &adapter->hw;
1415 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1416 u32 rxdctl;
1417 u8 reg_idx = ring->reg_idx;
1418
1419 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1420 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1421
1422 /* write value back with RXDCTL.ENABLE bit cleared */
1423 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1424
1425 /* the hardware may take up to 100us to really disable the rx queue */
1426 do {
1427 udelay(10);
1428 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1429 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1430
1431 if (!wait_loop)
1432 hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
1433 reg_idx);
92915f71
GR
1434}
1435
33bd9f60
GR
1436static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1437{
1438 /* Only save pre-reset stats if there are some */
1439 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1440 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1441 adapter->stats.base_vfgprc;
1442 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
1443 adapter->stats.base_vfgptc;
1444 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
1445 adapter->stats.base_vfgorc;
1446 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
1447 adapter->stats.base_vfgotc;
1448 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
1449 adapter->stats.base_vfmprc;
1450 }
1451}
1452
1453static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1454{
1455 struct ixgbe_hw *hw = &adapter->hw;
1456
1457 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1458 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1459 adapter->stats.last_vfgorc |=
1460 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1461 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1462 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1463 adapter->stats.last_vfgotc |=
1464 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1465 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1466
1467 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
1468 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
1469 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
1470 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
1471 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1472}
1473
31186785
AD
1474static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
1475{
1476 struct ixgbe_hw *hw = &adapter->hw;
56e94095
AD
1477 int api[] = { ixgbe_mbox_api_11,
1478 ixgbe_mbox_api_10,
31186785
AD
1479 ixgbe_mbox_api_unknown };
1480 int err = 0, idx = 0;
1481
55fdd45b 1482 spin_lock_bh(&adapter->mbx_lock);
31186785
AD
1483
1484 while (api[idx] != ixgbe_mbox_api_unknown) {
1485 err = ixgbevf_negotiate_api_version(hw, api[idx]);
1486 if (!err)
1487 break;
1488 idx++;
1489 }
1490
55fdd45b 1491 spin_unlock_bh(&adapter->mbx_lock);
31186785
AD
1492}
1493
795180d8 1494static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
92915f71
GR
1495{
1496 struct net_device *netdev = adapter->netdev;
1497 struct ixgbe_hw *hw = &adapter->hw;
1498 int i, j = 0;
1499 int num_rx_rings = adapter->num_rx_queues;
1500 u32 txdctl, rxdctl;
1501
1502 for (i = 0; i < adapter->num_tx_queues; i++) {
1503 j = adapter->tx_ring[i].reg_idx;
1504 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1505 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
1506 txdctl |= (8 << 16);
1507 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1508 }
1509
1510 for (i = 0; i < adapter->num_tx_queues; i++) {
1511 j = adapter->tx_ring[i].reg_idx;
1512 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1513 txdctl |= IXGBE_TXDCTL_ENABLE;
1514 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1515 }
1516
1517 for (i = 0; i < num_rx_rings; i++) {
1518 j = adapter->rx_ring[i].reg_idx;
1519 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
dadcd65f 1520 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
69bfbec4
GR
1521 if (hw->mac.type == ixgbe_mac_X540_vf) {
1522 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
1523 rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
1524 IXGBE_RXDCTL_RLPML_EN);
1525 }
92915f71
GR
1526 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1527 ixgbevf_rx_desc_queue_enable(adapter, i);
1528 }
1529
1530 ixgbevf_configure_msix(adapter);
1531
55fdd45b 1532 spin_lock_bh(&adapter->mbx_lock);
1c55ed76 1533
92fe0bf7
GR
1534 if (is_valid_ether_addr(hw->mac.addr))
1535 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
1536 else
1537 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
92915f71 1538
55fdd45b 1539 spin_unlock_bh(&adapter->mbx_lock);
1c55ed76 1540
92915f71
GR
1541 clear_bit(__IXGBEVF_DOWN, &adapter->state);
1542 ixgbevf_napi_enable_all(adapter);
1543
1544 /* enable transmits */
1545 netif_tx_start_all_queues(netdev);
1546
33bd9f60
GR
1547 ixgbevf_save_reset_stats(adapter);
1548 ixgbevf_init_last_counter_stats(adapter);
1549
4b2cd27f 1550 hw->mac.get_link_status = 1;
92915f71 1551 mod_timer(&adapter->watchdog_timer, jiffies);
92915f71
GR
1552}
1553
56e94095
AD
1554static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
1555{
1556 struct ixgbe_hw *hw = &adapter->hw;
1557 struct ixgbevf_ring *rx_ring;
1558 unsigned int def_q = 0;
1559 unsigned int num_tcs = 0;
1560 unsigned int num_rx_queues = 1;
1561 int err, i;
1562
55fdd45b 1563 spin_lock_bh(&adapter->mbx_lock);
56e94095
AD
1564
1565 /* fetch queue configuration from the PF */
1566 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1567
55fdd45b 1568 spin_unlock_bh(&adapter->mbx_lock);
56e94095
AD
1569
1570 if (err)
1571 return err;
1572
1573 if (num_tcs > 1) {
1574 /* update default Tx ring register index */
1575 adapter->tx_ring[0].reg_idx = def_q;
1576
1577 /* we need as many queues as traffic classes */
1578 num_rx_queues = num_tcs;
1579 }
1580
1581 /* nothing to do if we have the correct number of queues */
1582 if (adapter->num_rx_queues == num_rx_queues)
1583 return 0;
1584
1585 /* allocate new rings */
1586 rx_ring = kcalloc(num_rx_queues,
1587 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1588 if (!rx_ring)
1589 return -ENOMEM;
1590
1591 /* setup ring fields */
1592 for (i = 0; i < num_rx_queues; i++) {
1593 rx_ring[i].count = adapter->rx_ring_count;
1594 rx_ring[i].queue_index = i;
1595 rx_ring[i].reg_idx = i;
1596 rx_ring[i].dev = &adapter->pdev->dev;
1597 rx_ring[i].netdev = adapter->netdev;
1598
1599 /* allocate resources on the ring */
1600 err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
1601 if (err) {
1602 while (i) {
1603 i--;
1604 ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
1605 }
1606 kfree(rx_ring);
1607 return err;
1608 }
1609 }
1610
1611 /* free the existing rings and queues */
1612 ixgbevf_free_all_rx_resources(adapter);
1613 adapter->num_rx_queues = 0;
1614 kfree(adapter->rx_ring);
1615
1616 /* move new rings into position on the adapter struct */
1617 adapter->rx_ring = rx_ring;
1618 adapter->num_rx_queues = num_rx_queues;
1619
1620 /* reset ring to vector mapping */
1621 ixgbevf_reset_q_vectors(adapter);
1622 ixgbevf_map_rings_to_vectors(adapter);
1623
1624 return 0;
1625}
1626
795180d8 1627void ixgbevf_up(struct ixgbevf_adapter *adapter)
92915f71 1628{
92915f71
GR
1629 struct ixgbe_hw *hw = &adapter->hw;
1630
56e94095
AD
1631 ixgbevf_reset_queues(adapter);
1632
92915f71
GR
1633 ixgbevf_configure(adapter);
1634
795180d8 1635 ixgbevf_up_complete(adapter);
92915f71
GR
1636
1637 /* clear any pending interrupts, may auto mask */
1638 IXGBE_READ_REG(hw, IXGBE_VTEICR);
1639
5f3600eb 1640 ixgbevf_irq_enable(adapter);
92915f71
GR
1641}
1642
1643/**
1644 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
1645 * @adapter: board private structure
1646 * @rx_ring: ring to free buffers from
1647 **/
1648static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1649 struct ixgbevf_ring *rx_ring)
1650{
1651 struct pci_dev *pdev = adapter->pdev;
1652 unsigned long size;
1653 unsigned int i;
1654
c0456c23
GR
1655 if (!rx_ring->rx_buffer_info)
1656 return;
92915f71 1657
c0456c23 1658 /* Free all the Rx ring sk_buffs */
92915f71
GR
1659 for (i = 0; i < rx_ring->count; i++) {
1660 struct ixgbevf_rx_buffer *rx_buffer_info;
1661
1662 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1663 if (rx_buffer_info->dma) {
2a1f8794 1664 dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
92915f71 1665 rx_ring->rx_buf_len,
2a1f8794 1666 DMA_FROM_DEVICE);
92915f71
GR
1667 rx_buffer_info->dma = 0;
1668 }
1669 if (rx_buffer_info->skb) {
1670 struct sk_buff *skb = rx_buffer_info->skb;
1671 rx_buffer_info->skb = NULL;
1672 do {
1673 struct sk_buff *this = skb;
5c60f81a 1674 skb = IXGBE_CB(skb)->prev;
92915f71
GR
1675 dev_kfree_skb(this);
1676 } while (skb);
1677 }
92915f71
GR
1678 }
1679
1680 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1681 memset(rx_ring->rx_buffer_info, 0, size);
1682
1683 /* Zero out the descriptor ring */
1684 memset(rx_ring->desc, 0, rx_ring->size);
92915f71
GR
1685}
1686
1687/**
1688 * ixgbevf_clean_tx_ring - Free Tx Buffers
1689 * @adapter: board private structure
1690 * @tx_ring: ring to be cleaned
1691 **/
1692static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
1693 struct ixgbevf_ring *tx_ring)
1694{
1695 struct ixgbevf_tx_buffer *tx_buffer_info;
1696 unsigned long size;
1697 unsigned int i;
1698
c0456c23
GR
1699 if (!tx_ring->tx_buffer_info)
1700 return;
1701
92915f71 1702 /* Free all the Tx ring sk_buffs */
92915f71
GR
1703 for (i = 0; i < tx_ring->count; i++) {
1704 tx_buffer_info = &tx_ring->tx_buffer_info[i];
70a10e25 1705 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
92915f71
GR
1706 }
1707
1708 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
1709 memset(tx_ring->tx_buffer_info, 0, size);
1710
1711 memset(tx_ring->desc, 0, tx_ring->size);
92915f71
GR
1712}
1713
1714/**
1715 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
1716 * @adapter: board private structure
1717 **/
1718static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
1719{
1720 int i;
1721
1722 for (i = 0; i < adapter->num_rx_queues; i++)
1723 ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1724}
1725
1726/**
1727 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
1728 * @adapter: board private structure
1729 **/
1730static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
1731{
1732 int i;
1733
1734 for (i = 0; i < adapter->num_tx_queues; i++)
1735 ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1736}
1737
1738void ixgbevf_down(struct ixgbevf_adapter *adapter)
1739{
1740 struct net_device *netdev = adapter->netdev;
1741 struct ixgbe_hw *hw = &adapter->hw;
1742 u32 txdctl;
1743 int i, j;
1744
1745 /* signal that we are down to the interrupt handler */
1746 set_bit(__IXGBEVF_DOWN, &adapter->state);
858c3dda
DS
1747
1748 /* disable all enabled rx queues */
1749 for (i = 0; i < adapter->num_rx_queues; i++)
1750 ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
92915f71
GR
1751
1752 netif_tx_disable(netdev);
1753
1754 msleep(10);
1755
1756 netif_tx_stop_all_queues(netdev);
1757
1758 ixgbevf_irq_disable(adapter);
1759
1760 ixgbevf_napi_disable_all(adapter);
1761
1762 del_timer_sync(&adapter->watchdog_timer);
 1763 /* can't call flush_scheduled_work() here because it can deadlock
1764 * if linkwatch_event tries to acquire the rtnl_lock which we are
1765 * holding */
1766 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
1767 msleep(1);
1768
1769 /* disable transmits in the hardware now that interrupts are off */
1770 for (i = 0; i < adapter->num_tx_queues; i++) {
1771 j = adapter->tx_ring[i].reg_idx;
1772 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1773 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
1774 (txdctl & ~IXGBE_TXDCTL_ENABLE));
1775 }
1776
1777 netif_carrier_off(netdev);
1778
1779 if (!pci_channel_offline(adapter->pdev))
1780 ixgbevf_reset(adapter);
1781
1782 ixgbevf_clean_all_tx_rings(adapter);
1783 ixgbevf_clean_all_rx_rings(adapter);
1784}
1785
1786void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1787{
1788 WARN_ON(in_interrupt());
c0456c23 1789
92915f71
GR
1790 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
1791 msleep(1);
1792
4b2cd27f
AD
1793 ixgbevf_down(adapter);
1794 ixgbevf_up(adapter);
92915f71
GR
1795
1796 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
1797}
1798
1799void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1800{
1801 struct ixgbe_hw *hw = &adapter->hw;
1802 struct net_device *netdev = adapter->netdev;
1803
798e381a 1804 if (hw->mac.ops.reset_hw(hw)) {
92915f71 1805 hw_dbg(hw, "PF still resetting\n");
798e381a 1806 } else {
92915f71 1807 hw->mac.ops.init_hw(hw);
798e381a
DS
1808 ixgbevf_negotiate_api(adapter);
1809 }
92915f71
GR
1810
1811 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1812 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1813 netdev->addr_len);
1814 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1815 netdev->addr_len);
1816 }
1817}
1818
e45dd5fe
JK
1819static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1820 int vectors)
92915f71 1821{
a5f9337b
ET
1822 int err = 0;
1823 int vector_threshold;
92915f71 1824
fa71ae27
AD
1825 /* We'll want at least 2 (vector_threshold):
1826 * 1) TxQ[0] + RxQ[0] handler
1827 * 2) Other (Link Status Change, etc.)
92915f71
GR
1828 */
1829 vector_threshold = MIN_MSIX_COUNT;
1830
1831 /* The more we get, the more we will assign to Tx/Rx Cleanup
1832 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1833 * Right now, we simply care about how many we'll get; we'll
1834 * set them up later while requesting irq's.
1835 */
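	/* Note on the loop below (a reading aid, matching the old
	 * pci_enable_msix() convention): a return of 0 means all requested
	 * vectors were allocated, a negative value is a hard failure, and a
	 * positive value is the number of vectors the system could provide,
	 * so the request is retried with that smaller count.
	 */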
1836 while (vectors >= vector_threshold) {
1837 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1838 vectors);
e45dd5fe 1839 if (!err || err < 0) /* Success or a nasty failure. */
92915f71 1840 break;
92915f71
GR
1841 else /* err == number of vectors we should try again with */
1842 vectors = err;
1843 }
1844
e45dd5fe
JK
1845 if (vectors < vector_threshold)
1846 err = -ENOMEM;
1847
1848 if (err) {
1849 dev_err(&adapter->pdev->dev,
1850 "Unable to allocate MSI-X interrupts\n");
92915f71
GR
1851 kfree(adapter->msix_entries);
1852 adapter->msix_entries = NULL;
1853 } else {
1854 /*
1855 * Adjust for only the vectors we'll use, which is minimum
1856 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
1857 * vectors we were allocated.
1858 */
1859 adapter->num_msix_vectors = vectors;
1860 }
dee847f5 1861
e45dd5fe 1862 return err;
92915f71
GR
1863}
1864
49ce9c2c
BH
1865/**
1866 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
92915f71
GR
1867 * @adapter: board private structure to initialize
1868 *
1869 * This is the top level queue allocation routine. The order here is very
1870 * important, starting with the "most" number of features turned on at once,
1871 * and ending with the smallest set of features. This way large combinations
1872 * can be allocated if they're turned on, and smaller combinations are the
1873 * fallthrough conditions.
1874 *
1875 **/
1876static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1877{
1878 /* Start with base case */
1879 adapter->num_rx_queues = 1;
1880 adapter->num_tx_queues = 1;
92915f71
GR
1881}
1882
1883/**
1884 * ixgbevf_alloc_queues - Allocate memory for all rings
1885 * @adapter: board private structure to initialize
1886 *
 1887 * We allocate one ring per queue at run-time since we don't know the
 1888 * number of queues at compile-time. The tx_ring and rx_ring arrays
 1889 * are sized from adapter->num_tx_queues and adapter->num_rx_queues.
1890 **/
1891static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1892{
1893 int i;
1894
1895 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1896 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1897 if (!adapter->tx_ring)
1898 goto err_tx_ring_allocation;
1899
1900 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1901 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1902 if (!adapter->rx_ring)
1903 goto err_rx_ring_allocation;
1904
1905 for (i = 0; i < adapter->num_tx_queues; i++) {
1906 adapter->tx_ring[i].count = adapter->tx_ring_count;
1907 adapter->tx_ring[i].queue_index = i;
56e94095 1908 /* reg_idx may be remapped later by DCB config */
92915f71 1909 adapter->tx_ring[i].reg_idx = i;
fb40195c
AD
1910 adapter->tx_ring[i].dev = &adapter->pdev->dev;
1911 adapter->tx_ring[i].netdev = adapter->netdev;
92915f71
GR
1912 }
1913
1914 for (i = 0; i < adapter->num_rx_queues; i++) {
1915 adapter->rx_ring[i].count = adapter->rx_ring_count;
1916 adapter->rx_ring[i].queue_index = i;
1917 adapter->rx_ring[i].reg_idx = i;
fb40195c
AD
1918 adapter->rx_ring[i].dev = &adapter->pdev->dev;
1919 adapter->rx_ring[i].netdev = adapter->netdev;
92915f71
GR
1920 }
1921
1922 return 0;
1923
1924err_rx_ring_allocation:
1925 kfree(adapter->tx_ring);
1926err_tx_ring_allocation:
1927 return -ENOMEM;
1928}
1929
1930/**
1931 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
1932 * @adapter: board private structure to initialize
1933 *
1934 * Attempt to configure the interrupts using the best available
1935 * capabilities of the hardware and the kernel.
1936 **/
1937static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
1938{
91e2b89b 1939 struct net_device *netdev = adapter->netdev;
92915f71
GR
1940 int err = 0;
1941 int vector, v_budget;
1942
1943 /*
1944 * It's easy to be greedy for MSI-X vectors, but it really
1945 * doesn't do us much good if we have a lot more vectors
 1946 * than CPUs. So let's be conservative and only ask for
fa71ae27
AD
 1947 * (roughly) the same number of vectors as there are CPUs.
1948 * The default is to use pairs of vectors.
92915f71 1949 */
fa71ae27
AD
1950 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
1951 v_budget = min_t(int, v_budget, num_online_cpus());
1952 v_budget += NON_Q_VECTORS;
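	/* Illustrative arithmetic: with one Tx and one Rx queue on a
	 * multi-CPU host, v_budget = max(1, 1) = 1, clamped by
	 * num_online_cpus(), plus NON_Q_VECTORS (typically 1 here, for the
	 * mailbox/other vector), giving a request of 2 MSI-X vectors.
	 */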
92915f71
GR
1953
 1954 /* A failure to allocate the MSI-X entry table is fatal for the VF:
 1955 * there is no non-MSI-X fallback, so fail with -ENOMEM. */
1956 adapter->msix_entries = kcalloc(v_budget,
1957 sizeof(struct msix_entry), GFP_KERNEL);
1958 if (!adapter->msix_entries) {
1959 err = -ENOMEM;
1960 goto out;
1961 }
1962
1963 for (vector = 0; vector < v_budget; vector++)
1964 adapter->msix_entries[vector].entry = vector;
1965
e45dd5fe
JK
1966 err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
1967 if (err)
1968 goto out;
92915f71 1969
91e2b89b
GR
1970 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
1971 if (err)
1972 goto out;
1973
1974 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
1975
92915f71
GR
1976out:
1977 return err;
1978}
1979
1980/**
1981 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
1982 * @adapter: board private structure to initialize
1983 *
1984 * We allocate one q_vector per queue interrupt. If allocation fails we
1985 * return -ENOMEM.
1986 **/
1987static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
1988{
1989 int q_idx, num_q_vectors;
1990 struct ixgbevf_q_vector *q_vector;
92915f71
GR
1991
1992 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
92915f71
GR
1993
1994 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1995 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
1996 if (!q_vector)
1997 goto err_out;
1998 q_vector->adapter = adapter;
1999 q_vector->v_idx = q_idx;
fa71ae27
AD
2000 netif_napi_add(adapter->netdev, &q_vector->napi,
2001 ixgbevf_poll, 64);
c777cdfa
JK
2002#ifdef CONFIG_NET_RX_BUSY_POLL
2003 napi_hash_add(&q_vector->napi);
2004#endif
92915f71
GR
2005 adapter->q_vector[q_idx] = q_vector;
2006 }
2007
2008 return 0;
2009
2010err_out:
2011 while (q_idx) {
2012 q_idx--;
2013 q_vector = adapter->q_vector[q_idx];
c777cdfa
JK
2014#ifdef CONFIG_NET_RX_BUSY_POLL
2015 napi_hash_del(&q_vector->napi);
2016#endif
92915f71
GR
2017 netif_napi_del(&q_vector->napi);
2018 kfree(q_vector);
2019 adapter->q_vector[q_idx] = NULL;
2020 }
2021 return -ENOMEM;
2022}
2023
2024/**
2025 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2026 * @adapter: board private structure to initialize
2027 *
2028 * This function frees the memory allocated to the q_vectors. In addition if
2029 * NAPI is enabled it will delete any references to the NAPI struct prior
2030 * to freeing the q_vector.
2031 **/
2032static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2033{
f4477702 2034 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
92915f71
GR
2035
2036 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2037 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2038
2039 adapter->q_vector[q_idx] = NULL;
c777cdfa
JK
2040#ifdef CONFIG_NET_RX_BUSY_POLL
2041 napi_hash_del(&q_vector->napi);
2042#endif
f4477702 2043 netif_napi_del(&q_vector->napi);
92915f71
GR
2044 kfree(q_vector);
2045 }
2046}
2047
2048/**
2049 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2050 * @adapter: board private structure
2051 *
2052 **/
2053static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2054{
2055 pci_disable_msix(adapter->pdev);
2056 kfree(adapter->msix_entries);
2057 adapter->msix_entries = NULL;
92915f71
GR
2058}
2059
2060/**
2061 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2062 * @adapter: board private structure to initialize
2063 *
2064 **/
2065static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2066{
2067 int err;
2068
2069 /* Number of supported queues */
2070 ixgbevf_set_num_queues(adapter);
2071
2072 err = ixgbevf_set_interrupt_capability(adapter);
2073 if (err) {
2074 hw_dbg(&adapter->hw,
2075 "Unable to setup interrupt capabilities\n");
2076 goto err_set_interrupt;
2077 }
2078
2079 err = ixgbevf_alloc_q_vectors(adapter);
2080 if (err) {
2081 hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
2082 "vectors\n");
2083 goto err_alloc_q_vectors;
2084 }
2085
2086 err = ixgbevf_alloc_queues(adapter);
2087 if (err) {
dbd9636e 2088 pr_err("Unable to allocate memory for queues\n");
92915f71
GR
2089 goto err_alloc_queues;
2090 }
2091
2092 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
2093 "Tx Queue count = %u\n",
2094 (adapter->num_rx_queues > 1) ? "Enabled" :
2095 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2096
2097 set_bit(__IXGBEVF_DOWN, &adapter->state);
2098
2099 return 0;
2100err_alloc_queues:
2101 ixgbevf_free_q_vectors(adapter);
2102err_alloc_q_vectors:
2103 ixgbevf_reset_interrupt_capability(adapter);
2104err_set_interrupt:
2105 return err;
2106}
2107
0ac1e8ce
AD
2108/**
2109 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2110 * @adapter: board private structure to clear interrupt scheme on
2111 *
2112 * We go through and clear interrupt specific resources and reset the structure
2113 * to pre-load conditions
2114 **/
2115static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2116{
2117 adapter->num_tx_queues = 0;
2118 adapter->num_rx_queues = 0;
2119
2120 ixgbevf_free_q_vectors(adapter);
2121 ixgbevf_reset_interrupt_capability(adapter);
2122}
2123
92915f71
GR
2124/**
2125 * ixgbevf_sw_init - Initialize general software structures
2126 * (struct ixgbevf_adapter)
2127 * @adapter: board private structure to initialize
2128 *
2129 * ixgbevf_sw_init initializes the Adapter private data structure.
2130 * Fields are initialized based on PCI device information and
2131 * OS network device settings (MTU size).
2132 **/
9f9a12f8 2133static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
92915f71
GR
2134{
2135 struct ixgbe_hw *hw = &adapter->hw;
2136 struct pci_dev *pdev = adapter->pdev;
e1941a74 2137 struct net_device *netdev = adapter->netdev;
92915f71
GR
2138 int err;
2139
2140 /* PCI config space info */
2141
2142 hw->vendor_id = pdev->vendor;
2143 hw->device_id = pdev->device;
ff938e43 2144 hw->revision_id = pdev->revision;
92915f71
GR
2145 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2146 hw->subsystem_device_id = pdev->subsystem_device;
2147
2148 hw->mbx.ops.init_params(hw);
56e94095
AD
2149
2150 /* assume legacy case in which PF would only give VF 2 queues */
2151 hw->mac.max_tx_queues = 2;
2152 hw->mac.max_rx_queues = 2;
2153
798e381a
DS
2154 /* lock to protect mailbox accesses */
2155 spin_lock_init(&adapter->mbx_lock);
2156
92915f71
GR
2157 err = hw->mac.ops.reset_hw(hw);
2158 if (err) {
2159 dev_info(&pdev->dev,
e1941a74 2160 "PF still in reset state. Is the PF interface up?\n");
92915f71
GR
2161 } else {
2162 err = hw->mac.ops.init_hw(hw);
2163 if (err) {
dbd9636e 2164 pr_err("init_shared_code failed: %d\n", err);
92915f71
GR
2165 goto out;
2166 }
798e381a 2167 ixgbevf_negotiate_api(adapter);
e1941a74
GR
2168 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2169 if (err)
2170 dev_info(&pdev->dev, "Error reading MAC address\n");
2171 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2172 dev_info(&pdev->dev,
2173 "MAC address not assigned by administrator.\n");
2174 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2175 }
2176
2177 if (!is_valid_ether_addr(netdev->dev_addr)) {
2178 dev_info(&pdev->dev, "Assigning random MAC address\n");
2179 eth_hw_addr_random(netdev);
2180 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
92915f71
GR
2181 }
2182
2183 /* Enable dynamic interrupt throttling rates */
5f3600eb
AD
2184 adapter->rx_itr_setting = 1;
2185 adapter->tx_itr_setting = 1;
92915f71 2186
92915f71
GR
2187 /* set default ring sizes */
2188 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2189 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2190
92915f71 2191 set_bit(__IXGBEVF_DOWN, &adapter->state);
1a0d6ae5 2192 return 0;
92915f71
GR
2193
2194out:
2195 return err;
2196}
2197
92915f71
GR
2198#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
2199 { \
2200 u32 current_counter = IXGBE_READ_REG(hw, reg); \
2201 if (current_counter < last_counter) \
2202 counter += 0x100000000LL; \
2203 last_counter = current_counter; \
2204 counter &= 0xFFFFFFFF00000000LL; \
2205 counter |= current_counter; \
2206 }
2207
2208#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2209 { \
2210 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2211 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2212 u64 current_counter = (current_counter_msb << 32) | \
2213 current_counter_lsb; \
2214 if (current_counter < last_counter) \
2215 counter += 0x1000000000LL; \
2216 last_counter = current_counter; \
2217 counter &= 0xFFFFFFF000000000LL; \
2218 counter |= current_counter; \
2219 }
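/* Example of the wrap handling above (illustrative numbers): the hardware
 * counters are only 32 or 36 bits wide and roll over, while the driver
 * keeps 64-bit totals. If last_counter was 0xFFFFFFF0 and the register now
 * reads 0x00000010, current < last, so 2^32 is added to the running total
 * before the low 32 bits are replaced with the new register value.
 */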
2220/**
2221 * ixgbevf_update_stats - Update the board statistics counters.
2222 * @adapter: board private structure
2223 **/
2224void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2225{
2226 struct ixgbe_hw *hw = &adapter->hw;
55fb277c 2227 int i;
92915f71 2228
088245a3
GR
2229 if (!adapter->link_up)
2230 return;
2231
92915f71
GR
2232 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2233 adapter->stats.vfgprc);
2234 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2235 adapter->stats.vfgptc);
2236 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2237 adapter->stats.last_vfgorc,
2238 adapter->stats.vfgorc);
2239 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2240 adapter->stats.last_vfgotc,
2241 adapter->stats.vfgotc);
2242 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2243 adapter->stats.vfmprc);
55fb277c
GR
2244
2245 for (i = 0; i < adapter->num_rx_queues; i++) {
2246 adapter->hw_csum_rx_error +=
2247 adapter->rx_ring[i].hw_csum_rx_error;
2248 adapter->hw_csum_rx_good +=
2249 adapter->rx_ring[i].hw_csum_rx_good;
2250 adapter->rx_ring[i].hw_csum_rx_error = 0;
2251 adapter->rx_ring[i].hw_csum_rx_good = 0;
2252 }
92915f71
GR
2253}
2254
2255/**
2256 * ixgbevf_watchdog - Timer Call-back
2257 * @data: pointer to adapter cast into an unsigned long
2258 **/
2259static void ixgbevf_watchdog(unsigned long data)
2260{
2261 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2262 struct ixgbe_hw *hw = &adapter->hw;
5f3600eb 2263 u32 eics = 0;
92915f71
GR
2264 int i;
2265
2266 /*
2267 * Do the watchdog outside of interrupt context due to the lovely
2268 * delays that some of the newer hardware requires
2269 */
2270
2271 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2272 goto watchdog_short_circuit;
2273
2274 /* get one bit for every active tx/rx interrupt vector */
2275 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2276 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
6b43c446 2277 if (qv->rx.ring || qv->tx.ring)
5f3600eb 2278 eics |= 1 << i;
92915f71
GR
2279 }
2280
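	/* Writing the accumulated mask to VTEICS sets those interrupt cause
	 * bits, which fires the corresponding MSI-X vectors. This gives
	 * every active queue vector a periodic kick so Tx/Rx cleanup keeps
	 * making progress even if an interrupt was missed.
	 */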
5f3600eb 2281 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
92915f71
GR
2282
2283watchdog_short_circuit:
2284 schedule_work(&adapter->watchdog_task);
2285}
2286
2287/**
2288 * ixgbevf_tx_timeout - Respond to a Tx Hang
2289 * @netdev: network interface device structure
2290 **/
2291static void ixgbevf_tx_timeout(struct net_device *netdev)
2292{
2293 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2294
2295 /* Do the reset outside of interrupt context */
2296 schedule_work(&adapter->reset_task);
2297}
2298
2299static void ixgbevf_reset_task(struct work_struct *work)
2300{
2301 struct ixgbevf_adapter *adapter;
2302 adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2303
2304 /* If we're already down or resetting, just bail */
2305 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2306 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2307 return;
2308
2309 adapter->tx_timeout_count++;
2310
2311 ixgbevf_reinit_locked(adapter);
2312}
2313
2314/**
2315 * ixgbevf_watchdog_task - worker thread to bring link up
2316 * @work: pointer to work_struct containing our data
2317 **/
2318static void ixgbevf_watchdog_task(struct work_struct *work)
2319{
2320 struct ixgbevf_adapter *adapter = container_of(work,
2321 struct ixgbevf_adapter,
2322 watchdog_task);
2323 struct net_device *netdev = adapter->netdev;
2324 struct ixgbe_hw *hw = &adapter->hw;
2325 u32 link_speed = adapter->link_speed;
2326 bool link_up = adapter->link_up;
92fe0bf7 2327 s32 need_reset;
92915f71
GR
2328
2329 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2330
2331 /*
2332 * Always check the link on the watchdog because we have
2333 * no LSC interrupt
2334 */
92fe0bf7 2335 spin_lock_bh(&adapter->mbx_lock);
1c55ed76 2336
92fe0bf7 2337 need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
1c55ed76 2338
92fe0bf7 2339 spin_unlock_bh(&adapter->mbx_lock);
1c55ed76 2340
92fe0bf7
GR
2341 if (need_reset) {
2342 adapter->link_up = link_up;
2343 adapter->link_speed = link_speed;
2344 netif_carrier_off(netdev);
2345 netif_tx_stop_all_queues(netdev);
2346 schedule_work(&adapter->reset_task);
2347 goto pf_has_reset;
92915f71
GR
2348 }
2349 adapter->link_up = link_up;
2350 adapter->link_speed = link_speed;
2351
2352 if (link_up) {
2353 if (!netif_carrier_ok(netdev)) {
b876a744
GR
2354 char *link_speed_string;
2355 switch (link_speed) {
2356 case IXGBE_LINK_SPEED_10GB_FULL:
2357 link_speed_string = "10 Gbps";
2358 break;
2359 case IXGBE_LINK_SPEED_1GB_FULL:
2360 link_speed_string = "1 Gbps";
2361 break;
2362 case IXGBE_LINK_SPEED_100_FULL:
2363 link_speed_string = "100 Mbps";
2364 break;
2365 default:
2366 link_speed_string = "unknown speed";
2367 break;
2368 }
6fe59675 2369 dev_info(&adapter->pdev->dev,
b876a744 2370 "NIC Link is Up, %s\n", link_speed_string);
92915f71
GR
2371 netif_carrier_on(netdev);
2372 netif_tx_wake_all_queues(netdev);
92915f71
GR
2373 }
2374 } else {
2375 adapter->link_up = false;
2376 adapter->link_speed = 0;
2377 if (netif_carrier_ok(netdev)) {
6fe59675 2378 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
92915f71
GR
2379 netif_carrier_off(netdev);
2380 netif_tx_stop_all_queues(netdev);
2381 }
2382 }
2383
92915f71
GR
2384 ixgbevf_update_stats(adapter);
2385
33bd9f60 2386pf_has_reset:
92915f71
GR
2387 /* Reset the timer */
2388 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2389 mod_timer(&adapter->watchdog_timer,
2390 round_jiffies(jiffies + (2 * HZ)));
2391
2392 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2393}
2394
2395/**
2396 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2397 * @adapter: board private structure
2398 * @tx_ring: Tx descriptor ring for a specific queue
2399 *
2400 * Free all transmit software resources
2401 **/
2402void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2403 struct ixgbevf_ring *tx_ring)
2404{
2405 struct pci_dev *pdev = adapter->pdev;
2406
92915f71
GR
2407 ixgbevf_clean_tx_ring(adapter, tx_ring);
2408
2409 vfree(tx_ring->tx_buffer_info);
2410 tx_ring->tx_buffer_info = NULL;
2411
2a1f8794
NN
2412 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2413 tx_ring->dma);
92915f71
GR
2414
2415 tx_ring->desc = NULL;
2416}
2417
2418/**
2419 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2420 * @adapter: board private structure
2421 *
2422 * Free all transmit software resources
2423 **/
2424static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2425{
2426 int i;
2427
2428 for (i = 0; i < adapter->num_tx_queues; i++)
2429 if (adapter->tx_ring[i].desc)
2430 ixgbevf_free_tx_resources(adapter,
2431 &adapter->tx_ring[i]);
2432
2433}
2434
2435/**
2436 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2437 * @adapter: board private structure
2438 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2439 *
2440 * Return 0 on success, negative on failure
2441 **/
2442int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2443 struct ixgbevf_ring *tx_ring)
2444{
2445 struct pci_dev *pdev = adapter->pdev;
2446 int size;
2447
2448 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
89bf67f1 2449 tx_ring->tx_buffer_info = vzalloc(size);
92915f71
GR
2450 if (!tx_ring->tx_buffer_info)
2451 goto err;
92915f71
GR
2452
2453 /* round up to nearest 4K */
2454 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2455 tx_ring->size = ALIGN(tx_ring->size, 4096);
2456
2a1f8794
NN
2457 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
2458 &tx_ring->dma, GFP_KERNEL);
92915f71
GR
2459 if (!tx_ring->desc)
2460 goto err;
2461
92915f71
GR
2462 return 0;
2463
2464err:
2465 vfree(tx_ring->tx_buffer_info);
2466 tx_ring->tx_buffer_info = NULL;
2467 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2468 "descriptor ring\n");
2469 return -ENOMEM;
2470}
2471
2472/**
2473 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2474 * @adapter: board private structure
2475 *
2476 * If this function returns with an error, then it's possible one or
2477 * more of the rings is populated (while the rest are not). It is the
 2478 * caller's duty to clean those orphaned rings.
2479 *
2480 * Return 0 on success, negative on failure
2481 **/
2482static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2483{
2484 int i, err = 0;
2485
2486 for (i = 0; i < adapter->num_tx_queues; i++) {
2487 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2488 if (!err)
2489 continue;
2490 hw_dbg(&adapter->hw,
2491 "Allocation for Tx Queue %u failed\n", i);
2492 break;
2493 }
2494
2495 return err;
2496}
2497
2498/**
2499 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2500 * @adapter: board private structure
2501 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2502 *
2503 * Returns 0 on success, negative on failure
2504 **/
2505int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2506 struct ixgbevf_ring *rx_ring)
2507{
2508 struct pci_dev *pdev = adapter->pdev;
2509 int size;
2510
2511 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
89bf67f1 2512 rx_ring->rx_buffer_info = vzalloc(size);
e404decb 2513 if (!rx_ring->rx_buffer_info)
92915f71 2514 goto alloc_failed;
92915f71
GR
2515
2516 /* Round up to nearest 4K */
2517 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2518 rx_ring->size = ALIGN(rx_ring->size, 4096);
2519
2a1f8794
NN
2520 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
2521 &rx_ring->dma, GFP_KERNEL);
92915f71
GR
2522
2523 if (!rx_ring->desc) {
92915f71
GR
2524 vfree(rx_ring->rx_buffer_info);
2525 rx_ring->rx_buffer_info = NULL;
2526 goto alloc_failed;
2527 }
2528
92915f71
GR
2529 return 0;
2530alloc_failed:
2531 return -ENOMEM;
2532}
2533
2534/**
2535 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2536 * @adapter: board private structure
2537 *
2538 * If this function returns with an error, then it's possible one or
2539 * more of the rings is populated (while the rest are not). It is the
 2540 * caller's duty to clean those orphaned rings.
2541 *
2542 * Return 0 on success, negative on failure
2543 **/
2544static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2545{
2546 int i, err = 0;
2547
2548 for (i = 0; i < adapter->num_rx_queues; i++) {
2549 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2550 if (!err)
2551 continue;
2552 hw_dbg(&adapter->hw,
2553 "Allocation for Rx Queue %u failed\n", i);
2554 break;
2555 }
2556 return err;
2557}
2558
2559/**
2560 * ixgbevf_free_rx_resources - Free Rx Resources
2561 * @adapter: board private structure
2562 * @rx_ring: ring to clean the resources from
2563 *
2564 * Free all receive software resources
2565 **/
2566void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2567 struct ixgbevf_ring *rx_ring)
2568{
2569 struct pci_dev *pdev = adapter->pdev;
2570
2571 ixgbevf_clean_rx_ring(adapter, rx_ring);
2572
2573 vfree(rx_ring->rx_buffer_info);
2574 rx_ring->rx_buffer_info = NULL;
2575
2a1f8794
NN
2576 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2577 rx_ring->dma);
92915f71
GR
2578
2579 rx_ring->desc = NULL;
2580}
2581
2582/**
2583 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2584 * @adapter: board private structure
2585 *
2586 * Free all receive software resources
2587 **/
2588static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2589{
2590 int i;
2591
2592 for (i = 0; i < adapter->num_rx_queues; i++)
2593 if (adapter->rx_ring[i].desc)
2594 ixgbevf_free_rx_resources(adapter,
2595 &adapter->rx_ring[i]);
2596}
2597
56e94095
AD
2598static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
2599{
2600 struct ixgbe_hw *hw = &adapter->hw;
2601 struct ixgbevf_ring *rx_ring;
2602 unsigned int def_q = 0;
2603 unsigned int num_tcs = 0;
2604 unsigned int num_rx_queues = 1;
2605 int err, i;
2606
55fdd45b 2607 spin_lock_bh(&adapter->mbx_lock);
56e94095
AD
2608
2609 /* fetch queue configuration from the PF */
2610 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2611
55fdd45b 2612 spin_unlock_bh(&adapter->mbx_lock);
56e94095
AD
2613
2614 if (err)
2615 return err;
2616
2617 if (num_tcs > 1) {
2618 /* update default Tx ring register index */
2619 adapter->tx_ring[0].reg_idx = def_q;
2620
2621 /* we need as many queues as traffic classes */
2622 num_rx_queues = num_tcs;
2623 }
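	/* Illustrative example: if the PF reports num_tcs = 4 with a default
	 * queue offset def_q of, say, 8, the single Tx ring is pointed at
	 * hardware queue 8 and four Rx queues are created below, one per
	 * traffic class.
	 */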
2624
2625 /* nothing to do if we have the correct number of queues */
2626 if (adapter->num_rx_queues == num_rx_queues)
2627 return 0;
2628
2629 /* allocate new rings */
2630 rx_ring = kcalloc(num_rx_queues,
2631 sizeof(struct ixgbevf_ring), GFP_KERNEL);
2632 if (!rx_ring)
2633 return -ENOMEM;
2634
2635 /* setup ring fields */
2636 for (i = 0; i < num_rx_queues; i++) {
2637 rx_ring[i].count = adapter->rx_ring_count;
2638 rx_ring[i].queue_index = i;
2639 rx_ring[i].reg_idx = i;
2640 rx_ring[i].dev = &adapter->pdev->dev;
2641 rx_ring[i].netdev = adapter->netdev;
2642 }
2643
 2644 /* free the existing Rx ring array */
2645 adapter->num_rx_queues = 0;
2646 kfree(adapter->rx_ring);
2647
2648 /* move new rings into position on the adapter struct */
2649 adapter->rx_ring = rx_ring;
2650 adapter->num_rx_queues = num_rx_queues;
2651
2652 return 0;
2653}
2654
92915f71
GR
2655/**
2656 * ixgbevf_open - Called when a network interface is made active
2657 * @netdev: network interface device structure
2658 *
2659 * Returns 0 on success, negative value on failure
2660 *
2661 * The open entry point is called when a network interface is made
2662 * active by the system (IFF_UP). At this point all resources needed
2663 * for transmit and receive operations are allocated, the interrupt
2664 * handler is registered with the OS, the watchdog timer is started,
2665 * and the stack is notified that the interface is ready.
2666 **/
2667static int ixgbevf_open(struct net_device *netdev)
2668{
2669 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2670 struct ixgbe_hw *hw = &adapter->hw;
2671 int err;
2672
a1f6c6b1 2673 /* A previous failure to open the device because of a lack of
2674 * available MSIX vector resources may have reset the number
2675 * of msix vectors variable to zero. The only way to recover
2676 * is to unload/reload the driver and hope that the system has
2677 * been able to recover some MSIX vector resources.
2678 */
2679 if (!adapter->num_msix_vectors)
2680 return -ENOMEM;
2681
92915f71
GR
2682 /* disallow open during test */
2683 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2684 return -EBUSY;
2685
2686 if (hw->adapter_stopped) {
2687 ixgbevf_reset(adapter);
 2688 /* if the adapter is still stopped then the PF isn't up and
 2689 * the VF can't start. */
2690 if (hw->adapter_stopped) {
2691 err = IXGBE_ERR_MBX;
dbd9636e
JK
2692 pr_err("Unable to start - perhaps the PF Driver isn't "
2693 "up yet\n");
92915f71
GR
2694 goto err_setup_reset;
2695 }
2696 }
2697
56e94095
AD
2698 /* setup queue reg_idx and Rx queue count */
2699 err = ixgbevf_setup_queues(adapter);
2700 if (err)
2701 goto err_setup_queues;
2702
92915f71
GR
2703 /* allocate transmit descriptors */
2704 err = ixgbevf_setup_all_tx_resources(adapter);
2705 if (err)
2706 goto err_setup_tx;
2707
2708 /* allocate receive descriptors */
2709 err = ixgbevf_setup_all_rx_resources(adapter);
2710 if (err)
2711 goto err_setup_rx;
2712
2713 ixgbevf_configure(adapter);
2714
2715 /*
2716 * Map the Tx/Rx rings to the vectors we were allotted.
 2717 * Since ixgbevf_request_irq() is called later in this function, the
 2718 * rings must be mapped to vectors *before* up_complete.
2719 */
2720 ixgbevf_map_rings_to_vectors(adapter);
2721
795180d8 2722 ixgbevf_up_complete(adapter);
92915f71
GR
2723
2724 /* clear any pending interrupts, may auto mask */
2725 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2726 err = ixgbevf_request_irq(adapter);
2727 if (err)
2728 goto err_req_irq;
2729
5f3600eb 2730 ixgbevf_irq_enable(adapter);
92915f71
GR
2731
2732 return 0;
2733
2734err_req_irq:
2735 ixgbevf_down(adapter);
92915f71
GR
2736err_setup_rx:
2737 ixgbevf_free_all_rx_resources(adapter);
2738err_setup_tx:
2739 ixgbevf_free_all_tx_resources(adapter);
56e94095 2740err_setup_queues:
92915f71
GR
2741 ixgbevf_reset(adapter);
2742
2743err_setup_reset:
2744
2745 return err;
2746}
2747
2748/**
2749 * ixgbevf_close - Disables a network interface
2750 * @netdev: network interface device structure
2751 *
2752 * Returns 0, this is not allowed to fail
2753 *
2754 * The close entry point is called when an interface is de-activated
2755 * by the OS. The hardware is still under the drivers control, but
2756 * needs to be disabled. A global MAC reset is issued to stop the
2757 * hardware, and all transmit and receive resources are freed.
2758 **/
2759static int ixgbevf_close(struct net_device *netdev)
2760{
2761 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2762
2763 ixgbevf_down(adapter);
2764 ixgbevf_free_irq(adapter);
2765
2766 ixgbevf_free_all_tx_resources(adapter);
2767 ixgbevf_free_all_rx_resources(adapter);
2768
2769 return 0;
2770}
2771
70a10e25
AD
2772static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
2773 u32 vlan_macip_lens, u32 type_tucmd,
2774 u32 mss_l4len_idx)
92915f71
GR
2775{
2776 struct ixgbe_adv_tx_context_desc *context_desc;
70a10e25 2777 u16 i = tx_ring->next_to_use;
92915f71 2778
70a10e25 2779 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
92915f71 2780
70a10e25
AD
2781 i++;
2782 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
92915f71 2783
70a10e25
AD
2784 /* set bits to identify this as an advanced context descriptor */
2785 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
92915f71 2786
70a10e25
AD
2787 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2788 context_desc->seqnum_seed = 0;
2789 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
2790 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2791}
2792
2793static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2794 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2795{
2796 u32 vlan_macip_lens, type_tucmd;
2797 u32 mss_l4len_idx, l4len;
2798
2799 if (!skb_is_gso(skb))
2800 return 0;
92915f71 2801
70a10e25
AD
2802 if (skb_header_cloned(skb)) {
2803 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2804 if (err)
2805 return err;
92915f71
GR
2806 }
2807
70a10e25
AD
2808 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2809 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
2810
2811 if (skb->protocol == htons(ETH_P_IP)) {
2812 struct iphdr *iph = ip_hdr(skb);
2813 iph->tot_len = 0;
2814 iph->check = 0;
2815 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2816 iph->daddr, 0,
2817 IPPROTO_TCP,
2818 0);
2819 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2820 } else if (skb_is_gso_v6(skb)) {
2821 ipv6_hdr(skb)->payload_len = 0;
2822 tcp_hdr(skb)->check =
2823 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2824 &ipv6_hdr(skb)->daddr,
2825 0, IPPROTO_TCP, 0);
2826 }
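	/* For TSO the stack's full checksum is not usable: the hardware
	 * recomputes the TCP checksum for every segment it emits, so the
	 * checksum field is seeded above with only the pseudo-header sum
	 * (addresses and protocol, with a zero length).
	 */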
2827
2828 /* compute header lengths */
2829 l4len = tcp_hdrlen(skb);
 2830 *hdr_len = skb_transport_offset(skb) + l4len;
2832
 2833 /* mss_l4len_idx: use 1 as the context index for TSO */
2834 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
2835 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
2836 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
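	/* mss_l4len_idx packs three fields for the context descriptor: the
	 * MSS in the upper bits, the L4 (TCP) header length, and the context
	 * index (1 for TSO). For example, a 1448-byte MSS with a 20-byte TCP
	 * header yields 1448 << IXGBE_ADVTXD_MSS_SHIFT |
	 * 20 << IXGBE_ADVTXD_L4LEN_SHIFT | 1 << IXGBE_ADVTXD_IDX_SHIFT.
	 */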
2837
2838 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
2839 vlan_macip_lens = skb_network_header_len(skb);
2840 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2841 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2842
2843 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2844 type_tucmd, mss_l4len_idx);
2845
2846 return 1;
92915f71
GR
2847}
2848
70a10e25 2849static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
92915f71
GR
2850 struct sk_buff *skb, u32 tx_flags)
2851{
70a10e25
AD
2852 u32 vlan_macip_lens = 0;
2853 u32 mss_l4len_idx = 0;
2854 u32 type_tucmd = 0;
92915f71 2855
70a10e25
AD
2856 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2857 u8 l4_hdr = 0;
2858 switch (skb->protocol) {
2859 case __constant_htons(ETH_P_IP):
2860 vlan_macip_lens |= skb_network_header_len(skb);
2861 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2862 l4_hdr = ip_hdr(skb)->protocol;
2863 break;
2864 case __constant_htons(ETH_P_IPV6):
2865 vlan_macip_lens |= skb_network_header_len(skb);
2866 l4_hdr = ipv6_hdr(skb)->nexthdr;
2867 break;
2868 default:
2869 if (unlikely(net_ratelimit())) {
2870 dev_warn(tx_ring->dev,
2871 "partial checksum but proto=%x!\n",
2872 skb->protocol);
2873 }
2874 break;
2875 }
92915f71 2876
70a10e25
AD
2877 switch (l4_hdr) {
2878 case IPPROTO_TCP:
2879 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2880 mss_l4len_idx = tcp_hdrlen(skb) <<
2881 IXGBE_ADVTXD_L4LEN_SHIFT;
2882 break;
2883 case IPPROTO_SCTP:
2884 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2885 mss_l4len_idx = sizeof(struct sctphdr) <<
2886 IXGBE_ADVTXD_L4LEN_SHIFT;
2887 break;
2888 case IPPROTO_UDP:
2889 mss_l4len_idx = sizeof(struct udphdr) <<
2890 IXGBE_ADVTXD_L4LEN_SHIFT;
2891 break;
2892 default:
2893 if (unlikely(net_ratelimit())) {
2894 dev_warn(tx_ring->dev,
2895 "partial checksum but l4 proto=%x!\n",
2896 l4_hdr);
2897 }
2898 break;
2899 }
92915f71
GR
2900 }
2901
70a10e25
AD
2902 /* vlan_macip_lens: MACLEN, VLAN tag */
2903 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2904 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2905
2906 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2907 type_tucmd, mss_l4len_idx);
2908
2909 return (skb->ip_summed == CHECKSUM_PARTIAL);
92915f71
GR
2910}
2911
70a10e25 2912static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
e757e3e1 2913 struct sk_buff *skb, u32 tx_flags)
92915f71 2914{
92915f71
GR
2915 struct ixgbevf_tx_buffer *tx_buffer_info;
2916 unsigned int len;
2917 unsigned int total = skb->len;
2540ddb5
KV
2918 unsigned int offset = 0, size;
2919 int count = 0;
92915f71
GR
2920 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2921 unsigned int f;
65deeed7 2922 int i;
92915f71
GR
2923
2924 i = tx_ring->next_to_use;
2925
2926 len = min(skb_headlen(skb), total);
2927 while (len) {
2928 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2929 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2930
2931 tx_buffer_info->length = size;
2932 tx_buffer_info->mapped_as_page = false;
70a10e25 2933 tx_buffer_info->dma = dma_map_single(tx_ring->dev,
92915f71 2934 skb->data + offset,
2a1f8794 2935 size, DMA_TO_DEVICE);
70a10e25 2936 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
92915f71 2937 goto dma_error;
92915f71
GR
2938
2939 len -= size;
2940 total -= size;
2941 offset += size;
2942 count++;
2943 i++;
2944 if (i == tx_ring->count)
2945 i = 0;
2946 }
2947
2948 for (f = 0; f < nr_frags; f++) {
9e903e08 2949 const struct skb_frag_struct *frag;
92915f71
GR
2950
2951 frag = &skb_shinfo(skb)->frags[f];
9e903e08 2952 len = min((unsigned int)skb_frag_size(frag), total);
877749bf 2953 offset = 0;
92915f71
GR
2954
2955 while (len) {
2956 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2957 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2958
2959 tx_buffer_info->length = size;
877749bf 2960 tx_buffer_info->dma =
70a10e25 2961 skb_frag_dma_map(tx_ring->dev, frag,
877749bf 2962 offset, size, DMA_TO_DEVICE);
70a10e25
AD
2963 if (dma_mapping_error(tx_ring->dev,
2964 tx_buffer_info->dma))
92915f71 2965 goto dma_error;
6132ee8a 2966 tx_buffer_info->mapped_as_page = true;
92915f71
GR
2967
2968 len -= size;
2969 total -= size;
2970 offset += size;
2971 count++;
2972 i++;
2973 if (i == tx_ring->count)
2974 i = 0;
2975 }
2976 if (total == 0)
2977 break;
2978 }
2979
2980 if (i == 0)
2981 i = tx_ring->count - 1;
2982 else
2983 i = i - 1;
2984 tx_ring->tx_buffer_info[i].skb = skb;
92915f71
GR
2985
2986 return count;
2987
2988dma_error:
70a10e25 2989 dev_err(tx_ring->dev, "TX DMA map failed\n");
92915f71
GR
2990
2991 /* clear timestamp and dma mappings for failed tx_buffer_info map */
2992 tx_buffer_info->dma = 0;
92915f71
GR
2993 count--;
2994
2995 /* clear timestamp and dma mappings for remaining portion of packet */
2996 while (count >= 0) {
2997 count--;
2998 i--;
2999 if (i < 0)
3000 i += tx_ring->count;
3001 tx_buffer_info = &tx_ring->tx_buffer_info[i];
70a10e25 3002 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
92915f71
GR
3003 }
3004
3005 return count;
3006}
3007
70a10e25 3008static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
e757e3e1
AD
3009 int count, unsigned int first, u32 paylen,
3010 u8 hdr_len)
92915f71
GR
3011{
3012 union ixgbe_adv_tx_desc *tx_desc = NULL;
3013 struct ixgbevf_tx_buffer *tx_buffer_info;
3014 u32 olinfo_status = 0, cmd_type_len = 0;
3015 unsigned int i;
3016
3017 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
3018
3019 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
3020
3021 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
3022
3023 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3024 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
3025
70a10e25
AD
3026 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3027 olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
3028
92915f71
GR
3029 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
3030 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
3031
92915f71
GR
3032 /* use index 1 context for tso */
3033 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
3034 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
70a10e25 3035 olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
70a10e25 3036 }
92915f71 3037
70a10e25
AD
3038 /*
3039 * Check Context must be set if Tx switch is enabled, which it
3040 * always is for case where virtual functions are running
3041 */
3042 olinfo_status |= IXGBE_ADVTXD_CC;
92915f71
GR
3043
3044 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
3045
3046 i = tx_ring->next_to_use;
3047 while (count--) {
3048 tx_buffer_info = &tx_ring->tx_buffer_info[i];
908421f6 3049 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
92915f71
GR
3050 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3051 tx_desc->read.cmd_type_len =
3052 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
3053 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3054 i++;
3055 if (i == tx_ring->count)
3056 i = 0;
3057 }
3058
3059 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
3060
e757e3e1
AD
3061 tx_ring->tx_buffer_info[first].time_stamp = jiffies;
3062
3063 /* Force memory writes to complete before letting h/w
3064 * know there are new descriptors to fetch. (Only
3065 * applicable for weak-ordered memory model archs,
3066 * such as IA-64).
3067 */
3068 wmb();
3069
3070 tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
92915f71 3071 tx_ring->next_to_use = i;
92915f71
GR
3072}
3073
fb40195c 3074static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
92915f71 3075{
fb40195c 3076 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
92915f71 3077
fb40195c 3078 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
92915f71
GR
3079 /* Herbert's original patch had:
3080 * smp_mb__after_netif_stop_queue();
3081 * but since that doesn't exist yet, just open code it. */
3082 smp_mb();
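	/* The full barrier orders the queue stop above against the re-read
	 * of free descriptors below: either this CPU observes the space the
	 * cleanup path just freed, or the cleanup path observes the stopped
	 * queue and wakes it, so the queue cannot stay stopped forever.
	 */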
3083
 3084 /* We need to check again in case another CPU has just
3085 * made room available. */
f880d07b 3086 if (likely(ixgbevf_desc_unused(tx_ring) < size))
92915f71
GR
3087 return -EBUSY;
3088
3089 /* A reprieve! - use start_queue because it doesn't call schedule */
fb40195c 3090 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
92915f71
GR
3091 ++adapter->restart_queue;
3092 return 0;
3093}
3094
fb40195c 3095static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
92915f71 3096{
f880d07b 3097 if (likely(ixgbevf_desc_unused(tx_ring) >= size))
92915f71 3098 return 0;
fb40195c 3099 return __ixgbevf_maybe_stop_tx(tx_ring, size);
92915f71
GR
3100}
3101
3102static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3103{
3104 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3105 struct ixgbevf_ring *tx_ring;
3106 unsigned int first;
3107 unsigned int tx_flags = 0;
3108 u8 hdr_len = 0;
3109 int r_idx = 0, tso;
3595990a
AD
3110 u16 count = TXD_USE_COUNT(skb_headlen(skb));
3111#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3112 unsigned short f;
3113#endif
f9d08f16 3114 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
46acc460 3115 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
f9d08f16
GR
3116 dev_kfree_skb(skb);
3117 return NETDEV_TX_OK;
3118 }
92915f71
GR
3119
3120 tx_ring = &adapter->tx_ring[r_idx];
3121
3595990a
AD
3122 /*
3123 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
3124 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
3125 * + 2 desc gap to keep tail from touching head,
3126 * + 1 desc for context descriptor,
3127 * otherwise try next time
3128 */
3129#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3130 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3131 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3132#else
3133 count += skb_shinfo(skb)->nr_frags;
3134#endif
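	/* Illustrative count (non-TSO case): an skb with a short linear area
	 * and two page fragments needs 1 + 2 = 3 data descriptors; the check
	 * below asks for count + 3 free slots to cover the context
	 * descriptor and the two-descriptor gap mentioned above.
	 */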
fb40195c 3135 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
3595990a
AD
3136 adapter->tx_busy++;
3137 return NETDEV_TX_BUSY;
3138 }
3139
eab6d18d 3140 if (vlan_tx_tag_present(skb)) {
92915f71
GR
3141 tx_flags |= vlan_tx_tag_get(skb);
3142 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3143 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3144 }
3145
92915f71
GR
3146 first = tx_ring->next_to_use;
3147
3148 if (skb->protocol == htons(ETH_P_IP))
3149 tx_flags |= IXGBE_TX_FLAGS_IPV4;
70a10e25 3150 tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
92915f71
GR
3151 if (tso < 0) {
3152 dev_kfree_skb_any(skb);
3153 return NETDEV_TX_OK;
3154 }
3155
3156 if (tso)
70a10e25
AD
3157 tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
3158 else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
92915f71
GR
3159 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3160
70a10e25 3161 ixgbevf_tx_queue(tx_ring, tx_flags,
e757e3e1
AD
3162 ixgbevf_tx_map(tx_ring, skb, tx_flags),
3163 first, skb->len, hdr_len);
70a10e25 3164
5cdab2f6 3165 writel(tx_ring->next_to_use, tx_ring->tail);
92915f71 3166
fb40195c 3167 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
92915f71
GR
3168
3169 return NETDEV_TX_OK;
3170}
3171
92915f71
GR
3172/**
3173 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3174 * @netdev: network interface device structure
3175 * @p: pointer to an address structure
3176 *
3177 * Returns 0 on success, negative on failure
3178 **/
3179static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3180{
3181 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3182 struct ixgbe_hw *hw = &adapter->hw;
3183 struct sockaddr *addr = p;
3184
3185 if (!is_valid_ether_addr(addr->sa_data))
3186 return -EADDRNOTAVAIL;
3187
3188 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3189 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3190
55fdd45b 3191 spin_lock_bh(&adapter->mbx_lock);
1c55ed76 3192
92fe0bf7 3193 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
92915f71 3194
55fdd45b 3195 spin_unlock_bh(&adapter->mbx_lock);
1c55ed76 3196
92915f71
GR
3197 return 0;
3198}
3199
3200/**
3201 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3202 * @netdev: network interface device structure
3203 * @new_mtu: new value for maximum frame size
3204 *
3205 * Returns 0 on success, negative on failure
3206 **/
3207static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3208{
3209 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3210 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
69bfbec4 3211 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
69bfbec4 3212
56e94095
AD
3213 switch (adapter->hw.api_version) {
3214 case ixgbe_mbox_api_11:
69bfbec4 3215 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
56e94095
AD
3216 break;
3217 default:
3218 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3219 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3220 break;
3221 }
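	/* Example: a standard 1500-byte MTU gives max_frame = 1500 + 14
	 * (Ethernet header) + 4 (FCS) = 1518, which always fits; a jumbo
	 * MTU such as 9000 (9018 on the wire) is only accepted when the PF
	 * negotiated mailbox API 1.1 or the device is an X540 VF.
	 */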
92915f71
GR
3222
3223 /* MTU < 68 is an error and causes problems on some kernels */
69bfbec4 3224 if ((new_mtu < 68) || (max_frame > max_possible_frame))
92915f71
GR
3225 return -EINVAL;
3226
3227 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3228 netdev->mtu, new_mtu);
3229 /* must set new MTU before calling down or up */
3230 netdev->mtu = new_mtu;
3231
3232 if (netif_running(netdev))
3233 ixgbevf_reinit_locked(adapter);
3234
3235 return 0;
3236}
3237
0ac1e8ce 3238static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
92915f71
GR
3239{
3240 struct net_device *netdev = pci_get_drvdata(pdev);
3241 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
0ac1e8ce
AD
3242#ifdef CONFIG_PM
3243 int retval = 0;
3244#endif
92915f71
GR
3245
3246 netif_device_detach(netdev);
3247
3248 if (netif_running(netdev)) {
0ac1e8ce 3249 rtnl_lock();
92915f71
GR
3250 ixgbevf_down(adapter);
3251 ixgbevf_free_irq(adapter);
3252 ixgbevf_free_all_tx_resources(adapter);
3253 ixgbevf_free_all_rx_resources(adapter);
0ac1e8ce 3254 rtnl_unlock();
92915f71
GR
3255 }
3256
0ac1e8ce 3257 ixgbevf_clear_interrupt_scheme(adapter);
92915f71 3258
0ac1e8ce
AD
3259#ifdef CONFIG_PM
3260 retval = pci_save_state(pdev);
3261 if (retval)
3262 return retval;
92915f71 3263
0ac1e8ce 3264#endif
92915f71 3265 pci_disable_device(pdev);
0ac1e8ce
AD
3266
3267 return 0;
3268}
3269
3270#ifdef CONFIG_PM
3271static int ixgbevf_resume(struct pci_dev *pdev)
3272{
3273 struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
3274 struct net_device *netdev = adapter->netdev;
3275 u32 err;
3276
3277 pci_set_power_state(pdev, PCI_D0);
3278 pci_restore_state(pdev);
3279 /*
3280 * pci_restore_state clears dev->state_saved so call
3281 * pci_save_state to restore it.
3282 */
3283 pci_save_state(pdev);
3284
3285 err = pci_enable_device_mem(pdev);
3286 if (err) {
3287 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3288 return err;
3289 }
3290 pci_set_master(pdev);
3291
798e381a
DS
3292 ixgbevf_reset(adapter);
3293
0ac1e8ce
AD
3294 rtnl_lock();
3295 err = ixgbevf_init_interrupt_scheme(adapter);
3296 rtnl_unlock();
3297 if (err) {
3298 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3299 return err;
3300 }
3301
0ac1e8ce
AD
3302 if (netif_running(netdev)) {
3303 err = ixgbevf_open(netdev);
3304 if (err)
3305 return err;
3306 }
3307
3308 netif_device_attach(netdev);
3309
3310 return err;
3311}
3312
3313#endif /* CONFIG_PM */
3314static void ixgbevf_shutdown(struct pci_dev *pdev)
3315{
3316 ixgbevf_suspend(pdev, PMSG_SUSPEND);
92915f71
GR
3317}
3318
4197aa7b
ED
3319static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3320 struct rtnl_link_stats64 *stats)
3321{
3322 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3323 unsigned int start;
3324 u64 bytes, packets;
3325 const struct ixgbevf_ring *ring;
3326 int i;
3327
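	/* Per-ring byte/packet counters are sampled under the
	 * u64_stats_fetch_begin_bh()/retry pairs below; on 32-bit SMP
	 * kernels this seqcount loop guarantees a consistent 64-bit
	 * snapshot even if a ring updates its totals concurrently.
	 */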
3328 ixgbevf_update_stats(adapter);
3329
3330 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3331
3332 for (i = 0; i < adapter->num_rx_queues; i++) {
3333 ring = &adapter->rx_ring[i];
3334 do {
3335 start = u64_stats_fetch_begin_bh(&ring->syncp);
3336 bytes = ring->total_bytes;
3337 packets = ring->total_packets;
3338 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3339 stats->rx_bytes += bytes;
3340 stats->rx_packets += packets;
3341 }
3342
3343 for (i = 0; i < adapter->num_tx_queues; i++) {
3344 ring = &adapter->tx_ring[i];
3345 do {
3346 start = u64_stats_fetch_begin_bh(&ring->syncp);
3347 bytes = ring->total_bytes;
3348 packets = ring->total_packets;
3349 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3350 stats->tx_bytes += bytes;
3351 stats->tx_packets += packets;
3352 }
3353
3354 return stats;
3355}
3356
0ac1e8ce 3357static const struct net_device_ops ixgbevf_netdev_ops = {
c12db769
SH
3358 .ndo_open = ixgbevf_open,
3359 .ndo_stop = ixgbevf_close,
3360 .ndo_start_xmit = ixgbevf_xmit_frame,
3361 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
4197aa7b 3362 .ndo_get_stats64 = ixgbevf_get_stats,
92915f71 3363 .ndo_validate_addr = eth_validate_addr,
c12db769
SH
3364 .ndo_set_mac_address = ixgbevf_set_mac,
3365 .ndo_change_mtu = ixgbevf_change_mtu,
3366 .ndo_tx_timeout = ixgbevf_tx_timeout,
c12db769
SH
3367 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
3368 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
c777cdfa
JK
3369#ifdef CONFIG_NET_RX_BUSY_POLL
3370 .ndo_busy_poll = ixgbevf_busy_poll_recv,
3371#endif
92915f71 3372};
92915f71
GR
3373
3374static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3375{
0ac1e8ce 3376 dev->netdev_ops = &ixgbevf_netdev_ops;
92915f71
GR
3377 ixgbevf_set_ethtool_ops(dev);
3378 dev->watchdog_timeo = 5 * HZ;
3379}
3380
3381/**
3382 * ixgbevf_probe - Device Initialization Routine
3383 * @pdev: PCI device information struct
3384 * @ent: entry in ixgbevf_pci_tbl
3385 *
3386 * Returns 0 on success, negative on failure
3387 *
3388 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3389 * The OS initialization, configuring of the adapter private structure,
3390 * and a hardware reset occur.
3391 **/
1dd06ae8 3392static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
92915f71
GR
3393{
3394 struct net_device *netdev;
3395 struct ixgbevf_adapter *adapter = NULL;
3396 struct ixgbe_hw *hw = NULL;
3397 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3398 static int cards_found;
3399 int err, pci_using_dac;
3400
3401 err = pci_enable_device(pdev);
3402 if (err)
3403 return err;
3404
53567aa4 3405 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
92915f71
GR
3406 pci_using_dac = 1;
3407 } else {
53567aa4 3408 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
92915f71 3409 if (err) {
53567aa4
RK
3410 dev_err(&pdev->dev, "No usable DMA "
3411 "configuration, aborting\n");
3412 goto err_dma;
92915f71
GR
3413 }
3414 pci_using_dac = 0;
3415 }
3416
3417 err = pci_request_regions(pdev, ixgbevf_driver_name);
3418 if (err) {
3419 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3420 goto err_pci_reg;
3421 }
3422
3423 pci_set_master(pdev);
3424
92915f71
GR
3425 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3426 MAX_TX_QUEUES);
92915f71
GR
3427 if (!netdev) {
3428 err = -ENOMEM;
3429 goto err_alloc_etherdev;
3430 }
3431
3432 SET_NETDEV_DEV(netdev, &pdev->dev);
3433
3434 pci_set_drvdata(pdev, netdev);
3435 adapter = netdev_priv(netdev);
3436
3437 adapter->netdev = netdev;
3438 adapter->pdev = pdev;
3439 hw = &adapter->hw;
3440 hw->back = adapter;
b3f4d599 3441 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
92915f71
GR
3442
3443 /*
3444 * call save state here in standalone driver because it relies on
3445 * adapter struct to exist, and needs to call netdev_priv
3446 */
3447 pci_save_state(pdev);
3448
3449 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3450 pci_resource_len(pdev, 0));
3451 if (!hw->hw_addr) {
3452 err = -EIO;
3453 goto err_ioremap;
3454 }
3455
3456 ixgbevf_assign_netdev_ops(netdev);
3457
3458 adapter->bd_number = cards_found;
3459
3460 /* Setup hw api */
3461 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3462 hw->mac.type = ii->mac;
3463
3464 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
f416dfc0 3465 sizeof(struct ixgbe_mbx_operations));
92915f71 3466
92915f71
GR
3467 /* setup the private structure */
3468 err = ixgbevf_sw_init(adapter);
1a0d6ae5
DK
3469 if (err)
3470 goto err_sw_init;
3471
3472 /* The HW MAC address was set and/or determined in sw_init */
1a0d6ae5
DK
3473 if (!is_valid_ether_addr(netdev->dev_addr)) {
3474 pr_err("invalid MAC address\n");
3475 err = -EIO;
3476 goto err_sw_init;
3477 }
92915f71 3478
471a76de 3479 netdev->hw_features = NETIF_F_SG |
92915f71 3480 NETIF_F_IP_CSUM |
471a76de
MM
3481 NETIF_F_IPV6_CSUM |
3482 NETIF_F_TSO |
3483 NETIF_F_TSO6 |
3484 NETIF_F_RXCSUM;
3485
3486 netdev->features = netdev->hw_features |
3487 NETIF_F_HW_VLAN_CTAG_TX |
3488 NETIF_F_HW_VLAN_CTAG_RX |
3489 NETIF_F_HW_VLAN_CTAG_FILTER;
92915f71 3490
3491 netdev->vlan_features |= NETIF_F_TSO;
3492 netdev->vlan_features |= NETIF_F_TSO6;
3493 netdev->vlan_features |= NETIF_F_IP_CSUM;
3bfacf96 3494 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
3495 netdev->vlan_features |= NETIF_F_SG;
3496
3497 if (pci_using_dac)
3498 netdev->features |= NETIF_F_HIGHDMA;
3499
3500 netdev->priv_flags |= IFF_UNICAST_FLT;
3501
92915f71 3502 init_timer(&adapter->watchdog_timer);
c061b18d 3503 adapter->watchdog_timer.function = ixgbevf_watchdog;
3504 adapter->watchdog_timer.data = (unsigned long)adapter;
3505
3506 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3507 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3508
3509 err = ixgbevf_init_interrupt_scheme(adapter);
3510 if (err)
3511 goto err_sw_init;
3512
3513 strcpy(netdev->name, "eth%d");
3514
3515 err = register_netdev(netdev);
3516 if (err)
3517 goto err_register;
3518
3519 netif_carrier_off(netdev);
3520
3521 ixgbevf_init_last_counter_stats(adapter);
3522
92915f71 3523 /* print the MAC address */
f794e7ef 3524 hw_dbg(hw, "%pM\n", netdev->dev_addr);
3525
3526 hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3527
3528 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3529 cards_found++;
3530 return 0;
3531
3532err_register:
0ac1e8ce 3533 ixgbevf_clear_interrupt_scheme(adapter);
3534err_sw_init:
3535 ixgbevf_reset_interrupt_capability(adapter);
3536 iounmap(hw->hw_addr);
3537err_ioremap:
3538 free_netdev(netdev);
3539err_alloc_etherdev:
3540 pci_release_regions(pdev);
3541err_pci_reg:
3542err_dma:
3543 pci_disable_device(pdev);
3544 return err;
3545}
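/*
 * Illustrative sketch only -- not part of the original driver.  The probe
 * routine above uses the usual "try 64-bit DMA, fall back to 32-bit"
 * pattern; factored into a helper (hypothetical name) it would look like
 * this.  dma_set_mask_and_coherent() returns 0 on success.
 */
static int __maybe_unused example_set_dma_mask(struct pci_dev *pdev,
					       int *pci_using_dac)
{
	/* Prefer a full 64-bit DMA mask for both streaming and coherent maps */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		*pci_using_dac = 1;
		return 0;
	}

	/* Otherwise fall back to 32-bit addressing; caller aborts on error */
	*pci_using_dac = 0;
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}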
3546
3547/**
3548 * ixgbevf_remove - Device Removal Routine
3549 * @pdev: PCI device information struct
3550 *
3551 * ixgbevf_remove is called by the PCI subsystem to alert the driver
3552 * that it should release a PCI device. This could be caused by a
3553 * Hot-Plug event, or because the driver is going to be removed from
3554 * memory.
3555 **/
9f9a12f8 3556static void ixgbevf_remove(struct pci_dev *pdev)
3557{
3558 struct net_device *netdev = pci_get_drvdata(pdev);
3559 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3560
3561 set_bit(__IXGBEVF_DOWN, &adapter->state);
3562
3563 del_timer_sync(&adapter->watchdog_timer);
3564
23f333a2 3565 cancel_work_sync(&adapter->reset_task);
3566 cancel_work_sync(&adapter->watchdog_task);
3567
fd13a9ab 3568 if (netdev->reg_state == NETREG_REGISTERED)
92915f71 3569 unregister_netdev(netdev);
92915f71 3570
0ac1e8ce 3571 ixgbevf_clear_interrupt_scheme(adapter);
3572 ixgbevf_reset_interrupt_capability(adapter);
3573
3574 iounmap(adapter->hw.hw_addr);
3575 pci_release_regions(pdev);
3576
3577 hw_dbg(&adapter->hw, "Remove complete\n");
3578
3579 kfree(adapter->tx_ring);
3580 kfree(adapter->rx_ring);
3581
3582 free_netdev(netdev);
3583
3584 pci_disable_device(pdev);
3585}
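/*
 * Illustrative sketch only -- not part of the original driver.  Every PCI
 * callback in this file recovers its private state the same way: the
 * net_device is stored with pci_set_drvdata() in probe, and netdev_priv()
 * returns the adapter that was allocated alongside it.  A hypothetical
 * helper for that pattern:
 */
static struct ixgbevf_adapter * __maybe_unused
example_pdev_to_adapter(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	return netdev_priv(netdev);
}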
3586
3587/**
3588 * ixgbevf_io_error_detected - called when PCI error is detected
3589 * @pdev: Pointer to PCI device
3590 * @state: The current pci connection state
3591 *
3592 * This function is called after a PCI bus error affecting
3593 * this device has been detected.
3594 */
3595static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3596 pci_channel_state_t state)
3597{
3598 struct net_device *netdev = pci_get_drvdata(pdev);
3599 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3600
3601 netif_device_detach(netdev);
3602
3603 if (state == pci_channel_io_perm_failure)
3604 return PCI_ERS_RESULT_DISCONNECT;
3605
3606 if (netif_running(netdev))
3607 ixgbevf_down(adapter);
3608
3609 pci_disable_device(pdev);
3610
3611	/* Request a slot reset. */
3612 return PCI_ERS_RESULT_NEED_RESET;
3613}
3614
3615/**
3616 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
3617 * @pdev: Pointer to PCI device
3618 *
3619 * Restart the card from scratch, as if from a cold boot. The implementation
3620 * resembles the first half of the ixgbevf_resume routine.
3621 */
3622static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
3623{
3624 struct net_device *netdev = pci_get_drvdata(pdev);
3625 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3626
3627 if (pci_enable_device_mem(pdev)) {
3628 dev_err(&pdev->dev,
3629 "Cannot re-enable PCI device after reset.\n");
3630 return PCI_ERS_RESULT_DISCONNECT;
3631 }
3632
3633 pci_set_master(pdev);
3634
3635 ixgbevf_reset(adapter);
3636
3637 return PCI_ERS_RESULT_RECOVERED;
3638}
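/*
 * Editorial note, not from the original source: pci_enable_device_mem()
 * used above enables only the memory-mapped BARs, which is all this VF
 * needs after a reset, whereas the probe path calls pci_enable_device(),
 * which would also enable any I/O port BARs.
 */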
3639
3640/**
3641 * ixgbevf_io_resume - called when traffic can start flowing again.
3642 * @pdev: Pointer to PCI device
3643 *
3644 * This callback is called when the error recovery driver tells us that
3645 * it's OK to resume normal operation. The implementation resembles the
3646 * second half of the ixgbevf_resume routine.
3647 */
3648static void ixgbevf_io_resume(struct pci_dev *pdev)
3649{
3650 struct net_device *netdev = pci_get_drvdata(pdev);
3651 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3652
3653 if (netif_running(netdev))
3654 ixgbevf_up(adapter);
3655
3656 netif_device_attach(netdev);
3657}
3658
3659/* PCI Error Recovery (ERS) */
3646f0e5 3660static const struct pci_error_handlers ixgbevf_err_handler = {
3661 .error_detected = ixgbevf_io_error_detected,
3662 .slot_reset = ixgbevf_io_slot_reset,
3663 .resume = ixgbevf_io_resume,
3664};
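/*
 * Editorial note, not part of the original source: on a recoverable bus
 * error the PCI error-recovery core walks the handlers above roughly in
 * this order (no ->mmio_enabled hook is provided here):
 *
 *	error_detected()  ->  detach the netdev, return PCI_ERS_RESULT_NEED_RESET
 *	slot_reset()      ->  re-enable the device, reset the VF hardware
 *	resume()          ->  bring the interface back up, reattach the netdev
 */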
3665
3666static struct pci_driver ixgbevf_driver = {
3667 .name = ixgbevf_driver_name,
3668 .id_table = ixgbevf_pci_tbl,
3669 .probe = ixgbevf_probe,
9f9a12f8 3670 .remove = ixgbevf_remove,
3671#ifdef CONFIG_PM
3672 /* Power Management Hooks */
3673 .suspend = ixgbevf_suspend,
3674 .resume = ixgbevf_resume,
3675#endif
92915f71 3676 .shutdown = ixgbevf_shutdown,
9f19f31d 3677 .err_handler = &ixgbevf_err_handler
3678};
3679
3680/**
65d676c8 3681 * ixgbevf_init_module - Driver Registration Routine
92915f71 3682 *
65d676c8 3683 * ixgbevf_init_module is the first routine called when the driver is
3684 * loaded. All it does is register with the PCI subsystem.
3685 **/
3686static int __init ixgbevf_init_module(void)
3687{
3688 int ret;
3689 pr_info("%s - version %s\n", ixgbevf_driver_string,
3690 ixgbevf_driver_version);
92915f71 3691
dbd9636e 3692 pr_info("%s\n", ixgbevf_copyright);
3693
3694 ret = pci_register_driver(&ixgbevf_driver);
3695 return ret;
3696}
3697
3698module_init(ixgbevf_init_module);
3699
3700/**
65d676c8 3701 * ixgbevf_exit_module - Driver Exit Cleanup Routine
92915f71 3702 *
65d676c8 3703 * ixgbevf_exit_module is called just before the driver is removed
3704 * from memory.
3705 **/
3706static void __exit ixgbevf_exit_module(void)
3707{
3708 pci_unregister_driver(&ixgbevf_driver);
3709}
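/*
 * Editorial note, not part of the original source: a driver whose init
 * routine only calls pci_register_driver() could replace the init/exit
 * pair above with the module_pci_driver() helper macro, e.g.:
 *
 *	module_pci_driver(ixgbevf_driver);
 *
 * The explicit pair is kept here so the version banner and copyright can
 * be printed at load time.
 */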
3710
3711#ifdef DEBUG
3712/**
65d676c8 3713 * ixgbevf_get_hw_dev_name - return device name string
3714 * used by the hardware layer to print debugging information
3715 **/
3716char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3717{
3718 struct ixgbevf_adapter *adapter = hw->back;
3719 return adapter->netdev->name;
3720}
3721
3722#endif
3723module_exit(ixgbevf_exit_module);
3724
3725/* ixgbevf_main.c */