/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"
#define DRV_VERSION "2.1.0-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};
static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);
static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};
/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		printk(KERN_INFO "%-15s %08x\n",
			reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	printk(KERN_INFO "%-15s ", rname);
	for (n = 0; n < 4; n++)
		printk(KERN_CONT "%08x ", regs[n]);
	printk(KERN_CONT "\n");
}
/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	int n = 0;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_buffer *buffer_info;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		printk(KERN_INFO "Device Name     state            "
			"trans_start      last_rx\n");
		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
		netdev->name,
		netdev->state,
		netdev->trans_start,
		netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	printk(KERN_INFO " Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
		" leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
		printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
			   n, tx_ring->next_to_use, tx_ring->next_to_clean,
			   (u64)buffer_info->dma,
			   buffer_info->length,
			   buffer_info->next_to_watch,
			   (u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31   24             15     0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "T [desc]     [address 63:0  ] "
			"[PlPOCIStDDM Ln] [bi->dma       ] "
			"leng  ntw timestamp        bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
				" %04X  %3X %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb);
			if (i == tx_ring->next_to_use &&
				i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC/U\n");
			else if (i == tx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO " %5d %5X %5X\n", n,
			   rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
	 *   | Checksum   Ident  |   |           |    | Type | Type |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				printk(KERN_INFO "RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb);
			} else {
				printk(KERN_INFO "R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(buffer_info->dma),
						rx_ring->rx_buffer_len, true);
					if (rx_ring->rx_buffer_len
						< IGB_RXBUFFER_1024)
						print_hex_dump(KERN_INFO, "",
						  DUMP_PREFIX_ADDRESS,
						  16, 1,
						  phys_to_virt(
						    buffer_info->page_dma +
						    buffer_info->page_offset),
						  PAGE_SIZE/2, true);
				}
			}

			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");
		}
	}

exit:
	return;
}
/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 **/
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	/*
	 * The timestamp latches on lowest register read. For the 82580
	 * the lowest register is SYSTIMR instead of SYSTIML.  However we never
	 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
	 */
	if (hw->mac.type == e1000_82580) {
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}
/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}
/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;

	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);
/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
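/* Q_IDX_82576 maps ring index i to a hardware register index: even indices
 * count up from 0 and odd indices count up from 8, so i = 0,1,2,3 map to
 * 0,8,1,9.  This matches the VF queue layout described in
 * igb_cache_ring_register() below. */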
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
				                               Q_IDX_82576(i);
		}
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}
static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}
/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
		adapter->tx_ring[i] = ring;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
		adapter->rx_ring[i] = ring;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}
#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	u32 msixbm = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;

	if (q_vector->rx_ring)
		rx_queue = q_vector->rx_ring->reg_idx;
	if (q_vector->tx_ring)
		tx_queue = q_vector->tx_ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
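		/* For example, rx queue 1 and tx queue 1 both mapped to
		 * msix_vector 2 write MSIXBM(2) with
		 * (E1000_EICR_RX_QUEUE0 << 1) | (E1000_EICR_TX_QUEUE0 << 1). */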
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
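		/* Each IVAR entry covers one low queue and one high queue:
		 *   byte 0 - rx queue (index), byte 1 - tx queue (index),
		 *   byte 2 - rx queue (index + 8), byte 3 - tx queue (index + 8),
		 * which is why the shifts below are 0, 8, 16 and 24. */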
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue < 8) {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			} else {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue < 8) {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			} else {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
		/* 82580 uses the same table-based approach as 82576 but has fewer
		   entries as a result we carry over for queues greater than 4. */
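		/* Here index = queue >> 1, so queues 0/1 share IVAR entry 0,
		 * queues 2/3 share entry 1, and so on; odd queues land in the
		 * upper byte pair of each entry. */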
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue & 0x1) {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			} else {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue & 0x1) {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			} else {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}
/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		                      E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		                E1000_GPIE_PBA | E1000_GPIE_EIAME |
		                E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}
/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx_ring && q_vector->tx_ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else if (q_vector->tx_ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx_ring->queue_index);
		else if (q_vector->rx_ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}
static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}
/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}
/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}
/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
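	/* For example, with rss_queues = 4 and queue pairing disabled this
	 * comes to 4 rx + 4 tx = 8 queue vectors, and the extra vector added
	 * below brings the MSI-X request to 9 entries. */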
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
	                                GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
	                      adapter->msix_entries,
	                      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
}
/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	return 0;

err_out:
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}
static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx_ring = adapter->rx_ring[ring_idx];
	q_vector->rx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx_ring = adapter->tx_ring[ring_idx];
	q_vector->tx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->tx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}
/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
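 * When there are enough vectors each ring gets its own vector; otherwise an
 * rx ring and the tx ring with the same index share one vector (e.g. 4 rx +
 * 4 tx rings on 4 q_vectors pair rx[i] with tx[i]).
 **/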
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}
/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}
/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	} else {
		igb_assign_vector(adapter->q_vector[0], 0);
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
		                  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
	                  netdev->name, adapter);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}
static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
			         q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}
/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * we need to be careful when disabling interrupts.  The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}
/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		if (adapter->hw.mac.type == e1000_82580)
			ims |= E1000_IMS_DRSTA;

		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !vlan_group_get_device(adapter->vlgrp, old_vid)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}
/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}
/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
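	/* Keeping one descriptor unused means next_to_use == next_to_clean
	 * unambiguously reads as "ring empty" rather than "ring full". */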
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
	}
}
/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}
/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}
	if (adapter->msix_entries)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}
void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_disable(&q_vector->napi);
	}

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset*/
	igb_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}
void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}
void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition PBA for greater than 9k MTU.
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
		                sizeof(union e1000_adv_tx_desc) -
		                ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
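	/* For example, a 34KB Rx FIFO with a 1522-byte max frame gives
	 * min(34816 * 9 / 10, 34816 - 2 * 1522) = min(31334, 31772) = 31334,
	 * which is then rounded down to 16-byte granularity below. */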
	hwm = min(((pba << 10) * 9 / 10),
	          ((pba << 10) - 2 * adapter->max_frame_size));

	fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags = 0;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	hw->mac.ops.reset_hw(hw);
	wr32(E1000_WUC, 0);

	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");

	if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);
		wr32(E1000_PCIEMISC,
		     reg & ~E1000_PCIEMISC_LX_DECISION);
	}
	if (!netif_running(adapter->netdev))
		igb_power_down_link(adapter);

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_get_phy_info(hw);
}
static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame_adv,
	.ndo_get_stats		= igb_get_stats,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_multicast_list	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= igb_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate	= igb_ndo_set_vf_bw,
	.ndo_get_vf_config	= igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
};
1709 * igb_probe - Device Initialization Routine
1710 * @pdev: PCI device information struct
1711 * @ent: entry in igb_pci_tbl
1713 * Returns 0 on success, negative on failure
1715 * igb_probe initializes an adapter identified by a pci_dev structure.
1716 * The OS initialization, configuring of the adapter private structure,
1717 * and a hardware reset occur.
1719 static int __devinit
igb_probe(struct pci_dev
*pdev
,
1720 const struct pci_device_id
*ent
)
1722 struct net_device
*netdev
;
1723 struct igb_adapter
*adapter
;
1724 struct e1000_hw
*hw
;
1725 u16 eeprom_data
= 0;
1726 static int global_quad_port_a
; /* global quad port a indication */
1727 const struct e1000_info
*ei
= igb_info_tbl
[ent
->driver_data
];
1728 unsigned long mmio_start
, mmio_len
;
1729 int err
, pci_using_dac
;
1730 u16 eeprom_apme_mask
= IGB_EEPROM_APME
;
1733 /* Catch broken hardware that put the wrong VF device ID in
1734 * the PCIe SR-IOV capability.
1736 if (pdev
->is_virtfn
) {
1737 WARN(1, KERN_ERR
"%s (%hx:%hx) should not be a VF!\n",
1738 pci_name(pdev
), pdev
->vendor
, pdev
->device
);
1742 err
= pci_enable_device_mem(pdev
);
1747 err
= dma_set_mask(&pdev
->dev
, DMA_BIT_MASK(64));
1749 err
= dma_set_coherent_mask(&pdev
->dev
, DMA_BIT_MASK(64));
1753 err
= dma_set_mask(&pdev
->dev
, DMA_BIT_MASK(32));
1755 err
= dma_set_coherent_mask(&pdev
->dev
, DMA_BIT_MASK(32));
1757 dev_err(&pdev
->dev
, "No usable DMA "
1758 "configuration, aborting\n");
1764 err
= pci_request_selected_regions(pdev
, pci_select_bars(pdev
,
1770 pci_enable_pcie_error_reporting(pdev
);
1772 pci_set_master(pdev
);
1773 pci_save_state(pdev
);
1776 netdev
= alloc_etherdev_mq(sizeof(struct igb_adapter
),
1777 IGB_ABS_MAX_TX_QUEUES
);
1779 goto err_alloc_etherdev
;
1781 SET_NETDEV_DEV(netdev
, &pdev
->dev
);
1783 pci_set_drvdata(pdev
, netdev
);
1784 adapter
= netdev_priv(netdev
);
1785 adapter
->netdev
= netdev
;
1786 adapter
->pdev
= pdev
;
1789 adapter
->msg_enable
= NETIF_MSG_DRV
| NETIF_MSG_PROBE
;
1791 mmio_start
= pci_resource_start(pdev
, 0);
1792 mmio_len
= pci_resource_len(pdev
, 0);
1795 hw
->hw_addr
= ioremap(mmio_start
, mmio_len
);
1799 netdev
->netdev_ops
= &igb_netdev_ops
;
1800 igb_set_ethtool_ops(netdev
);
1801 netdev
->watchdog_timeo
= 5 * HZ
;
1803 strncpy(netdev
->name
, pci_name(pdev
), sizeof(netdev
->name
) - 1);
1805 netdev
->mem_start
= mmio_start
;
1806 netdev
->mem_end
= mmio_start
+ mmio_len
;
1808 /* PCI config space info */
1809 hw
->vendor_id
= pdev
->vendor
;
1810 hw
->device_id
= pdev
->device
;
1811 hw
->revision_id
= pdev
->revision
;
1812 hw
->subsystem_vendor_id
= pdev
->subsystem_vendor
;
1813 hw
->subsystem_device_id
= pdev
->subsystem_device
;
1815 /* Copy the default MAC, PHY and NVM function pointers */
1816 memcpy(&hw
->mac
.ops
, ei
->mac_ops
, sizeof(hw
->mac
.ops
));
1817 memcpy(&hw
->phy
.ops
, ei
->phy_ops
, sizeof(hw
->phy
.ops
));
1818 memcpy(&hw
->nvm
.ops
, ei
->nvm_ops
, sizeof(hw
->nvm
.ops
));
1819 /* Initialize skew-specific constants */
1820 err
= ei
->get_invariants(hw
);
1824 /* setup the private structure */
1825 err
= igb_sw_init(adapter
);
1829 igb_get_bus_info_pcie(hw
);
1831 hw
->phy
.autoneg_wait_to_complete
= false;
1833 /* Copper options */
1834 if (hw
->phy
.media_type
== e1000_media_type_copper
) {
1835 hw
->phy
.mdix
= AUTO_ALL_MODES
;
1836 hw
->phy
.disable_polarity_correction
= false;
1837 hw
->phy
.ms_type
= e1000_ms_hw_default
;
1840 if (igb_check_reset_block(hw
))
1841 dev_info(&pdev
->dev
,
1842 "PHY reset is blocked due to SOL/IDER session.\n");
1844 netdev
->features
= NETIF_F_SG
|
1846 NETIF_F_HW_VLAN_TX
|
1847 NETIF_F_HW_VLAN_RX
|
1848 NETIF_F_HW_VLAN_FILTER
;
1850 netdev
->features
|= NETIF_F_IPV6_CSUM
;
1851 netdev
->features
|= NETIF_F_TSO
;
1852 netdev
->features
|= NETIF_F_TSO6
;
1853 netdev
->features
|= NETIF_F_GRO
;
1855 netdev
->vlan_features
|= NETIF_F_TSO
;
1856 netdev
->vlan_features
|= NETIF_F_TSO6
;
1857 netdev
->vlan_features
|= NETIF_F_IP_CSUM
;
1858 netdev
->vlan_features
|= NETIF_F_IPV6_CSUM
;
1859 netdev
->vlan_features
|= NETIF_F_SG
;
1862 netdev
->features
|= NETIF_F_HIGHDMA
;
1864 if (hw
->mac
.type
>= e1000_82576
)
1865 netdev
->features
|= NETIF_F_SCTP_CSUM
;
1867 adapter
->en_mng_pt
= igb_enable_mng_pass_thru(hw
);
1869 /* before reading the NVM, reset the controller to put the device in a
1870 * known good starting state */
1871 hw
->mac
.ops
.reset_hw(hw
);
1873 /* make sure the NVM is good */
1874 if (igb_validate_nvm_checksum(hw
) < 0) {
1875 dev_err(&pdev
->dev
, "The NVM Checksum Is Not Valid\n");
1880 /* copy the MAC address out of the NVM */
1881 if (hw
->mac
.ops
.read_mac_addr(hw
))
1882 dev_err(&pdev
->dev
, "NVM Read Error\n");
1884 memcpy(netdev
->dev_addr
, hw
->mac
.addr
, netdev
->addr_len
);
1885 memcpy(netdev
->perm_addr
, hw
->mac
.addr
, netdev
->addr_len
);
1887 if (!is_valid_ether_addr(netdev
->perm_addr
)) {
1888 dev_err(&pdev
->dev
, "Invalid MAC Address\n");
1893 setup_timer(&adapter
->watchdog_timer
, igb_watchdog
,
1894 (unsigned long) adapter
);
1895 setup_timer(&adapter
->phy_info_timer
, igb_update_phy_info
,
1896 (unsigned long) adapter
);
1898 INIT_WORK(&adapter
->reset_task
, igb_reset_task
);
1899 INIT_WORK(&adapter
->watchdog_task
, igb_watchdog_task
);
1901 /* Initialize link properties that are user-changeable */
1902 adapter
->fc_autoneg
= true;
1903 hw
->mac
.autoneg
= true;
1904 hw
->phy
.autoneg_advertised
= 0x2f;
1906 hw
->fc
.requested_mode
= e1000_fc_default
;
1907 hw
->fc
.current_mode
= e1000_fc_default
;
1909 igb_validate_mdi_setting(hw
);
1911 /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
1912 * enable the ACPI Magic Packet filter
1915 if (hw
->bus
.func
== 0)
1916 hw
->nvm
.ops
.read(hw
, NVM_INIT_CONTROL3_PORT_A
, 1, &eeprom_data
);
1917 else if (hw
->mac
.type
== e1000_82580
)
1918 hw
->nvm
.ops
.read(hw
, NVM_INIT_CONTROL3_PORT_A
+
1919 NVM_82580_LAN_FUNC_OFFSET(hw
->bus
.func
), 1,
1921 else if (hw
->bus
.func
== 1)
1922 hw
->nvm
.ops
.read(hw
, NVM_INIT_CONTROL3_PORT_B
, 1, &eeprom_data
);
1924 if (eeprom_data
& eeprom_apme_mask
)
1925 adapter
->eeprom_wol
|= E1000_WUFC_MAG
;
1927 /* now that we have the eeprom settings, apply the special cases where
1928 * the eeprom may be wrong or the board simply won't support wake on
1929 * lan on a particular port */
1930 switch (pdev
->device
) {
1931 case E1000_DEV_ID_82575GB_QUAD_COPPER
:
1932 adapter
->eeprom_wol
= 0;
1934 case E1000_DEV_ID_82575EB_FIBER_SERDES
:
1935 case E1000_DEV_ID_82576_FIBER
:
1936 case E1000_DEV_ID_82576_SERDES
:
1937 /* Wake events only supported on port A for dual fiber
1938 * regardless of eeprom setting */
1939 if (rd32(E1000_STATUS
) & E1000_STATUS_FUNC_1
)
1940 adapter
->eeprom_wol
= 0;
1942 case E1000_DEV_ID_82576_QUAD_COPPER
:
1943 case E1000_DEV_ID_82576_QUAD_COPPER_ET2
:
1944 /* if quad port adapter, disable WoL on all but port A */
1945 if (global_quad_port_a
!= 0)
1946 adapter
->eeprom_wol
= 0;
1948 adapter
->flags
|= IGB_FLAG_QUAD_PORT_A
;
1949 /* Reset for multiple quad port adapters */
1950 if (++global_quad_port_a
== 4)
1951 global_quad_port_a
= 0;
1955 /* initialize the wol settings based on the eeprom settings */
1956 adapter
->wol
= adapter
->eeprom_wol
;
1957 device_set_wakeup_enable(&adapter
->pdev
->dev
, adapter
->wol
);
1959 /* reset the hardware with the new settings */
1962 /* let the f/w know that the h/w is now under the control of the
1964 igb_get_hw_control(adapter
);
1966 strcpy(netdev
->name
, "eth%d");
1967 err
= register_netdev(netdev
);
1971 /* carrier off reporting is important to ethtool even BEFORE open */
1972 netif_carrier_off(netdev
);
1974 #ifdef CONFIG_IGB_DCA
1975 if (dca_add_requester(&pdev
->dev
) == 0) {
1976 adapter
->flags
|= IGB_FLAG_DCA_ENABLED
;
1977 dev_info(&pdev
->dev
, "DCA enabled\n");
1978 igb_setup_dca(adapter
);
	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
	         netdev->name,
	         ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
	          (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
	                                                    "unknown"),
	         ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
	          (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
	          (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
	                                                       "unknown"),
	         netdev->dev_addr);

	igb_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
	         (part_num >> 8), (part_num & 0xff));

	dev_info(&pdev->dev,
	         "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
	         adapter->msix_entries ? "MSI-X" :
	         (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
	         adapter->num_rx_queues, adapter->num_tx_queues);

	return 0;
err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	igb_clear_interrupt_scheme(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
	                             pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* flush_scheduled_work() may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	flush_scheduled_work();
#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif
	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev,
	                             pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
/**
 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 * @adapter: board private structure to initialize
 *
 * This function initializes the vf specific data storage and then attempts to
 * allocate the VFs.  The reason for ordering it this way is because it is much
 * more expensive time wise to disable SR-IOV than it is to allocate and free
 * the memory for the VFs.
 **/
static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	struct pci_dev *pdev = adapter->pdev;

	if (adapter->vfs_allocated_count) {
		adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
		                           sizeof(struct vf_data_storage),
		                           GFP_KERNEL);
		/* if allocation failed then we do not support SR-IOV */
		if (!adapter->vf_data) {
			adapter->vfs_allocated_count = 0;
			dev_err(&pdev->dev, "Unable to allocate memory for VF "
			        "Data Storage\n");
		}
	}

	if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
#endif /* CONFIG_PCI_IOV */
		adapter->vfs_allocated_count = 0;
#ifdef CONFIG_PCI_IOV
	} else {
		unsigned char mac_addr[ETH_ALEN];
		int i;
		dev_info(&pdev->dev, "%d vfs allocated\n",
		         adapter->vfs_allocated_count);
		for (i = 0; i < adapter->vfs_allocated_count; i++) {
			random_ether_addr(mac_addr);
			igb_set_vf_mac(adapter, i, mac_addr);
		}
	}
#endif /* CONFIG_PCI_IOV */
}
/**
 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
 * @adapter: board private structure to initialize
 *
 * igb_init_hw_timer initializes the function pointer and values for the hw
 * timer found in hardware.
 **/
static void igb_init_hw_timer(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case e1000_i350:
	case e1000_82580:
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/*
		 * The 82580 timesync updates the system timer every 8ns by 8ns
		 * and the value cannot be shifted.  Instead we need to shift
		 * the registers to generate a 64bit timer value.  As a result
		 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
		 * 24 in order to generate a larger value for synchronization.
		 */
		adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
		/* disable system timer temporarily by setting bit 31 */
		wr32(E1000_TSAUXC, 0x80000000);

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIMR, 0x00000000);
		wr32(E1000_SYSTIML, 0x80000000);
		wr32(E1000_SYSTIMH, 0x000000FF);

		/* enable system timer by clearing bit 31 */
		wr32(E1000_TSAUXC, 0x0);

		timecounter_init(&adapter->clock,
		                 &adapter->cycles,
		                 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
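		/*
		 * timecounter_init() layers a monotonic nanosecond counter on
		 * top of the raw cycle counter, and the timecompare settings
		 * above let the kernel estimate the NIC-to-wall-clock offset
		 * from ten paired readings at a time.
		 */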
		break;
	case e1000_82576:
		/*
		 * Initialize hardware timer: we keep it running just in case
		 * that some program needs it later on.
		 */
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/*
		 * Scale the NIC clock cycle by a large factor so that
		 * relatively small clock corrections can be added or
		 * subtracted at each clock tick. The drawbacks of a large
		 * factor are a) that the clock register overflows more quickly
		 * (not such a big deal) and b) that the increment per tick has
		 * to fit into 24 bits.  As a result we need to use a shift of
		 * 19 so we can fit a value of 16 into the TIMINCA register.
		 */
		adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
		wr32(E1000_TIMINCA,
		     (1 << E1000_TIMINCA_16NS_SHIFT) |
		     (16 << IGB_82576_TSYNC_SHIFT));

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIML, 0x00000000);
		wr32(E1000_SYSTIMH, 0xFF800000);

		timecounter_init(&adapter->clock,
		                 &adapter->cycles,
		                 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82575:
		/* 82575 does not support timesync */
	default:
		break;
	}
}
/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;
	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
	adapter->tx_itr_setting = IGB_DEFAULT_ITR;

	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

#ifdef CONFIG_PCI_IOV
	if (hw->mac.type == e1000_82576)
		adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs;

#endif /* CONFIG_PCI_IOV */
	adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());

	/*
	 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
	 * then we should combine the queues into a queue pair in order to
	 * conserve interrupts due to limited supply
	 */
	if ((adapter->rss_queues > 4) ||
	    ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
		adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
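	/*
	 * For example, on an 8-CPU system with no VFs this gives
	 * rss_queues = 8 > 4, so each Tx/Rx ring pair shares a single
	 * interrupt vector rather than using one vector per ring.
	 */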
	/* This call may decrease the number of queues */
	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_init_hw_timer(adapter);
	igb_probe_vfs(adapter);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}
/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igb_open(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	igb_power_up_link(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.  */
	igb_configure(adapter);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);

	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;

err_req_irq:
	igb_release_hw_control(adapter);
	igb_power_down_link(adapter);
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);

	return err;
}

/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igb_close(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
	igb_down(adapter);

	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	return 0;
}
/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size;

	size = sizeof(struct igb_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev,
	                                   tx_ring->size,
	                                   &tx_ring->dma,
	                                   GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->buffer_info);
	dev_err(dev,
	        "Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}
/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
			        "Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}
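	/*
	 * Map every possible stack queue index onto a real ring; when there
	 * are fewer rings than IGB_ABS_MAX_TX_QUEUES, the indices simply
	 * wrap round-robin onto the rings that do exist.
	 */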
	for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
		int r_idx = i % adapter->num_tx_queues;
		adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
	}
	return err;
}
/**
 * igb_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 **/
void igb_setup_tctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which is enabled by default on 82575 and 82576 */
	wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}
/**
 * igb_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 **/
void igb_configure_tx_ring(struct igb_adapter *adapter,
                           struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txdctl;
	u64 tdba = ring->dma;
	int reg_idx = ring->reg_idx;

	/* disable the queue */
	txdctl = rd32(E1000_TXDCTL(reg_idx));
	wr32(E1000_TXDCTL(reg_idx),
	     txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
	wrfl();
	mdelay(10);

	wr32(E1000_TDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_tx_desc));
	wr32(E1000_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(E1000_TDBAH(reg_idx), tdba >> 32);

	ring->head = hw->hw_addr + E1000_TDH(reg_idx);
	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
	writel(0, ring->head);
	writel(0, ring->tail);

	txdctl |= IGB_TX_PTHRESH;
	txdctl |= IGB_TX_HTHRESH << 8;
	txdctl |= IGB_TX_WTHRESH << 16;

	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	wr32(E1000_TXDCTL(reg_idx), txdctl);
}
/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size, desc_len;

	size = sizeof(struct igb_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev,
	                                   rx_ring->size,
	                                   &rx_ring->dma,
	                                   GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the receive descriptor"
		" ring\n");
	return -ENOMEM;
}
/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
			        "Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * igb_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 **/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	u32 j, num_rx_queues, shift = 0, shift2 = 0;
	union e1000_reta {
		u32 dword;
		u8  bytes[4];
	} reta;
	static const u8 rsshash[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
		0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
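	/*
	 * Each 32-bit RSSRK register takes four key bytes, packed
	 * little-endian: byte 0 lands in bits 7:0 and byte 3 in bits 31:24.
	 */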
	/* Fill out hash function seeds */
	for (j = 0; j < 10; j++) {
		u32 rsskey = rsshash[(j * 4)];
		rsskey |= rsshash[(j * 4) + 1] << 8;
		rsskey |= rsshash[(j * 4) + 2] << 16;
		rsskey |= rsshash[(j * 4) + 3] << 24;
		array_wr32(E1000_RSSRK(0), j, rsskey);
	}

	num_rx_queues = adapter->rss_queues;

	if (adapter->vfs_allocated_count) {
		/* 82575 and 82576 supports 2 RSS queues for VMDq */
		switch (hw->mac.type) {
		case e1000_i350:
		case e1000_82580:
			num_rx_queues = 1;
			shift = 0;
			break;
		case e1000_82576:
			shift = 3;
			num_rx_queues = 2;
			break;
		case e1000_82575:
			shift = 2;
			shift2 = 6;
		default:
			break;
		}
	} else {
		if (hw->mac.type == e1000_82575)
			shift = 6;
	}

	for (j = 0; j < (32 * 4); j++) {
		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
		if (shift2)
			reta.bytes[j & 3] |= num_rx_queues << shift2;
		if ((j & 3) == 3)
			wr32(E1000_RETA(j >> 2), reta.dword);
	}
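	/*
	 * The 128-entry redirection table is written four bytes at a time;
	 * the low bits of each packet's RSS hash index into it to pick the
	 * destination queue, so filling it with j % num_rx_queues spreads
	 * flows evenly across the enabled queues.
	 */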
	/*
	 * Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	if (adapter->hw.mac.type >= e1000_82576)
		/* Enable Receive Checksum Offload for SCTP */
		rxcsum |= E1000_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(E1000_RXCSUM, rxcsum);

	/* If VMDq is enabled then we set the appropriate mode for that, else
	 * we default to RSS so that an RSS hash is calculated per packet even
	 * if we are only using one queue */
	if (adapter->vfs_allocated_count) {
		if (hw->mac.type > e1000_82575) {
			/* Set the default pool for the PF's first queue */
			u32 vtctl = rd32(E1000_VT_CTL);
			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
				   E1000_VT_CTL_DISABLE_DEF_POOL);
			vtctl |= adapter->vfs_allocated_count <<
				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
			wr32(E1000_VT_CTL, vtctl);
		}
		if (adapter->rss_queues > 1)
			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
		else
			mrqc = E1000_MRQC_ENABLE_VMDQ;
	} else {
		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
	}
	igb_vmm_control(adapter);

	/*
	 * Generate RSS hash based on TCP port numbers and/or
	 * IPv4/v6 src and dst addresses since UDP cannot be
	 * hashed reliably due to IP fragmentation
	 */
	mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
		E1000_MRQC_RSS_FIELD_IPV4_TCP |
		E1000_MRQC_RSS_FIELD_IPV6 |
		E1000_MRQC_RSS_FIELD_IPV6_TCP |
		E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;

	wr32(E1000_MRQC, mrqc);
}
/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/*
	 * enable stripping of CRC. It's unlikely this will break BMC
	 * redirection as it did with e1000. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to prevent packets larger than max_frame_size */
	rctl |= E1000_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(E1000_RXDCTL(0), 0);

	/* Attention!!!  For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
	}

	wr32(E1000_RCTL, rctl);
}
static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
                                   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* if it isn't the PF check to see if VFs are enabled and
	 * increase the size to support vlan tags */
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)
		size += VLAN_TAG_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}
/**
 * igb_rlpml_set - set maximum receive packet size
 * @adapter: board private structure
 *
 * Configure maximum receivable packet size.
 **/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
	u32 max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;

	if (adapter->vlgrp)
		max_frame_size += VLAN_TAG_SIZE;

	/* if vfs are enabled we set RLPML to the largest possible request
	 * size and set the VMOLR RLPML to the size we need */
	if (pf_id) {
		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
		max_frame_size = MAX_JUMBO_FRAME_SIZE;
	}

	wr32(E1000_RLPML, max_frame_size);
}
static inline void igb_set_vmolr(struct igb_adapter *adapter,
                                 int vfn, bool aupe)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/*
	 * This register exists only on 82576 and newer so if we are older then
	 * we should exit and do nothing
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr |= E1000_VMOLR_STRVLAN;      /* Strip vlan tags */
	if (aupe)
		vmolr |= E1000_VMOLR_AUPE;        /* Accept untagged packets */
	else
		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */

	/* clear all bits that might not be set */
	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);

	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
	/*
	 * for VMDq only allow the VFs and pool 0 to accept broadcast and
	 * multicast packets
	 */
	if (vfn <= adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_BAM;	   /* Accept broadcast */

	wr32(E1000_VMOLR(vfn), vmolr);
}
/**
 * igb_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
void igb_configure_rx_ring(struct igb_adapter *adapter,
                           struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	int reg_idx = ring->reg_idx;
	u32 srrctl, rxdctl;

	/* disable the queue */
	rxdctl = rd32(E1000_RXDCTL(reg_idx));
	wr32(E1000_RXDCTL(reg_idx),
	     rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	/* Set DMA base address registers */
	wr32(E1000_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
	wr32(E1000_RDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_rx_desc));

	/* initialize head and tail */
	ring->head = hw->hw_addr + E1000_RDH(reg_idx);
	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
	writel(0, ring->head);
	writel(0, ring->tail);

	/* set descriptor configuration */
	if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
		srrctl = ALIGN(ring->rx_buffer_len, 64) <<
		         E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
		srrctl |= IGB_RXBUFFER_16384 >>
		          E1000_SRRCTL_BSIZEPKT_SHIFT;
#else
		srrctl |= (PAGE_SIZE / 2) >>
		          E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else {
		srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
		         E1000_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	}
	if (hw->mac.type == e1000_82580)
		srrctl |= E1000_SRRCTL_TIMESTAMP;
	/* Only set Drop Enable if we are supporting multiple queues */
	if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
		srrctl |= E1000_SRRCTL_DROP_EN;

	wr32(E1000_SRRCTL(reg_idx), srrctl);

	/* set filtering for VMDQ pools */
	igb_set_vmolr(adapter, reg_idx & 0x7, true);

	/* enable receive descriptor fetching */
	rxdctl = rd32(E1000_RXDCTL(reg_idx));
	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	rxdctl &= 0xFFF00000;
	rxdctl |= IGB_RX_PTHRESH;
	rxdctl |= IGB_RX_HTHRESH << 8;
	rxdctl |= IGB_RX_WTHRESH << 16;
	wr32(E1000_RXDCTL(reg_idx), rxdctl);
}
/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	int i;

	/* set UTA to appropriate mode */
	igb_set_uta(adapter);

	/* set the correct pool for the PF default MAC address in entry 0 */
	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
	                 adapter->vfs_allocated_count);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
}
/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
	                  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_free_tx_resources(adapter->tx_ring[i]);
}
void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
                                    struct igb_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
			               buffer_info->dma,
			               buffer_info->length,
			               DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
			                 buffer_info->dma,
			                 buffer_info->length,
			                 DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->next_to_watch = 0;
	buffer_info->mapped_as_page = false;
}
/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	struct igb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	size = sizeof(struct igb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}
/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_clean_tx_ring(adapter->tx_ring[i]);
}
/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
	                  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_free_rx_resources(adapter->rx_ring[i]);
}
/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	struct igb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			dma_unmap_single(rx_ring->dev,
			                 buffer_info->dma,
			                 rx_ring->rx_buffer_len,
			                 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		if (buffer_info->page_dma) {
			dma_unmap_page(rx_ring->dev,
			               buffer_info->page_dma,
			               PAGE_SIZE / 2,
			               DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;
		}
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_clean_rx_ring(adapter->rx_ring[i]);
}
/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
	                 adapter->vfs_allocated_count);

	return 0;
}
/**
 * igb_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int igb_write_mc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8  *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igb_update_mc_addr_list(hw, NULL, 0);
		igb_restore_vf_multicasts(adapter);
		return 0;
	}

	mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
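	/* six bytes per entry is one ETH_ALEN-sized slot per multicast
	 * address; this scratch list only lives until it has been handed
	 * to igb_update_mc_addr_list() below */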
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igb_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}
/**
 * igb_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/
static int igb_write_uc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > rar_entries)
		return -ENOMEM;

	if (!netdev_uc_empty(netdev) && rar_entries) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			if (!rar_entries)
				break;
			igb_rar_set_qsel(adapter, ha->addr,
			                 rar_entries--,
			                 vfn);
			count++;
		}
	}
	/* write the addresses in reverse order to avoid write combining */
	for (; rar_entries > 0 ; rar_entries--) {
		wr32(E1000_RAH(rar_entries), 0);
		wr32(E1000_RAL(rar_entries), 0);
	}

	return count;
}
/**
 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_rx_mode(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	u32 rctl, vmolr = 0;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	rctl = rd32(E1000_RCTL);

	/* clear the affected bits */
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			vmolr |= E1000_VMOLR_MPME;
		} else {
			/*
			 * Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igb_write_mc_addr_list(netdev);
			if (count < 0) {
				rctl |= E1000_RCTL_MPE;
				vmolr |= E1000_VMOLR_MPME;
			} else if (count) {
				vmolr |= E1000_VMOLR_ROMPE;
			}
		}
		/*
		 * Write addresses to available RAR registers, if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = igb_write_uc_addr_list(netdev);
		if (count < 0) {
			rctl |= E1000_RCTL_UPE;
			vmolr |= E1000_VMOLR_ROPE;
		}
		rctl |= E1000_RCTL_VFE;
	}
	wr32(E1000_RCTL, rctl);

	/*
	 * In order to support SR-IOV and eventually VMDq it is necessary to set
	 * the VMOLR to enable the appropriate modes.  Without this workaround
	 * we will have issues with VLAN tag stripping not being done for frames
	 * that are only arriving because we are the default pool
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr |= rd32(E1000_VMOLR(vfn)) &
	         ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
	wr32(E1000_VMOLR(vfn), vmolr);
	igb_restore_vf_multicasts(adapter);
}
/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */
static void igb_update_phy_info(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;
	igb_get_phy_info(&adapter->hw);
}
/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;
	s32 ret_val = 0;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt.  get_link_status will stay
	 * false until the e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = hw->mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	return link_active;
}
/**
 * igb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igb_watchdog(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}
static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
	                                           struct igb_adapter,
	                                           watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 link;
	int i;

	link = igb_has_link(adapter);
	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			hw->mac.ops.get_speed_and_duplex(hw,
			                                 &adapter->link_speed,
			                                 &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Links status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
			       "Flow Control: %s\n",
			       netdev->name,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ?
			       "Full Duplex" : "Half Duplex",
			       ((ctrl & E1000_CTRL_TFCE) &&
			        (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
			       ((ctrl & E1000_CTRL_RFCE) ?  "RX" :
			       ((ctrl & E1000_CTRL_TFCE) ?  "TX" : "None")));

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
				          round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			/* Links status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
				          round_jiffies(jiffies + 2 * HZ));
		}
	}

	igb_update_stats(adapter);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = adapter->tx_ring[i];
		if (!netif_carrier_ok(netdev)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
				adapter->tx_timeout_count++;
				schedule_work(&adapter->reset_task);
				/* return immediately since reset is imminent */
				return;
			}
		}

		/* Force detection of hung controller every watchdog period */
		tx_ring->detect_tx_hung = true;
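		/*
		 * detect_tx_hung is consumed by the Tx cleanup path, which
		 * uses the per-buffer time stamps to decide whether work has
		 * stalled long enough for the ring to be reported as hung.
		 */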
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	if (adapter->msix_entries) {
		u32 eics = 0;
		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			eics |= q_vector->eims_value;
		}
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
		          round_jiffies(jiffies + 2 * HZ));
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
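/*
 * The three usable ranges correspond to the ITR values programmed in
 * igb_set_itr(): lowest_latency ~70k ints/s, low_latency ~20k ints/s
 * and bulk_latency ~4k ints/s.
 */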
/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 *
 *      Stores a new ITR value based strictly on packet size.  This
 *      algorithm is less sophisticated than that used in igb_update_itr,
 *      due to the difficulty of synchronizing statistics across multiple
 *      receive rings.  The divisors and thresholds used by this function
 *      were determined based on theoretical maximum wire speed and testing
 *      data, in order to minimize response time while increasing bulk
 *      throughput.
 *      This functionality is controlled by the InterruptThrottleRate module
 *      parameter (see igb_param.c)
 *      NOTE:  This function is called only when operating in a multiqueue
 *             receive environment.
 * @q_vector: pointer to q_vector
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = 976;
		goto set_itr_val;
	}

	if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
		struct igb_ring *ring = q_vector->rx_ring;
		avg_wire_size = ring->total_bytes / ring->total_packets;
	}

	if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
		struct igb_ring *ring = q_vector->tx_ring;
		avg_wire_size = max_t(u32, avg_wire_size,
		                      (ring->total_bytes /
		                       ring->total_packets));
	}

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

	/* when in itr mode 3 do not exceed 20K ints/sec */
	if (adapter->rx_itr_setting == 3 && new_val < 196)
		new_val = 196;
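	/* itr_val is expressed in roughly 256 ns units, so the floor of 196
	 * above works out to about 50 us between interrupts, i.e. the 20K
	 * ints/sec ceiling the comment refers to */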
set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	if (q_vector->rx_ring) {
		q_vector->rx_ring->total_bytes = 0;
		q_vector->rx_ring->total_packets = 0;
	}
	if (q_vector->tx_ring) {
		q_vector->tx_ring->total_bytes = 0;
		q_vector->tx_ring->total_packets = 0;
	}
}
/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      this functionality is controlled by the InterruptThrottleRate module
 *      parameter (see igb_param.c)
 *      NOTE:  These calculations are only valid when operating in a single-
 *             queue environment.
 * @adapter: pointer to adapter
 * @itr_setting: current q_vector->itr_val
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 **/
static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
                                   int packets, int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				retval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				retval = bulk_latency;
			} else if ((packets > 35)) {
				retval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 1500) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}
static void igb_set_itr(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	u16 current_itr;
	u32 new_itr = q_vector->itr_val;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->rx_itr = igb_update_itr(adapter,
	                                 adapter->rx_itr,
	                                 q_vector->rx_ring->total_packets,
	                                 q_vector->rx_ring->total_bytes);

	adapter->tx_itr = igb_update_itr(adapter,
	                                 adapter->tx_itr,
	                                 q_vector->tx_ring->total_packets,
	                                 q_vector->tx_ring->total_bytes);
	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 56;  /* aka 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = 196; /* aka 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = 980; /* aka 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	q_vector->rx_ring->total_bytes = 0;
	q_vector->rx_ring->total_packets = 0;
	q_vector->tx_ring->total_bytes = 0;
	q_vector->tx_ring->total_packets = 0;

	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > q_vector->itr_val ?
		          max((new_itr * q_vector->itr_val) /
		              (new_itr + (q_vector->itr_val >> 2)),
		              new_itr) :
		          new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}
3737 #define IGB_TX_FLAGS_CSUM 0x00000001
3738 #define IGB_TX_FLAGS_VLAN 0x00000002
3739 #define IGB_TX_FLAGS_TSO 0x00000004
3740 #define IGB_TX_FLAGS_IPV4 0x00000008
3741 #define IGB_TX_FLAGS_TSTAMP 0x00000010
3742 #define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
3743 #define IGB_TX_FLAGS_VLAN_SHIFT 16
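/* the low 16 bits of tx_flags carry the flag bits above; the high 16 bits
 * hold the 802.1Q VLAN tag, placed there via IGB_TX_FLAGS_VLAN_SHIFT */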
static inline int igb_tso_adv(struct igb_ring *tx_ring,
                              struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx;
	u8 l4len;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
		                                         iph->daddr, 0,
		                                         IPPROTO_TCP,
		                                         0);
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
		                                       &ipv6_hdr(skb)->daddr,
		                                       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= skb_network_header_len(skb);
	*hdr_len += skb_network_header_len(skb);
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	/* For 82575, context index must be unique per ring. */
	if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}
static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
                                   struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct device *dev = tx_ring->dev;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	unsigned int i;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGB_TX_FLAGS_VLAN)
			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);

		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= skb_network_header_len(skb);

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			__be16 protocol;

			if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
				const struct vlan_ethhdr *vhdr =
				          (const struct vlan_ethhdr *)skb->data;

				protocol = vhdr->h_vlan_encapsulated_proto;
			} else {
				protocol = skb->protocol;
			}

			switch (protocol) {
			case cpu_to_be16(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			case cpu_to_be16(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			default:
				if (unlikely(net_ratelimit()))
					dev_warn(dev,
					    "partial checksum but proto=%x!\n",
					    skb->protocol);
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
			context_desc->mss_l4len_idx =
				cpu_to_le32(tx_ring->reg_idx << 4);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}
3907 #define IGB_MAX_TXD_PWR 16
3908 #define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
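/* each data descriptor can carry strictly less than 1 << 16 = 64KB; the
 * BUG_ON checks in igb_tx_map_adv() below rely on every mapped segment
 * staying under this limit */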
static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
                                 unsigned int first)
{
	struct igb_buffer *buffer_info;
	struct device *dev = tx_ring->dev;
	unsigned int hlen = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;
	u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1;

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
	buffer_info->length = hlen;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = dma_map_single(dev, skb->data, hlen,
	                                  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_info->dma))
		goto dma_error;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
		unsigned int len = frag->size;

		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->mapped_as_page = true;
		buffer_info->dma = dma_map_page(dev,
		                                frag->page,
		                                frag->page_offset,
		                                len,
		                                DMA_TO_DEVICE);
		if (dma_mapping_error(dev, buffer_info->dma))
			goto dma_error;
	}

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[i].tx_flags = skb_shinfo(skb)->tx_flags;
	/* multiply data chunks by size of headers */
	tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
	tx_ring->buffer_info[i].gso_segs = gso_segs;
	tx_ring->buffer_info[first].next_to_watch = i;

	return ++count;

dma_error:
	dev_err(dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed buffer_info mapping */
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->next_to_watch = 0;
	buffer_info->mapped_as_page = false;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count--) {
		if (i == 0)
			i = tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	return 0;
}
static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
                                    u32 tx_flags, int count, u32 paylen,
                                    u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc;
	struct igb_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i = tx_ring->next_to_use;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
	                E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;

	if (tx_flags & IGB_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
	    (tx_flags & (IGB_TX_FLAGS_CSUM |
	                 IGB_TX_FLAGS_TSO |
	                 IGB_TX_FLAGS_VLAN)))
		olinfo_status |= tx_ring->reg_idx << 4;

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	do {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		count--;
		i++;
		if (i == tx_ring->count)
			i = 0;
	} while (count > 0);

	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}
static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);
	tx_ring->tx_stats.restart_queue++;
	return 0;
}
4078 static inline int igb_maybe_stop_tx(struct igb_ring
*tx_ring
, int size
)
4080 if (igb_desc_unused(tx_ring
) >= size
)
4082 return __igb_maybe_stop_tx(tx_ring
, size
);
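/*
 * A note on the stop/wake handshake in __igb_maybe_stop_tx() and
 * igb_clean_tx_irq(): the queue is stopped first and smp_mb() publishes
 * that before igb_desc_unused() is re-read, so a concurrent cleanup pass
 * either sees the stopped queue and wakes it, or has already freed enough
 * descriptors for the re-check to succeed.  Without the barrier the
 * transmit queue could, in principle, stall forever.
 */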
netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
				    struct igb_ring *tx_ring)
{
	struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
	int tso = 0, count;
	u32 tx_flags = 0;
	u16 first;
	u8 hdr_len = 0;

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}

	if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGB_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;
	if (skb_is_gso(skb)) {
		tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);

		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	if (tso)
		tx_flags |= IGB_TX_FLAGS_TSO;
	else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGB_TX_FLAGS_CSUM;

	/*
	 * count reflects descriptors mapped, if 0 or less then mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igb_tx_map_adv(tx_ring, skb, first);
	if (!count) {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
		return NETDEV_TX_OK;
	}

	igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);

	return NETDEV_TX_OK;
}
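/*
 * Worst-case descriptor arithmetic behind the "+ 4" above: one context
 * descriptor, one descriptor for skb->data, one per page fragment, and a
 * two-descriptor gap so the tail never catches the head.  As an example,
 * a TSO skb carrying 17 fragments needs up to 17 + 4 = 21 free
 * descriptors before transmission is attempted.
 */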
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct igb_ring *tx_ring;
	int r_idx = 0;

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
	tx_ring = adapter->multi_tx_table[r_idx];

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
	return igb_xmit_frame_ring_adv(skb, tx_ring);
}
/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	if (hw->mac.type == e1000_82580)
		hw->dev_spec._82575.global_device_reset = true;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}
static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	netdev_err(adapter->netdev, "Reset adapter\n");
	igb_reinit_locked(adapter);
}
/**
 * igb_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *igb_get_stats(struct net_device *netdev)
{
	/* only return the current stats */
	return &netdev->stats;
}
/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	u32 rx_buffer_len, i;

	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 */

	if (adapter->hw.mac.type == e1000_82580)
		max_frame += IGB_TS_HDR_LEN;

	if (max_frame <= IGB_RXBUFFER_1024)
		rx_buffer_len = IGB_RXBUFFER_1024;
	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else
		rx_buffer_len = IGB_RXBUFFER_128;

	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) ||
	    (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN))
		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN;

	if ((adapter->hw.mac.type == e1000_82580) &&
	    (rx_buffer_len == IGB_RXBUFFER_128))
		rx_buffer_len += IGB_RXBUFFER_64;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}
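/*
 * Worked example of the rx_buffer_len selection in igb_change_mtu() for a
 * standard 1500-byte MTU: max_frame = 1500 + ETH_HLEN (14) +
 * ETH_FCS_LEN (4) = 1518, which is larger than IGB_RXBUFFER_1024 but fits
 * MAXIMUM_ETHERNET_VLAN_SIZE (1522), so full frames land in one buffer.
 * Anything bigger falls back to the 128-byte header buffer with the
 * remainder page-split by the hardware.
 */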
/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igb_update_stats(struct igb_adapter *adapter)
{
	struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 reg, mpc;
	u16 phy_tmp;
	int i;
	u64 bytes, packets;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
		struct igb_ring *ring = adapter->rx_ring[i];
		ring->rx_stats.drops += rqdpc_tmp;
		net_stats->rx_fifo_errors += rqdpc_tmp;
		bytes += ring->rx_stats.bytes;
		packets += ring->rx_stats.packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = adapter->tx_ring[i];
		bytes += ring->tx_stats.bytes;
		packets += ring->tx_stats.packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;

	/* read stats registers */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	mpc = rd32(E1000_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	adapter->stats.tpt += rd32(E1000_TPT);
	adapter->stats.colc += rd32(E1000_COLC);

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	/* read internal phy specific stats */
	reg = rd32(E1000_CTRL_EXT);
	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
		adapter->stats.rxerrc += rd32(E1000_RXERRC);
		adapter->stats.tncrs += rd32(E1000_TNCRS);
	}

	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);
}
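/*
 * Most of the counters read in igb_update_stats() are clear-on-read,
 * which is why they are accumulated into adapter->stats rather than
 * copied.  The octet counters appear to be split across low/high register
 * pairs: reading the low half (GORCL/GOTCL) latches the value, and the
 * discarded read of the high half (GORCH/GOTCH) completes the
 * clear-on-read sequence, as the inline comments above note.
 */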
static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (adapter->vfs_allocated_count)
		wr32(E1000_IMS, E1000_IMS_LSC |
				E1000_IMS_VMMB |
				E1000_IMS_DOUTSYNC);
	else
		wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
static void igb_write_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (adapter->hw.mac.type == e1000_82575)
		itr_val |= itr_val << 16;
	else
		itr_val |= 0x8000000;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}
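/*
 * EITR layout sketch for igb_write_itr(): the interval lives in the bits
 * selected by the 0x7FFC mask, so the low two bits are always cleared.
 * The 82575 keeps separate RX/TX interval fields, hence the value is
 * mirrored into the upper half of the register; the extra bit OR'd in on
 * later parts appears to control how the hardware latches the new
 * interval.  See the device datasheet for the authoritative bit layout.
 */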
static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx_ring) {
		int q = q_vector->tx_ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
			              E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx_ring) {
		int q = q_vector->rx_ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
			              E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}
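/*
 * The DCA tag written above tells the chipset which CPU's cache should
 * receive descriptor (and, for RX, head/data) writes, so the cache is
 * already warm when the interrupt handler runs on that CPU.  On 82575
 * the tag occupies the low bits of DCA_RXCTRL/DCA_TXCTRL directly;
 * 82576 and later moved the field, hence the extra shift by
 * E1000_DCA_*CTRL_CPUID_SHIFT in the else branches.
 */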
static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		igb_update_dca(adapter->q_vector[i]);
	}
}
static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}
static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
	                    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/*
		 * if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}
static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multi cast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}
static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}
static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			vid = reg & E1000_VLVF_VLANID_MASK;
			igb_vfta_set(hw, vid, false);
			reg = 0;
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}
static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* The vlvf table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return -1;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table */
				igb_vfta_set(hw, vid, true);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}

			adapter->vf_data[vf].vlans_enabled++;
			return 0;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
		}
	}
	return 0;
}
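/*
 * VLVF entry layout, roughly: the low bits hold the VLAN ID
 * (E1000_VLVF_VLANID_MASK), a high bit (E1000_VLVF_VLANID_ENABLE) marks
 * the entry valid, and one pool-select bit per pool starts at
 * E1000_VLVF_POOLSEL_SHIFT.  So for vf 2, membership is bit
 * (E1000_VLVF_POOLSEL_SHIFT + 2), and the entry is only torn down once
 * E1000_VLVF_POOLSEL_MASK shows no pool still referencing the VLAN.
 */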
static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	if (vid)
		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
	else
		wr32(E1000_VMVIR(vf), 0);
}
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (vlan || qos) {
		err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
		if (err)
			goto out;
		igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
		igb_set_vmolr(adapter, vf, !vlan);
		adapter->vf_data[vf].pf_vlan = vlan;
		adapter->vf_data[vf].pf_qos = qos;
		dev_info(&adapter->pdev->dev,
			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
		if (test_bit(__IGB_DOWN, &adapter->state)) {
			dev_warn(&adapter->pdev->dev,
				 "The VF VLAN has been set,"
				 " but the PF device is not up.\n");
			dev_warn(&adapter->pdev->dev,
				 "Bring the PF device up before"
				 " attempting to use the VF device.\n");
		}
	} else {
		igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
			     false, vf);
		igb_set_vmvir(adapter, vlan, vf);
		igb_set_vmolr(adapter, vf, true);
		adapter->vf_data[vf].pf_vlan = 0;
		adapter->vf_data[vf].pf_qos = 0;
	}
out:
	return err;
}
static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}
static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	/* clear flags */
	adapter->vf_data[vf].flags &= ~(IGB_VF_FLAG_PF_SET_MAC);
	adapter->vf_data[vf].last_nack = jiffies;

	/* reset offloads to defaults */
	igb_set_vmolr(adapter, vf, true);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);
	if (adapter->vf_data[vf].pf_vlan)
		igb_ndo_set_vf_vlan(adapter->netdev, vf,
				    adapter->vf_data[vf].pf_vlan,
				    adapter->vf_data[vf].pf_qos);
	else
		igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}
static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* generate a new mac address as we were hotplug removed/added */
	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
		random_ether_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}
static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}
static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	/*
	 * The VF MAC Address is stored in a packed array of bytes
	 * starting at the second 32 bit word of the msg array
	 */
	unsigned char *addr = (char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}
static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}
static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);

	if (retval) {
		/* if receive failed revoke VF CTS stats and restart init */
		dev_err(&pdev->dev, "Error receiving message from VF\n");
		vf_data->flags &= ~IGB_VF_FLAG_CTS;
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		goto out;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */

	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		retval = -1;
		goto out;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		if (adapter->vf_data[vf].pf_vlan)
			retval = -1;
		else
			retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
out:
	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	igb_write_mbx(hw, msgbuf, 1, vf);
}
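/*
 * Mailbox message format as used above, roughly: msgbuf[0] carries the
 * opcode in its low 16 bits plus status flags (E1000_VT_MSGTYPE_ACK,
 * _NACK and _CTS) in the high bits, while any arguments such as a MAC
 * address or VLAN ID follow in msgbuf[1..].  The same first word is
 * echoed back with ACK or NACK OR'd in, so a VF can match replies to the
 * requests it sent.
 */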
static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}
/**
 *  igb_set_uta - Set unicast filter table address
 *  @adapter: board private structure
 *
 *  The unicast table address is a register array of 32-bit registers.
 *  The table is meant to be used in a way similar to how the MTA is used
 *  however due to certain limitations in the hardware it is necessary to
 *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 *  enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}
/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	igb_write_itr(q_vector);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
		if (!adapter->msix_entries)
			igb_set_itr(adapter);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}
/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
	                                             struct igb_q_vector,
	                                             napi);
	int tx_clean_complete = 1, work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx_ring)
		tx_clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx_ring)
		igb_clean_rx_irq_adv(q_vector, &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		igb_ring_irq_enable(q_vector);
	}

	return work_done;
}
/**
 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
 * @adapter: board private structure
 * @shhwtstamps: timestamp structure to update
 * @regval: unsigned 64bit system time value.
 *
 * We need to convert the system time value stored in the RX/TXSTMP registers
 * into a hwtstamp which can be used by the upper level timestamping functions
 */
static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
                                   struct skb_shared_hwtstamps *shhwtstamps,
                                   u64 regval)
{
	u64 ns;

	/*
	 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
	 * 24 to match clock shift we setup earlier.
	 */
	if (adapter->hw.mac.type == e1000_82580)
		regval <<= IGB_82580_TSYNC_SHIFT;

	ns = timecounter_cyc2time(&adapter->clock, regval);
	timecompare_update(&adapter->compare, ns);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
	shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
}
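/*
 * On 82580 the SYSTIM registers count nanoseconds from bit 0, while the
 * timecounter configured at driver init expects the 82576-style cycle
 * format, so the raw value is shifted up by IGB_82580_TSYNC_SHIFT before
 * conversion.  As a rough example, with a shift of 24 a raw reading of
 * 0x1000 is presented to timecounter_cyc2time() as 0x1000000000 cycles.
 */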
/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: pointer to q_vector containing needed info
 * @buffer_info: pointer to igb_buffer structure
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we only
 * allow one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *buffer_info)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	/* if skb does not support hw timestamp or TX stamp not valid exit */
	if (likely(!(buffer_info->tx_flags & SKBTX_HW_TSTAMP)) ||
	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
}
/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx_ring;
	struct net_device *netdev = tx_ring->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_buffer *buffer_info;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		rmb();	/* read buffer_info after eop_desc status */
		for (cleaned = false; !cleaned; count++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (buffer_info->skb) {
				total_bytes += buffer_info->bytecount;
				/* gso_segs is currently only valid for tcp */
				total_packets += buffer_info->gso_segs;
				igb_tx_hwtstamp(q_vector, buffer_info);
			}

			igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
		     netif_carrier_ok(netdev) &&
		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			tx_ring->tx_stats.restart_queue++;
		}
	}

	if (tx_ring->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		tx_ring->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(tx_ring->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				readl(tx_ring->head),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(netdev, tx_ring->queue_index);
		}
	}
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	return (count < tx_ring->count);
}
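/*
 * The hang check in igb_clean_tx_irq() is heuristic: the ring is declared
 * hung only when the oldest pending buffer is older than
 * tx_timeout_factor * HZ jiffies and the hardware is not simply paused by
 * flow control (E1000_STATUS_TXOFF).  Stopping the subqueue then lets the
 * netdev watchdog escalate to igb_tx_timeout() and a reset.
 */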
/**
 * igb_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void igb_receive_skb(struct igb_q_vector *q_vector,
                            struct sk_buff *skb,
                            u16 vlan_tag)
{
	struct igb_adapter *adapter = q_vector->adapter;

	if (vlan_tag && adapter->vlgrp)
		vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
		                 vlan_tag, skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}
static inline void igb_rx_checksum_adv(struct igb_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
	    (status_err & E1000_RXD_STAT_IXSM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if ((skb->len == 60) &&
		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
			ring->rx_stats.csum_err++;

		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
}
static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
                            struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u64 regval;

	/*
	 * If this bit is set, then the RX registers contain the time stamp. No
	 * other packet will be time stamped until we read these registers, so
	 * read the registers to make them available again. Because only one
	 * packet can be time stamped at a time, we know that the register
	 * values must belong to this one here and therefore we don't need to
	 * compare any of the additional attributes stored for it.
	 *
	 * If nothing went wrong, then it should have a shared tx_flags that we
	 * can turn into a skb_shared_hwtstamps.
	 */
	if (staterr & E1000_RXDADV_STAT_TSIP) {
		u32 *stamp = (u32 *)skb->data;
		regval = le32_to_cpu(*(stamp + 2));
		regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
		skb_pull(skb, IGB_TS_HDR_LEN);
	} else {
		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
			return;

		regval = rd32(E1000_RXSTMPL);
		regval |= (u64)rd32(E1000_RXSTMPH) << 32;
	}

	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}
static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
                               union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger.  In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
	           E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > rx_ring->rx_buffer_len)
		hlen = rx_ring->rx_buffer_len;
	return hlen;
}
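/*
 * With packet split enabled the hardware writes the parsed header length
 * into the hdr_info field of the write-back descriptor; igb_get_hlen()
 * extracts it with E1000_RXDADV_HDRBUFLEN_MASK/SHIFT and clamps it to
 * rx_buffer_len, since the chip may report a header larger than what it
 * actually DMA'd into the small header buffer.
 */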
static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
                                 int *work_done, int budget)
{
	struct igb_ring *rx_ring = q_vector->rx_ring;
	struct net_device *netdev = rx_ring->netdev;
	struct device *dev = rx_ring->dev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igb_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	int current_node = numa_node_id();
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 staterr;
	u16 length;
	u16 vlan_tag;

	i = rx_ring->next_to_clean;
	buffer_info = &rx_ring->buffer_info[i];
	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= budget)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		if (buffer_info->dma) {
			dma_unmap_single(dev, buffer_info->dma,
					 rx_ring->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
				skb_put(skb, length);
				goto send_up;
			}
			skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
		}

		if (length) {
			dma_unmap_page(dev, buffer_info->page_dma,
				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			if ((page_count(buffer_info->page) != 1) ||
			    (page_to_nid(buffer_info->page) != current_node))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}
send_up:
		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
			igb_rx_hwtstamp(q_vector, staterr, skb);
		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum_adv(rx_ring, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, rx_ring->queue_index);

		vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
		            le16_to_cpu(rx_desc->wb.upper.vlan) : 0);

		igb_receive_skb(q_vector, skb, vlan_tag);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igb_desc_unused(rx_ring);

	if (cleaned_count)
		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	return cleaned;
}
/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = rx_ring->netdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	bufsz = rx_ring->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = netdev_alloc_page(netdev);
				if (!buffer_info->page) {
					rx_ring->rx_stats.alloc_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				dma_map_page(rx_ring->dev, buffer_info->page,
					     buffer_info->page_offset,
					     PAGE_SIZE / 2,
					     DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev,
					      buffer_info->page_dma)) {
				buffer_info->page_dma = 0;
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}
		}

		skb = buffer_info->skb;
		if (!skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
		}
		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_single(rx_ring->dev,
			                                  skb->data,
			                                  bufsz,
			                                  DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev,
					      buffer_info->dma)) {
				buffer_info->dma = 0;
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (bufsz < IGB_RXBUFFER_1024) {
			rx_desc->read.pkt_addr =
			     cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}
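/*
 * RX page recycling sketch: each allocated page is consumed a half at a
 * time, with buffer_info->page_offset XOR'd by PAGE_SIZE/2 to flip between
 * the two halves on successive refills.  A half is only reused when the
 * page's refcount shows the stack has released it and the page is local to
 * the current NUMA node, as checked in igb_clean_rx_irq_adv() above.
 */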
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
		                     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev:
 * @ifreq:
 * @cmd:
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
                              struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_cfg = 0;
	bool is_l4 = false;
	bool is_l2 = false;
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		break;
	default:
		return -ERANGE;
	}

	if (hw->mac.type == e1000_82575) {
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -EINVAL;
		return 0;
	}

	/*
	 * Per-packet timestamping only works if all packets are
	 * timestamped, so enable timestamping in all packets as
	 * long as one rx filter was configured.
	 */
	if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
		tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval &= ~E1000_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = rd32(E1000_TSYNCRXCTL);
	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(E1000_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		wr32(E1000_ETQF(3),
		     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
		      E1000_ETQF_1588 | /* enable timestamping */
		      ETH_P_1588));     /* 1588 eth protocol type */
	else
		wr32(E1000_ETQF(3), 0);

#define PTP_PORT 319
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IPPROTO_UDP /* UDP */
			| E1000_FTQF_VF_BP /* VF not compared */
			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
			| E1000_FTQF_MASK); /* mask all inputs */
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

		wr32(E1000_IMIR(3), htons(PTP_PORT));
		wr32(E1000_IMIREXT(3),
		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
		if (hw->mac.type == e1000_82576) {
			/* enable source port check */
			wr32(E1000_SPQF(3), htons(PTP_PORT));
			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
		}
		wr32(E1000_FTQF(3), ftqf);
	} else {
		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
	}
	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
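/*
 * PTP filtering summary for the ioctl above, roughly: ETQF(3) matches
 * layer-2 PTP frames by ethertype (ETH_P_1588), while FTQF(3)/IMIR(3)
 * match PTP over UDP by destination port 319 (PTP_PORT), with 82576 also
 * able to check the source port.  The 82580 cannot filter per packet
 * type, so any enabled RX filter there degrades to timestamping every
 * received packet.
 */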
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}
static void igb_vlan_rx_register(struct net_device *netdev,
                                 struct vlan_group *grp)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	igb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_rlpml_set(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);
}
static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* attempt to add filter to vlvf array */
	igb_vlvf_set(adapter, vid, true, pf_id);

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	igb_vfta_set(hw, vid, true);
}
static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;
	s32 err;

	igb_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);

	/* remove vlan from VLVF table array */
	err = igb_vlvf_set(adapter, vid, false, pf_id);

	/* if vid was not present in VLVF just remove it from table */
	if (err)
		igb_vfta_set(hw, vid, false);
}
static void igb_restore_vlan(struct igb_adapter *adapter)
{
	igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			igb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_power_down_link(adapter);
	else
		igb_power_up_link(adapter);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
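/**
 * igb_resume - restore the device to D0 and bring the interface back up
 * @pdev: PCI device information struct
 **/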
static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif /* CONFIG_PM */
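/**
 * igb_shutdown - called at system shutdown or reboot
 * @pdev: PCI device information struct
 **/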
static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* with legacy or MSI interrupts there is a single vector to kick */
	if (!adapter->msix_entries) {
		struct igb_q_vector *q_vector = adapter->q_vector[0];
		igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
		return;
	}

	/* with MSI-X, mask and poll each queue vector individually */
	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		wr32(E1000_EIMC, q_vector->eims_value);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
			"failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}
/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}
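/**
 * igb_rar_set_qsel - program a receive address register with pool select
 * @adapter: board private structure
 * @addr: MAC address to program, in network byte order
 * @index: receive address register to program
 * @qsel: VMDq pool (queue) to associate with the address
 **/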
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
			     u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	/* 82575 encodes the pool as a multiple of the pool bit; later
	 * MACs shift a single pool bit into place instead */
	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
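/**
 * igb_set_vf_mac - program the MAC address for the given VF
 * @adapter: board private structure
 * @vf: VF number
 * @mac_addr: new MAC address for the VF
 **/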
static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive addresses and
	 * move towards the first; as a result a collision should not be
	 * possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}
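/**
 * igb_ndo_set_vf_mac - ndo callback to assign a MAC address to a VF
 * @netdev: network interface device structure
 * @vf: VF number
 * @mac: MAC address requested by the administrator
 **/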
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
		return -EINVAL;
	adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
	dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
				      " change effective.\n");
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
			 " but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
			 " attempting to use the VF device.\n");
	}
	return igb_set_vf_mac(adapter, vf, mac);
}
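/**
 * igb_ndo_set_vf_bw - ndo callback for per-VF transmit rate limiting
 * @netdev: network interface device structure
 * @vf: VF number
 * @tx_rate: requested transmit rate
 *
 * Transmit rate limiting is not implemented in this driver version.
 **/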
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
	return -EOPNOTSUPP;
}
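/**
 * igb_ndo_get_vf_config - ndo callback to report a VF's configuration
 * @netdev: network interface device structure
 * @vf: VF number
 * @ivi: buffer that receives the VF's MAC, VLAN, and QoS settings
 **/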
static int igb_ndo_get_vf_config(struct net_device *netdev,
				 int vf, struct ifla_vf_info *ivi)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
	ivi->tx_rate = 0;
	ivi->vlan = adapter->vf_data[vf].pf_vlan;
	ivi->qos = adapter->vf_data[vf].pf_qos;
	return 0;
}
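/**
 * igb_vmm_control - configure VM-to-VM loopback and replication
 * @adapter: board private structure
 *
 * Programs the per-MAC-type VLAN tag handling registers, then enables
 * loopback and replication only when VFs have been allocated.
 **/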
static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	switch (hw->mac.type) {
	case e1000_82575:
	default:
		/* replication is not supported for 82575 */
		return;
	case e1000_82576:
		/* notify HW that the MAC is adding vlan tags */
		reg = rd32(E1000_DTXCTL);
		reg |= E1000_DTXCTL_VLAN_ADDED;
		wr32(E1000_DTXCTL, reg);
		/* fall through */
	case e1000_82580:
		/* enable replication vlan tag stripping */
		reg = rd32(E1000_RPLOLR);
		reg |= E1000_RPLOLR_STRVLAN;
		wr32(E1000_RPLOLR, reg);
		/* fall through */
	case e1000_i350:
		/* none of the above registers are supported by i350 */
		break;
	}

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);