net: Add functions for handling padding frame and adding to length
[deliverable/linux.git] drivers/net/ethernet/intel/e1000e/netdev.c
/* Intel PRO/1000 Linux driver
 * Copyright(c) 1999 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * Linux NICS <linux.nics@intel.com>
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include <linux/prefetch.h>

#include "e1000.h"

#define DRV_EXTRAVERSION "-k"

#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571] = &e1000_82571_info,
	[board_82572] = &e1000_82572_info,
	[board_82573] = &e1000_82573_info,
	[board_82574] = &e1000_82574_info,
	[board_82583] = &e1000_82583_info,
	[board_80003es2lan] = &e1000_es2_info,
	[board_ich8lan] = &e1000_ich8_info,
	[board_ich9lan] = &e1000_ich9_info,
	[board_ich10lan] = &e1000_ich10_info,
	[board_pchlan] = &e1000_pch_info,
	[board_pch2lan] = &e1000_pch2_info,
	[board_pch_lpt] = &e1000_pch_lpt_info,
};

struct e1000_reg_info {
	u32 ofs;
	char *name;
};

static const struct e1000_reg_info e1000_reg_info_tbl[] = {
	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* Rx Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RDTR, "RDTR"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_ERT, "ERT"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},
	{E1000_RDFH, "RDFH"},
	{E1000_RDFT, "RDFT"},
	{E1000_RDFHS, "RDFHS"},
	{E1000_RDFTS, "RDFTS"},
	{E1000_RDFPC, "RDFPC"},

	/* Tx Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TIDV, "TIDV"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TADV, "TADV"},
	{E1000_TARC(0), "TARC"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFTS, "TDFTS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{0, NULL}
};

/**
 * __ew32_prepare - prepare to write to MAC CSR register on certain parts
 * @hw: pointer to the HW structure
 *
 * When updating the MAC CSR registers, the Manageability Engine (ME) could
 * be accessing the registers at the same time. Normally, this is handled in
 * h/w by an arbiter, but on some parts there is a bug that acknowledges Host
 * accesses later than it should, which could leave the register with an
 * incorrect value. Work around this by checking the FWSM register, which
 * has bit 24 set while ME is accessing MAC CSR registers; if it is set,
 * wait and try again a number of times.
 **/
s32 __ew32_prepare(struct e1000_hw *hw)
{
	s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;

	while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
		udelay(50);

	return i;
}

void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
{
	if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
		__ew32_prepare(hw);

	writel(val, hw->hw_addr + reg);
}
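
/* The ew32() register-write macro used throughout this file (defined in
 * e1000.h) resolves to __ew32(), so on parts flagged with
 * FLAG2_PCIM2PCI_ARBITER_WA every MAC CSR write first waits, via
 * __ew32_prepare(), for FWSM bit 24 (ME access in progress) to clear.
 */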

/**
 * e1000_regdump - register printout routine
 * @hw: pointer to the HW structure
 * @reginfo: pointer to the register info table
 **/
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_RXDCTL(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TXDCTL(n));
		break;
	case E1000_TARC(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TARC(n));
		break;
	default:
		pr_info("%-15s %08x\n",
			reginfo->name, __er32(hw, reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
	pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}

static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
				 struct e1000_buffer *bi)
{
	int i;
	struct e1000_ps_page *ps_page;

	for (i = 0; i < adapter->rx_ps_pages; i++) {
		ps_page = &bi->ps_pages[i];

		if (ps_page->page) {
			pr_info("packet dump for ps_page %d:\n", i);
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, page_address(ps_page->page),
				       PAGE_SIZE, true);
		}
	}
}

/**
 * e1000e_dump - Print registers, Tx-ring and Rx-ring
 * @adapter: board private structure
 **/
static void e1000e_dump(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_reg_info *reginfo;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc;
	struct my_u0 {
		__le64 a;
		__le64 b;
	} *u0;
	struct e1000_buffer *buffer_info;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	union e1000_rx_desc_packet_split *rx_desc_ps;
	union e1000_rx_desc_extended *rx_desc;
	struct my_u1 {
		__le64 a;
		__le64 b;
		__le64 c;
		__le64 d;
	} *u1;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name state trans_start last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
			netdev->state, netdev->trans_start, netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name Value\n");
	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
	     reginfo->name; reginfo++) {
		e1000_regdump(hw, reginfo);
	}

	/* Print Tx Ring Summary */
	if (!netdev || !netif_running(netdev))
		return;

	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
	pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
		0, tx_ring->next_to_use, tx_ring->next_to_clean,
		(unsigned long long)buffer_info->dma,
		buffer_info->length,
		buffer_info->next_to_watch,
		(unsigned long long)buffer_info->time_stamp);

	/* Print Tx Ring */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 * +--------------------------------------------------------------+
	 * 0 | Buffer Address [63:0] (Reserved on Write Back) |
	 * +--------------------------------------------------------------+
	 * 8 | Special | CSS | Status | CMD | CSO | Length |
	 * +--------------------------------------------------------------+
	 * 63 48 47 36 35 32 31 24 23 16 15 0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 * 63 48 47 40 39 32 31 16 15 8 7 0
	 * +----------------------------------------------------------------+
	 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
	 * +----------------------------------------------------------------+
	 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
	 * +----------------------------------------------------------------+
	 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 * +----------------------------------------------------------------+
	 * 0 | Buffer Address [63:0] |
	 * +----------------------------------------------------------------+
	 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
	 * +----------------------------------------------------------------+
	 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
	 */
	pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n");
	pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n");
	pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n");
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		const char *next_desc;
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];
		u0 = (struct my_u0 *)tx_desc;
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			next_desc = " NTC/U";
		else if (i == tx_ring->next_to_use)
			next_desc = " NTU";
		else if (i == tx_ring->next_to_clean)
			next_desc = " NTC";
		else
			next_desc = "";
		pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n",
			(!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
			 ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
			i,
			(unsigned long long)le64_to_cpu(u0->a),
			(unsigned long long)le64_to_cpu(u0->b),
			(unsigned long long)buffer_info->dma,
			buffer_info->length, buffer_info->next_to_watch,
			(unsigned long long)buffer_info->time_stamp,
			buffer_info->skb, next_desc);

		if (netif_msg_pktdata(adapter) && buffer_info->skb)
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, buffer_info->skb->data,
				       buffer_info->skb->len, true);
	}

	/* Print Rx Ring Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	pr_info(" %5d %5X %5X\n",
		0, rx_ring->next_to_use, rx_ring->next_to_clean);

	/* Print Rx Ring */
	if (!netif_msg_rx_status(adapter))
		return;

	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
	switch (adapter->rx_ps_pages) {
	case 1:
	case 2:
	case 3:
		/* [Extended] Packet Split Receive Descriptor Format
		 *
		 *    +-----------------------------------------------------+
		 *  0 | Buffer Address 0 [63:0]                             |
		 *    +-----------------------------------------------------+
		 *  8 | Buffer Address 1 [63:0]                             |
		 *    +-----------------------------------------------------+
		 * 16 | Buffer Address 2 [63:0]                             |
		 *    +-----------------------------------------------------+
		 * 24 | Buffer Address 3 [63:0]                             |
		 *    +-----------------------------------------------------+
		 */
		pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n");
		/* [Extended] Receive Descriptor (Write-Back) Format
		 *
		 * 63 48 47 32 31 13 12 8 7 4 3 0
		 * +------------------------------------------------------+
		 * 0 | Packet   | IP    | Rsvd | MRQ   | Rsvd | MRQ RSS   |
		 *   | Checksum | Ident |      | Queue |      | Type      |
		 * +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 * +------------------------------------------------------+
		 * 63 48 47 32 31 20 19 0
		 */
		pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc_ps;
			staterr =
			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n",
					"R ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter))
					e1000e_dump_ps_pages(adapter,
							     buffer_info);
			}
		}
		break;
	default:
	case 0:
		/* Extended Receive Descriptor (Read) Format
		 *
		 *   +-----------------------------------------------------+
		 * 0 | Buffer Address [63:0]                               |
		 *   +-----------------------------------------------------+
		 * 8 | Reserved                                            |
		 *   +-----------------------------------------------------+
		 */
		pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n");
		/* Extended Receive Descriptor (Write-Back) Format
		 *
		 * 63 48 47 32 31 24 23 4 3 0
		 * +------------------------------------------------------+
		 * | RSS Hash           |      |          |               |
		 * 0 +------------------+ Rsvd | Reserved | MRQ RSS       |
		 * | Packet   | IP      |      |          | Type          |
		 * | Checksum | Ident   |      |          |               |
		 * +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 * +------------------------------------------------------+
		 * 63 48 47 32 31 20 19 0
		 */
		pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;

			buffer_info = &rx_ring->buffer_info[i];
			rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n",
					"R ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->skb)
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16,
						       1,
						       buffer_info->skb->data,
						       adapter->rx_buffer_len,
						       true);
			}
		}
	}
}

/**
 * e1000_desc_unused - calculate if we have unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
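
/* Worked example (illustrative numbers): with ring->count = 256,
 * next_to_clean = 10 and next_to_use = 250 the ring has wrapped, so
 * 256 + 10 - 250 - 1 = 15 descriptors are unused; with next_to_clean = 200
 * and next_to_use = 100 the first branch gives 200 - 100 - 1 = 99.
 */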

/**
 * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp
 * @adapter: board private structure
 * @hwtstamps: time stamp structure to update
 * @systim: unsigned 64bit system time value.
 *
 * Convert the system time value stored in the RX/TXSTMP registers into a
 * hwtstamp which can be used by the upper level time stamping functions.
 *
 * The 'systim_lock' spinlock is used to protect the consistency of the
 * system time value. This is needed because reading the 64 bit time
 * value involves reading two 32 bit registers. The first read latches the
 * value.
 **/
static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter,
				      struct skb_shared_hwtstamps *hwtstamps,
				      u64 systim)
{
	u64 ns;
	unsigned long flags;

	spin_lock_irqsave(&adapter->systim_lock, flags);
	ns = timecounter_cyc2time(&adapter->tc, systim);
	spin_unlock_irqrestore(&adapter->systim_lock, flags);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}

/**
 * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
 * @adapter: board private structure
 * @status: descriptor extended error and status field
 * @skb: particular skb to include time stamp
 *
 * If the time stamp is valid, convert it into the timecounter ns value
 * and store that result into the shhwtstamps structure which is passed
 * up the network stack.
 **/
static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
			       struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rxstmp;

	if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) ||
	    !(status & E1000_RXDEXT_STATERR_TST) ||
	    !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
		return;

	/* The Rx time stamp registers contain the time stamp. No other
	 * received packet will be time stamped until the Rx time stamp
	 * registers are read. Because only one packet can be time stamped
	 * at a time, the register values must belong to this packet and
	 * therefore none of the other additional attributes need to be
	 * compared.
	 */
	rxstmp = (u64)er32(RXSTMPL);
	rxstmp |= (u64)er32(RXSTMPH) << 32;
	e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);

	adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP;
}

/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to the net device structure
 * @skb: pointer to sk_buff to be indicated to stack
 * @staterr: descriptor extended error and status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev, struct sk_buff *skb,
			      u32 staterr, __le16 vlan)
{
	u16 tag = le16_to_cpu(vlan);

	e1000e_rx_hwtstamp(adapter, staterr, skb);

	skb->protocol = eth_type_trans(skb, netdev);

	if (staterr & E1000_RXD_STAT_VP)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);

	napi_gro_receive(&adapter->napi, skb);
}
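
/* Example (illustrative values): a descriptor with E1000_RXD_STAT_VP set in
 * staterr and a vlan field of cpu_to_le16(100) results in the skb carrying
 * 802.1Q VLAN tag 100 before it is handed to napi_gro_receive().
 */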

/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);
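
	/* Example (illustrative value): status_err = 0x20000003 unpacks to
	 * status = 0x0003 (E1000_RXD_STAT_DD | E1000_RXD_STAT_EOP) and
	 * errors = 0x20 (E1000_RXD_ERR_TCPE), which is counted below as a
	 * hardware checksum error and left for the stack to verify.
	 */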

	skb_checksum_none_assert(skb);

	/* Rx checksum disabled */
	if (!(adapter->netdev->features & NETIF_F_RXCSUM))
		return;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;

	/* TCP/UDP checksum error bit or IP checksum error bit is set */
	if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_good++;
}

static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = __ew32_prepare(hw);

	writel(i, rx_ring->tail);

	if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
		u32 rctl = er32(RCTL);

		ew32(RCTL, rctl & ~E1000_RCTL_EN);
		e_err("ME firmware caused invalid RDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}

static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = __ew32_prepare(hw);

	writel(i, tx_ring->tail);

	if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
		u32 tctl = er32(TCTL);

		ew32(TCTL, tctl & ~E1000_TCTL_EN);
		e_err("ME firmware caused invalid TDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}

bc7f75fa 656/**
5f450212 657 * e1000_alloc_rx_buffers - Replace used receive buffers
55aa6985 658 * @rx_ring: Rx descriptor ring
bc7f75fa 659 **/
55aa6985 660static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
c2fed996 661 int cleaned_count, gfp_t gfp)
bc7f75fa 662{
55aa6985 663 struct e1000_adapter *adapter = rx_ring->adapter;
bc7f75fa
AK
664 struct net_device *netdev = adapter->netdev;
665 struct pci_dev *pdev = adapter->pdev;
5f450212 666 union e1000_rx_desc_extended *rx_desc;
bc7f75fa
AK
667 struct e1000_buffer *buffer_info;
668 struct sk_buff *skb;
669 unsigned int i;
89d71a66 670 unsigned int bufsz = adapter->rx_buffer_len;
bc7f75fa
AK
671
672 i = rx_ring->next_to_use;
673 buffer_info = &rx_ring->buffer_info[i];
674
675 while (cleaned_count--) {
676 skb = buffer_info->skb;
677 if (skb) {
678 skb_trim(skb, 0);
679 goto map_skb;
680 }
681
c2fed996 682 skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
bc7f75fa
AK
683 if (!skb) {
684 /* Better luck next round */
685 adapter->alloc_rx_buff_failed++;
686 break;
687 }
688
bc7f75fa
AK
689 buffer_info->skb = skb;
690map_skb:
0be3f55f 691 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
bc7f75fa 692 adapter->rx_buffer_len,
0be3f55f
NN
693 DMA_FROM_DEVICE);
694 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
af667a29 695 dev_err(&pdev->dev, "Rx DMA map failed\n");
bc7f75fa
AK
696 adapter->rx_dma_failed++;
697 break;
698 }
699
5f450212
BA
700 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
701 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
bc7f75fa 702
50849d79 703 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
e921eb1a 704 /* Force memory writes to complete before letting h/w
50849d79
TH
705 * know there are new descriptors to fetch. (Only
706 * applicable for weak-ordered memory model archs,
707 * such as IA-64).
708 */
709 wmb();
c6e7f51e 710 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
55aa6985 711 e1000e_update_rdt_wa(rx_ring, i);
c6e7f51e 712 else
c5083cf6 713 writel(i, rx_ring->tail);
50849d79 714 }
bc7f75fa
AK
715 i++;
716 if (i == rx_ring->count)
717 i = 0;
718 buffer_info = &rx_ring->buffer_info[i];
719 }
720
50849d79 721 rx_ring->next_to_use = i;
bc7f75fa
AK
722}
723
724/**
725 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
55aa6985 726 * @rx_ring: Rx descriptor ring
bc7f75fa 727 **/
55aa6985 728static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
c2fed996 729 int cleaned_count, gfp_t gfp)
bc7f75fa 730{
55aa6985 731 struct e1000_adapter *adapter = rx_ring->adapter;
bc7f75fa
AK
732 struct net_device *netdev = adapter->netdev;
733 struct pci_dev *pdev = adapter->pdev;
734 union e1000_rx_desc_packet_split *rx_desc;
bc7f75fa
AK
735 struct e1000_buffer *buffer_info;
736 struct e1000_ps_page *ps_page;
737 struct sk_buff *skb;
738 unsigned int i, j;
739
740 i = rx_ring->next_to_use;
741 buffer_info = &rx_ring->buffer_info[i];
742
743 while (cleaned_count--) {
744 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
745
746 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
47f44e40
AK
747 ps_page = &buffer_info->ps_pages[j];
748 if (j >= adapter->rx_ps_pages) {
749 /* all unused desc entries get hw null ptr */
af667a29
BA
750 rx_desc->read.buffer_addr[j + 1] =
751 ~cpu_to_le64(0);
47f44e40
AK
752 continue;
753 }
754 if (!ps_page->page) {
c2fed996 755 ps_page->page = alloc_page(gfp);
bc7f75fa 756 if (!ps_page->page) {
47f44e40
AK
757 adapter->alloc_rx_buff_failed++;
758 goto no_buffers;
759 }
0be3f55f
NN
760 ps_page->dma = dma_map_page(&pdev->dev,
761 ps_page->page,
762 0, PAGE_SIZE,
763 DMA_FROM_DEVICE);
764 if (dma_mapping_error(&pdev->dev,
765 ps_page->dma)) {
47f44e40 766 dev_err(&adapter->pdev->dev,
af667a29 767 "Rx DMA page map failed\n");
47f44e40
AK
768 adapter->rx_dma_failed++;
769 goto no_buffers;
bc7f75fa 770 }
bc7f75fa 771 }
e921eb1a 772 /* Refresh the desc even if buffer_addrs
47f44e40
AK
773 * didn't change because each write-back
774 * erases this info.
775 */
af667a29
BA
776 rx_desc->read.buffer_addr[j + 1] =
777 cpu_to_le64(ps_page->dma);
bc7f75fa
AK
778 }
779
e5fe2541 780 skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
c2fed996 781 gfp);
bc7f75fa
AK
782
783 if (!skb) {
784 adapter->alloc_rx_buff_failed++;
785 break;
786 }
787
bc7f75fa 788 buffer_info->skb = skb;
0be3f55f 789 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
bc7f75fa 790 adapter->rx_ps_bsize0,
0be3f55f
NN
791 DMA_FROM_DEVICE);
792 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
af667a29 793 dev_err(&pdev->dev, "Rx DMA map failed\n");
bc7f75fa
AK
794 adapter->rx_dma_failed++;
795 /* cleanup skb */
796 dev_kfree_skb_any(skb);
797 buffer_info->skb = NULL;
798 break;
799 }
800
801 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
802
50849d79 803 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
e921eb1a 804 /* Force memory writes to complete before letting h/w
50849d79
TH
805 * know there are new descriptors to fetch. (Only
806 * applicable for weak-ordered memory model archs,
807 * such as IA-64).
808 */
809 wmb();
c6e7f51e 810 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
55aa6985 811 e1000e_update_rdt_wa(rx_ring, i << 1);
c6e7f51e 812 else
c5083cf6 813 writel(i << 1, rx_ring->tail);
50849d79
TH
814 }
815
bc7f75fa
AK
816 i++;
817 if (i == rx_ring->count)
818 i = 0;
819 buffer_info = &rx_ring->buffer_info[i];
820 }
821
822no_buffers:
50849d79 823 rx_ring->next_to_use = i;
bc7f75fa
AK
824}
825
97ac8cae
BA
826/**
827 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
55aa6985 828 * @rx_ring: Rx descriptor ring
97ac8cae
BA
829 * @cleaned_count: number of buffers to allocate this pass
830 **/
831
55aa6985 832static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
c2fed996 833 int cleaned_count, gfp_t gfp)
97ac8cae 834{
55aa6985 835 struct e1000_adapter *adapter = rx_ring->adapter;
97ac8cae
BA
836 struct net_device *netdev = adapter->netdev;
837 struct pci_dev *pdev = adapter->pdev;
5f450212 838 union e1000_rx_desc_extended *rx_desc;
97ac8cae
BA
839 struct e1000_buffer *buffer_info;
840 struct sk_buff *skb;
841 unsigned int i;
2a2293b9 842 unsigned int bufsz = 256 - 16; /* for skb_reserve */
97ac8cae
BA
843
844 i = rx_ring->next_to_use;
845 buffer_info = &rx_ring->buffer_info[i];
846
847 while (cleaned_count--) {
848 skb = buffer_info->skb;
849 if (skb) {
850 skb_trim(skb, 0);
851 goto check_page;
852 }
853
c2fed996 854 skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
97ac8cae
BA
855 if (unlikely(!skb)) {
856 /* Better luck next round */
857 adapter->alloc_rx_buff_failed++;
858 break;
859 }
860
97ac8cae
BA
861 buffer_info->skb = skb;
862check_page:
863 /* allocate a new page if necessary */
864 if (!buffer_info->page) {
c2fed996 865 buffer_info->page = alloc_page(gfp);
97ac8cae
BA
866 if (unlikely(!buffer_info->page)) {
867 adapter->alloc_rx_buff_failed++;
868 break;
869 }
870 }
871
37287fae 872 if (!buffer_info->dma) {
0be3f55f 873 buffer_info->dma = dma_map_page(&pdev->dev,
f0ff4398
BA
874 buffer_info->page, 0,
875 PAGE_SIZE,
0be3f55f 876 DMA_FROM_DEVICE);
37287fae
CP
877 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
878 adapter->alloc_rx_buff_failed++;
879 break;
880 }
881 }
97ac8cae 882
5f450212
BA
883 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
884 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
97ac8cae
BA
885
886 if (unlikely(++i == rx_ring->count))
887 i = 0;
888 buffer_info = &rx_ring->buffer_info[i];
889 }
890
891 if (likely(rx_ring->next_to_use != i)) {
892 rx_ring->next_to_use = i;
893 if (unlikely(i-- == 0))
894 i = (rx_ring->count - 1);
895
896 /* Force memory writes to complete before letting h/w
897 * know there are new descriptors to fetch. (Only
898 * applicable for weak-ordered memory model archs,
e921eb1a
BA
899 * such as IA-64).
900 */
97ac8cae 901 wmb();
c6e7f51e 902 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
55aa6985 903 e1000e_update_rdt_wa(rx_ring, i);
c6e7f51e 904 else
c5083cf6 905 writel(i, rx_ring->tail);
97ac8cae
BA
906 }
907}
908
70495a50
BA
909static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
910 struct sk_buff *skb)
911{
912 if (netdev->features & NETIF_F_RXHASH)
e25909bc 913 skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
70495a50
BA
914}
915
bc7f75fa 916/**
55aa6985
BA
917 * e1000_clean_rx_irq - Send received data up the network stack
918 * @rx_ring: Rx descriptor ring
bc7f75fa
AK
919 *
920 * the return value indicates whether actual cleaning was done, there
921 * is no guarantee that everything was cleaned
922 **/
55aa6985
BA
923static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
924 int work_to_do)
bc7f75fa 925{
55aa6985 926 struct e1000_adapter *adapter = rx_ring->adapter;
bc7f75fa
AK
927 struct net_device *netdev = adapter->netdev;
928 struct pci_dev *pdev = adapter->pdev;
3bb99fe2 929 struct e1000_hw *hw = &adapter->hw;
5f450212 930 union e1000_rx_desc_extended *rx_desc, *next_rxd;
bc7f75fa 931 struct e1000_buffer *buffer_info, *next_buffer;
5f450212 932 u32 length, staterr;
bc7f75fa
AK
933 unsigned int i;
934 int cleaned_count = 0;
3db1cd5c 935 bool cleaned = false;
bc7f75fa
AK
936 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
937
938 i = rx_ring->next_to_clean;
5f450212
BA
939 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
940 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
bc7f75fa
AK
941 buffer_info = &rx_ring->buffer_info[i];
942
5f450212 943 while (staterr & E1000_RXD_STAT_DD) {
bc7f75fa 944 struct sk_buff *skb;
bc7f75fa
AK
945
946 if (*work_done >= work_to_do)
947 break;
948 (*work_done)++;
2d0bb1c1 949 rmb(); /* read descriptor and rx_buffer_info after status DD */
bc7f75fa 950
bc7f75fa
AK
951 skb = buffer_info->skb;
952 buffer_info->skb = NULL;
953
954 prefetch(skb->data - NET_IP_ALIGN);
955
956 i++;
957 if (i == rx_ring->count)
958 i = 0;
5f450212 959 next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
bc7f75fa
AK
960 prefetch(next_rxd);
961
962 next_buffer = &rx_ring->buffer_info[i];
963
3db1cd5c 964 cleaned = true;
bc7f75fa 965 cleaned_count++;
e5fe2541
BA
966 dma_unmap_single(&pdev->dev, buffer_info->dma,
967 adapter->rx_buffer_len, DMA_FROM_DEVICE);
bc7f75fa
AK
968 buffer_info->dma = 0;
969
5f450212 970 length = le16_to_cpu(rx_desc->wb.upper.length);
bc7f75fa 971
e921eb1a 972 /* !EOP means multiple descriptors were used to store a single
b94b5028
JB
973 * packet, if that's the case we need to toss it. In fact, we
974 * need to toss every packet with the EOP bit clear and the
975 * next frame that _does_ have the EOP bit set, as it is by
976 * definition only a frame fragment
977 */
5f450212 978 if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
b94b5028
JB
979 adapter->flags2 |= FLAG2_IS_DISCARDING;
980
981 if (adapter->flags2 & FLAG2_IS_DISCARDING) {
bc7f75fa 982 /* All receives must fit into a single buffer */
3bb99fe2 983 e_dbg("Receive packet consumed multiple buffers\n");
bc7f75fa
AK
984 /* recycle */
985 buffer_info->skb = skb;
5f450212 986 if (staterr & E1000_RXD_STAT_EOP)
b94b5028 987 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
bc7f75fa
AK
988 goto next_desc;
989 }
990
cf955e6c
BG
991 if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
992 !(netdev->features & NETIF_F_RXALL))) {
bc7f75fa
AK
993 /* recycle */
994 buffer_info->skb = skb;
995 goto next_desc;
996 }
997
eb7c3adb 998 /* adjust length to remove Ethernet CRC */
0184039a
BG
999 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
1000 /* If configured to store CRC, don't subtract FCS,
1001 * but keep the FCS bytes out of the total_rx_bytes
1002 * counter
1003 */
1004 if (netdev->features & NETIF_F_RXFCS)
1005 total_rx_bytes -= 4;
1006 else
1007 length -= 4;
1008 }
eb7c3adb 1009
bc7f75fa
AK
1010 total_rx_bytes += length;
1011 total_rx_packets++;
1012
e921eb1a 1013 /* code added for copybreak, this should improve
bc7f75fa 1014 * performance for small packets with large amounts
ad68076e
BA
1015 * of reassembly being done in the stack
1016 */
bc7f75fa
AK
1017 if (length < copybreak) {
1018 struct sk_buff *new_skb =
89d71a66 1019 netdev_alloc_skb_ip_align(netdev, length);
bc7f75fa 1020 if (new_skb) {
808ff676
BA
1021 skb_copy_to_linear_data_offset(new_skb,
1022 -NET_IP_ALIGN,
1023 (skb->data -
1024 NET_IP_ALIGN),
1025 (length +
1026 NET_IP_ALIGN));
bc7f75fa
AK
1027 /* save the skb in buffer_info as good */
1028 buffer_info->skb = skb;
1029 skb = new_skb;
1030 }
1031 /* else just continue with the old one */
1032 }
1033 /* end copybreak code */
1034 skb_put(skb, length);
1035
1036 /* Receive Checksum Offload */
2e1706f2 1037 e1000_rx_checksum(adapter, staterr, skb);
bc7f75fa 1038
70495a50
BA
1039 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1040
5f450212
BA
1041 e1000_receive_skb(adapter, netdev, skb, staterr,
1042 rx_desc->wb.upper.vlan);
bc7f75fa
AK
1043
1044next_desc:
5f450212 1045 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
bc7f75fa
AK
1046
1047 /* return some buffers to hardware, one at a time is too slow */
1048 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
55aa6985 1049 adapter->alloc_rx_buf(rx_ring, cleaned_count,
c2fed996 1050 GFP_ATOMIC);
bc7f75fa
AK
1051 cleaned_count = 0;
1052 }
1053
1054 /* use prefetched values */
1055 rx_desc = next_rxd;
1056 buffer_info = next_buffer;
5f450212
BA
1057
1058 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
bc7f75fa
AK
1059 }
1060 rx_ring->next_to_clean = i;
1061
1062 cleaned_count = e1000_desc_unused(rx_ring);
1063 if (cleaned_count)
55aa6985 1064 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
bc7f75fa 1065
bc7f75fa 1066 adapter->total_rx_bytes += total_rx_bytes;
7c25769f 1067 adapter->total_rx_packets += total_rx_packets;
bc7f75fa
AK
1068 return cleaned;
1069}
1070
55aa6985
BA
1071static void e1000_put_txbuf(struct e1000_ring *tx_ring,
1072 struct e1000_buffer *buffer_info)
bc7f75fa 1073{
55aa6985
BA
1074 struct e1000_adapter *adapter = tx_ring->adapter;
1075
03b1320d
AD
1076 if (buffer_info->dma) {
1077 if (buffer_info->mapped_as_page)
0be3f55f
NN
1078 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1079 buffer_info->length, DMA_TO_DEVICE);
03b1320d 1080 else
0be3f55f
NN
1081 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1082 buffer_info->length, DMA_TO_DEVICE);
03b1320d
AD
1083 buffer_info->dma = 0;
1084 }
bc7f75fa
AK
1085 if (buffer_info->skb) {
1086 dev_kfree_skb_any(buffer_info->skb);
1087 buffer_info->skb = NULL;
1088 }
1b7719c4 1089 buffer_info->time_stamp = 0;
bc7f75fa
AK
1090}
1091
41cec6f1 1092static void e1000_print_hw_hang(struct work_struct *work)
bc7f75fa 1093{
41cec6f1 1094 struct e1000_adapter *adapter = container_of(work,
f0ff4398
BA
1095 struct e1000_adapter,
1096 print_hang_task);
09357b00 1097 struct net_device *netdev = adapter->netdev;
bc7f75fa
AK
1098 struct e1000_ring *tx_ring = adapter->tx_ring;
1099 unsigned int i = tx_ring->next_to_clean;
1100 unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
1101 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
41cec6f1
BA
1102 struct e1000_hw *hw = &adapter->hw;
1103 u16 phy_status, phy_1000t_status, phy_ext_status;
1104 u16 pci_status;
1105
615b32af
JB
1106 if (test_bit(__E1000_DOWN, &adapter->state))
1107 return;
1108
e5fe2541 1109 if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) {
e921eb1a 1110 /* May be block on write-back, flush and detect again
09357b00
JK
1111 * flush pending descriptor writebacks to memory
1112 */
1113 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
1114 /* execute the writes immediately */
1115 e1e_flush();
e921eb1a 1116 /* Due to rare timing issues, write to TIDV again to ensure
bf03085f
MV
1117 * the write is successful
1118 */
1119 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
1120 /* execute the writes immediately */
1121 e1e_flush();
09357b00
JK
1122 adapter->tx_hang_recheck = true;
1123 return;
1124 }
09357b00 1125 adapter->tx_hang_recheck = false;
d9554e96
DE
1126
1127 if (er32(TDH(0)) == er32(TDT(0))) {
1128 e_dbg("false hang detected, ignoring\n");
1129 return;
1130 }
1131
1132 /* Real hang detected */
09357b00
JK
1133 netif_stop_queue(netdev);
1134
c2ade1a4
BA
1135 e1e_rphy(hw, MII_BMSR, &phy_status);
1136 e1e_rphy(hw, MII_STAT1000, &phy_1000t_status);
1137 e1e_rphy(hw, MII_ESTATUS, &phy_ext_status);
bc7f75fa 1138
41cec6f1
BA
1139 pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
1140
1141 /* detected Hardware unit hang */
1142 e_err("Detected Hardware Unit Hang:\n"
44defeb3
JK
1143 " TDH <%x>\n"
1144 " TDT <%x>\n"
1145 " next_to_use <%x>\n"
1146 " next_to_clean <%x>\n"
1147 "buffer_info[next_to_clean]:\n"
1148 " time_stamp <%lx>\n"
1149 " next_to_watch <%x>\n"
1150 " jiffies <%lx>\n"
41cec6f1
BA
1151 " next_to_watch.status <%x>\n"
1152 "MAC Status <%x>\n"
1153 "PHY Status <%x>\n"
1154 "PHY 1000BASE-T Status <%x>\n"
1155 "PHY Extended Status <%x>\n"
1156 "PCI Status <%x>\n",
e5fe2541
BA
1157 readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
1158 tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
1159 eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
1160 phy_status, phy_1000t_status, phy_ext_status, pci_status);
7c0427ee 1161
d9554e96
DE
1162 e1000e_dump(adapter);
1163
7c0427ee
BA
1164 /* Suggest workaround for known h/w issue */
1165 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
1166 e_err("Try turning off Tx pause (flow control) via ethtool\n");
bc7f75fa
AK
1167}
1168
b67e1913
BA
1169/**
1170 * e1000e_tx_hwtstamp_work - check for Tx time stamp
1171 * @work: pointer to work struct
1172 *
1173 * This work function polls the TSYNCTXCTL valid bit to determine when a
1174 * timestamp has been taken for the current stored skb. The timestamp must
1175 * be for this skb because only one such packet is allowed in the queue.
1176 */
1177static void e1000e_tx_hwtstamp_work(struct work_struct *work)
1178{
1179 struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
1180 tx_hwtstamp_work);
1181 struct e1000_hw *hw = &adapter->hw;
1182
b67e1913
BA
1183 if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
1184 struct skb_shared_hwtstamps shhwtstamps;
1185 u64 txstmp;
1186
1187 txstmp = er32(TXSTMPL);
1188 txstmp |= (u64)er32(TXSTMPH) << 32;
1189
1190 e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);
1191
1192 skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
1193 dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
1194 adapter->tx_hwtstamp_skb = NULL;
59c871c5
JK
1195 } else if (time_after(jiffies, adapter->tx_hwtstamp_start
1196 + adapter->tx_timeout_factor * HZ)) {
1197 dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
1198 adapter->tx_hwtstamp_skb = NULL;
1199 adapter->tx_hwtstamp_timeouts++;
c5ffe7e1 1200 e_warn("clearing Tx timestamp hang\n");
b67e1913
BA
1201 } else {
1202 /* reschedule to check later */
1203 schedule_work(&adapter->tx_hwtstamp_work);
1204 }
1205}
1206
bc7f75fa
AK
1207/**
1208 * e1000_clean_tx_irq - Reclaim resources after transmit completes
55aa6985 1209 * @tx_ring: Tx descriptor ring
bc7f75fa
AK
1210 *
1211 * the return value indicates whether actual cleaning was done, there
1212 * is no guarantee that everything was cleaned
1213 **/
55aa6985 1214static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
bc7f75fa 1215{
55aa6985 1216 struct e1000_adapter *adapter = tx_ring->adapter;
bc7f75fa
AK
1217 struct net_device *netdev = adapter->netdev;
1218 struct e1000_hw *hw = &adapter->hw;
bc7f75fa
AK
1219 struct e1000_tx_desc *tx_desc, *eop_desc;
1220 struct e1000_buffer *buffer_info;
1221 unsigned int i, eop;
1222 unsigned int count = 0;
bc7f75fa 1223 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3f0cfa3b 1224 unsigned int bytes_compl = 0, pkts_compl = 0;
bc7f75fa
AK
1225
1226 i = tx_ring->next_to_clean;
1227 eop = tx_ring->buffer_info[i].next_to_watch;
1228 eop_desc = E1000_TX_DESC(*tx_ring, eop);
1229
12d04a3c
AD
1230 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
1231 (count < tx_ring->count)) {
a86043c2 1232 bool cleaned = false;
6cf08d1c 1233
e80bd1d1 1234 rmb(); /* read buffer_info after eop_desc */
a86043c2 1235 for (; !cleaned; count++) {
bc7f75fa
AK
1236 tx_desc = E1000_TX_DESC(*tx_ring, i);
1237 buffer_info = &tx_ring->buffer_info[i];
1238 cleaned = (i == eop);
1239
1240 if (cleaned) {
9ed318d5
TH
1241 total_tx_packets += buffer_info->segs;
1242 total_tx_bytes += buffer_info->bytecount;
3f0cfa3b
TH
1243 if (buffer_info->skb) {
1244 bytes_compl += buffer_info->skb->len;
1245 pkts_compl++;
1246 }
bc7f75fa
AK
1247 }
1248
55aa6985 1249 e1000_put_txbuf(tx_ring, buffer_info);
bc7f75fa
AK
1250 tx_desc->upper.data = 0;
1251
1252 i++;
1253 if (i == tx_ring->count)
1254 i = 0;
1255 }
1256
dac87619
TL
1257 if (i == tx_ring->next_to_use)
1258 break;
bc7f75fa
AK
1259 eop = tx_ring->buffer_info[i].next_to_watch;
1260 eop_desc = E1000_TX_DESC(*tx_ring, eop);
bc7f75fa
AK
1261 }
1262
1263 tx_ring->next_to_clean = i;
1264
3f0cfa3b
TH
1265 netdev_completed_queue(netdev, pkts_compl, bytes_compl);
1266
bc7f75fa 1267#define TX_WAKE_THRESHOLD 32
a86043c2
JB
1268 if (count && netif_carrier_ok(netdev) &&
1269 e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
bc7f75fa
AK
1270 /* Make sure that anybody stopping the queue after this
1271 * sees the new next_to_clean.
1272 */
1273 smp_mb();
1274
1275 if (netif_queue_stopped(netdev) &&
1276 !(test_bit(__E1000_DOWN, &adapter->state))) {
1277 netif_wake_queue(netdev);
1278 ++adapter->restart_queue;
1279 }
1280 }
1281
1282 if (adapter->detect_tx_hung) {
e921eb1a 1283 /* Detect a transmit hang in hardware, this serializes the
41cec6f1
BA
1284 * check with the clearing of time_stamp and movement of i
1285 */
3db1cd5c 1286 adapter->detect_tx_hung = false;
12d04a3c
AD
1287 if (tx_ring->buffer_info[i].time_stamp &&
1288 time_after(jiffies, tx_ring->buffer_info[i].time_stamp
8e95a202 1289 + (adapter->tx_timeout_factor * HZ)) &&
09357b00 1290 !(er32(STATUS) & E1000_STATUS_TXOFF))
41cec6f1 1291 schedule_work(&adapter->print_hang_task);
09357b00
JK
1292 else
1293 adapter->tx_hang_recheck = false;
bc7f75fa
AK
1294 }
1295 adapter->total_tx_bytes += total_tx_bytes;
1296 adapter->total_tx_packets += total_tx_packets;
807540ba 1297 return count < tx_ring->count;
bc7f75fa
AK
1298}
1299
bc7f75fa
AK
1300/**
1301 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
55aa6985 1302 * @rx_ring: Rx descriptor ring
bc7f75fa
AK
1303 *
1304 * the return value indicates whether actual cleaning was done, there
1305 * is no guarantee that everything was cleaned
1306 **/
55aa6985
BA
1307static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
1308 int work_to_do)
bc7f75fa 1309{
55aa6985 1310 struct e1000_adapter *adapter = rx_ring->adapter;
3bb99fe2 1311 struct e1000_hw *hw = &adapter->hw;
bc7f75fa
AK
1312 union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
1313 struct net_device *netdev = adapter->netdev;
1314 struct pci_dev *pdev = adapter->pdev;
bc7f75fa
AK
1315 struct e1000_buffer *buffer_info, *next_buffer;
1316 struct e1000_ps_page *ps_page;
1317 struct sk_buff *skb;
1318 unsigned int i, j;
1319 u32 length, staterr;
1320 int cleaned_count = 0;
3db1cd5c 1321 bool cleaned = false;
bc7f75fa
AK
1322 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1323
1324 i = rx_ring->next_to_clean;
1325 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
1326 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
1327 buffer_info = &rx_ring->buffer_info[i];
1328
1329 while (staterr & E1000_RXD_STAT_DD) {
1330 if (*work_done >= work_to_do)
1331 break;
1332 (*work_done)++;
1333 skb = buffer_info->skb;
2d0bb1c1 1334 rmb(); /* read descriptor and rx_buffer_info after status DD */
bc7f75fa
AK
1335
1336 /* in the packet split case this is header only */
1337 prefetch(skb->data - NET_IP_ALIGN);
1338
1339 i++;
1340 if (i == rx_ring->count)
1341 i = 0;
1342 next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
1343 prefetch(next_rxd);
1344
1345 next_buffer = &rx_ring->buffer_info[i];
1346
3db1cd5c 1347 cleaned = true;
bc7f75fa 1348 cleaned_count++;
0be3f55f 1349 dma_unmap_single(&pdev->dev, buffer_info->dma,
af667a29 1350 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
bc7f75fa
AK
1351 buffer_info->dma = 0;
1352
af667a29 1353 /* see !EOP comment in other Rx routine */
b94b5028
JB
1354 if (!(staterr & E1000_RXD_STAT_EOP))
1355 adapter->flags2 |= FLAG2_IS_DISCARDING;
1356
1357 if (adapter->flags2 & FLAG2_IS_DISCARDING) {
ef456f85 1358 e_dbg("Packet Split buffers didn't pick up the full packet\n");
bc7f75fa 1359 dev_kfree_skb_irq(skb);
b94b5028
JB
1360 if (staterr & E1000_RXD_STAT_EOP)
1361 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
bc7f75fa
AK
1362 goto next_desc;
1363 }
1364
cf955e6c
BG
1365 if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
1366 !(netdev->features & NETIF_F_RXALL))) {
bc7f75fa
AK
1367 dev_kfree_skb_irq(skb);
1368 goto next_desc;
1369 }
1370
1371 length = le16_to_cpu(rx_desc->wb.middle.length0);
1372
1373 if (!length) {
ef456f85 1374 e_dbg("Last part of the packet spanning multiple descriptors\n");
bc7f75fa
AK
1375 dev_kfree_skb_irq(skb);
1376 goto next_desc;
1377 }
1378
1379 /* Good Receive */
1380 skb_put(skb, length);
1381
1382 {
e921eb1a 1383 /* this looks ugly, but it seems compiler issues make
0e15df49
BA
1384 * it more efficient than reusing j
1385 */
1386 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
bc7f75fa 1387
e921eb1a 1388 /* page alloc/put takes too long and effects small
0e15df49
BA
1389 * packet throughput, so unsplit small packets and
1390 * save the alloc/put only valid in softirq (napi)
1391 * context to call kmap_*
ad68076e 1392 */
0e15df49
BA
1393 if (l1 && (l1 <= copybreak) &&
1394 ((length + l1) <= adapter->rx_ps_bsize0)) {
1395 u8 *vaddr;
1396
1397 ps_page = &buffer_info->ps_pages[0];
1398
e921eb1a 1399 /* there is no documentation about how to call
0e15df49
BA
1400 * kmap_atomic, so we can't hold the mapping
1401 * very long
1402 */
1403 dma_sync_single_for_cpu(&pdev->dev,
1404 ps_page->dma,
1405 PAGE_SIZE,
1406 DMA_FROM_DEVICE);
9f393834 1407 vaddr = kmap_atomic(ps_page->page);
0e15df49 1408 memcpy(skb_tail_pointer(skb), vaddr, l1);
9f393834 1409 kunmap_atomic(vaddr);
0e15df49
BA
1410 dma_sync_single_for_device(&pdev->dev,
1411 ps_page->dma,
1412 PAGE_SIZE,
1413 DMA_FROM_DEVICE);
1414
1415 /* remove the CRC */
0184039a
BG
1416 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
1417 if (!(netdev->features & NETIF_F_RXFCS))
1418 l1 -= 4;
1419 }
0e15df49
BA
1420
1421 skb_put(skb, l1);
1422 goto copydone;
e80bd1d1 1423 } /* if */
bc7f75fa
AK
1424 }
1425
1426 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1427 length = le16_to_cpu(rx_desc->wb.upper.length[j]);
1428 if (!length)
1429 break;
1430
47f44e40 1431 ps_page = &buffer_info->ps_pages[j];
0be3f55f
NN
1432 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1433 DMA_FROM_DEVICE);
bc7f75fa
AK
1434 ps_page->dma = 0;
1435 skb_fill_page_desc(skb, j, ps_page->page, 0, length);
1436 ps_page->page = NULL;
1437 skb->len += length;
1438 skb->data_len += length;
98a045d7 1439 skb->truesize += PAGE_SIZE;
bc7f75fa
AK
1440 }
1441
eb7c3adb
JK
1442 /* strip the ethernet crc, problem is we're using pages now so
1443 * this whole operation can get a little cpu intensive
1444 */
0184039a
BG
1445 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
1446 if (!(netdev->features & NETIF_F_RXFCS))
1447 pskb_trim(skb, skb->len - 4);
1448 }
eb7c3adb 1449
bc7f75fa
AK
1450copydone:
1451 total_rx_bytes += skb->len;
1452 total_rx_packets++;
1453
2e1706f2 1454 e1000_rx_checksum(adapter, staterr, skb);
bc7f75fa 1455
70495a50
BA
1456 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1457
bc7f75fa 1458 if (rx_desc->wb.upper.header_status &
17e813ec 1459 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
bc7f75fa
AK
1460 adapter->rx_hdr_split++;
1461
b67e1913
BA
1462 e1000_receive_skb(adapter, netdev, skb, staterr,
1463 rx_desc->wb.middle.vlan);
bc7f75fa
AK
1464
1465next_desc:
1466 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
1467 buffer_info->skb = NULL;
1468
1469 /* return some buffers to hardware, one at a time is too slow */
1470 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
55aa6985 1471 adapter->alloc_rx_buf(rx_ring, cleaned_count,
c2fed996 1472 GFP_ATOMIC);
bc7f75fa
AK
1473 cleaned_count = 0;
1474 }
1475
1476 /* use prefetched values */
1477 rx_desc = next_rxd;
1478 buffer_info = next_buffer;
1479
1480 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
1481 }
1482 rx_ring->next_to_clean = i;
1483
1484 cleaned_count = e1000_desc_unused(rx_ring);
1485 if (cleaned_count)
55aa6985 1486 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
bc7f75fa 1487
bc7f75fa 1488 adapter->total_rx_bytes += total_rx_bytes;
7c25769f 1489 adapter->total_rx_packets += total_rx_packets;
bc7f75fa
AK
1490 return cleaned;
1491}
1492
97ac8cae
BA
1493/**
1494 * e1000_consume_page - helper function
1495 **/
1496static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
66501f56 1497 u16 length)
97ac8cae
BA
1498{
1499 bi->page = NULL;
1500 skb->len += length;
1501 skb->data_len += length;
98a045d7 1502 skb->truesize += PAGE_SIZE;
97ac8cae
BA
1503}
1504
1505/**
1506 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
1507 * @adapter: board private structure
1508 *
1509 * the return value indicates whether actual cleaning was done, there
1510 * is no guarantee that everything was cleaned
1511 **/
55aa6985
BA
1512static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
1513 int work_to_do)
97ac8cae 1514{
55aa6985 1515 struct e1000_adapter *adapter = rx_ring->adapter;
97ac8cae
BA
1516 struct net_device *netdev = adapter->netdev;
1517 struct pci_dev *pdev = adapter->pdev;
5f450212 1518 union e1000_rx_desc_extended *rx_desc, *next_rxd;
97ac8cae 1519 struct e1000_buffer *buffer_info, *next_buffer;
5f450212 1520 u32 length, staterr;
97ac8cae
BA
1521 unsigned int i;
1522 int cleaned_count = 0;
1523 bool cleaned = false;
362e20ca 1524 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
17e813ec 1525 struct skb_shared_info *shinfo;
97ac8cae
BA
1526
1527 i = rx_ring->next_to_clean;
5f450212
BA
1528 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
1529 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
97ac8cae
BA
1530 buffer_info = &rx_ring->buffer_info[i];
1531
5f450212 1532 while (staterr & E1000_RXD_STAT_DD) {
97ac8cae 1533 struct sk_buff *skb;
97ac8cae
BA
1534
1535 if (*work_done >= work_to_do)
1536 break;
1537 (*work_done)++;
2d0bb1c1 1538 rmb(); /* read descriptor and rx_buffer_info after status DD */
97ac8cae 1539
97ac8cae
BA
1540 skb = buffer_info->skb;
1541 buffer_info->skb = NULL;
1542
1543 ++i;
1544 if (i == rx_ring->count)
1545 i = 0;
5f450212 1546 next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
97ac8cae
BA
1547 prefetch(next_rxd);
1548
1549 next_buffer = &rx_ring->buffer_info[i];
1550
1551 cleaned = true;
1552 cleaned_count++;
0be3f55f
NN
1553 dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
1554 DMA_FROM_DEVICE);
97ac8cae
BA
1555 buffer_info->dma = 0;
1556
5f450212 1557 length = le16_to_cpu(rx_desc->wb.upper.length);
97ac8cae
BA
1558
1559 /* errors is only valid for DD + EOP descriptors */
5f450212 1560 if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
cf955e6c
BG
1561 ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
1562 !(netdev->features & NETIF_F_RXALL)))) {
5f450212
BA
1563 /* recycle both page and skb */
1564 buffer_info->skb = skb;
1565 /* an error means any chain goes out the window too */
1566 if (rx_ring->rx_skb_top)
1567 dev_kfree_skb_irq(rx_ring->rx_skb_top);
1568 rx_ring->rx_skb_top = NULL;
1569 goto next_desc;
97ac8cae 1570 }
f0f1a172 1571#define rxtop (rx_ring->rx_skb_top)
5f450212 1572 if (!(staterr & E1000_RXD_STAT_EOP)) {
97ac8cae
BA
1573 /* this descriptor is only the beginning (or middle) */
1574 if (!rxtop) {
1575 /* this is the beginning of a chain */
1576 rxtop = skb;
1577 skb_fill_page_desc(rxtop, 0, buffer_info->page,
f0ff4398 1578 0, length);
97ac8cae
BA
1579 } else {
1580 /* this is the middle of a chain */
17e813ec
BA
1581 shinfo = skb_shinfo(rxtop);
1582 skb_fill_page_desc(rxtop, shinfo->nr_frags,
1583 buffer_info->page, 0,
1584 length);
97ac8cae
BA
1585 /* re-use the skb, only consumed the page */
1586 buffer_info->skb = skb;
1587 }
1588 e1000_consume_page(buffer_info, rxtop, length);
1589 goto next_desc;
1590 } else {
1591 if (rxtop) {
1592 /* end of the chain */
17e813ec
BA
1593 shinfo = skb_shinfo(rxtop);
1594 skb_fill_page_desc(rxtop, shinfo->nr_frags,
1595 buffer_info->page, 0,
1596 length);
97ac8cae 1597 /* re-use the current skb, we only consumed the
e921eb1a
BA
1598 * page
1599 */
97ac8cae
BA
1600 buffer_info->skb = skb;
1601 skb = rxtop;
1602 rxtop = NULL;
1603 e1000_consume_page(buffer_info, skb, length);
1604 } else {
1605 /* no chain, got EOP, this buf is the packet
e921eb1a
BA
1606 * copybreak to save the put_page/alloc_page
1607 */
97ac8cae
BA
1608 if (length <= copybreak &&
1609 skb_tailroom(skb) >= length) {
1610 u8 *vaddr;
4679026d 1611 vaddr = kmap_atomic(buffer_info->page);
1612 memcpy(skb_tail_pointer(skb), vaddr,
1613 length);
4679026d 1614 kunmap_atomic(vaddr);
97ac8cae 1615 /* re-use the page, so don't erase
1616 * buffer_info->page
1617 */
1618 skb_put(skb, length);
1619 } else {
1620 skb_fill_page_desc(skb, 0,
1621 buffer_info->page, 0,
1622 length);
97ac8cae 1623 e1000_consume_page(buffer_info, skb,
f0ff4398 1624 length);
1625 }
1626 }
1627 }
1628
1629 /* Receive Checksum Offload */
1630 e1000_rx_checksum(adapter, staterr, skb);
97ac8cae 1631
1632 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1633
1634 /* probably a little skewed due to removing CRC */
1635 total_rx_bytes += skb->len;
1636 total_rx_packets++;
1637
1638 /* eth type trans needs skb->data to point to something */
1639 if (!pskb_may_pull(skb, ETH_HLEN)) {
44defeb3 1640 e_err("pskb_may_pull failed.\n");
ef5ab89c 1641 dev_kfree_skb_irq(skb);
1642 goto next_desc;
1643 }
1644
1645 e1000_receive_skb(adapter, netdev, skb, staterr,
1646 rx_desc->wb.upper.vlan);
1647
1648next_desc:
5f450212 1649 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
1650
1651 /* return some buffers to hardware, one at a time is too slow */
1652 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
55aa6985 1653 adapter->alloc_rx_buf(rx_ring, cleaned_count,
c2fed996 1654 GFP_ATOMIC);
1655 cleaned_count = 0;
1656 }
1657
1658 /* use prefetched values */
1659 rx_desc = next_rxd;
1660 buffer_info = next_buffer;
1661
1662 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1663 }
1664 rx_ring->next_to_clean = i;
1665
1666 cleaned_count = e1000_desc_unused(rx_ring);
1667 if (cleaned_count)
55aa6985 1668 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
1669
1670 adapter->total_rx_bytes += total_rx_bytes;
1671 adapter->total_rx_packets += total_rx_packets;
1672 return cleaned;
1673}
1674
1675/**
1676 * e1000_clean_rx_ring - Free Rx Buffers per Queue
55aa6985 1677 * @rx_ring: Rx descriptor ring
bc7f75fa 1678 **/
55aa6985 1679static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
bc7f75fa 1680{
55aa6985 1681 struct e1000_adapter *adapter = rx_ring->adapter;
1682 struct e1000_buffer *buffer_info;
1683 struct e1000_ps_page *ps_page;
1684 struct pci_dev *pdev = adapter->pdev;
1685 unsigned int i, j;
1686
1687 /* Free all the Rx ring sk_buffs */
1688 for (i = 0; i < rx_ring->count; i++) {
1689 buffer_info = &rx_ring->buffer_info[i];
1690 if (buffer_info->dma) {
1691 if (adapter->clean_rx == e1000_clean_rx_irq)
0be3f55f 1692 dma_unmap_single(&pdev->dev, buffer_info->dma,
bc7f75fa 1693 adapter->rx_buffer_len,
0be3f55f 1694 DMA_FROM_DEVICE);
97ac8cae 1695 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
0be3f55f 1696 dma_unmap_page(&pdev->dev, buffer_info->dma,
f0ff4398 1697 PAGE_SIZE, DMA_FROM_DEVICE);
bc7f75fa 1698 else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
0be3f55f 1699 dma_unmap_single(&pdev->dev, buffer_info->dma,
bc7f75fa 1700 adapter->rx_ps_bsize0,
0be3f55f 1701 DMA_FROM_DEVICE);
1702 buffer_info->dma = 0;
1703 }
1704
1705 if (buffer_info->page) {
1706 put_page(buffer_info->page);
1707 buffer_info->page = NULL;
1708 }
1709
1710 if (buffer_info->skb) {
1711 dev_kfree_skb(buffer_info->skb);
1712 buffer_info->skb = NULL;
1713 }
1714
1715 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
47f44e40 1716 ps_page = &buffer_info->ps_pages[j];
1717 if (!ps_page->page)
1718 break;
1719 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1720 DMA_FROM_DEVICE);
1721 ps_page->dma = 0;
1722 put_page(ps_page->page);
1723 ps_page->page = NULL;
1724 }
1725 }
1726
1727 /* there also may be some cached data from a chained receive */
1728 if (rx_ring->rx_skb_top) {
1729 dev_kfree_skb(rx_ring->rx_skb_top);
1730 rx_ring->rx_skb_top = NULL;
1731 }
1732
1733 /* Zero out the descriptor ring */
1734 memset(rx_ring->desc, 0, rx_ring->size);
1735
1736 rx_ring->next_to_clean = 0;
1737 rx_ring->next_to_use = 0;
b94b5028 1738 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
bc7f75fa 1739
c5083cf6 1740 writel(0, rx_ring->head);
b485dbae 1741 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
1742 e1000e_update_rdt_wa(rx_ring, 0);
1743 else
1744 writel(0, rx_ring->tail);
1745}
1746
1747static void e1000e_downshift_workaround(struct work_struct *work)
1748{
1749 struct e1000_adapter *adapter = container_of(work,
1750 struct e1000_adapter,
1751 downshift_task);
a8f88ff5 1752
1753 if (test_bit(__E1000_DOWN, &adapter->state))
1754 return;
1755
1756 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
1757}
1758
1759/**
1760 * e1000_intr_msi - Interrupt Handler
1761 * @irq: interrupt number
1762 * @data: pointer to a network interface device structure
1763 **/
8bb62869 1764static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
1765{
1766 struct net_device *netdev = data;
1767 struct e1000_adapter *adapter = netdev_priv(netdev);
1768 struct e1000_hw *hw = &adapter->hw;
1769 u32 icr = er32(ICR);
1770
e921eb1a 1771 /* read ICR disables interrupts using IAM */
573cca8c 1772 if (icr & E1000_ICR_LSC) {
f92518dd 1773 hw->mac.get_link_status = true;
e921eb1a 1774 /* ICH8 workaround-- Call gig speed drop workaround on cable
1775 * disconnect (LSC) before accessing any PHY registers
1776 */
1777 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1778 (!(er32(STATUS) & E1000_STATUS_LU)))
a8f88ff5 1779 schedule_work(&adapter->downshift_task);
bc7f75fa 1780
e921eb1a 1781 /* 80003ES2LAN workaround-- For packet buffer work-around on
bc7f75fa 1782 * link down event; disable receives here in the ISR and reset
1783 * adapter in watchdog
1784 */
1785 if (netif_carrier_ok(netdev) &&
1786 adapter->flags & FLAG_RX_NEEDS_RESTART) {
1787 /* disable receives */
1788 u32 rctl = er32(RCTL);
6cf08d1c 1789
bc7f75fa 1790 ew32(RCTL, rctl & ~E1000_RCTL_EN);
12d43f7d 1791 adapter->flags |= FLAG_RESTART_NOW;
1792 }
1793 /* guard against interrupt when we're going down */
1794 if (!test_bit(__E1000_DOWN, &adapter->state))
1795 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1796 }
1797
1798 /* Reset on uncorrectable ECC error */
1799 if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
1800 u32 pbeccsts = er32(PBECCSTS);
1801
1802 adapter->corr_errors +=
1803 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
1804 adapter->uncorr_errors +=
1805 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
1806 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
1807
1808 /* Do the reset outside of interrupt context */
1809 schedule_work(&adapter->reset_task);
1810
1811 /* return immediately since reset is imminent */
1812 return IRQ_HANDLED;
1813 }
1814
288379f0 1815 if (napi_schedule_prep(&adapter->napi)) {
1816 adapter->total_tx_bytes = 0;
1817 adapter->total_tx_packets = 0;
1818 adapter->total_rx_bytes = 0;
1819 adapter->total_rx_packets = 0;
288379f0 1820 __napi_schedule(&adapter->napi);
1821 }
1822
1823 return IRQ_HANDLED;
1824}
1825
1826/**
1827 * e1000_intr - Interrupt Handler
1828 * @irq: interrupt number
1829 * @data: pointer to a network interface device structure
1830 **/
8bb62869 1831static irqreturn_t e1000_intr(int __always_unused irq, void *data)
1832{
1833 struct net_device *netdev = data;
1834 struct e1000_adapter *adapter = netdev_priv(netdev);
1835 struct e1000_hw *hw = &adapter->hw;
bc7f75fa 1836 u32 rctl, icr = er32(ICR);
4662e82b 1837
a68ea775 1838 if (!icr || test_bit(__E1000_DOWN, &adapter->state))
e80bd1d1 1839 return IRQ_NONE; /* Not our interrupt */
bc7f75fa 1840
e921eb1a 1841 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
1842 * not set, then the adapter didn't send an interrupt
1843 */
1844 if (!(icr & E1000_ICR_INT_ASSERTED))
1845 return IRQ_NONE;
1846
e921eb1a 1847 /* Interrupt Auto-Mask...upon reading ICR,
1848 * interrupts are masked. No need for the
1849 * IMC write
1850 */
bc7f75fa 1851
573cca8c 1852 if (icr & E1000_ICR_LSC) {
f92518dd 1853 hw->mac.get_link_status = true;
e921eb1a 1854 /* ICH8 workaround-- Call gig speed drop workaround on cable
1855 * disconnect (LSC) before accessing any PHY registers
1856 */
1857 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1858 (!(er32(STATUS) & E1000_STATUS_LU)))
a8f88ff5 1859 schedule_work(&adapter->downshift_task);
bc7f75fa 1860
e921eb1a 1861 /* 80003ES2LAN workaround--
1862 * For packet buffer work-around on link down event;
1863 * disable receives here in the ISR and
1864 * reset adapter in watchdog
1865 */
1866 if (netif_carrier_ok(netdev) &&
1867 (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
1868 /* disable receives */
1869 rctl = er32(RCTL);
1870 ew32(RCTL, rctl & ~E1000_RCTL_EN);
12d43f7d 1871 adapter->flags |= FLAG_RESTART_NOW;
1872 }
1873 /* guard against interrupt when we're going down */
1874 if (!test_bit(__E1000_DOWN, &adapter->state))
1875 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1876 }
1877
1878 /* Reset on uncorrectable ECC error */
1879 if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
1880 u32 pbeccsts = er32(PBECCSTS);
1881
1882 adapter->corr_errors +=
1883 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
1884 adapter->uncorr_errors +=
1885 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
1886 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
1887
1888 /* Do the reset outside of interrupt context */
1889 schedule_work(&adapter->reset_task);
1890
1891 /* return immediately since reset is imminent */
1892 return IRQ_HANDLED;
1893 }
1894
288379f0 1895 if (napi_schedule_prep(&adapter->napi)) {
1896 adapter->total_tx_bytes = 0;
1897 adapter->total_tx_packets = 0;
1898 adapter->total_rx_bytes = 0;
1899 adapter->total_rx_packets = 0;
288379f0 1900 __napi_schedule(&adapter->napi);
1901 }
1902
1903 return IRQ_HANDLED;
1904}
1905
8bb62869 1906static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
1907{
1908 struct net_device *netdev = data;
1909 struct e1000_adapter *adapter = netdev_priv(netdev);
1910 struct e1000_hw *hw = &adapter->hw;
1911 u32 icr = er32(ICR);
1912
1913 if (!(icr & E1000_ICR_INT_ASSERTED)) {
1914 if (!test_bit(__E1000_DOWN, &adapter->state))
1915 ew32(IMS, E1000_IMS_OTHER);
1916 return IRQ_NONE;
1917 }
1918
1919 if (icr & adapter->eiac_mask)
1920 ew32(ICS, (icr & adapter->eiac_mask));
1921
1922 if (icr & E1000_ICR_OTHER) {
1923 if (!(icr & E1000_ICR_LSC))
1924 goto no_link_interrupt;
f92518dd 1925 hw->mac.get_link_status = true;
1926 /* guard against interrupt when we're going down */
1927 if (!test_bit(__E1000_DOWN, &adapter->state))
1928 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1929 }
1930
1931no_link_interrupt:
1932 if (!test_bit(__E1000_DOWN, &adapter->state))
1933 ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
1934
1935 return IRQ_HANDLED;
1936}
1937
8bb62869 1938static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
1939{
1940 struct net_device *netdev = data;
1941 struct e1000_adapter *adapter = netdev_priv(netdev);
1942 struct e1000_hw *hw = &adapter->hw;
1943 struct e1000_ring *tx_ring = adapter->tx_ring;
1944
1945 adapter->total_tx_bytes = 0;
1946 adapter->total_tx_packets = 0;
1947
55aa6985 1948 if (!e1000_clean_tx_irq(tx_ring))
1949 /* Ring was not completely cleaned, so fire another interrupt */
1950 ew32(ICS, tx_ring->ims_val);
1951
1952 return IRQ_HANDLED;
1953}
1954
8bb62869 1955static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
1956{
1957 struct net_device *netdev = data;
1958 struct e1000_adapter *adapter = netdev_priv(netdev);
55aa6985 1959 struct e1000_ring *rx_ring = adapter->rx_ring;
1960
1961 /* Write the ITR value calculated at the end of the
1962 * previous interrupt.
1963 */
1964 if (rx_ring->set_itr) {
1965 writel(1000000000 / (rx_ring->itr_val * 256),
1966 rx_ring->itr_register);
1967 rx_ring->set_itr = 0;
1968 }
1969
288379f0 1970 if (napi_schedule_prep(&adapter->napi)) {
1971 adapter->total_rx_bytes = 0;
1972 adapter->total_rx_packets = 0;
288379f0 1973 __napi_schedule(&adapter->napi);
1974 }
1975 return IRQ_HANDLED;
1976}
1977
1978/**
1979 * e1000_configure_msix - Configure MSI-X hardware
1980 *
1981 * e1000_configure_msix sets up the hardware to properly
1982 * generate MSI-X interrupts.
1983 **/
1984static void e1000_configure_msix(struct e1000_adapter *adapter)
1985{
1986 struct e1000_hw *hw = &adapter->hw;
1987 struct e1000_ring *rx_ring = adapter->rx_ring;
1988 struct e1000_ring *tx_ring = adapter->tx_ring;
1989 int vector = 0;
1990 u32 ctrl_ext, ivar = 0;
1991
1992 adapter->eiac_mask = 0;
1993
1994 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
1995 if (hw->mac.type == e1000_82574) {
1996 u32 rfctl = er32(RFCTL);
6cf08d1c 1997
1998 rfctl |= E1000_RFCTL_ACK_DIS;
1999 ew32(RFCTL, rfctl);
2000 }
2001
4662e82b
BA
2002 /* Configure Rx vector */
2003 rx_ring->ims_val = E1000_IMS_RXQ0;
2004 adapter->eiac_mask |= rx_ring->ims_val;
2005 if (rx_ring->itr_val)
2006 writel(1000000000 / (rx_ring->itr_val * 256),
c5083cf6 2007 rx_ring->itr_register);
4662e82b 2008 else
c5083cf6 2009 writel(1, rx_ring->itr_register);
4662e82b
BA
2010 ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
2011
2012 /* Configure Tx vector */
2013 tx_ring->ims_val = E1000_IMS_TXQ0;
2014 vector++;
2015 if (tx_ring->itr_val)
2016 writel(1000000000 / (tx_ring->itr_val * 256),
c5083cf6 2017 tx_ring->itr_register);
4662e82b 2018 else
c5083cf6 2019 writel(1, tx_ring->itr_register);
4662e82b
BA
2020 adapter->eiac_mask |= tx_ring->ims_val;
2021 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
2022
2023 /* set vector for Other Causes, e.g. link changes */
2024 vector++;
2025 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
2026 if (rx_ring->itr_val)
2027 writel(1000000000 / (rx_ring->itr_val * 256),
2028 hw->hw_addr + E1000_EITR_82574(vector));
2029 else
2030 writel(1, hw->hw_addr + E1000_EITR_82574(vector));
2031
2032 /* Cause Tx interrupts on every write back */
2033 ivar |= (1 << 31);
2034
2035 ew32(IVAR, ivar);
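 /* At this point IVAR maps vector 0 to Rx queue 0 (bits 7:0), vector 1
  * to Tx queue 0 (bits 15:8) and vector 2 to the "other" causes
  * (bits 23:16); bit 31 requests a Tx interrupt on every writeback.
  */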
2036
2037 /* enable MSI-X PBA support */
2038 ctrl_ext = er32(CTRL_EXT);
2039 ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
2040
2041 /* Auto-Mask Other interrupts upon ICR read */
2042 ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
2043 ctrl_ext |= E1000_CTRL_EXT_EIAME;
2044 ew32(CTRL_EXT, ctrl_ext);
2045 e1e_flush();
2046}
2047
2048void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
2049{
2050 if (adapter->msix_entries) {
2051 pci_disable_msix(adapter->pdev);
2052 kfree(adapter->msix_entries);
2053 adapter->msix_entries = NULL;
2054 } else if (adapter->flags & FLAG_MSI_ENABLED) {
2055 pci_disable_msi(adapter->pdev);
2056 adapter->flags &= ~FLAG_MSI_ENABLED;
2057 }
4662e82b
BA
2058}
2059
2060/**
2061 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
2062 *
2063 * Attempt to configure interrupts using the best available
2064 * capabilities of the hardware and kernel.
2065 **/
2066void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
2067{
2068 int err;
8e86acd7 2069 int i;
4662e82b
BA
2070
2071 switch (adapter->int_mode) {
2072 case E1000E_INT_MODE_MSIX:
2073 if (adapter->flags & FLAG_HAS_MSIX) {
8e86acd7
JK
2074 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
2075 adapter->msix_entries = kcalloc(adapter->num_vectors,
2076 sizeof(struct
2077 msix_entry),
2078 GFP_KERNEL);
4662e82b 2079 if (adapter->msix_entries) {
0cc7c959
AG
2080 struct e1000_adapter *a = adapter;
2081
8e86acd7 2082 for (i = 0; i < adapter->num_vectors; i++)
4662e82b
BA
2083 adapter->msix_entries[i].entry = i;
2084
0cc7c959
AG
2085 err = pci_enable_msix_range(a->pdev,
2086 a->msix_entries,
2087 a->num_vectors,
2088 a->num_vectors);
2089 if (err > 0)
4662e82b
BA
2090 return;
2091 }
2092 /* MSI-X failed, so fall through and try MSI */
ef456f85 2093 e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n");
4662e82b
BA
2094 e1000e_reset_interrupt_capability(adapter);
2095 }
2096 adapter->int_mode = E1000E_INT_MODE_MSI;
2097 /* Fall through */
2098 case E1000E_INT_MODE_MSI:
2099 if (!pci_enable_msi(adapter->pdev)) {
2100 adapter->flags |= FLAG_MSI_ENABLED;
2101 } else {
2102 adapter->int_mode = E1000E_INT_MODE_LEGACY;
ef456f85 2103 e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n");
4662e82b
BA
2104 }
2105 /* Fall through */
2106 case E1000E_INT_MODE_LEGACY:
2107 /* Don't do anything; this is the system default */
2108 break;
2109 }
8e86acd7
JK
2110
2111 /* store the number of vectors being used */
2112 adapter->num_vectors = 1;
4662e82b
BA
2113}
2114
2115/**
2116 * e1000_request_msix - Initialize MSI-X interrupts
2117 *
2118 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
2119 * kernel.
2120 **/
2121static int e1000_request_msix(struct e1000_adapter *adapter)
2122{
2123 struct net_device *netdev = adapter->netdev;
2124 int err = 0, vector = 0;
2125
2126 if (strlen(netdev->name) < (IFNAMSIZ - 5))
2127 snprintf(adapter->rx_ring->name,
2128 sizeof(adapter->rx_ring->name) - 1,
2129 "%s-rx-0", netdev->name);
2130 else
2131 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
2132 err = request_irq(adapter->msix_entries[vector].vector,
a0607fd3 2133 e1000_intr_msix_rx, 0, adapter->rx_ring->name,
2134 netdev);
2135 if (err)
5015e53a 2136 return err;
c5083cf6
BA
2137 adapter->rx_ring->itr_register = adapter->hw.hw_addr +
2138 E1000_EITR_82574(vector);
4662e82b
BA
2139 adapter->rx_ring->itr_val = adapter->itr;
2140 vector++;
2141
2142 if (strlen(netdev->name) < (IFNAMSIZ - 5))
2143 snprintf(adapter->tx_ring->name,
2144 sizeof(adapter->tx_ring->name) - 1,
2145 "%s-tx-0", netdev->name);
2146 else
2147 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
2148 err = request_irq(adapter->msix_entries[vector].vector,
a0607fd3 2149 e1000_intr_msix_tx, 0, adapter->tx_ring->name,
2150 netdev);
2151 if (err)
5015e53a 2152 return err;
c5083cf6
BA
2153 adapter->tx_ring->itr_register = adapter->hw.hw_addr +
2154 E1000_EITR_82574(vector);
4662e82b
BA
2155 adapter->tx_ring->itr_val = adapter->itr;
2156 vector++;
2157
2158 err = request_irq(adapter->msix_entries[vector].vector,
a0607fd3 2159 e1000_msix_other, 0, netdev->name, netdev);
4662e82b 2160 if (err)
5015e53a 2161 return err;
4662e82b
BA
2162
2163 e1000_configure_msix(adapter);
5015e53a 2164
4662e82b 2165 return 0;
4662e82b
BA
2166}
2167
f8d59f78
BA
2168/**
2169 * e1000_request_irq - initialize interrupts
2170 *
2171 * Attempts to configure interrupts using the best available
2172 * capabilities of the hardware and kernel.
2173 **/
bc7f75fa
AK
2174static int e1000_request_irq(struct e1000_adapter *adapter)
2175{
2176 struct net_device *netdev = adapter->netdev;
bc7f75fa
AK
2177 int err;
2178
4662e82b
BA
2179 if (adapter->msix_entries) {
2180 err = e1000_request_msix(adapter);
2181 if (!err)
2182 return err;
2183 /* fall back to MSI */
2184 e1000e_reset_interrupt_capability(adapter);
2185 adapter->int_mode = E1000E_INT_MODE_MSI;
2186 e1000e_set_interrupt_capability(adapter);
bc7f75fa 2187 }
4662e82b 2188 if (adapter->flags & FLAG_MSI_ENABLED) {
a0607fd3 2189 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
4662e82b
BA
2190 netdev->name, netdev);
2191 if (!err)
2192 return err;
bc7f75fa 2193
4662e82b
BA
2194 /* fall back to legacy interrupt */
2195 e1000e_reset_interrupt_capability(adapter);
2196 adapter->int_mode = E1000E_INT_MODE_LEGACY;
bc7f75fa
AK
2197 }
2198
a0607fd3 2199 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
4662e82b
BA
2200 netdev->name, netdev);
2201 if (err)
2202 e_err("Unable to allocate interrupt, Error: %d\n", err);
2203
bc7f75fa
AK
2204 return err;
2205}
2206
2207static void e1000_free_irq(struct e1000_adapter *adapter)
2208{
2209 struct net_device *netdev = adapter->netdev;
2210
4662e82b
BA
2211 if (adapter->msix_entries) {
2212 int vector = 0;
2213
2214 free_irq(adapter->msix_entries[vector].vector, netdev);
2215 vector++;
2216
2217 free_irq(adapter->msix_entries[vector].vector, netdev);
2218 vector++;
2219
2220 /* Other Causes interrupt vector */
2221 free_irq(adapter->msix_entries[vector].vector, netdev);
2222 return;
bc7f75fa 2223 }
4662e82b
BA
2224
2225 free_irq(adapter->pdev->irq, netdev);
bc7f75fa
AK
2226}
2227
2228/**
2229 * e1000_irq_disable - Mask off interrupt generation on the NIC
2230 **/
2231static void e1000_irq_disable(struct e1000_adapter *adapter)
2232{
2233 struct e1000_hw *hw = &adapter->hw;
2234
bc7f75fa 2235 ew32(IMC, ~0);
4662e82b
BA
2236 if (adapter->msix_entries)
2237 ew32(EIAC_82574, 0);
bc7f75fa 2238 e1e_flush();
8e86acd7
JK
2239
2240 if (adapter->msix_entries) {
2241 int i;
6cf08d1c 2242
8e86acd7
JK
2243 for (i = 0; i < adapter->num_vectors; i++)
2244 synchronize_irq(adapter->msix_entries[i].vector);
2245 } else {
2246 synchronize_irq(adapter->pdev->irq);
2247 }
bc7f75fa
AK
2248}
2249
2250/**
2251 * e1000_irq_enable - Enable default interrupt generation settings
2252 **/
2253static void e1000_irq_enable(struct e1000_adapter *adapter)
2254{
2255 struct e1000_hw *hw = &adapter->hw;
2256
4662e82b
BA
2257 if (adapter->msix_entries) {
2258 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
2259 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
94fb848b
BA
2260 } else if (hw->mac.type == e1000_pch_lpt) {
2261 ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
4662e82b
BA
2262 } else {
2263 ew32(IMS, IMS_ENABLE_MASK);
2264 }
74ef9c39 2265 e1e_flush();
bc7f75fa
AK
2266}
2267
2268/**
31dbe5b4 2269 * e1000e_get_hw_control - get control of the h/w from f/w
bc7f75fa
AK
2270 * @adapter: address of board private structure
2271 *
31dbe5b4 2272 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
bc7f75fa
AK
2273 * For ASF and Pass Through versions of f/w this means that
2274 * the driver is loaded. For AMT version (only with 82573)
2275 * of the f/w this means that the network i/f is open.
2276 **/
31dbe5b4 2277void e1000e_get_hw_control(struct e1000_adapter *adapter)
bc7f75fa
AK
2278{
2279 struct e1000_hw *hw = &adapter->hw;
2280 u32 ctrl_ext;
2281 u32 swsm;
2282
2283 /* Let firmware know the driver has taken over */
2284 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2285 swsm = er32(SWSM);
2286 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
2287 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2288 ctrl_ext = er32(CTRL_EXT);
ad68076e 2289 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
bc7f75fa
AK
2290 }
2291}
2292
2293/**
31dbe5b4 2294 * e1000e_release_hw_control - release control of the h/w to f/w
2295 * @adapter: address of board private structure
2296 *
31dbe5b4 2297 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2298 * For ASF and Pass Through versions of f/w this means that the
 2299 * driver is no longer loaded. For AMT version (only with 82573)
2300 * of the f/w this means that the network i/f is closed.
2301 *
2302 **/
31dbe5b4 2303void e1000e_release_hw_control(struct e1000_adapter *adapter)
bc7f75fa
AK
2304{
2305 struct e1000_hw *hw = &adapter->hw;
2306 u32 ctrl_ext;
2307 u32 swsm;
2308
 2309 /* Let firmware take over control of h/w */
2310 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2311 swsm = er32(SWSM);
2312 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
2313 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2314 ctrl_ext = er32(CTRL_EXT);
ad68076e 2315 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
bc7f75fa
AK
2316 }
2317}
2318
bc7f75fa 2319/**
49ce9c2c 2320 * e1000_alloc_ring_dma - allocate memory for a ring structure
bc7f75fa
AK
2321 **/
2322static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2323 struct e1000_ring *ring)
2324{
2325 struct pci_dev *pdev = adapter->pdev;
2326
2327 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
2328 GFP_KERNEL);
2329 if (!ring->desc)
2330 return -ENOMEM;
2331
2332 return 0;
2333}
2334
2335/**
2336 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
55aa6985 2337 * @tx_ring: Tx descriptor ring
bc7f75fa
AK
2338 *
2339 * Return 0 on success, negative on failure
2340 **/
55aa6985 2341int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
bc7f75fa 2342{
55aa6985 2343 struct e1000_adapter *adapter = tx_ring->adapter;
bc7f75fa
AK
2344 int err = -ENOMEM, size;
2345
2346 size = sizeof(struct e1000_buffer) * tx_ring->count;
89bf67f1 2347 tx_ring->buffer_info = vzalloc(size);
bc7f75fa
AK
2348 if (!tx_ring->buffer_info)
2349 goto err;
bc7f75fa
AK
2350
2351 /* round up to nearest 4K */
2352 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
2353 tx_ring->size = ALIGN(tx_ring->size, 4096);
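 /* e.g. a 256-entry ring of 16-byte legacy Tx descriptors is exactly
  * 4096 bytes and therefore already 4 KiB aligned.
  */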
2354
2355 err = e1000_alloc_ring_dma(adapter, tx_ring);
2356 if (err)
2357 goto err;
2358
2359 tx_ring->next_to_use = 0;
2360 tx_ring->next_to_clean = 0;
bc7f75fa
AK
2361
2362 return 0;
2363err:
2364 vfree(tx_ring->buffer_info);
44defeb3 2365 e_err("Unable to allocate memory for the transmit descriptor ring\n");
bc7f75fa
AK
2366 return err;
2367}
2368
2369/**
2370 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
55aa6985 2371 * @rx_ring: Rx descriptor ring
bc7f75fa
AK
2372 *
2373 * Returns 0 on success, negative on failure
2374 **/
55aa6985 2375int e1000e_setup_rx_resources(struct e1000_ring *rx_ring)
bc7f75fa 2376{
55aa6985 2377 struct e1000_adapter *adapter = rx_ring->adapter;
47f44e40
AK
2378 struct e1000_buffer *buffer_info;
2379 int i, size, desc_len, err = -ENOMEM;
bc7f75fa
AK
2380
2381 size = sizeof(struct e1000_buffer) * rx_ring->count;
89bf67f1 2382 rx_ring->buffer_info = vzalloc(size);
bc7f75fa
AK
2383 if (!rx_ring->buffer_info)
2384 goto err;
bc7f75fa 2385
47f44e40
AK
2386 for (i = 0; i < rx_ring->count; i++) {
2387 buffer_info = &rx_ring->buffer_info[i];
2388 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
2389 sizeof(struct e1000_ps_page),
2390 GFP_KERNEL);
2391 if (!buffer_info->ps_pages)
2392 goto err_pages;
2393 }
bc7f75fa
AK
2394
2395 desc_len = sizeof(union e1000_rx_desc_packet_split);
2396
2397 /* Round up to nearest 4K */
2398 rx_ring->size = rx_ring->count * desc_len;
2399 rx_ring->size = ALIGN(rx_ring->size, 4096);
2400
2401 err = e1000_alloc_ring_dma(adapter, rx_ring);
2402 if (err)
47f44e40 2403 goto err_pages;
bc7f75fa
AK
2404
2405 rx_ring->next_to_clean = 0;
2406 rx_ring->next_to_use = 0;
2407 rx_ring->rx_skb_top = NULL;
2408
2409 return 0;
47f44e40
AK
2410
2411err_pages:
2412 for (i = 0; i < rx_ring->count; i++) {
2413 buffer_info = &rx_ring->buffer_info[i];
2414 kfree(buffer_info->ps_pages);
2415 }
bc7f75fa
AK
2416err:
2417 vfree(rx_ring->buffer_info);
e9262447 2418 e_err("Unable to allocate memory for the receive descriptor ring\n");
bc7f75fa
AK
2419 return err;
2420}
2421
2422/**
2423 * e1000_clean_tx_ring - Free Tx Buffers
55aa6985 2424 * @tx_ring: Tx descriptor ring
bc7f75fa 2425 **/
55aa6985 2426static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
bc7f75fa 2427{
55aa6985 2428 struct e1000_adapter *adapter = tx_ring->adapter;
bc7f75fa
AK
2429 struct e1000_buffer *buffer_info;
2430 unsigned long size;
2431 unsigned int i;
2432
2433 for (i = 0; i < tx_ring->count; i++) {
2434 buffer_info = &tx_ring->buffer_info[i];
55aa6985 2435 e1000_put_txbuf(tx_ring, buffer_info);
bc7f75fa
AK
2436 }
2437
3f0cfa3b 2438 netdev_reset_queue(adapter->netdev);
bc7f75fa
AK
2439 size = sizeof(struct e1000_buffer) * tx_ring->count;
2440 memset(tx_ring->buffer_info, 0, size);
2441
2442 memset(tx_ring->desc, 0, tx_ring->size);
2443
2444 tx_ring->next_to_use = 0;
2445 tx_ring->next_to_clean = 0;
2446
c5083cf6 2447 writel(0, tx_ring->head);
b485dbae 2448 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
bdc125f7
BA
2449 e1000e_update_tdt_wa(tx_ring, 0);
2450 else
2451 writel(0, tx_ring->tail);
bc7f75fa
AK
2452}
2453
2454/**
2455 * e1000e_free_tx_resources - Free Tx Resources per Queue
55aa6985 2456 * @tx_ring: Tx descriptor ring
bc7f75fa
AK
2457 *
2458 * Free all transmit software resources
2459 **/
55aa6985 2460void e1000e_free_tx_resources(struct e1000_ring *tx_ring)
bc7f75fa 2461{
55aa6985 2462 struct e1000_adapter *adapter = tx_ring->adapter;
bc7f75fa 2463 struct pci_dev *pdev = adapter->pdev;
bc7f75fa 2464
55aa6985 2465 e1000_clean_tx_ring(tx_ring);
bc7f75fa
AK
2466
2467 vfree(tx_ring->buffer_info);
2468 tx_ring->buffer_info = NULL;
2469
2470 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2471 tx_ring->dma);
2472 tx_ring->desc = NULL;
2473}
2474
2475/**
2476 * e1000e_free_rx_resources - Free Rx Resources
55aa6985 2477 * @rx_ring: Rx descriptor ring
bc7f75fa
AK
2478 *
2479 * Free all receive software resources
2480 **/
55aa6985 2481void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
bc7f75fa 2482{
55aa6985 2483 struct e1000_adapter *adapter = rx_ring->adapter;
bc7f75fa 2484 struct pci_dev *pdev = adapter->pdev;
47f44e40 2485 int i;
bc7f75fa 2486
55aa6985 2487 e1000_clean_rx_ring(rx_ring);
bc7f75fa 2488
b1cdfead 2489 for (i = 0; i < rx_ring->count; i++)
47f44e40 2490 kfree(rx_ring->buffer_info[i].ps_pages);
47f44e40 2491
bc7f75fa
AK
2492 vfree(rx_ring->buffer_info);
2493 rx_ring->buffer_info = NULL;
2494
bc7f75fa
AK
2495 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2496 rx_ring->dma);
2497 rx_ring->desc = NULL;
2498}
2499
2500/**
2501 * e1000_update_itr - update the dynamic ITR value based on statistics
2502 * @adapter: pointer to adapter
2503 * @itr_setting: current adapter->itr
2504 * @packets: the number of packets during this measurement interval
2505 * @bytes: the number of bytes during this measurement interval
2506 *
2507 * Stores a new ITR value based on packets and byte
2508 * counts during the last interrupt. The advantage of per interrupt
2509 * computation is faster updates and more accurate ITR for the current
2510 * traffic pattern. Constants in this function were computed
2511 * based on theoretical maximum wire speed and thresholds were set based
2512 * on testing data as well as attempting to minimize response time
2513 * while increasing bulk throughput. This functionality is controlled
2514 * by the InterruptThrottleRate module parameter.
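 * For example, while in low_latency an interval of 15000 bytes across
 * 12 packets (1250 bytes/packet, above the 1200 threshold) moves the
 * ring to bulk_latency on the next update.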
bc7f75fa 2515 **/
8bb62869 2516static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
bc7f75fa
AK
2517{
2518 unsigned int retval = itr_setting;
2519
2520 if (packets == 0)
5015e53a 2521 return itr_setting;
bc7f75fa
AK
2522
2523 switch (itr_setting) {
2524 case lowest_latency:
2525 /* handle TSO and jumbo frames */
362e20ca 2526 if (bytes / packets > 8000)
bc7f75fa 2527 retval = bulk_latency;
b1cdfead 2528 else if ((packets < 5) && (bytes > 512))
bc7f75fa 2529 retval = low_latency;
bc7f75fa 2530 break;
e80bd1d1 2531 case low_latency: /* 50 usec aka 20000 ints/s */
bc7f75fa
AK
2532 if (bytes > 10000) {
2533 /* this if handles the TSO accounting */
362e20ca 2534 if (bytes / packets > 8000)
bc7f75fa 2535 retval = bulk_latency;
362e20ca 2536 else if ((packets < 10) || ((bytes / packets) > 1200))
bc7f75fa 2537 retval = bulk_latency;
b1cdfead 2538 else if ((packets > 35))
bc7f75fa 2539 retval = lowest_latency;
362e20ca 2540 } else if (bytes / packets > 2000) {
bc7f75fa
AK
2541 retval = bulk_latency;
2542 } else if (packets <= 2 && bytes < 512) {
2543 retval = lowest_latency;
2544 }
2545 break;
e80bd1d1 2546 case bulk_latency: /* 250 usec aka 4000 ints/s */
bc7f75fa 2547 if (bytes > 25000) {
b1cdfead 2548 if (packets > 35)
bc7f75fa 2549 retval = low_latency;
bc7f75fa
AK
2550 } else if (bytes < 6000) {
2551 retval = low_latency;
2552 }
2553 break;
2554 }
2555
bc7f75fa
AK
2556 return retval;
2557}
2558
2559static void e1000_set_itr(struct e1000_adapter *adapter)
2560{
bc7f75fa
AK
2561 u16 current_itr;
2562 u32 new_itr = adapter->itr;
2563
2564 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2565 if (adapter->link_speed != SPEED_1000) {
2566 current_itr = 0;
2567 new_itr = 4000;
2568 goto set_itr_now;
2569 }
2570
828bac87
BA
2571 if (adapter->flags2 & FLAG2_DISABLE_AIM) {
2572 new_itr = 0;
2573 goto set_itr_now;
2574 }
2575
8bb62869
BA
2576 adapter->tx_itr = e1000_update_itr(adapter->tx_itr,
2577 adapter->total_tx_packets,
2578 adapter->total_tx_bytes);
bc7f75fa
AK
2579 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2580 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2581 adapter->tx_itr = low_latency;
2582
8bb62869
BA
2583 adapter->rx_itr = e1000_update_itr(adapter->rx_itr,
2584 adapter->total_rx_packets,
2585 adapter->total_rx_bytes);
bc7f75fa
AK
2586 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2587 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2588 adapter->rx_itr = low_latency;
2589
2590 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2591
bc7f75fa 2592 /* counts and packets in update_itr are dependent on these numbers */
33550cec 2593 switch (current_itr) {
bc7f75fa
AK
2594 case lowest_latency:
2595 new_itr = 70000;
2596 break;
2597 case low_latency:
e80bd1d1 2598 new_itr = 20000; /* aka hwitr = ~200 */
bc7f75fa
AK
2599 break;
2600 case bulk_latency:
2601 new_itr = 4000;
2602 break;
2603 default:
2604 break;
2605 }
2606
2607set_itr_now:
2608 if (new_itr != adapter->itr) {
e921eb1a 2609 /* this attempts to bias the interrupt rate towards Bulk
bc7f75fa 2610 * by adding intermediate steps when interrupt rate is
2611 * increasing
2612 */
bc7f75fa 2613 new_itr = new_itr > adapter->itr ?
f0ff4398 2614 min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
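 /* e.g. stepping up from 4000 to 20000 ints/s programs
  * min(4000 + 20000/4, 20000) = 9000 first and converges upward on
  * later updates.
  */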
bc7f75fa 2615 adapter->itr = new_itr;
4662e82b
BA
2616 adapter->rx_ring->itr_val = new_itr;
2617 if (adapter->msix_entries)
2618 adapter->rx_ring->set_itr = 1;
2619 else
e3d14b08 2620 e1000e_write_itr(adapter, new_itr);
bc7f75fa
AK
2621 }
2622}
2623
2624/**
2625 * e1000e_write_itr - write the ITR value to the appropriate registers
2626 * @adapter: address of board private structure
2627 * @itr: new ITR value to program
2628 *
2629 * e1000e_write_itr determines if the adapter is in MSI-X mode
2630 * and, if so, writes the EITR registers with the ITR value.
2631 * Otherwise, it writes the ITR value into the ITR register.
2632 **/
2633void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
2634{
2635 struct e1000_hw *hw = &adapter->hw;
2636 u32 new_itr = itr ? 1000000000 / (itr * 256) : 0;
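 /* itr is in interrupts/sec; the ITR/EITR interval field is programmed
  * in 256 ns units, so the value written is 10^9 / (itr * 256), e.g.
  * itr = 20000 ints/s gives an interval value of ~195.
  */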
2637
2638 if (adapter->msix_entries) {
2639 int vector;
2640
2641 for (vector = 0; vector < adapter->num_vectors; vector++)
2642 writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector));
2643 } else {
2644 ew32(ITR, new_itr);
2645 }
2646}
2647
4662e82b
BA
2648/**
2649 * e1000_alloc_queues - Allocate memory for all rings
2650 * @adapter: board private structure to initialize
2651 **/
9f9a12f8 2652static int e1000_alloc_queues(struct e1000_adapter *adapter)
4662e82b 2653{
55aa6985
BA
2654 int size = sizeof(struct e1000_ring);
2655
2656 adapter->tx_ring = kzalloc(size, GFP_KERNEL);
4662e82b
BA
2657 if (!adapter->tx_ring)
2658 goto err;
55aa6985
BA
2659 adapter->tx_ring->count = adapter->tx_ring_count;
2660 adapter->tx_ring->adapter = adapter;
4662e82b 2661
55aa6985 2662 adapter->rx_ring = kzalloc(size, GFP_KERNEL);
4662e82b
BA
2663 if (!adapter->rx_ring)
2664 goto err;
55aa6985
BA
2665 adapter->rx_ring->count = adapter->rx_ring_count;
2666 adapter->rx_ring->adapter = adapter;
4662e82b
BA
2667
2668 return 0;
2669err:
2670 e_err("Unable to allocate memory for queues\n");
2671 kfree(adapter->rx_ring);
2672 kfree(adapter->tx_ring);
2673 return -ENOMEM;
2674}
2675
bc7f75fa 2676/**
c58c8a78 2677 * e1000e_poll - NAPI Rx polling callback
ad68076e 2678 * @napi: struct associated with this polling callback
c58c8a78 2679 * @weight: number of packets driver is allowed to process this poll
bc7f75fa 2680 **/
c58c8a78 2681static int e1000e_poll(struct napi_struct *napi, int weight)
bc7f75fa 2682{
c58c8a78
BA
2683 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
2684 napi);
4662e82b 2685 struct e1000_hw *hw = &adapter->hw;
bc7f75fa 2686 struct net_device *poll_dev = adapter->netdev;
679e8a0f 2687 int tx_cleaned = 1, work_done = 0;
bc7f75fa 2688
4cf1653a 2689 adapter = netdev_priv(poll_dev);
bc7f75fa 2690
c58c8a78
BA
2691 if (!adapter->msix_entries ||
2692 (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2693 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
4662e82b 2694
c58c8a78 2695 adapter->clean_rx(adapter->rx_ring, &work_done, weight);
d2c7ddd6 2696
12d04a3c 2697 if (!tx_cleaned)
c58c8a78 2698 work_done = weight;
bc7f75fa 2699
c58c8a78
BA
2700 /* If weight not fully consumed, exit the polling mode */
2701 if (work_done < weight) {
bc7f75fa
AK
2702 if (adapter->itr_setting & 3)
2703 e1000_set_itr(adapter);
288379f0 2704 napi_complete(napi);
a3c69fef
JB
2705 if (!test_bit(__E1000_DOWN, &adapter->state)) {
2706 if (adapter->msix_entries)
2707 ew32(IMS, adapter->rx_ring->ims_val);
2708 else
2709 e1000_irq_enable(adapter);
2710 }
bc7f75fa
AK
2711 }
2712
2713 return work_done;
2714}
2715
80d5c368 2716static int e1000_vlan_rx_add_vid(struct net_device *netdev,
603cdca9 2717 __always_unused __be16 proto, u16 vid)
bc7f75fa
AK
2718{
2719 struct e1000_adapter *adapter = netdev_priv(netdev);
2720 struct e1000_hw *hw = &adapter->hw;
2721 u32 vfta, index;
2722
2723 /* don't update vlan cookie if already programmed */
2724 if ((adapter->hw.mng_cookie.status &
2725 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2726 (vid == adapter->mng_vlan_id))
8e586137 2727 return 0;
caaddaf8 2728
bc7f75fa 2729 /* add VID to filter table */
2730 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
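 /* VFTA is a 128 x 32-bit bitmap: VID bits 11:5 select the register
  * and bits 4:0 select the bit, e.g. VID 100 -> word 3, bit 4.
  */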
2731 index = (vid >> 5) & 0x7F;
2732 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2733 vfta |= (1 << (vid & 0x1F));
2734 hw->mac.ops.write_vfta(hw, index, vfta);
2735 }
86d70e53
JK
2736
2737 set_bit(vid, adapter->active_vlans);
8e586137
JP
2738
2739 return 0;
bc7f75fa
AK
2740}
2741
80d5c368 2742static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
603cdca9 2743 __always_unused __be16 proto, u16 vid)
bc7f75fa
AK
2744{
2745 struct e1000_adapter *adapter = netdev_priv(netdev);
2746 struct e1000_hw *hw = &adapter->hw;
2747 u32 vfta, index;
2748
bc7f75fa
AK
2749 if ((adapter->hw.mng_cookie.status &
2750 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2751 (vid == adapter->mng_vlan_id)) {
2752 /* release control to f/w */
31dbe5b4 2753 e1000e_release_hw_control(adapter);
8e586137 2754 return 0;
bc7f75fa
AK
2755 }
2756
2757 /* remove VID from filter table */
caaddaf8
BA
2758 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2759 index = (vid >> 5) & 0x7F;
2760 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2761 vfta &= ~(1 << (vid & 0x1F));
2762 hw->mac.ops.write_vfta(hw, index, vfta);
2763 }
86d70e53
JK
2764
2765 clear_bit(vid, adapter->active_vlans);
8e586137
JP
2766
2767 return 0;
bc7f75fa
AK
2768}
2769
86d70e53
JK
2770/**
2771 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
2772 * @adapter: board private structure to initialize
2773 **/
2774static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
bc7f75fa
AK
2775{
2776 struct net_device *netdev = adapter->netdev;
86d70e53
JK
2777 struct e1000_hw *hw = &adapter->hw;
2778 u32 rctl;
bc7f75fa 2779
86d70e53
JK
2780 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2781 /* disable VLAN receive filtering */
2782 rctl = er32(RCTL);
2783 rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
2784 ew32(RCTL, rctl);
2785
2786 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
80d5c368
PM
2787 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
2788 adapter->mng_vlan_id);
86d70e53 2789 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
bc7f75fa 2790 }
bc7f75fa
AK
2791 }
2792}
2793
86d70e53
JK
2794/**
2795 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
2796 * @adapter: board private structure to initialize
2797 **/
2798static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
2799{
2800 struct e1000_hw *hw = &adapter->hw;
2801 u32 rctl;
2802
2803 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2804 /* enable VLAN receive filtering */
2805 rctl = er32(RCTL);
2806 rctl |= E1000_RCTL_VFE;
2807 rctl &= ~E1000_RCTL_CFIEN;
2808 ew32(RCTL, rctl);
2809 }
2810}
bc7f75fa 2811
2812/**
 2813 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
2814 * @adapter: board private structure to initialize
2815 **/
2816static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
bc7f75fa 2817{
bc7f75fa 2818 struct e1000_hw *hw = &adapter->hw;
86d70e53 2819 u32 ctrl;
bc7f75fa 2820
86d70e53
JK
2821 /* disable VLAN tag insert/strip */
2822 ctrl = er32(CTRL);
2823 ctrl &= ~E1000_CTRL_VME;
2824 ew32(CTRL, ctrl);
2825}
bc7f75fa 2826
86d70e53
JK
2827/**
2828 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
2829 * @adapter: board private structure to initialize
2830 **/
2831static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
2832{
2833 struct e1000_hw *hw = &adapter->hw;
2834 u32 ctrl;
bc7f75fa 2835
86d70e53
JK
2836 /* enable VLAN tag insert/strip */
2837 ctrl = er32(CTRL);
2838 ctrl |= E1000_CTRL_VME;
2839 ew32(CTRL, ctrl);
2840}
bc7f75fa 2841
86d70e53
JK
2842static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2843{
2844 struct net_device *netdev = adapter->netdev;
2845 u16 vid = adapter->hw.mng_cookie.vlan_id;
2846 u16 old_vid = adapter->mng_vlan_id;
2847
e5fe2541 2848 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
80d5c368 2849 e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
86d70e53 2850 adapter->mng_vlan_id = vid;
bc7f75fa
AK
2851 }
2852
86d70e53 2853 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
80d5c368 2854 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid);
bc7f75fa
AK
2855}
2856
2857static void e1000_restore_vlan(struct e1000_adapter *adapter)
2858{
2859 u16 vid;
2860
80d5c368 2861 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
bc7f75fa 2862
86d70e53 2863 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
80d5c368 2864 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
bc7f75fa
AK
2865}
2866
cd791618 2867static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
bc7f75fa
AK
2868{
2869 struct e1000_hw *hw = &adapter->hw;
cd791618 2870 u32 manc, manc2h, mdef, i, j;
bc7f75fa
AK
2871
2872 if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2873 return;
2874
2875 manc = er32(MANC);
2876
e921eb1a 2877 /* enable receiving management packets to the host. this will probably
bc7f75fa 2878 * generate destination unreachable messages from the host OS, but
ad68076e
BA
2879 * the packets will be handled on SMBUS
2880 */
bc7f75fa
AK
2881 manc |= E1000_MANC_EN_MNG2HOST;
2882 manc2h = er32(MANC2H);
cd791618
BA
2883
2884 switch (hw->mac.type) {
2885 default:
2886 manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
2887 break;
2888 case e1000_82574:
2889 case e1000_82583:
e921eb1a 2890 /* Check if IPMI pass-through decision filter already exists;
cd791618
BA
2891 * if so, enable it.
2892 */
2893 for (i = 0, j = 0; i < 8; i++) {
2894 mdef = er32(MDEF(i));
2895
2896 /* Ignore filters with anything other than IPMI ports */
3b21b508 2897 if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
cd791618
BA
2898 continue;
2899
2900 /* Enable this decision filter in MANC2H */
2901 if (mdef)
2902 manc2h |= (1 << i);
2903
2904 j |= mdef;
2905 }
2906
2907 if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2908 break;
2909
2910 /* Create new decision filter in an empty filter */
2911 for (i = 0, j = 0; i < 8; i++)
2912 if (er32(MDEF(i)) == 0) {
2913 ew32(MDEF(i), (E1000_MDEF_PORT_623 |
2914 E1000_MDEF_PORT_664));
2915 manc2h |= (1 << 1);
2916 j++;
2917 break;
2918 }
2919
2920 if (!j)
2921 e_warn("Unable to create IPMI pass-through filter\n");
2922 break;
2923 }
2924
bc7f75fa
AK
2925 ew32(MANC2H, manc2h);
2926 ew32(MANC, manc);
2927}
2928
2929/**
af667a29 2930 * e1000_configure_tx - Configure Transmit Unit after Reset
bc7f75fa
AK
2931 * @adapter: board private structure
2932 *
2933 * Configure the Tx unit of the MAC after a reset.
2934 **/
2935static void e1000_configure_tx(struct e1000_adapter *adapter)
2936{
2937 struct e1000_hw *hw = &adapter->hw;
2938 struct e1000_ring *tx_ring = adapter->tx_ring;
2939 u64 tdba;
e7e834aa 2940 u32 tdlen, tctl, tarc;
bc7f75fa
AK
2941
2942 /* Setup the HW Tx Head and Tail descriptor pointers */
2943 tdba = tx_ring->dma;
2944 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
1e36052e
BA
2945 ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
2946 ew32(TDBAH(0), (tdba >> 32));
2947 ew32(TDLEN(0), tdlen);
2948 ew32(TDH(0), 0);
2949 ew32(TDT(0), 0);
2950 tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
2951 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
bc7f75fa 2952
bc7f75fa
AK
2953 /* Set the Tx Interrupt Delay register */
2954 ew32(TIDV, adapter->tx_int_delay);
ad68076e 2955 /* Tx irq moderation */
bc7f75fa
AK
2956 ew32(TADV, adapter->tx_abs_int_delay);
2957
3a3b7586
JB
2958 if (adapter->flags2 & FLAG2_DMA_BURST) {
2959 u32 txdctl = er32(TXDCTL(0));
6cf08d1c 2960
3a3b7586
JB
2961 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
2962 E1000_TXDCTL_WTHRESH);
e921eb1a 2963 /* set up some performance related parameters to encourage the
3a3b7586
JB
2964 * hardware to use the bus more efficiently in bursts, depends
2965 * on the tx_int_delay to be enabled,
8edc0e62 2966 * wthresh = 1 ==> burst write is disabled to avoid Tx stalls
3a3b7586
JB
2967 * hthresh = 1 ==> prefetch when one or more available
2968 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
2969 * BEWARE: this seems to work but should be considered first if
af667a29 2970 * there are Tx hangs or other Tx related bugs
3a3b7586
JB
2971 */
2972 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
2973 ew32(TXDCTL(0), txdctl);
3a3b7586 2974 }
56032be7
BA
2975 /* erratum work around: set txdctl the same for both queues */
2976 ew32(TXDCTL(1), er32(TXDCTL(0)));
3a3b7586 2977
e7e834aa
DE
2978 /* Program the Transmit Control Register */
2979 tctl = er32(TCTL);
2980 tctl &= ~E1000_TCTL_CT;
2981 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2982 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2983
bc7f75fa 2984 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
e9ec2c0f 2985 tarc = er32(TARC(0));
e921eb1a 2986 /* set the speed mode bit, we'll clear it if we're not at
ad68076e
BA
2987 * gigabit link later
2988 */
bc7f75fa
AK
2989#define SPEED_MODE_BIT (1 << 21)
2990 tarc |= SPEED_MODE_BIT;
e9ec2c0f 2991 ew32(TARC(0), tarc);
bc7f75fa
AK
2992 }
2993
2994 /* errata: program both queues to unweighted RR */
2995 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
e9ec2c0f 2996 tarc = er32(TARC(0));
bc7f75fa 2997 tarc |= 1;
e9ec2c0f
JK
2998 ew32(TARC(0), tarc);
2999 tarc = er32(TARC(1));
bc7f75fa 3000 tarc |= 1;
e9ec2c0f 3001 ew32(TARC(1), tarc);
bc7f75fa
AK
3002 }
3003
bc7f75fa
AK
3004 /* Setup Transmit Descriptor Settings for eop descriptor */
3005 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
3006
3007 /* only set IDE if we are delaying interrupts using the timers */
3008 if (adapter->tx_int_delay)
3009 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3010
3011 /* enable Report Status bit */
3012 adapter->txd_cmd |= E1000_TXD_CMD_RS;
3013
e7e834aa
DE
3014 ew32(TCTL, tctl);
3015
57cde763 3016 hw->mac.ops.config_collision_dist(hw);
bc7f75fa
AK
3017}
3018
3019/**
3020 * e1000_setup_rctl - configure the receive control registers
3021 * @adapter: Board private structure
3022 **/
3023#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
3024 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
3025static void e1000_setup_rctl(struct e1000_adapter *adapter)
3026{
3027 struct e1000_hw *hw = &adapter->hw;
3028 u32 rctl, rfctl;
bc7f75fa
AK
3029 u32 pages = 0;
3030
b20a7744
DE
3031 /* Workaround Si errata on PCHx - configure jumbo frame flow.
3032 * If jumbo frames not set, program related MAC/PHY registers
3033 * to h/w defaults
3034 */
3035 if (hw->mac.type >= e1000_pch2lan) {
3036 s32 ret_val;
3037
3038 if (adapter->netdev->mtu > ETH_DATA_LEN)
3039 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
3040 else
3041 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
3042
3043 if (ret_val)
3044 e_dbg("failed to enable|disable jumbo frame workaround mode\n");
3045 }
a1ce6473 3046
bc7f75fa
AK
3047 /* Program MC offset vector base */
3048 rctl = er32(RCTL);
3049 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3050 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
f0ff4398
BA
3051 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
3052 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
bc7f75fa
AK
3053
3054 /* Do not Store bad packets */
3055 rctl &= ~E1000_RCTL_SBP;
3056
3057 /* Enable Long Packet receive */
3058 if (adapter->netdev->mtu <= ETH_DATA_LEN)
3059 rctl &= ~E1000_RCTL_LPE;
3060 else
3061 rctl |= E1000_RCTL_LPE;
3062
eb7c3adb
JK
3063 /* Some systems expect that the CRC is included in SMBUS traffic. The
3064 * hardware strips the CRC before sending to both SMBUS (BMC) and to
3065 * host memory when this is enabled
3066 */
3067 if (adapter->flags2 & FLAG2_CRC_STRIPPING)
3068 rctl |= E1000_RCTL_SECRC;
5918bd88 3069
a4f58f54
BA
3070 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
3071 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
3072 u16 phy_data;
3073
3074 e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
3075 phy_data &= 0xfff8;
3076 phy_data |= (1 << 2);
3077 e1e_wphy(hw, PHY_REG(770, 26), phy_data);
3078
3079 e1e_rphy(hw, 22, &phy_data);
3080 phy_data &= 0x0fff;
3081 phy_data |= (1 << 14);
3082 e1e_wphy(hw, 0x10, 0x2823);
3083 e1e_wphy(hw, 0x11, 0x0003);
3084 e1e_wphy(hw, 22, phy_data);
3085 }
3086
bc7f75fa
AK
3087 /* Setup buffer sizes */
3088 rctl &= ~E1000_RCTL_SZ_4096;
3089 rctl |= E1000_RCTL_BSEX;
3090 switch (adapter->rx_buffer_len) {
bc7f75fa
AK
3091 case 2048:
3092 default:
3093 rctl |= E1000_RCTL_SZ_2048;
3094 rctl &= ~E1000_RCTL_BSEX;
3095 break;
3096 case 4096:
3097 rctl |= E1000_RCTL_SZ_4096;
3098 break;
3099 case 8192:
3100 rctl |= E1000_RCTL_SZ_8192;
3101 break;
3102 case 16384:
3103 rctl |= E1000_RCTL_SZ_16384;
3104 break;
3105 }
3106
5f450212
BA
3107 /* Enable Extended Status in all Receive Descriptors */
3108 rfctl = er32(RFCTL);
3109 rfctl |= E1000_RFCTL_EXTEN;
f6bd5577 3110 ew32(RFCTL, rfctl);
5f450212 3111
e921eb1a 3112 /* 82571 and greater support packet-split where the protocol
3113 * header is placed in skb->data and the packet data is
3114 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
3115 * In the case of a non-split, skb->data is linearly filled,
3116 * followed by the page buffers. Therefore, skb->data is
3117 * sized to hold the largest protocol header.
3118 *
3119 * allocations using alloc_page take too long for regular MTU
3120 * so only enable packet split for jumbo frames
3121 *
3122 * Using pages when the page size is greater than 16k wastes
3123 * a lot of memory, since we allocate 3 pages at all times
3124 * per packet.
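 * For example, with 4 KiB pages a 9000-byte jumbo MTU needs
 * PAGE_USE_COUNT(9000) = 3 pages, so packet split is enabled below,
 * while a standard 1500-byte MTU leaves rx_ps_pages at 0.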
3125 */
bc7f75fa 3126 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
79d4e908 3127 if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
bc7f75fa 3128 adapter->rx_ps_pages = pages;
3129 else
3130 adapter->rx_ps_pages = 0;
bc7f75fa
AK
3131
3132 if (adapter->rx_ps_pages) {
90da0669
BA
3133 u32 psrctl = 0;
3134
140a7480
AK
3135 /* Enable Packet split descriptors */
3136 rctl |= E1000_RCTL_DTYP_PS;
bc7f75fa 3137
e5fe2541 3138 psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT;
bc7f75fa
AK
3139
3140 switch (adapter->rx_ps_pages) {
3141 case 3:
e5fe2541
BA
3142 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
3143 /* fall-through */
bc7f75fa 3144 case 2:
e5fe2541
BA
3145 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
3146 /* fall-through */
bc7f75fa 3147 case 1:
e5fe2541 3148 psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
bc7f75fa
AK
3149 break;
3150 }
3151
3152 ew32(PSRCTL, psrctl);
3153 }
3154
cf955e6c
BG
3155 /* This is useful for sniffing bad packets. */
3156 if (adapter->netdev->features & NETIF_F_RXALL) {
3157 /* UPE and MPE will be handled by normal PROMISC logic
e921eb1a
BA
3158 * in e1000e_set_rx_mode
3159 */
e80bd1d1
BA
3160 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3161 E1000_RCTL_BAM | /* RX All Bcast Pkts */
3162 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
cf955e6c 3163
e80bd1d1
BA
3164 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
3165 E1000_RCTL_DPF | /* Allow filtered pause */
3166 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
cf955e6c
BG
3167 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
3168 * and that breaks VLANs.
3169 */
3170 }
3171
bc7f75fa 3172 ew32(RCTL, rctl);
318a94d6 3173 /* just started the receive unit, no need to restart */
12d43f7d 3174 adapter->flags &= ~FLAG_RESTART_NOW;
bc7f75fa
AK
3175}
3176
3177/**
3178 * e1000_configure_rx - Configure Receive Unit after Reset
3179 * @adapter: board private structure
3180 *
3181 * Configure the Rx unit of the MAC after a reset.
3182 **/
3183static void e1000_configure_rx(struct e1000_adapter *adapter)
3184{
3185 struct e1000_hw *hw = &adapter->hw;
3186 struct e1000_ring *rx_ring = adapter->rx_ring;
3187 u64 rdba;
3188 u32 rdlen, rctl, rxcsum, ctrl_ext;
3189
3190 if (adapter->rx_ps_pages) {
3191 /* this is a 32 byte descriptor */
3192 rdlen = rx_ring->count *
af667a29 3193 sizeof(union e1000_rx_desc_packet_split);
bc7f75fa
AK
3194 adapter->clean_rx = e1000_clean_rx_irq_ps;
3195 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
97ac8cae 3196 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
5f450212 3197 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
97ac8cae
BA
3198 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
3199 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
bc7f75fa 3200 } else {
5f450212 3201 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
bc7f75fa
AK
3202 adapter->clean_rx = e1000_clean_rx_irq;
3203 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
3204 }
3205
3206 /* disable receives while setting up the descriptors */
3207 rctl = er32(RCTL);
7f99ae63
BA
3208 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3209 ew32(RCTL, rctl & ~E1000_RCTL_EN);
bc7f75fa 3210 e1e_flush();
1bba4386 3211 usleep_range(10000, 20000);
bc7f75fa 3212
3a3b7586 3213 if (adapter->flags2 & FLAG2_DMA_BURST) {
e921eb1a 3214 /* set the writeback threshold (only takes effect if the RDTR
3a3b7586 3215 * is set). set GRAN=1 and write back up to 0x4 worth, and
af667a29 3216 * enable prefetching of 0x20 Rx descriptors
3a3b7586
JB
3217 * granularity = 01
3218 * wthresh = 04,
3219 * hthresh = 04,
3220 * pthresh = 0x20
3221 */
3222 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
3223 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
3224
e921eb1a 3225 /* override the delay timers for enabling bursting, only if
3a3b7586
JB
3226 * the value was not set by the user via module options
3227 */
3228 if (adapter->rx_int_delay == DEFAULT_RDTR)
3229 adapter->rx_int_delay = BURST_RDTR;
3230 if (adapter->rx_abs_int_delay == DEFAULT_RADV)
3231 adapter->rx_abs_int_delay = BURST_RADV;
3232 }
3233
bc7f75fa
AK
3234 /* set the Receive Delay Timer Register */
3235 ew32(RDTR, adapter->rx_int_delay);
3236
3237 /* irq moderation */
3238 ew32(RADV, adapter->rx_abs_int_delay);
828bac87 3239 if ((adapter->itr_setting != 0) && (adapter->itr != 0))
22a4cca2 3240 e1000e_write_itr(adapter, adapter->itr);
bc7f75fa
AK
3241
3242 ctrl_ext = er32(CTRL_EXT);
bc7f75fa
AK
3243 /* Auto-Mask interrupts upon ICR access */
3244 ctrl_ext |= E1000_CTRL_EXT_IAME;
3245 ew32(IAM, 0xffffffff);
3246 ew32(CTRL_EXT, ctrl_ext);
3247 e1e_flush();
3248
e921eb1a 3249 /* Setup the HW Rx Head and Tail Descriptor Pointers and
ad68076e
BA
3250 * the Base and Length of the Rx Descriptor Ring
3251 */
bc7f75fa 3252 rdba = rx_ring->dma;
1e36052e
BA
3253 ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
3254 ew32(RDBAH(0), (rdba >> 32));
3255 ew32(RDLEN(0), rdlen);
3256 ew32(RDH(0), 0);
3257 ew32(RDT(0), 0);
3258 rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
3259 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
bc7f75fa
AK
3260
3261 /* Enable Receive Checksum Offload for TCP and UDP */
3262 rxcsum = er32(RXCSUM);
2e1706f2 3263 if (adapter->netdev->features & NETIF_F_RXCSUM)
bc7f75fa 3264 rxcsum |= E1000_RXCSUM_TUOFL;
2e1706f2 3265 else
bc7f75fa 3266 rxcsum &= ~E1000_RXCSUM_TUOFL;
bc7f75fa
AK
3267 ew32(RXCSUM, rxcsum);
3268
3e35d991
BA
3269 /* With jumbo frames, excessive C-state transition latencies result
3270 * in dropped transactions.
3271 */
3272 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3273 u32 lat =
3274 ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 -
3275 adapter->max_frame_size) * 8 / 1000;
3276
3277 if (adapter->flags & FLAG_IS_ICH) {
53ec5498 3278 u32 rxdctl = er32(RXDCTL(0));
6cf08d1c 3279
53ec5498 3280 ew32(RXDCTL(0), rxdctl | 0x3);
53ec5498 3281 }
3e35d991
BA
3282
3283 pm_qos_update_request(&adapter->netdev->pm_qos_req, lat);
3284 } else {
3285 pm_qos_update_request(&adapter->netdev->pm_qos_req,
3286 PM_QOS_DEFAULT_VALUE);
97ac8cae 3287 }
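	/* Worked example (illustrative numbers only, not from this driver or
	 * any datasheet): the lat value computed above is the time, in
	 * microseconds at 1 Gb/s (1 bit/ns), to fill the Rx packet-buffer
	 * headroom.  With a 20 KB Rx allocation and a 9018-byte jumbo frame:
	 *   lat = (20 * 1024 - 9018) * 8 / 1000 = ~91 us
	 * and that bound is what gets handed to pm_qos_update_request() above.
	 */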
bc7f75fa
AK
3288
3289 /* Enable Receives */
3290 ew32(RCTL, rctl);
3291}
3292
3293/**
ef9b965a
JB
3294 * e1000e_write_mc_addr_list - write multicast addresses to MTA
3295 * @netdev: network interface device structure
bc7f75fa 3296 *
ef9b965a
JB
3297 * Writes multicast address list to the MTA hash table.
3298 * Returns: -ENOMEM on failure
3299 * 0 on no addresses written
3300 * X on writing X addresses to MTA
3301 */
3302static int e1000e_write_mc_addr_list(struct net_device *netdev)
3303{
3304 struct e1000_adapter *adapter = netdev_priv(netdev);
3305 struct e1000_hw *hw = &adapter->hw;
3306 struct netdev_hw_addr *ha;
3307 u8 *mta_list;
3308 int i;
3309
3310 if (netdev_mc_empty(netdev)) {
3311 /* nothing to program, so clear mc list */
3312 hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
3313 return 0;
3314 }
3315
3316 mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
3317 if (!mta_list)
3318 return -ENOMEM;
3319
3320 /* update_mc_addr_list expects a packed array of only addresses. */
3321 i = 0;
3322 netdev_for_each_mc_addr(ha, netdev)
f0ff4398 3323 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
ef9b965a
JB
3324
3325 hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
3326 kfree(mta_list);
3327
3328 return netdev_mc_count(netdev);
3329}
3330
3331/**
3332 * e1000e_write_uc_addr_list - write unicast addresses to RAR table
3333 * @netdev: network interface device structure
bc7f75fa 3334 *
ef9b965a
JB
3335 * Writes unicast address list to the RAR table.
3336 * Returns: -ENOMEM on failure/insufficient address space
3337 * 0 on no addresses written
3338 * X on writing X addresses to the RAR table
bc7f75fa 3339 **/
ef9b965a 3340static int e1000e_write_uc_addr_list(struct net_device *netdev)
bc7f75fa 3341{
ef9b965a
JB
3342 struct e1000_adapter *adapter = netdev_priv(netdev);
3343 struct e1000_hw *hw = &adapter->hw;
b3e5bf1f 3344 unsigned int rar_entries;
ef9b965a
JB
3345 int count = 0;
3346
b3e5bf1f
DE
3347 rar_entries = hw->mac.ops.rar_get_count(hw);
3348
ef9b965a
JB
3349 /* save a rar entry for our hardware address */
3350 rar_entries--;
3351
3352 /* save a rar entry for the LAA workaround */
3353 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
3354 rar_entries--;
3355
3356 /* return ENOMEM indicating insufficient memory for addresses */
3357 if (netdev_uc_count(netdev) > rar_entries)
3358 return -ENOMEM;
3359
3360 if (!netdev_uc_empty(netdev) && rar_entries) {
3361 struct netdev_hw_addr *ha;
3362
e921eb1a 3363 /* write the addresses in reverse order to avoid write
ef9b965a
JB
3364 * combining
3365 */
3366 netdev_for_each_uc_addr(ha, netdev) {
b3e5bf1f
DE
3367 int rval;
3368
ef9b965a
JB
3369 if (!rar_entries)
3370 break;
b3e5bf1f
DE
3371 rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
3372 if (rval < 0)
3373 return -ENOMEM;
ef9b965a
JB
3374 count++;
3375 }
3376 }
3377
3378 /* zero out the remaining RAR entries not used above */
3379 for (; rar_entries > 0; rar_entries--) {
3380 ew32(RAH(rar_entries), 0);
3381 ew32(RAL(rar_entries), 0);
3382 }
3383 e1e_flush();
3384
3385 return count;
bc7f75fa
AK
3386}
3387
3388/**
ef9b965a 3389 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
bc7f75fa
AK
3390 * @netdev: network interface device structure
3391 *
ef9b965a
JB
3392 * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
3393 * address list or the network interface flags are updated. This routine is
3394 * responsible for configuring the hardware for proper unicast, multicast,
bc7f75fa
AK
3395 * promiscuous mode, and all-multi behavior.
3396 **/
ef9b965a 3397static void e1000e_set_rx_mode(struct net_device *netdev)
bc7f75fa
AK
3398{
3399 struct e1000_adapter *adapter = netdev_priv(netdev);
3400 struct e1000_hw *hw = &adapter->hw;
bc7f75fa 3401 u32 rctl;
bc7f75fa 3402
63eb48f1
DE
3403 if (pm_runtime_suspended(netdev->dev.parent))
3404 return;
3405
bc7f75fa 3406 /* Check for Promiscuous and All Multicast modes */
bc7f75fa
AK
3407 rctl = er32(RCTL);
3408
ef9b965a
JB
3409 /* clear the affected bits */
3410 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
3411
bc7f75fa
AK
3412 if (netdev->flags & IFF_PROMISC) {
3413 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
86d70e53
JK
3414 /* Do not hardware filter VLANs in promisc mode */
3415 e1000e_vlan_filter_disable(adapter);
bc7f75fa 3416 } else {
ef9b965a 3417 int count;
3d3a1676 3418
746b9f02
PM
3419 if (netdev->flags & IFF_ALLMULTI) {
3420 rctl |= E1000_RCTL_MPE;
746b9f02 3421 } else {
e921eb1a 3422 /* Write addresses to the MTA, if the attempt fails
ef9b965a
JB
3423 * then we should just turn on promiscuous mode so
3424 * that we can at least receive multicast traffic
3425 */
3426 count = e1000e_write_mc_addr_list(netdev);
3427 if (count < 0)
3428 rctl |= E1000_RCTL_MPE;
746b9f02 3429 }
86d70e53 3430 e1000e_vlan_filter_enable(adapter);
e921eb1a 3431 /* Write addresses to available RAR registers, if there is not
ef9b965a
JB
3432 * sufficient space to store all the addresses then enable
3433 * unicast promiscuous mode
bc7f75fa 3434 */
ef9b965a
JB
3435 count = e1000e_write_uc_addr_list(netdev);
3436 if (count < 0)
3437 rctl |= E1000_RCTL_UPE;
bc7f75fa 3438 }
86d70e53 3439
ef9b965a
JB
3440 ew32(RCTL, rctl);
3441
f646968f 3442 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
86d70e53
JK
3443 e1000e_vlan_strip_enable(adapter);
3444 else
3445 e1000e_vlan_strip_disable(adapter);
bc7f75fa
AK
3446}
3447
70495a50
BA
3448static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
3449{
3450 struct e1000_hw *hw = &adapter->hw;
3451 u32 mrqc, rxcsum;
5c8d19da 3452 u32 rss_key[10];
70495a50 3453 int i;
70495a50 3454
5c8d19da 3455 netdev_rss_key_fill(rss_key, sizeof(rss_key));
70495a50 3456 for (i = 0; i < 10; i++)
5c8d19da 3457 ew32(RSSRK(i), rss_key[i]);
70495a50
BA
3458
3459 /* Direct all traffic to queue 0 */
3460 for (i = 0; i < 32; i++)
3461 ew32(RETA(i), 0);
3462
e921eb1a 3463 /* Disable raw packet checksumming so that RSS hash is placed in
70495a50
BA
3464 * descriptor on writeback.
3465 */
3466 rxcsum = er32(RXCSUM);
3467 rxcsum |= E1000_RXCSUM_PCSD;
3468
3469 ew32(RXCSUM, rxcsum);
3470
3471 mrqc = (E1000_MRQC_RSS_FIELD_IPV4 |
3472 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3473 E1000_MRQC_RSS_FIELD_IPV6 |
3474 E1000_MRQC_RSS_FIELD_IPV6_TCP |
3475 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
3476
3477 ew32(MRQC, mrqc);
3478}
3479
b67e1913
BA
3480/**
3481 * e1000e_get_base_timinca - get default SYSTIM time increment attributes
3482 * @adapter: board private structure
3483 * @timinca: pointer to returned time increment attributes
3484 *
3485 * Get attributes for incrementing the System Time Register SYSTIML/H at
3486 * the default base frequency, and set the cyclecounter shift value.
3487 **/
d89777bf 3488s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
b67e1913
BA
3489{
3490 struct e1000_hw *hw = &adapter->hw;
3491 u32 incvalue, incperiod, shift;
3492
3493 /* Make sure clock is enabled on I217 before checking the frequency */
3494 if ((hw->mac.type == e1000_pch_lpt) &&
3495 !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) &&
3496 !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
3497 u32 fextnvm7 = er32(FEXTNVM7);
3498
3499 if (!(fextnvm7 & (1 << 0))) {
3500 ew32(FEXTNVM7, fextnvm7 | (1 << 0));
3501 e1e_flush();
3502 }
3503 }
3504
3505 switch (hw->mac.type) {
3506 case e1000_pch2lan:
3507 case e1000_pch_lpt:
3508 /* On I217, the clock frequency is 25MHz or 96MHz as
3509 * indicated by the System Clock Frequency Indication
3510 */
3511 if ((hw->mac.type != e1000_pch_lpt) ||
3512 (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
3513 /* Stable 96MHz frequency */
3514 incperiod = INCPERIOD_96MHz;
3515 incvalue = INCVALUE_96MHz;
3516 shift = INCVALUE_SHIFT_96MHz;
3517 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
3518 break;
3519 }
3520 /* fall-through */
3521 case e1000_82574:
3522 case e1000_82583:
3523 /* Stable 25MHz frequency */
3524 incperiod = INCPERIOD_25MHz;
3525 incvalue = INCVALUE_25MHz;
3526 shift = INCVALUE_SHIFT_25MHz;
3527 adapter->cc.shift = shift;
3528 break;
3529 default:
3530 return -EINVAL;
3531 }
3532
3533 *timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) |
3534 ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK));
3535
3536 return 0;
3537}
3538
3539/**
3540 * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
3541 * @adapter: board private structure
3542 *
3543 * Outgoing time stamping can be enabled and disabled. Play nice and
3544 * disable it when requested, although it shouldn't cause any overhead
3545 * when no packet needs it. At most one packet in the queue may be
3546 * marked for time stamping, otherwise it would be impossible to tell
3547 * for sure to which packet the hardware time stamp belongs.
3548 *
3549 * Incoming time stamping has to be configured via the hardware filters.
3550 * Not all combinations are supported, in particular event type has to be
3551 * specified. Matching the kind of event packet is not supported, with the
3552 * exception of "all V2 events regardless of level 2 or 4".
3553 **/
62d7e3a2
BH
3554static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
3555 struct hwtstamp_config *config)
b67e1913
BA
3556{
3557 struct e1000_hw *hw = &adapter->hw;
b67e1913
BA
3558 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
3559 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
d89777bf
BA
3560 u32 rxmtrl = 0;
3561 u16 rxudp = 0;
3562 bool is_l4 = false;
3563 bool is_l2 = false;
b67e1913
BA
3564 u32 regval;
3565 s32 ret_val;
3566
3567 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
3568 return -EINVAL;
3569
3570 /* flags reserved for future extensions - must be zero */
3571 if (config->flags)
3572 return -EINVAL;
3573
3574 switch (config->tx_type) {
3575 case HWTSTAMP_TX_OFF:
3576 tsync_tx_ctl = 0;
3577 break;
3578 case HWTSTAMP_TX_ON:
3579 break;
3580 default:
3581 return -ERANGE;
3582 }
3583
3584 switch (config->rx_filter) {
3585 case HWTSTAMP_FILTER_NONE:
3586 tsync_rx_ctl = 0;
3587 break;
d89777bf
BA
3588 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3589 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
3590 rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE;
3591 is_l4 = true;
3592 break;
3593 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3594 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
3595 rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE;
3596 is_l4 = true;
3597 break;
3598 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3599 /* Also time stamps V2 L2 Path Delay Request/Response */
3600 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
3601 rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
3602 is_l2 = true;
3603 break;
3604 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3605 /* Also time stamps V2 L2 Path Delay Request/Response. */
3606 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
3607 rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
3608 is_l2 = true;
3609 break;
3610 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3611 /* Hardware cannot filter just V2 L4 Sync messages;
3612 * fall-through to V2 (both L2 and L4) Sync.
3613 */
3614 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3615 /* Also time stamps V2 Path Delay Request/Response. */
3616 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
3617 rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
3618 is_l2 = true;
3619 is_l4 = true;
3620 break;
3621 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3622 /* Hardware cannot filter just V2 L4 Delay Request messages;
3623 * fall-through to V2 (both L2 and L4) Delay Request.
3624 */
3625 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3626 /* Also time stamps V2 Path Delay Request/Response. */
3627 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
3628 rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
3629 is_l2 = true;
3630 is_l4 = true;
3631 break;
3632 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3633 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3634 /* Hardware cannot filter just V2 L4 or L2 Event messages;
3635 * fall-through to all V2 (both L2 and L4) Events.
3636 */
3637 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3638 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
3639 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
3640 is_l2 = true;
3641 is_l4 = true;
3642 break;
3643 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3644 /* For V1, the hardware can only filter Sync messages or
3645 * Delay Request messages but not both so fall-through to
3646 * time stamp all packets.
3647 */
b67e1913 3648 case HWTSTAMP_FILTER_ALL:
d89777bf
BA
3649 is_l2 = true;
3650 is_l4 = true;
b67e1913
BA
3651 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
3652 config->rx_filter = HWTSTAMP_FILTER_ALL;
3653 break;
3654 default:
3655 return -ERANGE;
3656 }
3657
62d7e3a2
BH
3658 adapter->hwtstamp_config = *config;
3659
b67e1913
BA
3660 /* enable/disable Tx h/w time stamping */
3661 regval = er32(TSYNCTXCTL);
3662 regval &= ~E1000_TSYNCTXCTL_ENABLED;
3663 regval |= tsync_tx_ctl;
3664 ew32(TSYNCTXCTL, regval);
3665 if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) !=
3666 (regval & E1000_TSYNCTXCTL_ENABLED)) {
3667 e_err("Timesync Tx Control register not set as expected\n");
3668 return -EAGAIN;
3669 }
3670
3671 /* enable/disable Rx h/w time stamping */
3672 regval = er32(TSYNCRXCTL);
3673 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
3674 regval |= tsync_rx_ctl;
3675 ew32(TSYNCRXCTL, regval);
3676 if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED |
3677 E1000_TSYNCRXCTL_TYPE_MASK)) !=
3678 (regval & (E1000_TSYNCRXCTL_ENABLED |
3679 E1000_TSYNCRXCTL_TYPE_MASK))) {
3680 e_err("Timesync Rx Control register not set as expected\n");
3681 return -EAGAIN;
3682 }
3683
d89777bf
BA
3684 /* L2: define ethertype filter for time stamped packets */
3685 if (is_l2)
3686 rxmtrl |= ETH_P_1588;
3687
3688 /* define which PTP packets get time stamped */
3689 ew32(RXMTRL, rxmtrl);
3690
3691 /* Filter by destination port */
3692 if (is_l4) {
3693 rxudp = PTP_EV_PORT;
3694 cpu_to_be16s(&rxudp);
3695 }
3696 ew32(RXUDP, rxudp);
3697
3698 e1e_flush();
3699
b67e1913 3700 /* Clear TSYNCRXCTL_VALID & TSYNCTXCTL_VALID bit */
70806a7f
BA
3701 er32(RXSTMPH);
3702 er32(TXSTMPH);
b67e1913
BA
3703
3704 /* Get and set the System Time Register SYSTIM base frequency */
3705 ret_val = e1000e_get_base_timinca(adapter, &regval);
3706 if (ret_val)
3707 return ret_val;
3708 ew32(TIMINCA, regval);
3709
3710 /* reset the ns time counter */
3711 timecounter_init(&adapter->tc, &adapter->cc,
3712 ktime_to_ns(ktime_get_real()));
3713
3714 return 0;
3715}
3716
bc7f75fa 3717/**
ad68076e 3718 * e1000_configure - configure the hardware for Rx and Tx
bc7f75fa
AK
3719 * @adapter: private board structure
3720 **/
3721static void e1000_configure(struct e1000_adapter *adapter)
3722{
55aa6985
BA
3723 struct e1000_ring *rx_ring = adapter->rx_ring;
3724
ef9b965a 3725 e1000e_set_rx_mode(adapter->netdev);
bc7f75fa
AK
3726
3727 e1000_restore_vlan(adapter);
cd791618 3728 e1000_init_manageability_pt(adapter);
bc7f75fa
AK
3729
3730 e1000_configure_tx(adapter);
70495a50
BA
3731
3732 if (adapter->netdev->features & NETIF_F_RXHASH)
3733 e1000e_setup_rss_hash(adapter);
bc7f75fa
AK
3734 e1000_setup_rctl(adapter);
3735 e1000_configure_rx(adapter);
55aa6985 3736 adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL);
bc7f75fa
AK
3737}
3738
3739/**
3740 * e1000e_power_up_phy - restore link in case the phy was powered down
3741 * @adapter: address of board private structure
3742 *
3743 * The phy may be powered down to save power and turn off link when the
3744 * driver is unloaded and wake on lan is not enabled (among others)
3745 * *** this routine MUST be followed by a call to e1000e_reset ***
3746 **/
3747void e1000e_power_up_phy(struct e1000_adapter *adapter)
3748{
17f208de
BA
3749 if (adapter->hw.phy.ops.power_up)
3750 adapter->hw.phy.ops.power_up(&adapter->hw);
bc7f75fa
AK
3751
3752 adapter->hw.mac.ops.setup_link(&adapter->hw);
3753}
3754
3755/**
3756 * e1000_power_down_phy - Power down the PHY
3757 *
17f208de
BA
3758 * Power down the PHY so no link is implied when interface is down.
3759 * The PHY cannot be powered down if management or WoL is active.
bc7f75fa
AK
3760 */
3761static void e1000_power_down_phy(struct e1000_adapter *adapter)
3762{
17f208de
BA
3763 if (adapter->hw.phy.ops.power_down)
3764 adapter->hw.phy.ops.power_down(&adapter->hw);
bc7f75fa
AK
3765}
3766
3767/**
3768 * e1000e_reset - bring the hardware into a known good state
3769 *
3770 * This function boots the hardware and enables some settings that
3771 * require a configuration cycle of the hardware - those cannot be
3772 * set/changed during runtime. After reset the device needs to be
ad68076e 3773 * properly configured for Rx, Tx etc.
bc7f75fa
AK
3774 */
3775void e1000e_reset(struct e1000_adapter *adapter)
3776{
3777 struct e1000_mac_info *mac = &adapter->hw.mac;
318a94d6 3778 struct e1000_fc_info *fc = &adapter->hw.fc;
bc7f75fa
AK
3779 struct e1000_hw *hw = &adapter->hw;
3780 u32 tx_space, min_tx_space, min_rx_space;
318a94d6 3781 u32 pba = adapter->pba;
bc7f75fa
AK
3782 u16 hwm;
3783
ad68076e 3784 /* reset Packet Buffer Allocation to default */
318a94d6 3785 ew32(PBA, pba);
df762464 3786
318a94d6 3787 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
e921eb1a 3788 /* To maintain wire speed transmits, the Tx FIFO should be
bc7f75fa
AK
3789 * large enough to accommodate two full transmit packets,
3790 * rounded up to the next 1KB and expressed in KB. Likewise,
3791 * the Rx FIFO should be large enough to accommodate at least
3792 * one full receive packet and is similarly rounded up and
ad68076e
BA
3793 * expressed in KB.
3794 */
df762464 3795 pba = er32(PBA);
bc7f75fa 3796 /* upper 16 bits has Tx packet buffer allocation size in KB */
df762464 3797 tx_space = pba >> 16;
bc7f75fa 3798 /* lower 16 bits has Rx packet buffer allocation size in KB */
df762464 3799 pba &= 0xffff;
e921eb1a 3800 /* the Tx FIFO also stores 16 bytes of information about each Tx packet;
ad68076e 3801 * this count excludes the Ethernet FCS because hardware appends it
318a94d6
JK
3802 */
3803 min_tx_space = (adapter->max_frame_size +
e5fe2541 3804 sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2;
bc7f75fa
AK
3805 min_tx_space = ALIGN(min_tx_space, 1024);
3806 min_tx_space >>= 10;
3807 /* software strips receive CRC, so leave room for it */
318a94d6 3808 min_rx_space = adapter->max_frame_size;
bc7f75fa
AK
3809 min_rx_space = ALIGN(min_rx_space, 1024);
3810 min_rx_space >>= 10;
3811
e921eb1a 3812 /* If current Tx allocation is less than the min Tx FIFO size,
bc7f75fa 3813 * and the min Tx FIFO size is less than the current Rx FIFO
ad68076e
BA
3814 * allocation, take space away from current Rx allocation
3815 */
df762464
AK
3816 if ((tx_space < min_tx_space) &&
3817 ((min_tx_space - tx_space) < pba)) {
3818 pba -= min_tx_space - tx_space;
bc7f75fa 3819
e921eb1a 3820 /* if short on Rx space, Rx wins and must trump Tx
419e551c 3821 * adjustment
ad68076e 3822 */
79d4e908 3823 if (pba < min_rx_space)
df762464 3824 pba = min_rx_space;
bc7f75fa 3825 }
df762464
AK
3826
3827 ew32(PBA, pba);
bc7f75fa
AK
3828 }
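	/* Worked example (made-up figures, assuming the 16-byte legacy Tx
	 * descriptor): for a 9018-byte max frame,
	 *   min_tx_space = ALIGN((9018 + 16 - 4) * 2, 1024) >> 10 = 18 KB
	 *   min_rx_space = ALIGN(9018, 1024) >> 10               =  9 KB
	 * so if the Tx share reported in the upper word of PBA were only
	 * 16 KB, the 2 KB shortfall would be taken from the Rx share, but
	 * never below the min_rx_space floor enforced above.
	 */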
3829
e921eb1a 3830 /* flow control settings
ad68076e 3831 *
38eb394e 3832 * The high water mark must be low enough to fit one full frame
bc7f75fa
AK
3833 * (or the size used for early receive) above it in the Rx FIFO.
3834 * Set it to the lower of:
3835 * - 90% of the Rx FIFO size, and
38eb394e 3836 * - the full Rx FIFO size minus one full frame
ad68076e 3837 */
d3738bb8
BA
3838 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
3839 fc->pause_time = 0xFFFF;
3840 else
3841 fc->pause_time = E1000_FC_PAUSE_TIME;
b20caa80 3842 fc->send_xon = true;
d3738bb8
BA
3843 fc->current_mode = fc->requested_mode;
3844
3845 switch (hw->mac.type) {
79d4e908
BA
3846 case e1000_ich9lan:
3847 case e1000_ich10lan:
3848 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3849 pba = 14;
3850 ew32(PBA, pba);
3851 fc->high_water = 0x2800;
3852 fc->low_water = fc->high_water - 8;
3853 break;
3854 }
3855 /* fall-through */
d3738bb8 3856 default:
79d4e908
BA
3857 hwm = min(((pba << 10) * 9 / 10),
3858 ((pba << 10) - adapter->max_frame_size));
d3738bb8 3859
e80bd1d1 3860 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
d3738bb8
BA
3861 fc->low_water = fc->high_water - 8;
3862 break;
3863 case e1000_pchlan:
e921eb1a 3864 /* Workaround PCH LOM adapter hangs with certain network
38eb394e
BA
3865 * loads. If hangs persist, try disabling Tx flow control.
3866 */
3867 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3868 fc->high_water = 0x3500;
e80bd1d1 3869 fc->low_water = 0x1500;
38eb394e
BA
3870 } else {
3871 fc->high_water = 0x5000;
e80bd1d1 3872 fc->low_water = 0x3000;
38eb394e 3873 }
a305595b 3874 fc->refresh_time = 0x1000;
d3738bb8
BA
3875 break;
3876 case e1000_pch2lan:
2fbe4526 3877 case e1000_pch_lpt:
d3738bb8 3878 fc->refresh_time = 0x0400;
347b5201
BA
3879
3880 if (adapter->netdev->mtu <= ETH_DATA_LEN) {
3881 fc->high_water = 0x05C20;
3882 fc->low_water = 0x05048;
3883 fc->pause_time = 0x0650;
3884 break;
828bac87 3885 }
347b5201 3886
ce345e08
BA
3887 pba = 14;
3888 ew32(PBA, pba);
347b5201
BA
3889 fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH;
3890 fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL;
d3738bb8 3891 break;
38eb394e 3892 }
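	/* Worked example for the default case above (illustrative figures
	 * only): with pba = 26 KB and a 1522-byte max frame,
	 *   hwm = min(26624 * 9 / 10, 26624 - 1522) = 23961
	 * and masking with E1000_FCRTH_RTH (8-byte granularity) gives
	 * fc->high_water = 23960 and fc->low_water = 23952.
	 */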
bc7f75fa 3893
e921eb1a 3894 /* Alignment of Tx data is on an arbitrary byte boundary with the
d821a4c4
BA
3895 * maximum size per Tx descriptor limited only to the transmit
3896 * allocation of the packet buffer minus 96 bytes with an upper
3897 * limit of 24KB due to receive synchronization limitations.
3898 */
3899 adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
3900 24 << 10);
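	/* Worked example (illustrative only): if the upper word of PBA reports
	 * a 24 KB Tx packet buffer, then
	 *   tx_fifo_limit = min((24 << 10) - 96, 24 << 10) = 24480 bytes,
	 * i.e. the 96-byte reservation, not the 24 KB ceiling, is what limits
	 * the per-descriptor data size here.
	 */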
3901
e921eb1a 3902 /* Disable Adaptive Interrupt Moderation if 2 full packets cannot
79d4e908 3903 * fit in receive buffer.
828bac87
BA
3904 */
3905 if (adapter->itr_setting & 0x3) {
79d4e908 3906 if ((adapter->max_frame_size * 2) > (pba << 10)) {
828bac87
BA
3907 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
3908 dev_info(&adapter->pdev->dev,
17e813ec 3909 "Interrupt Throttle Rate off\n");
828bac87 3910 adapter->flags2 |= FLAG2_DISABLE_AIM;
22a4cca2 3911 e1000e_write_itr(adapter, 0);
828bac87
BA
3912 }
3913 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
3914 dev_info(&adapter->pdev->dev,
17e813ec 3915 "Interrupt Throttle Rate on\n");
828bac87
BA
3916 adapter->flags2 &= ~FLAG2_DISABLE_AIM;
3917 adapter->itr = 20000;
22a4cca2 3918 e1000e_write_itr(adapter, adapter->itr);
828bac87
BA
3919 }
3920 }
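	/* Worked example (illustrative only): with a 9018-byte jumbo frame and
	 * pba = 14 KB, 2 * 9018 = 18036 > 14 << 10 = 14336, so two full
	 * packets cannot fit in the Rx buffer and AIM is disabled with an ITR
	 * of 0; with a standard 1522-byte frame the check does not trigger and
	 * adaptive moderation is left (or turned back) on.
	 */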
3921
bc7f75fa
AK
3922 /* Allow time for pending master requests to run */
3923 mac->ops.reset_hw(hw);
97ac8cae 3924
e921eb1a 3925 /* For parts with AMT enabled, let the firmware know
97ac8cae
BA
3926 * that the network interface is in control
3927 */
c43bc57e 3928 if (adapter->flags & FLAG_HAS_AMT)
31dbe5b4 3929 e1000e_get_hw_control(adapter);
97ac8cae 3930
bc7f75fa
AK
3931 ew32(WUC, 0);
3932
3933 if (mac->ops.init_hw(hw))
44defeb3 3934 e_err("Hardware Error\n");
bc7f75fa
AK
3935
3936 e1000_update_mng_vlan(adapter);
3937
3938 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
3939 ew32(VET, ETH_P_8021Q);
3940
3941 e1000e_reset_adaptive(hw);
31dbe5b4 3942
b67e1913 3943 /* initialize systim and reset the ns time counter */
62d7e3a2 3944 e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
b67e1913 3945
d495bcb8
BA
3946 /* Set EEE advertisement as appropriate */
3947 if (adapter->flags2 & FLAG2_HAS_EEE) {
3948 s32 ret_val;
3949 u16 adv_addr;
3950
3951 switch (hw->phy.type) {
3952 case e1000_phy_82579:
3953 adv_addr = I82579_EEE_ADVERTISEMENT;
3954 break;
3955 case e1000_phy_i217:
3956 adv_addr = I217_EEE_ADVERTISEMENT;
3957 break;
3958 default:
3959 dev_err(&adapter->pdev->dev,
3960 "Invalid PHY type setting EEE advertisement\n");
3961 return;
3962 }
3963
3964 ret_val = hw->phy.ops.acquire(hw);
3965 if (ret_val) {
3966 dev_err(&adapter->pdev->dev,
3967 "EEE advertisement - unable to acquire PHY\n");
3968 return;
3969 }
3970
3971 e1000_write_emi_reg_locked(hw, adv_addr,
3972 hw->dev_spec.ich8lan.eee_disable ?
3973 0 : adapter->eee_advert);
3974
3975 hw->phy.ops.release(hw);
3976 }
3977
31dbe5b4 3978 if (!netif_running(adapter->netdev) &&
28002099 3979 !test_bit(__E1000_TESTING, &adapter->state))
31dbe5b4 3980 e1000_power_down_phy(adapter);
31dbe5b4 3981
bc7f75fa
AK
3982 e1000_get_phy_info(hw);
3983
918d7197
BA
3984 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
3985 !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
bc7f75fa 3986 u16 phy_data = 0;
e921eb1a 3987 /* speed up time to link by disabling smart power down, ignore
bc7f75fa 3988 * the return value of this function because there is nothing
ad68076e
BA
3989 * different we would do if it failed
3990 */
bc7f75fa
AK
3991 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
3992 phy_data &= ~IGP02E1000_PM_SPD;
3993 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
3994 }
bc7f75fa
AK
3995}
3996
3997int e1000e_up(struct e1000_adapter *adapter)
3998{
3999 struct e1000_hw *hw = &adapter->hw;
4000
4001 /* hardware has been reset, we need to reload some things */
4002 e1000_configure(adapter);
4003
4004 clear_bit(__E1000_DOWN, &adapter->state);
4005
4662e82b
BA
4006 if (adapter->msix_entries)
4007 e1000_configure_msix(adapter);
bc7f75fa
AK
4008 e1000_irq_enable(adapter);
4009
400484fa 4010 netif_start_queue(adapter->netdev);
4cb9be7a 4011
bc7f75fa 4012 /* fire a link change interrupt to start the watchdog */
52a9b231
BA
4013 if (adapter->msix_entries)
4014 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
4015 else
4016 ew32(ICS, E1000_ICS_LSC);
4017
bc7f75fa
AK
4018 return 0;
4019}
4020
713b3c9e
JB
4021static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
4022{
4023 struct e1000_hw *hw = &adapter->hw;
4024
4025 if (!(adapter->flags2 & FLAG2_DMA_BURST))
4026 return;
4027
4028 /* flush pending descriptor writebacks to memory */
4029 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
4030 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
4031
4032 /* execute the writes immediately */
4033 e1e_flush();
bf03085f 4034
e921eb1a 4035 /* due to rare timing issues, write to TIDV/RDTR again to ensure the
bf03085f
MV
4036 * write is successful
4037 */
4038 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
4039 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
713b3c9e
JB
4040
4041 /* execute the writes immediately */
4042 e1e_flush();
4043}
4044
67fd4fcb
JK
4045static void e1000e_update_stats(struct e1000_adapter *adapter);
4046
28002099
DE
4047/**
4048 * e1000e_down - quiesce the device and optionally reset the hardware
4049 * @adapter: board private structure
4050 * @reset: boolean flag to reset the hardware or not
4051 */
4052void e1000e_down(struct e1000_adapter *adapter, bool reset)
bc7f75fa
AK
4053{
4054 struct net_device *netdev = adapter->netdev;
4055 struct e1000_hw *hw = &adapter->hw;
4056 u32 tctl, rctl;
4057
e921eb1a 4058 /* signal that we're down so the interrupt handler does not
ad68076e
BA
4059 * reschedule our watchdog timer
4060 */
bc7f75fa
AK
4061 set_bit(__E1000_DOWN, &adapter->state);
4062
4063 /* disable receives in the hardware */
4064 rctl = er32(RCTL);
7f99ae63
BA
4065 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
4066 ew32(RCTL, rctl & ~E1000_RCTL_EN);
bc7f75fa
AK
4067 /* flush and sleep below */
4068
4cb9be7a 4069 netif_stop_queue(netdev);
bc7f75fa
AK
4070
4071 /* disable transmits in the hardware */
4072 tctl = er32(TCTL);
4073 tctl &= ~E1000_TCTL_EN;
4074 ew32(TCTL, tctl);
7f99ae63 4075
bc7f75fa
AK
4076 /* flush both disables and wait for them to finish */
4077 e1e_flush();
1bba4386 4078 usleep_range(10000, 20000);
bc7f75fa 4079
bc7f75fa
AK
4080 e1000_irq_disable(adapter);
4081
a3b87a4c
BA
4082 napi_synchronize(&adapter->napi);
4083
bc7f75fa
AK
4084 del_timer_sync(&adapter->watchdog_timer);
4085 del_timer_sync(&adapter->phy_info_timer);
4086
bc7f75fa 4087 netif_carrier_off(netdev);
67fd4fcb
JK
4088
4089 spin_lock(&adapter->stats64_lock);
4090 e1000e_update_stats(adapter);
4091 spin_unlock(&adapter->stats64_lock);
4092
400484fa 4093 e1000e_flush_descriptors(adapter);
55aa6985
BA
4094 e1000_clean_tx_ring(adapter->tx_ring);
4095 e1000_clean_rx_ring(adapter->rx_ring);
400484fa 4096
bc7f75fa
AK
4097 adapter->link_speed = 0;
4098 adapter->link_duplex = 0;
4099
da1e2046
BA
4100 /* Disable Si errata workaround on PCHx for jumbo frame flow */
4101 if ((hw->mac.type >= e1000_pch2lan) &&
4102 (adapter->netdev->mtu > ETH_DATA_LEN) &&
4103 e1000_lv_jumbo_workaround_ich8lan(hw, false))
4104 e_dbg("failed to disable jumbo frame workaround mode\n");
4105
28002099 4106 if (reset && !pci_channel_offline(adapter->pdev))
52cc3086 4107 e1000e_reset(adapter);
bc7f75fa
AK
4108}
4109
4110void e1000e_reinit_locked(struct e1000_adapter *adapter)
4111{
4112 might_sleep();
4113 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
1bba4386 4114 usleep_range(1000, 2000);
28002099 4115 e1000e_down(adapter, true);
bc7f75fa
AK
4116 e1000e_up(adapter);
4117 clear_bit(__E1000_RESETTING, &adapter->state);
4118}
4119
b67e1913
BA
4120/**
4121 * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
4122 * @cc: cyclecounter structure
4123 **/
4124static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
4125{
4126 struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
4127 cc);
4128 struct e1000_hw *hw = &adapter->hw;
5e7ff970 4129 cycle_t systim, systim_next;
b67e1913
BA
4130
4131 /* latch SYSTIMH on read of SYSTIML */
4132 systim = (cycle_t)er32(SYSTIML);
4133 systim |= (cycle_t)er32(SYSTIMH) << 32;
4134
5e7ff970
TF
4135 if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
4136 u64 incvalue, time_delta, rem, temp;
4137 int i;
4138
4139 /* errata for 82574/82583 possible bad bits read from SYSTIMH/L
4140 * check to see that the time is incrementing at a reasonable
4141 * rate and is a multiple of incvalue
4142 */
4143 incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
4144 for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
4145 /* latch SYSTIMH on read of SYSTIML */
4146 systim_next = (cycle_t)er32(SYSTIML);
4147 systim_next |= (cycle_t)er32(SYSTIMH) << 32;
4148
4149 time_delta = systim_next - systim;
4150 temp = time_delta;
4151 rem = do_div(temp, incvalue);
4152
4153 systim = systim_next;
4154
4155 if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
4156 (rem == 0))
4157 break;
4158 }
4159 }
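	/* Illustrative reading of the errata check above (example values
	 * only): if incvalue were 16, a re-read whose SYSTIM delta is an exact
	 * multiple of 16 and below E1000_82574_SYSTIM_EPSILON is accepted; a
	 * delta with a stray corrupted bit fails one of the two tests and
	 * forces another re-read, up to E1000_MAX_82574_SYSTIM_REREADS tries.
	 */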
b67e1913
BA
4160 return systim;
4161}
4162
bc7f75fa
AK
4163/**
4164 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
4165 * @adapter: board private structure to initialize
4166 *
4167 * e1000_sw_init initializes the Adapter private data structure.
4168 * Fields are initialized based on PCI device information and
4169 * OS network device settings (MTU size).
4170 **/
9f9a12f8 4171static int e1000_sw_init(struct e1000_adapter *adapter)
bc7f75fa 4172{
bc7f75fa
AK
4173 struct net_device *netdev = adapter->netdev;
4174
4175 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
4176 adapter->rx_ps_bsize0 = 128;
318a94d6
JK
4177 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4178 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
55aa6985
BA
4179 adapter->tx_ring_count = E1000_DEFAULT_TXD;
4180 adapter->rx_ring_count = E1000_DEFAULT_RXD;
bc7f75fa 4181
67fd4fcb
JK
4182 spin_lock_init(&adapter->stats64_lock);
4183
4662e82b 4184 e1000e_set_interrupt_capability(adapter);
bc7f75fa 4185
4662e82b
BA
4186 if (e1000_alloc_queues(adapter))
4187 return -ENOMEM;
bc7f75fa 4188
b67e1913
BA
4189 /* Setup hardware time stamping cyclecounter */
4190 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
4191 adapter->cc.read = e1000e_cyclecounter_read;
4192 adapter->cc.mask = CLOCKSOURCE_MASK(64);
4193 adapter->cc.mult = 1;
4194 /* cc.shift set in e1000e_get_base_timinca() */
4195
4196 spin_lock_init(&adapter->systim_lock);
4197 INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work);
4198 }
4199
bc7f75fa 4200 /* Explicitly disable IRQ since the NIC can be in any state. */
bc7f75fa
AK
4201 e1000_irq_disable(adapter);
4202
bc7f75fa
AK
4203 set_bit(__E1000_DOWN, &adapter->state);
4204 return 0;
bc7f75fa
AK
4205}
4206
f8d59f78
BA
4207/**
4208 * e1000_intr_msi_test - Interrupt Handler
4209 * @irq: interrupt number
4210 * @data: pointer to a network interface device structure
4211 **/
8bb62869 4212static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data)
f8d59f78
BA
4213{
4214 struct net_device *netdev = data;
4215 struct e1000_adapter *adapter = netdev_priv(netdev);
4216 struct e1000_hw *hw = &adapter->hw;
4217 u32 icr = er32(ICR);
4218
3bb99fe2 4219 e_dbg("icr is %08X\n", icr);
f8d59f78
BA
4220 if (icr & E1000_ICR_RXSEQ) {
4221 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
e921eb1a 4222 /* Force memory writes to complete before acknowledging the
bc76329d
BA
4223 * interrupt is handled.
4224 */
f8d59f78
BA
4225 wmb();
4226 }
4227
4228 return IRQ_HANDLED;
4229}
4230
4231/**
4232 * e1000_test_msi_interrupt - Returns 0 for successful test
4233 * @adapter: board private struct
4234 *
4235 * code flow taken from tg3.c
4236 **/
4237static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
4238{
4239 struct net_device *netdev = adapter->netdev;
4240 struct e1000_hw *hw = &adapter->hw;
4241 int err;
4242
4243 /* poll_enable hasn't been called yet, so don't need disable */
4244 /* clear any pending events */
4245 er32(ICR);
4246
4247 /* free the real vector and request a test handler */
4248 e1000_free_irq(adapter);
4662e82b 4249 e1000e_reset_interrupt_capability(adapter);
f8d59f78
BA
4250
4251 /* Assume that the test fails, if it succeeds then the test
e921eb1a
BA
4252 * MSI irq handler will unset this flag
4253 */
f8d59f78
BA
4254 adapter->flags |= FLAG_MSI_TEST_FAILED;
4255
4256 err = pci_enable_msi(adapter->pdev);
4257 if (err)
4258 goto msi_test_failed;
4259
a0607fd3 4260 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
f8d59f78
BA
4261 netdev->name, netdev);
4262 if (err) {
4263 pci_disable_msi(adapter->pdev);
4264 goto msi_test_failed;
4265 }
4266
e921eb1a 4267 /* Force memory writes to complete before enabling and firing an
bc76329d
BA
4268 * interrupt.
4269 */
f8d59f78
BA
4270 wmb();
4271
4272 e1000_irq_enable(adapter);
4273
4274 /* fire an unusual interrupt on the test handler */
4275 ew32(ICS, E1000_ICS_RXSEQ);
4276 e1e_flush();
569a3aff 4277 msleep(100);
f8d59f78
BA
4278
4279 e1000_irq_disable(adapter);
4280
bc76329d 4281 rmb(); /* read flags after interrupt has been fired */
f8d59f78
BA
4282
4283 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
4662e82b 4284 adapter->int_mode = E1000E_INT_MODE_LEGACY;
068e8a30 4285 e_info("MSI interrupt test failed, using legacy interrupt.\n");
24b706b2 4286 } else {
068e8a30 4287 e_dbg("MSI interrupt test succeeded!\n");
24b706b2 4288 }
f8d59f78
BA
4289
4290 free_irq(adapter->pdev->irq, netdev);
4291 pci_disable_msi(adapter->pdev);
4292
f8d59f78 4293msi_test_failed:
4662e82b 4294 e1000e_set_interrupt_capability(adapter);
068e8a30 4295 return e1000_request_irq(adapter);
f8d59f78
BA
4296}
4297
4298/**
4299 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
4300 * @adapter: board private struct
4301 *
4302 * code flow taken from tg3.c, called with e1000 interrupts disabled.
4303 **/
4304static int e1000_test_msi(struct e1000_adapter *adapter)
4305{
4306 int err;
4307 u16 pci_cmd;
4308
4309 if (!(adapter->flags & FLAG_MSI_ENABLED))
4310 return 0;
4311
4312 /* disable SERR in case the MSI write causes a master abort */
4313 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
36f2407f
DN
4314 if (pci_cmd & PCI_COMMAND_SERR)
4315 pci_write_config_word(adapter->pdev, PCI_COMMAND,
4316 pci_cmd & ~PCI_COMMAND_SERR);
f8d59f78
BA
4317
4318 err = e1000_test_msi_interrupt(adapter);
4319
36f2407f
DN
4320 /* re-enable SERR */
4321 if (pci_cmd & PCI_COMMAND_SERR) {
4322 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
4323 pci_cmd |= PCI_COMMAND_SERR;
4324 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
4325 }
f8d59f78 4326
f8d59f78
BA
4327 return err;
4328}
4329
bc7f75fa
AK
4330/**
4331 * e1000_open - Called when a network interface is made active
4332 * @netdev: network interface device structure
4333 *
4334 * Returns 0 on success, negative value on failure
4335 *
4336 * The open entry point is called when a network interface is made
4337 * active by the system (IFF_UP). At this point all resources needed
4338 * for transmit and receive operations are allocated, the interrupt
4339 * handler is registered with the OS, the watchdog timer is started,
4340 * and the stack is notified that the interface is ready.
4341 **/
4342static int e1000_open(struct net_device *netdev)
4343{
4344 struct e1000_adapter *adapter = netdev_priv(netdev);
4345 struct e1000_hw *hw = &adapter->hw;
23606cf5 4346 struct pci_dev *pdev = adapter->pdev;
bc7f75fa
AK
4347 int err;
4348
4349 /* disallow open during test */
4350 if (test_bit(__E1000_TESTING, &adapter->state))
4351 return -EBUSY;
4352
23606cf5
RW
4353 pm_runtime_get_sync(&pdev->dev);
4354
9c563d20
JB
4355 netif_carrier_off(netdev);
4356
bc7f75fa 4357 /* allocate transmit descriptors */
55aa6985 4358 err = e1000e_setup_tx_resources(adapter->tx_ring);
bc7f75fa
AK
4359 if (err)
4360 goto err_setup_tx;
4361
4362 /* allocate receive descriptors */
55aa6985 4363 err = e1000e_setup_rx_resources(adapter->rx_ring);
bc7f75fa
AK
4364 if (err)
4365 goto err_setup_rx;
4366
e921eb1a 4367 /* If AMT is enabled, let the firmware know that the network
11b08be8
BA
4368 * interface is now open and reset the part to a known state.
4369 */
4370 if (adapter->flags & FLAG_HAS_AMT) {
31dbe5b4 4371 e1000e_get_hw_control(adapter);
11b08be8
BA
4372 e1000e_reset(adapter);
4373 }
4374
bc7f75fa
AK
4375 e1000e_power_up_phy(adapter);
4376
4377 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
e5fe2541 4378 if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
bc7f75fa
AK
4379 e1000_update_mng_vlan(adapter);
4380
79d4e908 4381 /* DMA latency requirement to work around jumbo issue */
3e35d991
BA
4382 pm_qos_add_request(&adapter->netdev->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
4383 PM_QOS_DEFAULT_VALUE);
c128ec29 4384
e921eb1a 4385 /* before we allocate an interrupt, we must be ready to handle it.
bc7f75fa
AK
4386 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
4387 * as soon as we call pci_request_irq, so we have to setup our
ad68076e
BA
4388 * clean_rx handler before we do so.
4389 */
bc7f75fa
AK
4390 e1000_configure(adapter);
4391
4392 err = e1000_request_irq(adapter);
4393 if (err)
4394 goto err_req_irq;
4395
e921eb1a 4396 /* Work around PCIe errata with MSI interrupts causing some chipsets to
f8d59f78
BA
4397 * ignore e1000e MSI messages, which means we need to test our MSI
4398 * interrupt now
4399 */
4662e82b 4400 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
f8d59f78
BA
4401 err = e1000_test_msi(adapter);
4402 if (err) {
4403 e_err("Interrupt allocation failed\n");
4404 goto err_req_irq;
4405 }
4406 }
4407
bc7f75fa
AK
4408 /* From here on the code is the same as e1000e_up() */
4409 clear_bit(__E1000_DOWN, &adapter->state);
4410
4411 napi_enable(&adapter->napi);
4412
4413 e1000_irq_enable(adapter);
4414
09357b00 4415 adapter->tx_hang_recheck = false;
4cb9be7a 4416 netif_start_queue(netdev);
d55b53ff 4417
66148bab 4418 hw->mac.get_link_status = true;
23606cf5
RW
4419 pm_runtime_put(&pdev->dev);
4420
bc7f75fa 4421 /* fire a link status change interrupt to start the watchdog */
52a9b231
BA
4422 if (adapter->msix_entries)
4423 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
4424 else
4425 ew32(ICS, E1000_ICS_LSC);
bc7f75fa
AK
4426
4427 return 0;
4428
4429err_req_irq:
31dbe5b4 4430 e1000e_release_hw_control(adapter);
bc7f75fa 4431 e1000_power_down_phy(adapter);
55aa6985 4432 e1000e_free_rx_resources(adapter->rx_ring);
bc7f75fa 4433err_setup_rx:
55aa6985 4434 e1000e_free_tx_resources(adapter->tx_ring);
bc7f75fa
AK
4435err_setup_tx:
4436 e1000e_reset(adapter);
23606cf5 4437 pm_runtime_put_sync(&pdev->dev);
bc7f75fa
AK
4438
4439 return err;
4440}
4441
4442/**
4443 * e1000_close - Disables a network interface
4444 * @netdev: network interface device structure
4445 *
4446 * Returns 0, this is not allowed to fail
4447 *
4448 * The close entry point is called when an interface is de-activated
4449 * by the OS. The hardware is still under the drivers control, but
4450 * needs to be disabled. A global MAC reset is issued to stop the
4451 * hardware, and all transmit and receive resources are freed.
4452 **/
4453static int e1000_close(struct net_device *netdev)
4454{
4455 struct e1000_adapter *adapter = netdev_priv(netdev);
23606cf5 4456 struct pci_dev *pdev = adapter->pdev;
bb9e44d0
BA
4457 int count = E1000_CHECK_RESET_COUNT;
4458
4459 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
4460 usleep_range(10000, 20000);
bc7f75fa
AK
4461
4462 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
23606cf5
RW
4463
4464 pm_runtime_get_sync(&pdev->dev);
4465
4466 if (!test_bit(__E1000_DOWN, &adapter->state)) {
28002099 4467 e1000e_down(adapter, true);
23606cf5 4468 e1000_free_irq(adapter);
63eb48f1
DE
4469
4470 /* Link status message must follow this format */
4471 pr_info("%s NIC Link is Down\n", adapter->netdev->name);
23606cf5 4472 }
a3b87a4c
BA
4473
4474 napi_disable(&adapter->napi);
4475
55aa6985
BA
4476 e1000e_free_tx_resources(adapter->tx_ring);
4477 e1000e_free_rx_resources(adapter->rx_ring);
bc7f75fa 4478
e921eb1a 4479 /* kill manageability vlan ID if supported, but not if a vlan with
ad68076e
BA
4480 * the same ID is registered on the host OS (let 8021q kill it)
4481 */
e5fe2541 4482 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
80d5c368
PM
4483 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
4484 adapter->mng_vlan_id);
bc7f75fa 4485
e921eb1a 4486 /* If AMT is enabled, let the firmware know that the network
ad68076e
BA
4487 * interface is now closed
4488 */
31dbe5b4
BA
4489 if ((adapter->flags & FLAG_HAS_AMT) &&
4490 !test_bit(__E1000_TESTING, &adapter->state))
4491 e1000e_release_hw_control(adapter);
bc7f75fa 4492
3e35d991 4493 pm_qos_remove_request(&adapter->netdev->pm_qos_req);
c128ec29 4494
23606cf5
RW
4495 pm_runtime_put_sync(&pdev->dev);
4496
bc7f75fa
AK
4497 return 0;
4498}
fc830b78 4499
bc7f75fa
AK
4500/**
4501 * e1000_set_mac - Change the Ethernet Address of the NIC
4502 * @netdev: network interface device structure
4503 * @p: pointer to an address structure
4504 *
4505 * Returns 0 on success, negative on failure
4506 **/
4507static int e1000_set_mac(struct net_device *netdev, void *p)
4508{
4509 struct e1000_adapter *adapter = netdev_priv(netdev);
69e1e019 4510 struct e1000_hw *hw = &adapter->hw;
bc7f75fa
AK
4511 struct sockaddr *addr = p;
4512
4513 if (!is_valid_ether_addr(addr->sa_data))
4514 return -EADDRNOTAVAIL;
4515
4516 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4517 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
4518
69e1e019 4519 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
bc7f75fa
AK
4520
4521 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
4522 /* activate the work around */
4523 e1000e_set_laa_state_82571(&adapter->hw, 1);
4524
e921eb1a 4525 /* Hold a copy of the LAA in RAR[14] This is done so that
bc7f75fa
AK
4526 * between the time RAR[0] gets clobbered and the time it
4527 * gets fixed (in e1000_watchdog), the actual LAA is in one
4528 * of the RARs and no incoming packets directed to this port
4529 * are dropped. Eventually the LAA will be in RAR[0] and
ad68076e
BA
4530 * RAR[14]
4531 */
69e1e019
BA
4532 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr,
4533 adapter->hw.mac.rar_entry_count - 1);
bc7f75fa
AK
4534 }
4535
4536 return 0;
4537}
4538
a8f88ff5
JB
4539/**
4540 * e1000e_update_phy_task - work thread to update phy
4541 * @work: pointer to our work struct
4542 *
4543 * this worker thread exists because we must acquire a
4544 * semaphore to read the phy; we may msleep while waiting
4545 * for it, and we can't msleep in a timer.
4546 **/
4547static void e1000e_update_phy_task(struct work_struct *work)
4548{
4549 struct e1000_adapter *adapter = container_of(work,
17e813ec
BA
4550 struct e1000_adapter,
4551 update_phy_task);
a03206ed 4552 struct e1000_hw *hw = &adapter->hw;
615b32af
JB
4553
4554 if (test_bit(__E1000_DOWN, &adapter->state))
4555 return;
4556
a03206ed
DE
4557 e1000_get_phy_info(hw);
4558
4559 /* Enable EEE on 82579 after link up */
50844bb7 4560 if (hw->phy.type >= e1000_phy_82579)
a03206ed 4561 e1000_set_eee_pchlan(hw);
a8f88ff5
JB
4562}
4563
e921eb1a
BA
4564/**
4565 * e1000_update_phy_info - timer call-back to update PHY info
4566 * @data: pointer to adapter cast into an unsigned long
4567 *
ad68076e
BA
4568 * Need to wait a few seconds after link up to get diagnostic information from
4569 * the phy
e921eb1a 4570 **/
bc7f75fa
AK
4571static void e1000_update_phy_info(unsigned long data)
4572{
53aa82da 4573 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
615b32af
JB
4574
4575 if (test_bit(__E1000_DOWN, &adapter->state))
4576 return;
4577
a8f88ff5 4578 schedule_work(&adapter->update_phy_task);
bc7f75fa
AK
4579}
4580
8c7bbb92
BA
4581/**
4582 * e1000e_update_phy_stats - Update the PHY statistics counters
4583 * @adapter: board private structure
2b6b168d
BA
4584 *
4585 * Read/clear the upper 16-bit PHY registers and read/accumulate the lower 16-bit PHY registers
8c7bbb92
BA
4586 **/
4587static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
4588{
4589 struct e1000_hw *hw = &adapter->hw;
4590 s32 ret_val;
4591 u16 phy_data;
4592
4593 ret_val = hw->phy.ops.acquire(hw);
4594 if (ret_val)
4595 return;
4596
e921eb1a 4597 /* A page set is expensive so check if already on desired page.
8c7bbb92
BA
4598 * If not, set to the page with the PHY status registers.
4599 */
2b6b168d 4600 hw->phy.addr = 1;
8c7bbb92
BA
4601 ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4602 &phy_data);
4603 if (ret_val)
4604 goto release;
2b6b168d
BA
4605 if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
4606 ret_val = hw->phy.ops.set_page(hw,
4607 HV_STATS_PAGE << IGP_PAGE_SHIFT);
8c7bbb92
BA
4608 if (ret_val)
4609 goto release;
4610 }
4611
8c7bbb92 4612 /* Single Collision Count */
2b6b168d
BA
4613 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4614 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
8c7bbb92
BA
4615 if (!ret_val)
4616 adapter->stats.scc += phy_data;
4617
4618 /* Excessive Collision Count */
2b6b168d
BA
4619 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4620 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
8c7bbb92
BA
4621 if (!ret_val)
4622 adapter->stats.ecol += phy_data;
4623
4624 /* Multiple Collision Count */
2b6b168d
BA
4625 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4626 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
8c7bbb92
BA
4627 if (!ret_val)
4628 adapter->stats.mcc += phy_data;
4629
4630 /* Late Collision Count */
2b6b168d
BA
4631 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4632 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
8c7bbb92
BA
4633 if (!ret_val)
4634 adapter->stats.latecol += phy_data;
4635
4636 /* Collision Count - also used for adaptive IFS */
2b6b168d
BA
4637 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4638 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
8c7bbb92
BA
4639 if (!ret_val)
4640 hw->mac.collision_delta = phy_data;
4641
4642 /* Defer Count */
2b6b168d
BA
4643 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4644 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
8c7bbb92
BA
4645 if (!ret_val)
4646 adapter->stats.dc += phy_data;
4647
4648 /* Transmit with no CRS */
2b6b168d
BA
4649 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4650 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
8c7bbb92
BA
4651 if (!ret_val)
4652 adapter->stats.tncrs += phy_data;
4653
4654release:
4655 hw->phy.ops.release(hw);
4656}
4657
bc7f75fa
AK
4658/**
4659 * e1000e_update_stats - Update the board statistics counters
4660 * @adapter: board private structure
4661 **/
67fd4fcb 4662static void e1000e_update_stats(struct e1000_adapter *adapter)
bc7f75fa 4663{
7274c20f 4664 struct net_device *netdev = adapter->netdev;
bc7f75fa
AK
4665 struct e1000_hw *hw = &adapter->hw;
4666 struct pci_dev *pdev = adapter->pdev;
bc7f75fa 4667
e921eb1a 4668 /* Prevent stats update while adapter is being reset, or if the pci
bc7f75fa
AK
4669 * connection is down.
4670 */
4671 if (adapter->link_speed == 0)
4672 return;
4673 if (pci_channel_offline(pdev))
4674 return;
4675
bc7f75fa
AK
4676 adapter->stats.crcerrs += er32(CRCERRS);
4677 adapter->stats.gprc += er32(GPRC);
7c25769f 4678 adapter->stats.gorc += er32(GORCL);
e80bd1d1 4679 er32(GORCH); /* Clear gorc */
bc7f75fa
AK
4680 adapter->stats.bprc += er32(BPRC);
4681 adapter->stats.mprc += er32(MPRC);
4682 adapter->stats.roc += er32(ROC);
4683
bc7f75fa 4684 adapter->stats.mpc += er32(MPC);
8c7bbb92
BA
4685
4686 /* Half-duplex statistics */
4687 if (adapter->link_duplex == HALF_DUPLEX) {
4688 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
4689 e1000e_update_phy_stats(adapter);
4690 } else {
4691 adapter->stats.scc += er32(SCC);
4692 adapter->stats.ecol += er32(ECOL);
4693 adapter->stats.mcc += er32(MCC);
4694 adapter->stats.latecol += er32(LATECOL);
4695 adapter->stats.dc += er32(DC);
4696
4697 hw->mac.collision_delta = er32(COLC);
4698
4699 if ((hw->mac.type != e1000_82574) &&
4700 (hw->mac.type != e1000_82583))
4701 adapter->stats.tncrs += er32(TNCRS);
4702 }
4703 adapter->stats.colc += hw->mac.collision_delta;
a4f58f54 4704 }
8c7bbb92 4705
bc7f75fa
AK
4706 adapter->stats.xonrxc += er32(XONRXC);
4707 adapter->stats.xontxc += er32(XONTXC);
4708 adapter->stats.xoffrxc += er32(XOFFRXC);
4709 adapter->stats.xofftxc += er32(XOFFTXC);
bc7f75fa 4710 adapter->stats.gptc += er32(GPTC);
7c25769f 4711 adapter->stats.gotc += er32(GOTCL);
e80bd1d1 4712 er32(GOTCH); /* Clear gotc */
bc7f75fa
AK
4713 adapter->stats.rnbc += er32(RNBC);
4714 adapter->stats.ruc += er32(RUC);
bc7f75fa
AK
4715
4716 adapter->stats.mptc += er32(MPTC);
4717 adapter->stats.bptc += er32(BPTC);
4718
4719 /* used for adaptive IFS */
4720
4721 hw->mac.tx_packet_delta = er32(TPT);
4722 adapter->stats.tpt += hw->mac.tx_packet_delta;
bc7f75fa
AK
4723
4724 adapter->stats.algnerrc += er32(ALGNERRC);
4725 adapter->stats.rxerrc += er32(RXERRC);
bc7f75fa
AK
4726 adapter->stats.cexterr += er32(CEXTERR);
4727 adapter->stats.tsctc += er32(TSCTC);
4728 adapter->stats.tsctfc += er32(TSCTFC);
4729
bc7f75fa 4730 /* Fill out the OS statistics structure */
7274c20f
AK
4731 netdev->stats.multicast = adapter->stats.mprc;
4732 netdev->stats.collisions = adapter->stats.colc;
bc7f75fa
AK
4733
4734 /* Rx Errors */
4735
e921eb1a 4736 /* RLEC on some newer hardware can be incorrect so build
ad68076e
BA
4737 * our own version based on RUC and ROC
4738 */
7274c20f 4739 netdev->stats.rx_errors = adapter->stats.rxerrc +
f0ff4398
BA
4740 adapter->stats.crcerrs + adapter->stats.algnerrc +
4741 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
7274c20f 4742 netdev->stats.rx_length_errors = adapter->stats.ruc +
f0ff4398 4743 adapter->stats.roc;
7274c20f
AK
4744 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
4745 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
4746 netdev->stats.rx_missed_errors = adapter->stats.mpc;
bc7f75fa
AK
4747
4748 /* Tx Errors */
f0ff4398 4749 netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol;
7274c20f
AK
4750 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
4751 netdev->stats.tx_window_errors = adapter->stats.latecol;
4752 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
bc7f75fa
AK
4753
4754 /* Tx Dropped needs to be maintained elsewhere */
4755
bc7f75fa
AK
4756 /* Management Stats */
4757 adapter->stats.mgptc += er32(MGTPTC);
4758 adapter->stats.mgprc += er32(MGTPRC);
4759 adapter->stats.mgpdc += er32(MGTPDC);
94fb848b
BA
4760
4761 /* Correctable ECC Errors */
4762 if (hw->mac.type == e1000_pch_lpt) {
4763 u32 pbeccsts = er32(PBECCSTS);
6cf08d1c 4764
94fb848b
BA
4765 adapter->corr_errors +=
4766 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
4767 adapter->uncorr_errors +=
4768 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
4769 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
4770 }
bc7f75fa
AK
4771}
4772
7c25769f
BA
4773/**
4774 * e1000_phy_read_status - Update the PHY register status snapshot
4775 * @adapter: board private structure
4776 **/
4777static void e1000_phy_read_status(struct e1000_adapter *adapter)
4778{
4779 struct e1000_hw *hw = &adapter->hw;
4780 struct e1000_phy_regs *phy = &adapter->phy_regs;
7c25769f 4781
97390ab8
BA
4782 if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) &&
4783 (er32(STATUS) & E1000_STATUS_LU) &&
7c25769f 4784 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
90da0669
BA
4785 int ret_val;
4786
c2ade1a4
BA
4787 ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
4788 ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
4789 ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
4790 ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa);
4791 ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion);
4792 ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000);
4793 ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000);
4794 ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
7c25769f 4795 if (ret_val)
44defeb3 4796 e_warn("Error reading PHY register\n");
7c25769f 4797 } else {
e921eb1a 4798 /* Do not read PHY registers if link is not up
7c25769f
BA
4799 * Set values to typical power-on defaults
4800 */
4801 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
4802 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
4803 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
4804 BMSR_ERCAP);
4805 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
4806 ADVERTISE_ALL | ADVERTISE_CSMA);
4807 phy->lpa = 0;
4808 phy->expansion = EXPANSION_ENABLENPAGE;
4809 phy->ctrl1000 = ADVERTISE_1000FULL;
4810 phy->stat1000 = 0;
4811 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
4812 }
7c25769f
BA
4813}
4814
bc7f75fa
AK
4815static void e1000_print_link_info(struct e1000_adapter *adapter)
4816{
bc7f75fa
AK
4817 struct e1000_hw *hw = &adapter->hw;
4818 u32 ctrl = er32(CTRL);
4819
8f12fe86 4820 /* Link status message must follow this format for user tools */
7dbc1672
BA
4821 pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4822 adapter->netdev->name, adapter->link_speed,
ef456f85
JK
4823 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
4824 (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
4825 (ctrl & E1000_CTRL_RFCE) ? "Rx" :
4826 (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
bc7f75fa
AK
4827}
4828
0c6bdb30 4829static bool e1000e_has_link(struct e1000_adapter *adapter)
318a94d6
JK
4830{
4831 struct e1000_hw *hw = &adapter->hw;
3db1cd5c 4832 bool link_active = false;
318a94d6
JK
4833 s32 ret_val = 0;
4834
e921eb1a 4835 /* get_link_status is set on LSC (link status) interrupt or
318a94d6
JK
4836 * Rx sequence error interrupt. get_link_status will stay
4837 * false until the check_for_link establishes link
4838 * for copper adapters ONLY
4839 */
4840 switch (hw->phy.media_type) {
4841 case e1000_media_type_copper:
4842 if (hw->mac.get_link_status) {
4843 ret_val = hw->mac.ops.check_for_link(hw);
4844 link_active = !hw->mac.get_link_status;
4845 } else {
3db1cd5c 4846 link_active = true;
318a94d6
JK
4847 }
4848 break;
4849 case e1000_media_type_fiber:
4850 ret_val = hw->mac.ops.check_for_link(hw);
4851 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
4852 break;
4853 case e1000_media_type_internal_serdes:
4854 ret_val = hw->mac.ops.check_for_link(hw);
4855 link_active = adapter->hw.mac.serdes_has_link;
4856 break;
4857 default:
4858 case e1000_media_type_unknown:
4859 break;
4860 }
4861
4862 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
4863 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
4864 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
44defeb3 4865 e_info("Gigabit has been disabled, downgrading speed\n");
318a94d6
JK
4866 }
4867
4868 return link_active;
4869}
4870
4871static void e1000e_enable_receives(struct e1000_adapter *adapter)
4872{
4873 /* make sure the receive unit is started */
4874 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
12d43f7d 4875 (adapter->flags & FLAG_RESTART_NOW)) {
318a94d6
JK
4876 struct e1000_hw *hw = &adapter->hw;
4877 u32 rctl = er32(RCTL);
6cf08d1c 4878
318a94d6 4879 ew32(RCTL, rctl | E1000_RCTL_EN);
12d43f7d 4880 adapter->flags &= ~FLAG_RESTART_NOW;
318a94d6
JK
4881 }
4882}
4883
ff10e13c
CW
4884static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
4885{
4886 struct e1000_hw *hw = &adapter->hw;
4887
e921eb1a 4888 /* With 82574 controllers, PHY needs to be checked periodically
ff10e13c
CW
4889 * for hung state and reset, if two calls return true
4890 */
4891 if (e1000_check_phy_82574(hw))
4892 adapter->phy_hang_count++;
4893 else
4894 adapter->phy_hang_count = 0;
4895
4896 if (adapter->phy_hang_count > 1) {
4897 adapter->phy_hang_count = 0;
d9554e96 4898 e_dbg("PHY appears hung - resetting\n");
ff10e13c
CW
4899 schedule_work(&adapter->reset_task);
4900 }
4901}
4902
bc7f75fa
AK
4903/**
4904 * e1000_watchdog - Timer Call-back
4905 * @data: pointer to adapter cast into an unsigned long
4906 **/
4907static void e1000_watchdog(unsigned long data)
4908{
53aa82da 4909 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
bc7f75fa
AK
4910
4911 /* Do the rest outside of interrupt context */
4912 schedule_work(&adapter->watchdog_task);
4913
4914 /* TODO: make this use queue_delayed_work() */
4915}
4916
4917static void e1000_watchdog_task(struct work_struct *work)
4918{
4919 struct e1000_adapter *adapter = container_of(work,
17e813ec
BA
4920 struct e1000_adapter,
4921 watchdog_task);
bc7f75fa
AK
4922 struct net_device *netdev = adapter->netdev;
4923 struct e1000_mac_info *mac = &adapter->hw.mac;
75eb0fad 4924 struct e1000_phy_info *phy = &adapter->hw.phy;
bc7f75fa
AK
4925 struct e1000_ring *tx_ring = adapter->tx_ring;
4926 struct e1000_hw *hw = &adapter->hw;
4927 u32 link, tctl;
bc7f75fa 4928
615b32af
JB
4929 if (test_bit(__E1000_DOWN, &adapter->state))
4930 return;
4931
b405e8df 4932 link = e1000e_has_link(adapter);
318a94d6 4933 if ((netif_carrier_ok(netdev)) && link) {
23606cf5
RW
4934 /* Cancel scheduled suspend requests. */
4935 pm_runtime_resume(netdev->dev.parent);
4936
318a94d6 4937 e1000e_enable_receives(adapter);
bc7f75fa 4938 goto link_up;
bc7f75fa
AK
4939 }
4940
4941 if ((e1000e_enable_tx_pkt_filtering(hw)) &&
4942 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
4943 e1000_update_mng_vlan(adapter);
4944
bc7f75fa
AK
4945 if (link) {
4946 if (!netif_carrier_ok(netdev)) {
3db1cd5c 4947 bool txb2b = true;
23606cf5
RW
4948
4949 /* Cancel scheduled suspend requests. */
4950 pm_runtime_resume(netdev->dev.parent);
4951
318a94d6 4952 /* update snapshot of PHY registers on LSC */
7c25769f 4953 e1000_phy_read_status(adapter);
bc7f75fa 4954 mac->ops.get_link_up_info(&adapter->hw,
17e813ec
BA
4955 &adapter->link_speed,
4956 &adapter->link_duplex);
bc7f75fa 4957 e1000_print_link_info(adapter);
e792cd91
KS
4958
4959 /* check if SmartSpeed worked */
4960 e1000e_check_downshift(hw);
4961 if (phy->speed_downgraded)
4962 netdev_warn(netdev,
4963 "Link Speed was downgraded by SmartSpeed\n");
4964
e921eb1a 4965 /* On supported PHYs, check for duplex mismatch only
f4187b56
BA
4966 * if link has autonegotiated at 10/100 half
4967 */
4968 if ((hw->phy.type == e1000_phy_igp_3 ||
4969 hw->phy.type == e1000_phy_bm) &&
138953bb 4970 hw->mac.autoneg &&
f4187b56
BA
4971 (adapter->link_speed == SPEED_10 ||
4972 adapter->link_speed == SPEED_100) &&
4973 (adapter->link_duplex == HALF_DUPLEX)) {
4974 u16 autoneg_exp;
4975
c2ade1a4 4976 e1e_rphy(hw, MII_EXPANSION, &autoneg_exp);
f4187b56 4977
c2ade1a4 4978 if (!(autoneg_exp & EXPANSION_NWAY))
ef456f85 4979 e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n");
f4187b56
BA
4980 }
4981
f49c57e1 4982 /* adjust timeout factor according to speed/duplex */
bc7f75fa
AK
4983 adapter->tx_timeout_factor = 1;
4984 switch (adapter->link_speed) {
4985 case SPEED_10:
3db1cd5c 4986 txb2b = false;
10f1b492 4987 adapter->tx_timeout_factor = 16;
bc7f75fa
AK
4988 break;
4989 case SPEED_100:
3db1cd5c 4990 txb2b = false;
4c86e0b9 4991 adapter->tx_timeout_factor = 10;
bc7f75fa
AK
4992 break;
4993 }
4994
e921eb1a 4995 /* workaround: re-program speed mode bit after
ad68076e
BA
4996 * link-up event
4997 */
bc7f75fa
AK
4998 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
4999 !txb2b) {
5000 u32 tarc0;
6cf08d1c 5001
e9ec2c0f 5002 tarc0 = er32(TARC(0));
bc7f75fa 5003 tarc0 &= ~SPEED_MODE_BIT;
e9ec2c0f 5004 ew32(TARC(0), tarc0);
bc7f75fa
AK
5005 }
5006
e921eb1a 5007 /* disable TSO for pcie and 10/100 speeds, to avoid
ad68076e
BA
5008 * some hardware issues
5009 */
bc7f75fa
AK
5010 if (!(adapter->flags & FLAG_TSO_FORCE)) {
5011 switch (adapter->link_speed) {
5012 case SPEED_10:
5013 case SPEED_100:
44defeb3 5014 e_info("10/100 speed: disabling TSO\n");
bc7f75fa
AK
5015 netdev->features &= ~NETIF_F_TSO;
5016 netdev->features &= ~NETIF_F_TSO6;
5017 break;
5018 case SPEED_1000:
5019 netdev->features |= NETIF_F_TSO;
5020 netdev->features |= NETIF_F_TSO6;
5021 break;
5022 default:
5023 /* oops */
5024 break;
5025 }
5026 }
5027
e921eb1a 5028 /* enable transmits in the hardware, need to do this
ad68076e
BA
5029 * after setting TARC(0)
5030 */
bc7f75fa
AK
5031 tctl = er32(TCTL);
5032 tctl |= E1000_TCTL_EN;
5033 ew32(TCTL, tctl);
5034
e921eb1a 5035 /* Perform any post-link-up configuration before
75eb0fad
BA
5036 * reporting link up.
5037 */
5038 if (phy->ops.cfg_on_link_up)
5039 phy->ops.cfg_on_link_up(hw);
5040
bc7f75fa 5041 netif_carrier_on(netdev);
bc7f75fa
AK
5042
5043 if (!test_bit(__E1000_DOWN, &adapter->state))
5044 mod_timer(&adapter->phy_info_timer,
5045 round_jiffies(jiffies + 2 * HZ));
bc7f75fa
AK
5046 }
5047 } else {
5048 if (netif_carrier_ok(netdev)) {
5049 adapter->link_speed = 0;
5050 adapter->link_duplex = 0;
8f12fe86 5051 /* Link status message must follow this format */
7dbc1672 5052 pr_info("%s NIC Link is Down\n", adapter->netdev->name);
bc7f75fa 5053 netif_carrier_off(netdev);
bc7f75fa
AK
5054 if (!test_bit(__E1000_DOWN, &adapter->state))
5055 mod_timer(&adapter->phy_info_timer,
5056 round_jiffies(jiffies + 2 * HZ));
5057
d9554e96
DE
5058 /* 80003ES2LAN requires a Rx packet buffer work-around
5059 * on link down event; reset the controller to flush
5060 * the Rx packet buffer.
12d43f7d 5061 */
d9554e96 5062 if (adapter->flags & FLAG_RX_NEEDS_RESTART)
12d43f7d 5063 adapter->flags |= FLAG_RESTART_NOW;
23606cf5
RW
5064 else
5065 pm_schedule_suspend(netdev->dev.parent,
17e813ec 5066 LINK_TIMEOUT);
bc7f75fa
AK
5067 }
5068 }
5069
5070link_up:
67fd4fcb 5071 spin_lock(&adapter->stats64_lock);
bc7f75fa
AK
5072 e1000e_update_stats(adapter);
5073
5074 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
5075 adapter->tpt_old = adapter->stats.tpt;
5076 mac->collision_delta = adapter->stats.colc - adapter->colc_old;
5077 adapter->colc_old = adapter->stats.colc;
5078
7c25769f
BA
5079 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
5080 adapter->gorc_old = adapter->stats.gorc;
5081 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
5082 adapter->gotc_old = adapter->stats.gotc;
2084b114 5083 spin_unlock(&adapter->stats64_lock);
bc7f75fa 5084
d9554e96
DE
5085 /* If the link is lost the controller stops DMA, but
5086 * if there is queued Tx work it cannot be done. So
5087 * reset the controller to flush the Tx packet buffers.
5088 */
5089 if (!netif_carrier_ok(netdev) &&
5090 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
5091 adapter->flags |= FLAG_RESTART_NOW;
5092
5093 /* If reset is necessary, do it outside of interrupt context. */
12d43f7d 5094 if (adapter->flags & FLAG_RESTART_NOW) {
90da0669
BA
5095 schedule_work(&adapter->reset_task);
5096 /* return immediately since reset is imminent */
5097 return;
bc7f75fa
AK
5098 }
5099
12d43f7d
BA
5100 e1000e_update_adaptive(&adapter->hw);
5101
eab2abf5
JB
5102 /* Simple mode for Interrupt Throttle Rate (ITR) */
5103 if (adapter->itr_setting == 4) {
e921eb1a 5104 /* Symmetric Tx/Rx gets a reduced ITR=2000;
eab2abf5
JB
5105 * Total asymmetrical Tx or Rx gets ITR=8000;
5106 * everyone else is between 2000-8000.
5107 */
5108 u32 goc = (adapter->gotc + adapter->gorc) / 10000;
5109 u32 dif = (adapter->gotc > adapter->gorc ?
17e813ec
BA
5110 adapter->gotc - adapter->gorc :
5111 adapter->gorc - adapter->gotc) / 10000;
eab2abf5
JB
5112 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
5113
22a4cca2 5114 e1000e_write_itr(adapter, itr);
eab2abf5
JB
5115 }
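	/* Illustrative worked example (added, not from the original source):
	 * with adapter->gotc = 300000 and adapter->gorc = 100000 bytes since
	 * the last watchdog run, goc = 40 and dif = 20, so
	 * itr = 20 * 6000 / 40 + 2000 = 5000, which lands between the
	 * symmetric (2000) and fully asymmetric (8000) endpoints described
	 * above.
	 */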
5116
ad68076e 5117 /* Cause software interrupt to ensure Rx ring is cleaned */
4662e82b
BA
5118 if (adapter->msix_entries)
5119 ew32(ICS, adapter->rx_ring->ims_val);
5120 else
5121 ew32(ICS, E1000_ICS_RXDMT0);
bc7f75fa 5122
713b3c9e
JB
5123 /* flush pending descriptors to memory before detecting Tx hang */
5124 e1000e_flush_descriptors(adapter);
5125
bc7f75fa 5126 /* Force detection of hung controller every watchdog period */
3db1cd5c 5127 adapter->detect_tx_hung = true;
bc7f75fa 5128
e921eb1a 5129 /* With 82571 controllers, LAA may be overwritten due to controller
ad68076e
BA
5130 * reset from the other port. Set the appropriate LAA in RAR[0]
5131 */
bc7f75fa 5132 if (e1000e_get_laa_state_82571(hw))
69e1e019 5133 hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0);
bc7f75fa 5134
ff10e13c
CW
5135 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
5136 e1000e_check_82574_phy_workaround(adapter);
5137
b67e1913
BA
5138 /* Clear valid timestamp stuck in RXSTMPL/H due to a Rx error */
5139 if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
5140 if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) &&
5141 (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) {
5142 er32(RXSTMPH);
5143 adapter->rx_hwtstamp_cleared++;
5144 } else {
5145 adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP;
5146 }
5147 }
5148
bc7f75fa
AK
5149 /* Reset the timer */
5150 if (!test_bit(__E1000_DOWN, &adapter->state))
5151 mod_timer(&adapter->watchdog_timer,
5152 round_jiffies(jiffies + 2 * HZ));
5153}
5154
5155#define E1000_TX_FLAGS_CSUM 0x00000001
5156#define E1000_TX_FLAGS_VLAN 0x00000002
5157#define E1000_TX_FLAGS_TSO 0x00000004
5158#define E1000_TX_FLAGS_IPV4 0x00000008
943146de 5159#define E1000_TX_FLAGS_NO_FCS 0x00000010
b67e1913 5160#define E1000_TX_FLAGS_HWTSTAMP 0x00000020
bc7f75fa
AK
5161#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
5162#define E1000_TX_FLAGS_VLAN_SHIFT 16
5163
47ccd1ed
VY
5164static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
5165 __be16 protocol)
bc7f75fa 5166{
bc7f75fa
AK
5167 struct e1000_context_desc *context_desc;
5168 struct e1000_buffer *buffer_info;
5169 unsigned int i;
5170 u32 cmd_length = 0;
70443ae9 5171 u16 ipcse = 0, mss;
bc7f75fa 5172 u8 ipcss, ipcso, tucss, tucso, hdr_len;
bcf1f57f 5173 int err;
bc7f75fa 5174
3d5e33c9
BA
5175 if (!skb_is_gso(skb))
5176 return 0;
bc7f75fa 5177
bcf1f57f
FR
5178 err = skb_cow_head(skb, 0);
5179 if (err < 0)
5180 return err;
bc7f75fa 5181
3d5e33c9
BA
5182 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
5183 mss = skb_shinfo(skb)->gso_size;
47ccd1ed 5184 if (protocol == htons(ETH_P_IP)) {
3d5e33c9
BA
5185 struct iphdr *iph = ip_hdr(skb);
5186 iph->tot_len = 0;
5187 iph->check = 0;
5188 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
f0ff4398 5189 0, IPPROTO_TCP, 0);
3d5e33c9
BA
5190 cmd_length = E1000_TXD_CMD_IP;
5191 ipcse = skb_transport_offset(skb) - 1;
8e1e8a47 5192 } else if (skb_is_gso_v6(skb)) {
3d5e33c9
BA
5193 ipv6_hdr(skb)->payload_len = 0;
5194 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
f0ff4398
BA
5195 &ipv6_hdr(skb)->daddr,
5196 0, IPPROTO_TCP, 0);
3d5e33c9
BA
5197 ipcse = 0;
5198 }
5199 ipcss = skb_network_offset(skb);
5200 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
5201 tucss = skb_transport_offset(skb);
5202 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
3d5e33c9
BA
5203
5204 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
f0ff4398 5205 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
3d5e33c9
BA
5206
5207 i = tx_ring->next_to_use;
5208 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
5209 buffer_info = &tx_ring->buffer_info[i];
5210
e80bd1d1
BA
5211 context_desc->lower_setup.ip_fields.ipcss = ipcss;
5212 context_desc->lower_setup.ip_fields.ipcso = ipcso;
5213 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
3d5e33c9
BA
5214 context_desc->upper_setup.tcp_fields.tucss = tucss;
5215 context_desc->upper_setup.tcp_fields.tucso = tucso;
70443ae9 5216 context_desc->upper_setup.tcp_fields.tucse = 0;
e80bd1d1 5217 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
3d5e33c9
BA
5218 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
5219 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
5220
5221 buffer_info->time_stamp = jiffies;
5222 buffer_info->next_to_watch = i;
5223
5224 i++;
5225 if (i == tx_ring->count)
5226 i = 0;
5227 tx_ring->next_to_use = i;
5228
5229 return 1;
bc7f75fa
AK
5230}
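/* Illustrative worked example (added, not from the original source): for a
 * plain IPv4/TCP frame with a 14-byte Ethernet, 20-byte IP and 20-byte TCP
 * header, e1000_tso() above computes hdr_len = 34 + 20 = 54, ipcss = 14,
 * ipcse = 33 and tucss = 34, with mss = skb_shinfo(skb)->gso_size (typically
 * 1460 for a 1500-byte MTU). The hardware replicates those headers and
 * slices the (skb->len - hdr_len) payload bytes encoded in cmd_and_length
 * into mss-sized segments.
 */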
5231
47ccd1ed
VY
5232static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb,
5233 __be16 protocol)
bc7f75fa 5234{
55aa6985 5235 struct e1000_adapter *adapter = tx_ring->adapter;
bc7f75fa
AK
5236 struct e1000_context_desc *context_desc;
5237 struct e1000_buffer *buffer_info;
5238 unsigned int i;
5239 u8 css;
af807c82 5240 u32 cmd_len = E1000_TXD_CMD_DEXT;
bc7f75fa 5241
af807c82 5242 if (skb->ip_summed != CHECKSUM_PARTIAL)
3992c8ed 5243 return false;
bc7f75fa 5244
3f518390 5245 switch (protocol) {
09640e63 5246 case cpu_to_be16(ETH_P_IP):
af807c82
DG
5247 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
5248 cmd_len |= E1000_TXD_CMD_TCP;
5249 break;
09640e63 5250 case cpu_to_be16(ETH_P_IPV6):
af807c82
DG
5251 /* XXX not handling all IPV6 headers */
5252 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
5253 cmd_len |= E1000_TXD_CMD_TCP;
5254 break;
5255 default:
5256 if (unlikely(net_ratelimit()))
5f66f208
AJ
5257 e_warn("checksum_partial proto=%x!\n",
5258 be16_to_cpu(protocol));
af807c82 5259 break;
bc7f75fa
AK
5260 }
5261
0d0b1672 5262 css = skb_checksum_start_offset(skb);
af807c82
DG
5263
5264 i = tx_ring->next_to_use;
5265 buffer_info = &tx_ring->buffer_info[i];
5266 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
5267
5268 context_desc->lower_setup.ip_config = 0;
5269 context_desc->upper_setup.tcp_fields.tucss = css;
f0ff4398 5270 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
af807c82
DG
5271 context_desc->upper_setup.tcp_fields.tucse = 0;
5272 context_desc->tcp_seg_setup.data = 0;
5273 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
5274
5275 buffer_info->time_stamp = jiffies;
5276 buffer_info->next_to_watch = i;
5277
5278 i++;
5279 if (i == tx_ring->count)
5280 i = 0;
5281 tx_ring->next_to_use = i;
5282
3992c8ed 5283 return true;
bc7f75fa
AK
5284}
5285
55aa6985
BA
5286static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
5287 unsigned int first, unsigned int max_per_txd,
d821a4c4 5288 unsigned int nr_frags)
bc7f75fa 5289{
55aa6985 5290 struct e1000_adapter *adapter = tx_ring->adapter;
03b1320d 5291 struct pci_dev *pdev = adapter->pdev;
1b7719c4 5292 struct e1000_buffer *buffer_info;
8ddc951c 5293 unsigned int len = skb_headlen(skb);
03b1320d 5294 unsigned int offset = 0, size, count = 0, i;
9ed318d5 5295 unsigned int f, bytecount, segs;
bc7f75fa
AK
5296
5297 i = tx_ring->next_to_use;
5298
5299 while (len) {
1b7719c4 5300 buffer_info = &tx_ring->buffer_info[i];
bc7f75fa
AK
5301 size = min(len, max_per_txd);
5302
bc7f75fa 5303 buffer_info->length = size;
bc7f75fa 5304 buffer_info->time_stamp = jiffies;
bc7f75fa 5305 buffer_info->next_to_watch = i;
0be3f55f
NN
5306 buffer_info->dma = dma_map_single(&pdev->dev,
5307 skb->data + offset,
af667a29 5308 size, DMA_TO_DEVICE);
03b1320d 5309 buffer_info->mapped_as_page = false;
0be3f55f 5310 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
03b1320d 5311 goto dma_error;
bc7f75fa
AK
5312
5313 len -= size;
5314 offset += size;
03b1320d 5315 count++;
1b7719c4
AD
5316
5317 if (len) {
5318 i++;
5319 if (i == tx_ring->count)
5320 i = 0;
5321 }
bc7f75fa
AK
5322 }
5323
5324 for (f = 0; f < nr_frags; f++) {
9e903e08 5325 const struct skb_frag_struct *frag;
bc7f75fa
AK
5326
5327 frag = &skb_shinfo(skb)->frags[f];
9e903e08 5328 len = skb_frag_size(frag);
877749bf 5329 offset = 0;
bc7f75fa
AK
5330
5331 while (len) {
1b7719c4
AD
5332 i++;
5333 if (i == tx_ring->count)
5334 i = 0;
5335
bc7f75fa
AK
5336 buffer_info = &tx_ring->buffer_info[i];
5337 size = min(len, max_per_txd);
bc7f75fa
AK
5338
5339 buffer_info->length = size;
5340 buffer_info->time_stamp = jiffies;
bc7f75fa 5341 buffer_info->next_to_watch = i;
877749bf 5342 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
17e813ec
BA
5343 offset, size,
5344 DMA_TO_DEVICE);
03b1320d 5345 buffer_info->mapped_as_page = true;
0be3f55f 5346 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
03b1320d 5347 goto dma_error;
bc7f75fa
AK
5348
5349 len -= size;
5350 offset += size;
5351 count++;
bc7f75fa
AK
5352 }
5353 }
5354
af667a29 5355 segs = skb_shinfo(skb)->gso_segs ? : 1;
9ed318d5
TH
5356 /* multiply data chunks by size of headers */
5357 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
5358
bc7f75fa 5359 tx_ring->buffer_info[i].skb = skb;
9ed318d5
TH
5360 tx_ring->buffer_info[i].segs = segs;
5361 tx_ring->buffer_info[i].bytecount = bytecount;
bc7f75fa
AK
5362 tx_ring->buffer_info[first].next_to_watch = i;
5363
5364 return count;
03b1320d
AD
5365
5366dma_error:
af667a29 5367 dev_err(&pdev->dev, "Tx DMA map failed\n");
03b1320d 5368 buffer_info->dma = 0;
c1fa347f 5369 if (count)
03b1320d 5370 count--;
c1fa347f
RK
5371
5372 while (count--) {
af667a29 5373 if (i == 0)
03b1320d 5374 i += tx_ring->count;
c1fa347f 5375 i--;
03b1320d 5376 buffer_info = &tx_ring->buffer_info[i];
55aa6985 5377 e1000_put_txbuf(tx_ring, buffer_info);
03b1320d
AD
5378 }
5379
5380 return 0;
bc7f75fa
AK
5381}
5382
55aa6985 5383static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
bc7f75fa 5384{
55aa6985 5385 struct e1000_adapter *adapter = tx_ring->adapter;
bc7f75fa
AK
5386 struct e1000_tx_desc *tx_desc = NULL;
5387 struct e1000_buffer *buffer_info;
5388 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
5389 unsigned int i;
5390
5391 if (tx_flags & E1000_TX_FLAGS_TSO) {
5392 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
f0ff4398 5393 E1000_TXD_CMD_TSE;
bc7f75fa
AK
5394 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
5395
5396 if (tx_flags & E1000_TX_FLAGS_IPV4)
5397 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
5398 }
5399
5400 if (tx_flags & E1000_TX_FLAGS_CSUM) {
5401 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
5402 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
5403 }
5404
5405 if (tx_flags & E1000_TX_FLAGS_VLAN) {
5406 txd_lower |= E1000_TXD_CMD_VLE;
5407 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
5408 }
5409
943146de
BG
5410 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
5411 txd_lower &= ~(E1000_TXD_CMD_IFCS);
5412
b67e1913
BA
5413 if (unlikely(tx_flags & E1000_TX_FLAGS_HWTSTAMP)) {
5414 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
5415 txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
5416 }
5417
bc7f75fa
AK
5418 i = tx_ring->next_to_use;
5419
36b973df 5420 do {
bc7f75fa
AK
5421 buffer_info = &tx_ring->buffer_info[i];
5422 tx_desc = E1000_TX_DESC(*tx_ring, i);
5423 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
f0ff4398
BA
5424 tx_desc->lower.data = cpu_to_le32(txd_lower |
5425 buffer_info->length);
bc7f75fa
AK
5426 tx_desc->upper.data = cpu_to_le32(txd_upper);
5427
5428 i++;
5429 if (i == tx_ring->count)
5430 i = 0;
36b973df 5431 } while (--count > 0);
bc7f75fa
AK
5432
5433 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
5434
943146de
BG
5435 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
5436 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
5437 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
5438
e921eb1a 5439 /* Force memory writes to complete before letting h/w
bc7f75fa
AK
5440 * know there are new descriptors to fetch. (Only
5441 * applicable for weak-ordered memory model archs,
ad68076e
BA
5442 * such as IA-64).
5443 */
bc7f75fa
AK
5444 wmb();
5445
5446 tx_ring->next_to_use = i;
c6e7f51e
BA
5447
5448 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
55aa6985 5449 e1000e_update_tdt_wa(tx_ring, i);
c6e7f51e 5450 else
c5083cf6 5451 writel(i, tx_ring->tail);
c6e7f51e 5452
e921eb1a 5453 /* we need this if more than one processor can write to our tail
ad68076e
BA
5454 * at a time; it synchronizes IO on IA64/Altix systems
5455 */
bc7f75fa
AK
5456 mmiowb();
5457}
5458
5459#define MINIMUM_DHCP_PACKET_SIZE 282
5460static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
5461 struct sk_buff *skb)
5462{
e80bd1d1 5463 struct e1000_hw *hw = &adapter->hw;
bc7f75fa
AK
5464 u16 length, offset;
5465
d60923c4
BA
5466 if (vlan_tx_tag_present(skb) &&
5467 !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
5468 (adapter->hw.mng_cookie.status &
5469 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
5470 return 0;
bc7f75fa
AK
5471
5472 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
5473 return 0;
5474
53aa82da 5475 if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP))
bc7f75fa
AK
5476 return 0;
5477
5478 {
362e20ca 5479 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);
bc7f75fa
AK
5480 struct udphdr *udp;
5481
5482 if (ip->protocol != IPPROTO_UDP)
5483 return 0;
5484
5485 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
5486 if (ntohs(udp->dest) != 67)
5487 return 0;
5488
5489 offset = (u8 *)udp + 8 - skb->data;
5490 length = skb->len - offset;
5491 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
5492 }
5493
5494 return 0;
5495}
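/* Illustrative worked example (added, not from the original source): for a
 * DHCP request carried in a minimal IPv4 packet (no IP options, ihl = 5) to
 * UDP destination port 67, the parsing in e1000_transfer_dhcp_info() above
 * resolves to ip = skb->data + 14, udp = (u8 *)ip + 5 * 4, and
 * offset = 14 + 20 + 8 = 42, so the management firmware is handed
 * skb->len - 42 bytes starting at the BOOTP/DHCP message itself.
 */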
5496
55aa6985 5497static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
bc7f75fa 5498{
55aa6985 5499 struct e1000_adapter *adapter = tx_ring->adapter;
bc7f75fa 5500
55aa6985 5501 netif_stop_queue(adapter->netdev);
e921eb1a 5502 /* Herbert's original patch had:
bc7f75fa 5503 * smp_mb__after_netif_stop_queue();
ad68076e
BA
5504 * but since that doesn't exist yet, just open code it.
5505 */
bc7f75fa
AK
5506 smp_mb();
5507
e921eb1a 5508 /* We need to check again in case another CPU has just
ad68076e
BA
5509 * made room available.
5510 */
55aa6985 5511 if (e1000_desc_unused(tx_ring) < size)
bc7f75fa
AK
5512 return -EBUSY;
5513
5514 /* A reprieve! */
55aa6985 5515 netif_start_queue(adapter->netdev);
bc7f75fa
AK
5516 ++adapter->restart_queue;
5517 return 0;
5518}
5519
55aa6985 5520static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
bc7f75fa 5521{
d821a4c4
BA
5522 BUG_ON(size > tx_ring->count);
5523
55aa6985 5524 if (e1000_desc_unused(tx_ring) >= size)
bc7f75fa 5525 return 0;
55aa6985 5526 return __e1000_maybe_stop_tx(tx_ring, size);
bc7f75fa
AK
5527}
5528
3b29a56d
SH
5529static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5530 struct net_device *netdev)
bc7f75fa
AK
5531{
5532 struct e1000_adapter *adapter = netdev_priv(netdev);
5533 struct e1000_ring *tx_ring = adapter->tx_ring;
5534 unsigned int first;
bc7f75fa 5535 unsigned int tx_flags = 0;
e743d313 5536 unsigned int len = skb_headlen(skb);
4e6c709c
AK
5537 unsigned int nr_frags;
5538 unsigned int mss;
bc7f75fa
AK
5539 int count = 0;
5540 int tso;
5541 unsigned int f;
47ccd1ed 5542 __be16 protocol = vlan_get_protocol(skb);
bc7f75fa
AK
5543
5544 if (test_bit(__E1000_DOWN, &adapter->state)) {
5545 dev_kfree_skb_any(skb);
5546 return NETDEV_TX_OK;
5547 }
5548
5549 if (skb->len <= 0) {
5550 dev_kfree_skb_any(skb);
5551 return NETDEV_TX_OK;
5552 }
5553
e921eb1a 5554 /* The minimum packet size with TCTL.PSP set is 17 bytes so
6e97c170
TD
5555 * pad skb in order to meet this minimum size requirement
5556 */
5557 if (unlikely(skb->len < 17)) {
5558 if (skb_pad(skb, 17 - skb->len))
5559 return NETDEV_TX_OK;
5560 skb->len = 17;
5561 skb_set_tail_pointer(skb, 17);
5562 }
5563
bc7f75fa 5564 mss = skb_shinfo(skb)->gso_size;
bc7f75fa
AK
5565 if (mss) {
5566 u8 hdr_len;
bc7f75fa 5567
e921eb1a 5568 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
ad68076e
BA
5569 * points to just header, pull a few bytes of payload from
5570 * frags into skb->data
5571 */
bc7f75fa 5572 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
e921eb1a 5573 /* we do this workaround for ES2LAN, but it is unnecessary;
ad68076e
BA
5574 * avoiding it could save a lot of cycles
5575 */
4e6c709c 5576 if (skb->data_len && (hdr_len == len)) {
bc7f75fa
AK
5577 unsigned int pull_size;
5578
a2a5b323 5579 pull_size = min_t(unsigned int, 4, skb->data_len);
bc7f75fa 5580 if (!__pskb_pull_tail(skb, pull_size)) {
44defeb3 5581 e_err("__pskb_pull_tail failed.\n");
bc7f75fa
AK
5582 dev_kfree_skb_any(skb);
5583 return NETDEV_TX_OK;
5584 }
e743d313 5585 len = skb_headlen(skb);
bc7f75fa
AK
5586 }
5587 }
5588
5589 /* reserve a descriptor for the offload context */
5590 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
5591 count++;
5592 count++;
5593
d821a4c4 5594 count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
bc7f75fa
AK
5595
5596 nr_frags = skb_shinfo(skb)->nr_frags;
5597 for (f = 0; f < nr_frags; f++)
d821a4c4
BA
5598 count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
5599 adapter->tx_fifo_limit);
bc7f75fa
AK
5600
5601 if (adapter->hw.mac.tx_pkt_filtering)
5602 e1000_transfer_dhcp_info(adapter, skb);
5603
e921eb1a 5604 /* need: count + 2 desc gap to keep tail from touching
ad68076e
BA
5605 * head, otherwise try next time
5606 */
55aa6985 5607 if (e1000_maybe_stop_tx(tx_ring, count + 2))
bc7f75fa 5608 return NETDEV_TX_BUSY;
bc7f75fa 5609
eab6d18d 5610 if (vlan_tx_tag_present(skb)) {
bc7f75fa
AK
5611 tx_flags |= E1000_TX_FLAGS_VLAN;
5612 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
5613 }
5614
5615 first = tx_ring->next_to_use;
5616
47ccd1ed 5617 tso = e1000_tso(tx_ring, skb, protocol);
bc7f75fa
AK
5618 if (tso < 0) {
5619 dev_kfree_skb_any(skb);
bc7f75fa
AK
5620 return NETDEV_TX_OK;
5621 }
5622
5623 if (tso)
5624 tx_flags |= E1000_TX_FLAGS_TSO;
47ccd1ed 5625 else if (e1000_tx_csum(tx_ring, skb, protocol))
bc7f75fa
AK
5626 tx_flags |= E1000_TX_FLAGS_CSUM;
5627
e921eb1a 5628 /* Old method was to assume IPv4 packet by default if TSO was enabled.
bc7f75fa 5629 * 82571 hardware supports TSO capabilities for IPv6 as well...
ad68076e
BA
5630 * no longer assume, we must.
5631 */
47ccd1ed 5632 if (protocol == htons(ETH_P_IP))
bc7f75fa
AK
5633 tx_flags |= E1000_TX_FLAGS_IPV4;
5634
943146de
BG
5635 if (unlikely(skb->no_fcs))
5636 tx_flags |= E1000_TX_FLAGS_NO_FCS;
5637
25985edc 5638 /* if count is 0 then mapping error has occurred */
d821a4c4
BA
5639 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
5640 nr_frags);
1b7719c4 5641 if (count) {
b67e1913
BA
5642 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
5643 !adapter->tx_hwtstamp_skb)) {
5644 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
5645 tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
5646 adapter->tx_hwtstamp_skb = skb_get(skb);
59c871c5 5647 adapter->tx_hwtstamp_start = jiffies;
b67e1913
BA
5648 schedule_work(&adapter->tx_hwtstamp_work);
5649 } else {
5650 skb_tx_timestamp(skb);
5651 }
80be3129 5652
3f0cfa3b 5653 netdev_sent_queue(netdev, skb->len);
55aa6985 5654 e1000_tx_queue(tx_ring, tx_flags, count);
1b7719c4 5655 /* Make sure there is space in the ring for the next send. */
d821a4c4
BA
5656 e1000_maybe_stop_tx(tx_ring,
5657 (MAX_SKB_FRAGS *
5658 DIV_ROUND_UP(PAGE_SIZE,
5659 adapter->tx_fifo_limit) + 2));
1b7719c4 5660 } else {
bc7f75fa 5661 dev_kfree_skb_any(skb);
1b7719c4
AD
5662 tx_ring->buffer_info[first].time_stamp = 0;
5663 tx_ring->next_to_use = first;
bc7f75fa
AK
5664 }
5665
bc7f75fa
AK
5666 return NETDEV_TX_OK;
5667}
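/* Illustrative worked example (added, not from the original source): for a
 * linear, checksum-offloaded 1400-byte skb with no frags, the accounting in
 * e1000_xmit_frame() above yields count = 1 (offload context) + 1 (the
 * unconditional extra descriptor) + DIV_ROUND_UP(1400, tx_fifo_limit) = 3,
 * assuming tx_fifo_limit exceeds the frame length as it normally does for
 * non-jumbo traffic. Transmission proceeds only if count + 2 = 5 descriptors
 * are free, preserving the gap between tail and head.
 */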
5668
5669/**
5670 * e1000_tx_timeout - Respond to a Tx Hang
5671 * @netdev: network interface device structure
5672 **/
5673static void e1000_tx_timeout(struct net_device *netdev)
5674{
5675 struct e1000_adapter *adapter = netdev_priv(netdev);
5676
5677 /* Do the reset outside of interrupt context */
5678 adapter->tx_timeout_count++;
5679 schedule_work(&adapter->reset_task);
5680}
5681
5682static void e1000_reset_task(struct work_struct *work)
5683{
5684 struct e1000_adapter *adapter;
5685 adapter = container_of(work, struct e1000_adapter, reset_task);
5686
615b32af
JB
5687 /* don't run the task if already down */
5688 if (test_bit(__E1000_DOWN, &adapter->state))
5689 return;
5690
12d43f7d 5691 if (!(adapter->flags & FLAG_RESTART_NOW)) {
affa9dfb 5692 e1000e_dump(adapter);
12d43f7d 5693 e_err("Reset adapter unexpectedly\n");
affa9dfb 5694 }
bc7f75fa
AK
5695 e1000e_reinit_locked(adapter);
5696}
5697
5698/**
67fd4fcb 5699 * e1000e_get_stats64 - Get System Network Statistics
bc7f75fa 5700 * @netdev: network interface device structure
67fd4fcb 5701 * @stats: rtnl_link_stats64 pointer
bc7f75fa
AK
5702 *
5703 * Returns the address of the device statistics structure.
bc7f75fa 5704 **/
67fd4fcb 5705struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
66501f56 5706 struct rtnl_link_stats64 *stats)
bc7f75fa 5707{
67fd4fcb
JK
5708 struct e1000_adapter *adapter = netdev_priv(netdev);
5709
5710 memset(stats, 0, sizeof(struct rtnl_link_stats64));
5711 spin_lock(&adapter->stats64_lock);
5712 e1000e_update_stats(adapter);
5713 /* Fill out the OS statistics structure */
5714 stats->rx_bytes = adapter->stats.gorc;
5715 stats->rx_packets = adapter->stats.gprc;
5716 stats->tx_bytes = adapter->stats.gotc;
5717 stats->tx_packets = adapter->stats.gptc;
5718 stats->multicast = adapter->stats.mprc;
5719 stats->collisions = adapter->stats.colc;
5720
5721 /* Rx Errors */
5722
e921eb1a 5723 /* RLEC on some newer hardware can be incorrect so build
67fd4fcb
JK
5724 * our own version based on RUC and ROC
5725 */
5726 stats->rx_errors = adapter->stats.rxerrc +
f0ff4398
BA
5727 adapter->stats.crcerrs + adapter->stats.algnerrc +
5728 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
5729 stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc;
67fd4fcb
JK
5730 stats->rx_crc_errors = adapter->stats.crcerrs;
5731 stats->rx_frame_errors = adapter->stats.algnerrc;
5732 stats->rx_missed_errors = adapter->stats.mpc;
5733
5734 /* Tx Errors */
f0ff4398 5735 stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol;
67fd4fcb
JK
5736 stats->tx_aborted_errors = adapter->stats.ecol;
5737 stats->tx_window_errors = adapter->stats.latecol;
5738 stats->tx_carrier_errors = adapter->stats.tncrs;
5739
5740 /* Tx Dropped needs to be maintained elsewhere */
5741
5742 spin_unlock(&adapter->stats64_lock);
5743 return stats;
bc7f75fa
AK
5744}
5745
5746/**
5747 * e1000_change_mtu - Change the Maximum Transfer Unit
5748 * @netdev: network interface device structure
5749 * @new_mtu: new value for maximum frame size
5750 *
5751 * Returns 0 on success, negative on failure
5752 **/
5753static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5754{
5755 struct e1000_adapter *adapter = netdev_priv(netdev);
c751a3d5 5756 int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;
bc7f75fa 5757
2adc55c9 5758 /* Jumbo frame support */
2e1706f2
BA
5759 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
5760 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
5761 e_err("Jumbo Frames not supported.\n");
5762 return -EINVAL;
bc7f75fa
AK
5763 }
5764
2adc55c9
BA
5765 /* Supported frame sizes */
5766 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
5767 (max_frame > adapter->max_hw_frame_size)) {
5768 e_err("Unsupported MTU setting\n");
bc7f75fa
AK
5769 return -EINVAL;
5770 }
5771
2fbe4526
BA
5772 /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
5773 if ((adapter->hw.mac.type >= e1000_pch2lan) &&
a1ce6473
BA
5774 !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
5775 (new_mtu > ETH_DATA_LEN)) {
2fbe4526 5776 e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n");
a1ce6473
BA
5777 return -EINVAL;
5778 }
5779
bc7f75fa 5780 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
1bba4386 5781 usleep_range(1000, 2000);
610c9928 5782 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
318a94d6 5783 adapter->max_frame_size = max_frame;
610c9928
BA
5784 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5785 netdev->mtu = new_mtu;
63eb48f1
DE
5786
5787 pm_runtime_get_sync(netdev->dev.parent);
5788
bc7f75fa 5789 if (netif_running(netdev))
28002099 5790 e1000e_down(adapter, true);
bc7f75fa 5791
e921eb1a 5792 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
bc7f75fa
AK
5793 * means we reserve 2 more, this pushes us to allocate from the next
5794 * larger slab size.
ad68076e 5795 * i.e. RXBUFFER_2048 --> size-4096 slab
97ac8cae
BA
5796 * However with the new *_jumbo_rx* routines, jumbo receives will use
5797 * fragmented skbs
ad68076e 5798 */
bc7f75fa 5799
9926146b 5800 if (max_frame <= 2048)
bc7f75fa
AK
5801 adapter->rx_buffer_len = 2048;
5802 else
5803 adapter->rx_buffer_len = 4096;
5804
5805 /* adjust allocation if LPE protects us, and we aren't using SBP */
5806 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
17e813ec 5807 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
bc7f75fa 5808 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
17e813ec 5809 + ETH_FCS_LEN;
bc7f75fa 5810
bc7f75fa
AK
5811 if (netif_running(netdev))
5812 e1000e_up(adapter);
5813 else
5814 e1000e_reset(adapter);
5815
63eb48f1
DE
5816 pm_runtime_put_sync(netdev->dev.parent);
5817
bc7f75fa
AK
5818 clear_bit(__E1000_RESETTING, &adapter->state);
5819
5820 return 0;
5821}
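/* Illustrative worked example (added, not from the original source): setting
 * the MTU to 1500 gives max_frame = 1500 + 4 + 14 + 4 = 1522; the initial
 * pick is 2048, but because 1522 equals ETH_FRAME_LEN + VLAN_HLEN +
 * ETH_FCS_LEN the adjustment above trims rx_buffer_len to 1522. A jumbo MTU
 * of 9000 (on parts whose max_hw_frame_size allows it) gives
 * max_frame = 9022 and rx_buffer_len = 4096, with the remainder of each
 * frame received into page fragments by the *_jumbo_rx* routines.
 */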
5822
5823static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
5824 int cmd)
5825{
5826 struct e1000_adapter *adapter = netdev_priv(netdev);
5827 struct mii_ioctl_data *data = if_mii(ifr);
bc7f75fa 5828
318a94d6 5829 if (adapter->hw.phy.media_type != e1000_media_type_copper)
bc7f75fa
AK
5830 return -EOPNOTSUPP;
5831
5832 switch (cmd) {
5833 case SIOCGMIIPHY:
5834 data->phy_id = adapter->hw.phy.addr;
5835 break;
5836 case SIOCGMIIREG:
b16a002e
BA
5837 e1000_phy_read_status(adapter);
5838
7c25769f
BA
5839 switch (data->reg_num & 0x1F) {
5840 case MII_BMCR:
5841 data->val_out = adapter->phy_regs.bmcr;
5842 break;
5843 case MII_BMSR:
5844 data->val_out = adapter->phy_regs.bmsr;
5845 break;
5846 case MII_PHYSID1:
5847 data->val_out = (adapter->hw.phy.id >> 16);
5848 break;
5849 case MII_PHYSID2:
5850 data->val_out = (adapter->hw.phy.id & 0xFFFF);
5851 break;
5852 case MII_ADVERTISE:
5853 data->val_out = adapter->phy_regs.advertise;
5854 break;
5855 case MII_LPA:
5856 data->val_out = adapter->phy_regs.lpa;
5857 break;
5858 case MII_EXPANSION:
5859 data->val_out = adapter->phy_regs.expansion;
5860 break;
5861 case MII_CTRL1000:
5862 data->val_out = adapter->phy_regs.ctrl1000;
5863 break;
5864 case MII_STAT1000:
5865 data->val_out = adapter->phy_regs.stat1000;
5866 break;
5867 case MII_ESTATUS:
5868 data->val_out = adapter->phy_regs.estatus;
5869 break;
5870 default:
bc7f75fa
AK
5871 return -EIO;
5872 }
bc7f75fa
AK
5873 break;
5874 case SIOCSMIIREG:
5875 default:
5876 return -EOPNOTSUPP;
5877 }
5878 return 0;
5879}
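/* Hedged usage sketch (added, not part of the driver): a minimal user-space
 * program exercising the SIOCGMIIPHY/SIOCGMIIREG path handled above. The
 * cast of &ifr.ifr_data follows the long-standing mii-tool convention;
 * "eth0" is a placeholder interface name.
 */
#if 0	/* example only, never compiled into the driver */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* fills mii->phy_id */
		return 1;

	mii->reg_num = MII_BMSR;		/* basic mode status register */
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
		return 1;

	printf("PHY %d BMSR = 0x%04x, link %s\n", mii->phy_id, mii->val_out,
	       (mii->val_out & BMSR_LSTATUS) ? "up" : "down");
	close(fd);
	return 0;
}
#endif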
5880
b67e1913
BA
5881/**
5882 * e1000e_hwtstamp_set - control hardware time stamping
5883 * @netdev: network interface device structure
5884 * @ifreq: interface request
5885 *
5886 * Outgoing time stamping can be enabled and disabled. Play nice and
5887 * disable it when requested, although it shouldn't cause any overhead
5888 * when no packet needs it. At most one packet in the queue may be
5889 * marked for time stamping, otherwise it would be impossible to tell
5890 * for sure to which packet the hardware time stamp belongs.
5891 *
5892 * Incoming time stamping has to be configured via the hardware filters.
5893 * Not all combinations are supported, in particular event type has to be
5894 * specified. Matching the kind of event packet is not supported, with the
5895 * exception of "all V2 events regardless of level 2 or 4".
5896 **/
4e8cff64 5897static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
b67e1913
BA
5898{
5899 struct e1000_adapter *adapter = netdev_priv(netdev);
5900 struct hwtstamp_config config;
5901 int ret_val;
5902
5903 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
5904 return -EFAULT;
5905
62d7e3a2 5906 ret_val = e1000e_config_hwtstamp(adapter, &config);
b67e1913
BA
5907 if (ret_val)
5908 return ret_val;
5909
d89777bf
BA
5910 switch (config.rx_filter) {
5911 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
5912 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
5913 case HWTSTAMP_FILTER_PTP_V2_SYNC:
5914 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
5915 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
5916 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
5917 /* With V2 type filters which specify a Sync or Delay Request,
5918 * Path Delay Request/Response messages are also time stamped
5919 * by hardware so notify the caller the requested packets plus
5920 * some others are time stamped.
5921 */
5922 config.rx_filter = HWTSTAMP_FILTER_SOME;
5923 break;
5924 default:
5925 break;
5926 }
5927
b67e1913
BA
5928 return copy_to_user(ifr->ifr_data, &config,
5929 sizeof(config)) ? -EFAULT : 0;
5930}
5931
4e8cff64
BH
5932static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
5933{
5934 struct e1000_adapter *adapter = netdev_priv(netdev);
5935
5936 return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
5937 sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
5938}
5939
bc7f75fa
AK
5940static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5941{
5942 switch (cmd) {
5943 case SIOCGMIIPHY:
5944 case SIOCGMIIREG:
5945 case SIOCSMIIREG:
5946 return e1000_mii_ioctl(netdev, ifr, cmd);
b67e1913 5947 case SIOCSHWTSTAMP:
4e8cff64
BH
5948 return e1000e_hwtstamp_set(netdev, ifr);
5949 case SIOCGHWTSTAMP:
5950 return e1000e_hwtstamp_get(netdev, ifr);
bc7f75fa
AK
5951 default:
5952 return -EOPNOTSUPP;
5953 }
5954}
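/* Hedged usage sketch (added, not part of the driver): enabling hardware
 * time stamping from user space through the SIOCSHWTSTAMP path dispatched
 * above (requires CAP_NET_ADMIN). As the e1000e_hwtstamp_set() comment
 * explains, the driver may widen the filter (e.g. to HWTSTAMP_FILTER_SOME),
 * so the config written back should be checked. "eth0" is a placeholder
 * interface name.
 */
#if 0	/* example only, never compiled into the driver */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		return 1;

	/* the driver reports what it actually programmed */
	printf("tx_type %d, rx_filter %d\n", cfg.tx_type, cfg.rx_filter);
	return 0;
}
#endif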
5955
a4f58f54
BA
5956static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
5957{
5958 struct e1000_hw *hw = &adapter->hw;
74f350ee 5959 u32 i, mac_reg, wuc;
2b6b168d 5960 u16 phy_reg, wuc_enable;
70806a7f 5961 int retval;
a4f58f54
BA
5962
5963 /* copy MAC RARs to PHY RARs */
d3738bb8 5964 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
a4f58f54 5965
2b6b168d
BA
5966 retval = hw->phy.ops.acquire(hw);
5967 if (retval) {
5968 e_err("Could not acquire PHY\n");
5969 return retval;
5970 }
5971
5972 /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */
5973 retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
5974 if (retval)
75ce1532 5975 goto release;
2b6b168d
BA
5976
5977 /* copy MAC MTA to PHY MTA - only needed for pchlan */
a4f58f54
BA
5978 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
5979 mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
2b6b168d
BA
5980 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
5981 (u16)(mac_reg & 0xFFFF));
5982 hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
5983 (u16)((mac_reg >> 16) & 0xFFFF));
a4f58f54
BA
5984 }
5985
5986 /* configure PHY Rx Control register */
2b6b168d 5987 hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
a4f58f54
BA
5988 mac_reg = er32(RCTL);
5989 if (mac_reg & E1000_RCTL_UPE)
5990 phy_reg |= BM_RCTL_UPE;
5991 if (mac_reg & E1000_RCTL_MPE)
5992 phy_reg |= BM_RCTL_MPE;
5993 phy_reg &= ~(BM_RCTL_MO_MASK);
5994 if (mac_reg & E1000_RCTL_MO_3)
5995 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
17e813ec 5996 << BM_RCTL_MO_SHIFT);
a4f58f54
BA
5997 if (mac_reg & E1000_RCTL_BAM)
5998 phy_reg |= BM_RCTL_BAM;
5999 if (mac_reg & E1000_RCTL_PMCF)
6000 phy_reg |= BM_RCTL_PMCF;
6001 mac_reg = er32(CTRL);
6002 if (mac_reg & E1000_CTRL_RFCE)
6003 phy_reg |= BM_RCTL_RFCE;
2b6b168d 6004 hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
a4f58f54 6005
74f350ee
DE
6006 wuc = E1000_WUC_PME_EN;
6007 if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC))
6008 wuc |= E1000_WUC_APME;
6009
a4f58f54
BA
6010 /* enable PHY wakeup in MAC register */
6011 ew32(WUFC, wufc);
74f350ee
DE
6012 ew32(WUC, (E1000_WUC_PHY_WAKE | E1000_WUC_APMPME |
6013 E1000_WUC_PME_STATUS | wuc));
a4f58f54
BA
6014
6015 /* configure and enable PHY wakeup in PHY registers */
2b6b168d 6016 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
74f350ee 6017 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc);
a4f58f54
BA
6018
6019 /* activate PHY wakeup */
2b6b168d
BA
6020 wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
6021 retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
a4f58f54
BA
6022 if (retval)
6023 e_err("Could not set PHY Host Wakeup bit\n");
75ce1532 6024release:
94d8186a 6025 hw->phy.ops.release(hw);
a4f58f54
BA
6026
6027 return retval;
6028}
6029
2a7e19af
DE
6030static void e1000e_flush_lpic(struct pci_dev *pdev)
6031{
6032 struct net_device *netdev = pci_get_drvdata(pdev);
6033 struct e1000_adapter *adapter = netdev_priv(netdev);
6034 struct e1000_hw *hw = &adapter->hw;
6035 u32 ret_val;
6036
6037 pm_runtime_get_sync(netdev->dev.parent);
6038
6039 ret_val = hw->phy.ops.acquire(hw);
6040 if (ret_val)
6041 goto fl_out;
6042
6043 pr_info("EEE TX LPI TIMER: %08X\n",
6044 er32(LPIC) >> E1000_LPIC_LPIET_SHIFT);
6045
6046 hw->phy.ops.release(hw);
6047
6048fl_out:
6049 pm_runtime_put_sync(netdev->dev.parent);
6050}
6051
28002099 6052static int e1000e_pm_freeze(struct device *dev)
bc7f75fa 6053{
28002099 6054 struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
bc7f75fa 6055 struct e1000_adapter *adapter = netdev_priv(netdev);
bc7f75fa
AK
6056
6057 netif_device_detach(netdev);
6058
6059 if (netif_running(netdev)) {
bb9e44d0
BA
6060 int count = E1000_CHECK_RESET_COUNT;
6061
6062 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
6063 usleep_range(10000, 20000);
6064
bc7f75fa 6065 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
28002099
DE
6066
6067 /* Quiesce the device without resetting the hardware */
6068 e1000e_down(adapter, false);
bc7f75fa
AK
6069 e1000_free_irq(adapter);
6070 }
4662e82b 6071 e1000e_reset_interrupt_capability(adapter);
bc7f75fa 6072
28002099
DE
6073 /* Allow time for pending master requests to run */
6074 e1000e_disable_pcie_master(&adapter->hw);
6075
6076 return 0;
6077}
6078
6079static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
6080{
6081 struct net_device *netdev = pci_get_drvdata(pdev);
6082 struct e1000_adapter *adapter = netdev_priv(netdev);
6083 struct e1000_hw *hw = &adapter->hw;
6084 u32 ctrl, ctrl_ext, rctl, status;
6085 /* Runtime suspend should only enable wakeup for link changes */
6086 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
6087 int retval = 0;
6088
bc7f75fa
AK
6089 status = er32(STATUS);
6090 if (status & E1000_STATUS_LU)
6091 wufc &= ~E1000_WUFC_LNKC;
6092
6093 if (wufc) {
6094 e1000_setup_rctl(adapter);
ef9b965a 6095 e1000e_set_rx_mode(netdev);
bc7f75fa
AK
6096
6097 /* turn on all-multi mode if wake on multicast is enabled */
6098 if (wufc & E1000_WUFC_MC) {
6099 rctl = er32(RCTL);
6100 rctl |= E1000_RCTL_MPE;
6101 ew32(RCTL, rctl);
6102 }
6103
6104 ctrl = er32(CTRL);
a4f58f54
BA
6105 ctrl |= E1000_CTRL_ADVD3WUC;
6106 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
6107 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
bc7f75fa
AK
6108 ew32(CTRL, ctrl);
6109
318a94d6
JK
6110 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
6111 adapter->hw.phy.media_type ==
6112 e1000_media_type_internal_serdes) {
bc7f75fa
AK
6113 /* keep the laser running in D3 */
6114 ctrl_ext = er32(CTRL_EXT);
93a23f48 6115 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
bc7f75fa
AK
6116 ew32(CTRL_EXT, ctrl_ext);
6117 }
6118
63eb48f1
DE
6119 if (!runtime)
6120 e1000e_power_up_phy(adapter);
6121
97ac8cae 6122 if (adapter->flags & FLAG_IS_ICH)
99730e4c 6123 e1000_suspend_workarounds_ich8lan(&adapter->hw);
97ac8cae 6124
82776a4b 6125 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
a4f58f54
BA
6126 /* enable wakeup by the PHY */
6127 retval = e1000_init_phy_wakeup(adapter, wufc);
6128 if (retval)
6129 return retval;
6130 } else {
6131 /* enable wakeup by the MAC */
6132 ew32(WUFC, wufc);
6133 ew32(WUC, E1000_WUC_PME_EN);
6134 }
bc7f75fa
AK
6135 } else {
6136 ew32(WUC, 0);
6137 ew32(WUFC, 0);
28002099
DE
6138
6139 e1000_power_down_phy(adapter);
bc7f75fa
AK
6140 }
6141
74f350ee 6142 if (adapter->hw.phy.type == e1000_phy_igp_3) {
bc7f75fa 6143 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
74f350ee
DE
6144 } else if (hw->mac.type == e1000_pch_lpt) {
6145 if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
6146 /* ULP does not support wake from unicast, multicast
6147 * or broadcast.
6148 */
6149 retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
6150
6151 if (retval)
6152 return retval;
6153 }
6154
bc7f75fa 6155
e921eb1a 6156 /* Release control of h/w to f/w. If f/w is AMT enabled, this
ad68076e
BA
6157 * would have already happened in close and is redundant.
6158 */
31dbe5b4 6159 e1000e_release_hw_control(adapter);
bc7f75fa 6160
24b41c97
DN
6161 pci_clear_master(pdev);
6162
e921eb1a 6163 /* The pci-e switch on some quad port adapters will report a
005cbdfc
AD
6164 * correctable error when the MAC transitions from D0 to D3. To
6165 * prevent this we need to mask off the correctable errors on the
6166 * downstream port of the pci-e switch.
e8c254c5
LZ
6167 *
6168 * We don't have the associated upstream bridge while assigning
6169 * the PCI device into guest. For example, the KVM on power is
6170 * one of the cases.
005cbdfc
AD
6171 */
6172 if (adapter->flags & FLAG_IS_QUAD_PORT) {
6173 struct pci_dev *us_dev = pdev->bus->self;
005cbdfc
AD
6174 u16 devctl;
6175
e8c254c5
LZ
6176 if (!us_dev)
6177 return 0;
6178
f8c0fcac
JL
6179 pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
6180 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
6181 (devctl & ~PCI_EXP_DEVCTL_CERE));
005cbdfc 6182
66148bab
KK
6183 pci_save_state(pdev);
6184 pci_prepare_to_sleep(pdev);
005cbdfc 6185
f8c0fcac 6186 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
005cbdfc 6187 }
66148bab
KK
6188
6189 return 0;
bc7f75fa
AK
6190}
6191
13129d9b
CW
6192/**
6193 * e1000e_disable_aspm - Disable ASPM states
6194 * @pdev: pointer to PCI device struct
6195 * @state: bit-mask of ASPM states to disable
6196 *
6197 * Some devices *must* have certain ASPM states disabled per hardware errata.
6198 **/
6199static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
6f461f6c 6200{
13129d9b
CW
6201 struct pci_dev *parent = pdev->bus->self;
6202 u16 aspm_dis_mask = 0;
6203 u16 pdev_aspmc, parent_aspmc;
6204
6205 switch (state) {
6206 case PCIE_LINK_STATE_L0S:
6207 case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1:
6208 aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S;
6209 /* fall-through - can't have L1 without L0s */
6210 case PCIE_LINK_STATE_L1:
6211 aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1;
6212 break;
6213 default:
6214 return;
6215 }
6216
6217 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
6218 pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6219
6220 if (parent) {
6221 pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
6222 &parent_aspmc);
6223 parent_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6224 }
6225
6226 /* Nothing to do if the ASPM states to be disabled already are */
6227 if (!(pdev_aspmc & aspm_dis_mask) &&
6228 (!parent || !(parent_aspmc & aspm_dis_mask)))
6229 return;
6230
6231 dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
6232 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ?
6233 "L0s" : "",
6234 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ?
6235 "L1" : "");
6236
6237#ifdef CONFIG_PCIEASPM
9f728f53 6238 pci_disable_link_state_locked(pdev, state);
ffe0b2ff 6239
13129d9b
CW
6240 /* Double-check ASPM control. If not disabled by the above, the
6241 * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
6242 * not enabled); override by writing PCI config space directly.
6243 */
6244 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
6245 pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6246
6247 if (!(aspm_dis_mask & pdev_aspmc))
6248 return;
6249#endif
ffe0b2ff 6250
e921eb1a 6251 /* Both device and parent should have the same ASPM setting.
6f461f6c 6252 * Disable ASPM in downstream component first and then upstream.
1eae4eb2 6253 */
13129d9b 6254 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask);
6f461f6c 6255
13129d9b
CW
6256 if (parent)
6257 pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
6258 aspm_dis_mask);
1eae4eb2
AK
6259}
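/* Illustrative note (added, not from the original source):
 * PCI_EXP_LNKCTL_ASPMC is the two-bit ASPM Control field of Link Control,
 * with L0s as bit 0 and L1 as bit 1. Passing PCIE_LINK_STATE_L0S |
 * PCIE_LINK_STATE_L1 to e1000e_disable_aspm() above therefore builds
 * aspm_dis_mask = 0x3 and clears both bits in the device and, when present,
 * in its upstream bridge.
 */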
6260
aa338601 6261#ifdef CONFIG_PM
23606cf5 6262static int __e1000_resume(struct pci_dev *pdev)
bc7f75fa
AK
6263{
6264 struct net_device *netdev = pci_get_drvdata(pdev);
6265 struct e1000_adapter *adapter = netdev_priv(netdev);
6266 struct e1000_hw *hw = &adapter->hw;
78cd29d5 6267 u16 aspm_disable_flag = 0;
bc7f75fa 6268
78cd29d5
BA
6269 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
6270 aspm_disable_flag = PCIE_LINK_STATE_L0S;
6271 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
6272 aspm_disable_flag |= PCIE_LINK_STATE_L1;
6273 if (aspm_disable_flag)
6274 e1000e_disable_aspm(pdev, aspm_disable_flag);
6275
66148bab 6276 pci_set_master(pdev);
6e4f6f6b 6277
2fbe4526 6278 if (hw->mac.type >= e1000_pch2lan)
99730e4c
BA
6279 e1000_resume_workarounds_pchlan(&adapter->hw);
6280
bc7f75fa 6281 e1000e_power_up_phy(adapter);
a4f58f54
BA
6282
6283 /* report the system wakeup cause from S3/S4 */
6284 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
6285 u16 phy_data;
6286
6287 e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
6288 if (phy_data) {
6289 e_info("PHY Wakeup cause - %s\n",
17e813ec
BA
6290 phy_data & E1000_WUS_EX ? "Unicast Packet" :
6291 phy_data & E1000_WUS_MC ? "Multicast Packet" :
6292 phy_data & E1000_WUS_BC ? "Broadcast Packet" :
6293 phy_data & E1000_WUS_MAG ? "Magic Packet" :
6294 phy_data & E1000_WUS_LNKC ?
6295 "Link Status Change" : "other");
a4f58f54
BA
6296 }
6297 e1e_wphy(&adapter->hw, BM_WUS, ~0);
6298 } else {
6299 u32 wus = er32(WUS);
6cf08d1c 6300
a4f58f54
BA
6301 if (wus) {
6302 e_info("MAC Wakeup cause - %s\n",
17e813ec
BA
6303 wus & E1000_WUS_EX ? "Unicast Packet" :
6304 wus & E1000_WUS_MC ? "Multicast Packet" :
6305 wus & E1000_WUS_BC ? "Broadcast Packet" :
6306 wus & E1000_WUS_MAG ? "Magic Packet" :
6307 wus & E1000_WUS_LNKC ? "Link Status Change" :
6308 "other");
a4f58f54
BA
6309 }
6310 ew32(WUS, ~0);
6311 }
6312
bc7f75fa 6313 e1000e_reset(adapter);
bc7f75fa 6314
cd791618 6315 e1000_init_manageability_pt(adapter);
bc7f75fa 6316
e921eb1a 6317 /* If the controller has AMT, do not set DRV_LOAD until the interface
bc7f75fa 6318 * is up. For all other cases, let the f/w know that the h/w is now
ad68076e
BA
6319 * under the control of the driver.
6320 */
c43bc57e 6321 if (!(adapter->flags & FLAG_HAS_AMT))
31dbe5b4 6322 e1000e_get_hw_control(adapter);
bc7f75fa
AK
6323
6324 return 0;
6325}
23606cf5 6326
3e7986f6 6327#ifdef CONFIG_PM_SLEEP
28002099
DE
6328static int e1000e_pm_thaw(struct device *dev)
6329{
6330 struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
6331 struct e1000_adapter *adapter = netdev_priv(netdev);
6332
6333 e1000e_set_interrupt_capability(adapter);
6334 if (netif_running(netdev)) {
6335 u32 err = e1000_request_irq(adapter);
6336
6337 if (err)
6338 return err;
6339
6340 e1000e_up(adapter);
6341 }
6342
6343 netif_device_attach(netdev);
6344
6345 return 0;
6346}
6347
28002099 6348static int e1000e_pm_suspend(struct device *dev)
a0340162
RW
6349{
6350 struct pci_dev *pdev = to_pci_dev(dev);
a0340162 6351
2a7e19af
DE
6352 e1000e_flush_lpic(pdev);
6353
28002099
DE
6354 e1000e_pm_freeze(dev);
6355
66148bab 6356 return __e1000_shutdown(pdev, false);
a0340162
RW
6357}
6358
28002099 6359static int e1000e_pm_resume(struct device *dev)
23606cf5
RW
6360{
6361 struct pci_dev *pdev = to_pci_dev(dev);
28002099 6362 int rc;
23606cf5 6363
28002099
DE
6364 rc = __e1000_resume(pdev);
6365 if (rc)
6366 return rc;
23606cf5 6367
28002099 6368 return e1000e_pm_thaw(dev);
23606cf5 6369}
38a529b5 6370#endif /* CONFIG_PM_SLEEP */
a0340162
RW
6371
6372#ifdef CONFIG_PM_RUNTIME
63eb48f1 6373static int e1000e_pm_runtime_idle(struct device *dev)
a0340162
RW
6374{
6375 struct pci_dev *pdev = to_pci_dev(dev);
6376 struct net_device *netdev = pci_get_drvdata(pdev);
6377 struct e1000_adapter *adapter = netdev_priv(netdev);
2116bc25 6378 u16 eee_lp;
a0340162 6379
2116bc25
DE
6380 eee_lp = adapter->hw.dev_spec.ich8lan.eee_lp_ability;
6381
6382 if (!e1000e_has_link(adapter)) {
6383 adapter->hw.dev_spec.ich8lan.eee_lp_ability = eee_lp;
63eb48f1 6384 pm_schedule_suspend(dev, 5 * MSEC_PER_SEC);
2116bc25 6385 }
a0340162 6386
63eb48f1 6387 return -EBUSY;
a0340162
RW
6388}
6389
63eb48f1 6390static int e1000e_pm_runtime_resume(struct device *dev)
a0340162
RW
6391{
6392 struct pci_dev *pdev = to_pci_dev(dev);
6393 struct net_device *netdev = pci_get_drvdata(pdev);
6394 struct e1000_adapter *adapter = netdev_priv(netdev);
63eb48f1 6395 int rc;
a0340162 6396
63eb48f1
DE
6397 rc = __e1000_resume(pdev);
6398 if (rc)
6399 return rc;
a0340162 6400
63eb48f1
DE
6401 if (netdev->flags & IFF_UP)
6402 rc = e1000e_up(adapter);
a0340162 6403
63eb48f1 6404 return rc;
a0340162 6405}
23606cf5 6406
63eb48f1 6407static int e1000e_pm_runtime_suspend(struct device *dev)
23606cf5
RW
6408{
6409 struct pci_dev *pdev = to_pci_dev(dev);
6410 struct net_device *netdev = pci_get_drvdata(pdev);
6411 struct e1000_adapter *adapter = netdev_priv(netdev);
6412
63eb48f1
DE
6413 if (netdev->flags & IFF_UP) {
6414 int count = E1000_CHECK_RESET_COUNT;
6415
6416 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
6417 usleep_range(10000, 20000);
23606cf5 6418
63eb48f1
DE
6419 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
6420
6421 /* Down the device without resetting the hardware */
6422 e1000e_down(adapter, false);
6423 }
6424
6425 if (__e1000_shutdown(pdev, true)) {
6426 e1000e_pm_runtime_resume(dev);
6427 return -EBUSY;
6428 }
6429
6430 return 0;
23606cf5 6431}
a0340162 6432#endif /* CONFIG_PM_RUNTIME */
aa338601 6433#endif /* CONFIG_PM */
bc7f75fa
AK
6434
6435static void e1000_shutdown(struct pci_dev *pdev)
6436{
2a7e19af
DE
6437 e1000e_flush_lpic(pdev);
6438
28002099
DE
6439 e1000e_pm_freeze(&pdev->dev);
6440
66148bab 6441 __e1000_shutdown(pdev, false);
bc7f75fa
AK
6442}
6443
6444#ifdef CONFIG_NET_POLL_CONTROLLER
147b2c8c 6445
8bb62869 6446static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data)
147b2c8c
DD
6447{
6448 struct net_device *netdev = data;
6449 struct e1000_adapter *adapter = netdev_priv(netdev);
147b2c8c
DD
6450
6451 if (adapter->msix_entries) {
90da0669
BA
6452 int vector, msix_irq;
6453
147b2c8c
DD
6454 vector = 0;
6455 msix_irq = adapter->msix_entries[vector].vector;
6456 disable_irq(msix_irq);
6457 e1000_intr_msix_rx(msix_irq, netdev);
6458 enable_irq(msix_irq);
6459
6460 vector++;
6461 msix_irq = adapter->msix_entries[vector].vector;
6462 disable_irq(msix_irq);
6463 e1000_intr_msix_tx(msix_irq, netdev);
6464 enable_irq(msix_irq);
6465
6466 vector++;
6467 msix_irq = adapter->msix_entries[vector].vector;
6468 disable_irq(msix_irq);
6469 e1000_msix_other(msix_irq, netdev);
6470 enable_irq(msix_irq);
6471 }
6472
6473 return IRQ_HANDLED;
6474}
6475
e921eb1a
BA
6476/**
6477 * e1000_netpoll
6478 * @netdev: network interface device structure
6479 *
bc7f75fa
AK
6480 * Polling 'interrupt' - used by things like netconsole to send skbs
6481 * without having to re-enable interrupts. It's not called while
6482 * the interrupt routine is executing.
6483 */
6484static void e1000_netpoll(struct net_device *netdev)
6485{
6486 struct e1000_adapter *adapter = netdev_priv(netdev);
6487
147b2c8c
DD
6488 switch (adapter->int_mode) {
6489 case E1000E_INT_MODE_MSIX:
6490 e1000_intr_msix(adapter->pdev->irq, netdev);
6491 break;
6492 case E1000E_INT_MODE_MSI:
6493 disable_irq(adapter->pdev->irq);
6494 e1000_intr_msi(adapter->pdev->irq, netdev);
6495 enable_irq(adapter->pdev->irq);
6496 break;
e80bd1d1 6497 default: /* E1000E_INT_MODE_LEGACY */
147b2c8c
DD
6498 disable_irq(adapter->pdev->irq);
6499 e1000_intr(adapter->pdev->irq, netdev);
6500 enable_irq(adapter->pdev->irq);
6501 break;
6502 }
bc7f75fa
AK
6503}
6504#endif
6505
6506/**
6507 * e1000_io_error_detected - called when PCI error is detected
6508 * @pdev: Pointer to PCI device
6509 * @state: The current pci connection state
6510 *
6511 * This function is called after a PCI bus error affecting
6512 * this device has been detected.
6513 */
6514static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
6515 pci_channel_state_t state)
6516{
6517 struct net_device *netdev = pci_get_drvdata(pdev);
6518 struct e1000_adapter *adapter = netdev_priv(netdev);
6519
6520 netif_device_detach(netdev);
6521
c93b5a76
MM
6522 if (state == pci_channel_io_perm_failure)
6523 return PCI_ERS_RESULT_DISCONNECT;
6524
bc7f75fa 6525 if (netif_running(netdev))
28002099 6526 e1000e_down(adapter, true);
bc7f75fa
AK
6527 pci_disable_device(pdev);
6528
6529 /* Request a slot reset. */
6530 return PCI_ERS_RESULT_NEED_RESET;
6531}
6532
6533/**
6534 * e1000_io_slot_reset - called after the pci bus has been reset.
6535 * @pdev: Pointer to PCI device
6536 *
6537 * Restart the card from scratch, as if from a cold-boot. Implementation
28002099 6538 * resembles the first half of the e1000e_pm_resume routine.
bc7f75fa
AK
6539 */
6540static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
6541{
6542 struct net_device *netdev = pci_get_drvdata(pdev);
6543 struct e1000_adapter *adapter = netdev_priv(netdev);
6544 struct e1000_hw *hw = &adapter->hw;
78cd29d5 6545 u16 aspm_disable_flag = 0;
6e4f6f6b 6546 int err;
111b9dc5 6547 pci_ers_result_t result;
bc7f75fa 6548
78cd29d5
BA
6549 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
6550 aspm_disable_flag = PCIE_LINK_STATE_L0S;
6f461f6c 6551 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
78cd29d5
BA
6552 aspm_disable_flag |= PCIE_LINK_STATE_L1;
6553 if (aspm_disable_flag)
6554 e1000e_disable_aspm(pdev, aspm_disable_flag);
6555
f0f422e5 6556 err = pci_enable_device_mem(pdev);
6e4f6f6b 6557 if (err) {
bc7f75fa
AK
6558 dev_err(&pdev->dev,
6559 "Cannot re-enable PCI device after reset.\n");
111b9dc5
JB
6560 result = PCI_ERS_RESULT_DISCONNECT;
6561 } else {
23606cf5 6562 pdev->state_saved = true;
111b9dc5 6563 pci_restore_state(pdev);
66148bab 6564 pci_set_master(pdev);
bc7f75fa 6565
111b9dc5
JB
6566 pci_enable_wake(pdev, PCI_D3hot, 0);
6567 pci_enable_wake(pdev, PCI_D3cold, 0);
bc7f75fa 6568
111b9dc5
JB
6569 e1000e_reset(adapter);
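 /* writing all ones clears any stale Wake Up Status bits left over
  * from before the reset
  */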
6570 ew32(WUS, ~0);
6571 result = PCI_ERS_RESULT_RECOVERED;
6572 }
bc7f75fa 6573
111b9dc5
JB
6574 pci_cleanup_aer_uncorrect_error_status(pdev);
6575
6576 return result;
bc7f75fa
AK
6577}
6578
6579/**
6580 * e1000_io_resume - called when traffic can start flowing again.
6581 * @pdev: Pointer to PCI device
6582 *
6583 * This callback is called when the error recovery driver tells us that
 6584 * it's OK to resume normal operation. Implementation resembles the
28002099 6585 * second half of the e1000e_pm_resume routine.
bc7f75fa
AK
6586 */
6587static void e1000_io_resume(struct pci_dev *pdev)
6588{
6589 struct net_device *netdev = pci_get_drvdata(pdev);
6590 struct e1000_adapter *adapter = netdev_priv(netdev);
6591
cd791618 6592 e1000_init_manageability_pt(adapter);
bc7f75fa
AK
6593
6594 if (netif_running(netdev)) {
6595 if (e1000e_up(adapter)) {
6596 dev_err(&pdev->dev,
6597 "can't bring device back up after reset\n");
6598 return;
6599 }
6600 }
6601
6602 netif_device_attach(netdev);
6603
e921eb1a 6604 /* If the controller has AMT, do not set DRV_LOAD until the interface
bc7f75fa 6605 * is up. For all other cases, let the f/w know that the h/w is now
ad68076e
BA
6606 * under the control of the driver.
6607 */
c43bc57e 6608 if (!(adapter->flags & FLAG_HAS_AMT))
31dbe5b4 6609 e1000e_get_hw_control(adapter);
bc7f75fa
AK
6610}
6611
6612static void e1000_print_device_info(struct e1000_adapter *adapter)
6613{
6614 struct e1000_hw *hw = &adapter->hw;
6615 struct net_device *netdev = adapter->netdev;
073287c0
BA
6616 u32 ret_val;
6617 u8 pba_str[E1000_PBANUM_LENGTH];
bc7f75fa
AK
6618
6619 /* print bus type/speed/width info */
a5cc7642 6620 e_info("(PCI Express:2.5GT/s:%s) %pM\n",
44defeb3
JK
6621 /* bus width */
6622 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
f0ff4398 6623 "Width x1"),
44defeb3 6624 /* MAC address */
7c510e4b 6625 netdev->dev_addr);
44defeb3
JK
6626 e_info("Intel(R) PRO/%s Network Connection\n",
6627 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
073287c0
BA
6628 ret_val = e1000_read_pba_string_generic(hw, pba_str,
6629 E1000_PBANUM_LENGTH);
6630 if (ret_val)
f2315bf1 6631 strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
073287c0
BA
6632 e_info("MAC: %d, PHY: %d, PBA No: %s\n",
6633 hw->mac.type, hw->phy.type, pba_str);
bc7f75fa
AK
6634}
6635
10aa4c04
AK
6636static void e1000_eeprom_checks(struct e1000_adapter *adapter)
6637{
6638 struct e1000_hw *hw = &adapter->hw;
6639 int ret_val;
6640 u16 buf = 0;
6641
6642 if (hw->mac.type != e1000_82573)
6643 return;
6644
6645 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
e885d762
BA
6646 le16_to_cpus(&buf);
6647 if (!ret_val && (!(buf & (1 << 0)))) {
10aa4c04 6648 /* Deep Smart Power Down (DSPD) */
6c2a9efa
FP
6649 dev_warn(&adapter->pdev->dev,
6650 "Warning: detected DSPD enabled in EEPROM\n");
10aa4c04 6651 }
10aa4c04
AK
6652}
6653
c8f44aff 6654static int e1000_set_features(struct net_device *netdev,
70495a50 6655 netdev_features_t features)
dc221294
BA
6656{
6657 struct e1000_adapter *adapter = netdev_priv(netdev);
c8f44aff 6658 netdev_features_t changed = features ^ netdev->features;
dc221294
BA
6659
6660 if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
6661 adapter->flags |= FLAG_TSO_FORCE;
6662
f646968f 6663 if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
cf955e6c
BG
6664 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
6665 NETIF_F_RXALL)))
dc221294
BA
6666 return 0;
6667
0184039a
BG
6668 if (changed & NETIF_F_RXFCS) {
6669 if (features & NETIF_F_RXFCS) {
6670 adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
6671 } else {
6672 /* We need to take it back to defaults, which might mean
6673 * stripping is still disabled at the adapter level.
6674 */
6675 if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
6676 adapter->flags2 |= FLAG2_CRC_STRIPPING;
6677 else
6678 adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
6679 }
6680 }
6681
70495a50
BA
6682 netdev->features = features;
6683
dc221294
BA
6684 if (netif_running(netdev))
6685 e1000e_reinit_locked(adapter);
6686 else
6687 e1000e_reset(adapter);
6688
6689 return 0;
6690}
6691
651c2466
SH
6692static const struct net_device_ops e1000e_netdev_ops = {
6693 .ndo_open = e1000_open,
6694 .ndo_stop = e1000_close,
00829823 6695 .ndo_start_xmit = e1000_xmit_frame,
67fd4fcb 6696 .ndo_get_stats64 = e1000e_get_stats64,
ef9b965a 6697 .ndo_set_rx_mode = e1000e_set_rx_mode,
651c2466
SH
6698 .ndo_set_mac_address = e1000_set_mac,
6699 .ndo_change_mtu = e1000_change_mtu,
6700 .ndo_do_ioctl = e1000_ioctl,
6701 .ndo_tx_timeout = e1000_tx_timeout,
6702 .ndo_validate_addr = eth_validate_addr,
6703
651c2466
SH
6704 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
6705 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
6706#ifdef CONFIG_NET_POLL_CONTROLLER
6707 .ndo_poll_controller = e1000_netpoll,
6708#endif
dc221294 6709 .ndo_set_features = e1000_set_features,
651c2466
SH
6710};
6711
bc7f75fa
AK
6712/**
6713 * e1000_probe - Device Initialization Routine
6714 * @pdev: PCI device information struct
6715 * @ent: entry in e1000_pci_tbl
6716 *
6717 * Returns 0 on success, negative on failure
6718 *
6719 * e1000_probe initializes an adapter identified by a pci_dev structure.
6720 * The OS initialization, configuring of the adapter private structure,
6721 * and a hardware reset occur.
6722 **/
1dd06ae8 6723static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bc7f75fa
AK
6724{
6725 struct net_device *netdev;
6726 struct e1000_adapter *adapter;
6727 struct e1000_hw *hw;
6728 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
f47e81fc
BB
6729 resource_size_t mmio_start, mmio_len;
6730 resource_size_t flash_start, flash_len;
bc7f75fa 6731 static int cards_found;
78cd29d5 6732 u16 aspm_disable_flag = 0;
17e813ec 6733 int bars, i, err, pci_using_dac;
bc7f75fa
AK
6734 u16 eeprom_data = 0;
6735 u16 eeprom_apme_mask = E1000_EEPROM_APME;
491a04d2 6736 s32 rval = 0;
bc7f75fa 6737
78cd29d5
BA
6738 if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
6739 aspm_disable_flag = PCIE_LINK_STATE_L0S;
6f461f6c 6740 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
78cd29d5
BA
6741 aspm_disable_flag |= PCIE_LINK_STATE_L1;
6742 if (aspm_disable_flag)
6743 e1000e_disable_aspm(pdev, aspm_disable_flag);
6e4f6f6b 6744
f0f422e5 6745 err = pci_enable_device_mem(pdev);
bc7f75fa
AK
6746 if (err)
6747 return err;
6748
6749 pci_using_dac = 0;
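 /* prefer a 64-bit DMA mask; fall back to 32-bit addressing if the
  * platform cannot provide it
  */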
718a39eb 6750 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
bc7f75fa 6751 if (!err) {
718a39eb 6752 pci_using_dac = 1;
bc7f75fa 6753 } else {
718a39eb 6754 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
bc7f75fa 6755 if (err) {
718a39eb
RK
6756 dev_err(&pdev->dev,
6757 "No usable DMA configuration, aborting\n");
6758 goto err_dma;
bc7f75fa
AK
6759 }
6760 }
6761
17e813ec
BA
6762 bars = pci_select_bars(pdev, IORESOURCE_MEM);
6763 err = pci_request_selected_regions_exclusive(pdev, bars,
6764 e1000e_driver_name);
bc7f75fa
AK
6765 if (err)
6766 goto err_pci_reg;
6767
68eac460 6768 /* AER (Advanced Error Reporting) hooks */
19d5afd4 6769 pci_enable_pcie_error_reporting(pdev);
68eac460 6770
bc7f75fa 6771 pci_set_master(pdev);
438b365a
BA
6772 /* PCI config space info */
6773 err = pci_save_state(pdev);
6774 if (err)
6775 goto err_alloc_etherdev;
bc7f75fa
AK
6776
6777 err = -ENOMEM;
6778 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
6779 if (!netdev)
6780 goto err_alloc_etherdev;
6781
bc7f75fa
AK
6782 SET_NETDEV_DEV(netdev, &pdev->dev);
6783
f85e4dfa
TH
6784 netdev->irq = pdev->irq;
6785
bc7f75fa
AK
6786 pci_set_drvdata(pdev, netdev);
6787 adapter = netdev_priv(netdev);
6788 hw = &adapter->hw;
6789 adapter->netdev = netdev;
6790 adapter->pdev = pdev;
6791 adapter->ei = ei;
6792 adapter->pba = ei->pba;
6793 adapter->flags = ei->flags;
eb7c3adb 6794 adapter->flags2 = ei->flags2;
bc7f75fa
AK
6795 adapter->hw.adapter = adapter;
6796 adapter->hw.mac.type = ei->mac;
2adc55c9 6797 adapter->max_hw_frame_size = ei->max_hw_frame_size;
b3f4d599 6798 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
bc7f75fa
AK
6799
6800 mmio_start = pci_resource_start(pdev, 0);
6801 mmio_len = pci_resource_len(pdev, 0);
6802
6803 err = -EIO;
6804 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
6805 if (!adapter->hw.hw_addr)
6806 goto err_ioremap;
6807
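 /* on parts that expose an on-die flash (FLAG_HAS_FLASH, e.g. ICH/PCH),
  * BAR 1 is the NVM/flash window; map it so the NVM ops can reach it
  */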
6808 if ((adapter->flags & FLAG_HAS_FLASH) &&
6809 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
6810 flash_start = pci_resource_start(pdev, 1);
6811 flash_len = pci_resource_len(pdev, 1);
6812 adapter->hw.flash_address = ioremap(flash_start, flash_len);
6813 if (!adapter->hw.flash_address)
6814 goto err_flashmap;
6815 }
6816
d495bcb8
BA
6817 /* Set default EEE advertisement */
6818 if (adapter->flags2 & FLAG2_HAS_EEE)
6819 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
6820
bc7f75fa 6821 /* construct the net_device struct */
e80bd1d1 6822 netdev->netdev_ops = &e1000e_netdev_ops;
bc7f75fa 6823 e1000e_set_ethtool_ops(netdev);
e80bd1d1 6824 netdev->watchdog_timeo = 5 * HZ;
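 /* register the NAPI poll handler; 64 is the conventional per-poll
  * packet budget (NAPI weight)
  */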
c58c8a78 6825 netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
f2315bf1 6826 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
bc7f75fa
AK
6827
6828 netdev->mem_start = mmio_start;
6829 netdev->mem_end = mmio_start + mmio_len;
6830
6831 adapter->bd_number = cards_found++;
6832
4662e82b
BA
6833 e1000e_check_options(adapter);
6834
bc7f75fa
AK
6835 /* setup adapter struct */
6836 err = e1000_sw_init(adapter);
6837 if (err)
6838 goto err_sw_init;
6839
bc7f75fa
AK
6840 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
6841 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
6842 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
6843
69e3fd8c 6844 err = ei->get_variants(adapter);
bc7f75fa
AK
6845 if (err)
6846 goto err_hw_init;
6847
4a770358
BA
6848 if ((adapter->flags & FLAG_IS_ICH) &&
6849 (adapter->flags & FLAG_READ_ONLY_NVM))
6850 e1000e_write_protect_nvm_ich8lan(&adapter->hw);
6851
bc7f75fa
AK
6852 hw->mac.ops.get_bus_info(&adapter->hw);
6853
318a94d6 6854 adapter->hw.phy.autoneg_wait_to_complete = 0;
bc7f75fa
AK
6855
6856 /* Copper options */
318a94d6 6857 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
bc7f75fa
AK
6858 adapter->hw.phy.mdix = AUTO_ALL_MODES;
6859 adapter->hw.phy.disable_polarity_correction = 0;
6860 adapter->hw.phy.ms_type = e1000_ms_hw_default;
6861 }
6862
470a5420 6863 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
185095fb
BA
6864 dev_info(&pdev->dev,
6865 "PHY reset is blocked due to SOL/IDER session.\n");
bc7f75fa 6866
dc221294
BA
6867 /* Set initial default active device features */
6868 netdev->features = (NETIF_F_SG |
f646968f
PM
6869 NETIF_F_HW_VLAN_CTAG_RX |
6870 NETIF_F_HW_VLAN_CTAG_TX |
dc221294
BA
6871 NETIF_F_TSO |
6872 NETIF_F_TSO6 |
70495a50 6873 NETIF_F_RXHASH |
dc221294
BA
6874 NETIF_F_RXCSUM |
6875 NETIF_F_HW_CSUM);
6876
6877 /* Set user-changeable features (subset of all device features) */
6878 netdev->hw_features = netdev->features;
0184039a 6879 netdev->hw_features |= NETIF_F_RXFCS;
943146de 6880 netdev->priv_flags |= IFF_SUPP_NOFCS;
cf955e6c 6881 netdev->hw_features |= NETIF_F_RXALL;
bc7f75fa
AK
6882
6883 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
f646968f 6884 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
bc7f75fa 6885
dc221294
BA
6886 netdev->vlan_features |= (NETIF_F_SG |
6887 NETIF_F_TSO |
6888 NETIF_F_TSO6 |
6889 NETIF_F_HW_CSUM);
a5136e23 6890
ef9b965a
JB
6891 netdev->priv_flags |= IFF_UNICAST_FLT;
6892
7b872a55 6893 if (pci_using_dac) {
bc7f75fa 6894 netdev->features |= NETIF_F_HIGHDMA;
7b872a55
YZ
6895 netdev->vlan_features |= NETIF_F_HIGHDMA;
6896 }
bc7f75fa 6897
bc7f75fa
AK
6898 if (e1000e_enable_mng_pass_thru(&adapter->hw))
6899 adapter->flags |= FLAG_MNG_PT_ENABLED;
6900
e921eb1a 6901 /* before reading the NVM, reset the controller to
ad68076e
BA
6902 * put the device in a known good starting state
6903 */
bc7f75fa
AK
6904 adapter->hw.mac.ops.reset_hw(&adapter->hw);
6905
e921eb1a 6906 /* systems with ASPM and others may see the checksum fail on the first
bc7f75fa
AK
6907 * attempt. Let's give it a few tries
6908 */
6909 for (i = 0;; i++) {
6910 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
6911 break;
6912 if (i == 2) {
185095fb 6913 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
bc7f75fa
AK
6914 err = -EIO;
6915 goto err_eeprom;
6916 }
6917 }
6918
10aa4c04
AK
6919 e1000_eeprom_checks(adapter);
6920
608f8a0d 6921 /* copy the MAC address */
bc7f75fa 6922 if (e1000e_read_mac_addr(&adapter->hw))
185095fb
BA
6923 dev_err(&pdev->dev,
6924 "NVM Read Error while reading MAC address\n");
bc7f75fa
AK
6925
6926 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
bc7f75fa 6927
aaeb6cdf 6928 if (!is_valid_ether_addr(netdev->dev_addr)) {
185095fb 6929 dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
aaeb6cdf 6930 netdev->dev_addr);
bc7f75fa
AK
6931 err = -EIO;
6932 goto err_eeprom;
6933 }
6934
6935 init_timer(&adapter->watchdog_timer);
c061b18d 6936 adapter->watchdog_timer.function = e1000_watchdog;
53aa82da 6937 adapter->watchdog_timer.data = (unsigned long)adapter;
bc7f75fa
AK
6938
6939 init_timer(&adapter->phy_info_timer);
c061b18d 6940 adapter->phy_info_timer.function = e1000_update_phy_info;
53aa82da 6941 adapter->phy_info_timer.data = (unsigned long)adapter;
bc7f75fa
AK
6942
6943 INIT_WORK(&adapter->reset_task, e1000_reset_task);
6944 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
a8f88ff5
JB
6945 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
6946 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
41cec6f1 6947 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
bc7f75fa 6948
bc7f75fa
AK
6949 /* Initialize link parameters. User can change them with ethtool */
6950 adapter->hw.mac.autoneg = 1;
3db1cd5c 6951 adapter->fc_autoneg = true;
5c48ef3e
BA
6952 adapter->hw.fc.requested_mode = e1000_fc_default;
6953 adapter->hw.fc.current_mode = e1000_fc_default;
bc7f75fa
AK
6954 adapter->hw.phy.autoneg_advertised = 0x2f;
6955
e921eb1a 6956 /* Initial Wake on LAN setting - If APM wake is enabled in
bc7f75fa
AK
6957 * the EEPROM, enable the ACPI Magic Packet filter
6958 */
6959 if (adapter->flags & FLAG_APME_IN_WUC) {
6960 /* APME bit in EEPROM is mapped to WUC.APME */
6961 eeprom_data = er32(WUC);
6962 eeprom_apme_mask = E1000_WUC_APME;
4def99bb
BA
6963 if ((hw->mac.type > e1000_ich10lan) &&
6964 (eeprom_data & E1000_WUC_PHY_WAKE))
a4f58f54 6965 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
bc7f75fa
AK
6966 } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
6967 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
6968 (adapter->hw.bus.func == 1))
491a04d2
DE
6969 rval = e1000_read_nvm(&adapter->hw,
6970 NVM_INIT_CONTROL3_PORT_B,
6971 1, &eeprom_data);
bc7f75fa 6972 else
491a04d2
DE
6973 rval = e1000_read_nvm(&adapter->hw,
6974 NVM_INIT_CONTROL3_PORT_A,
6975 1, &eeprom_data);
bc7f75fa
AK
6976 }
6977
6978 /* fetch WoL from EEPROM */
491a04d2
DE
6979 if (rval)
6980 e_dbg("NVM read error getting WoL initial values: %d\n", rval);
6981 else if (eeprom_data & eeprom_apme_mask)
bc7f75fa
AK
6982 adapter->eeprom_wol |= E1000_WUFC_MAG;
6983
e921eb1a 6984 /* now that we have the eeprom settings, apply the special cases
bc7f75fa
AK
6985 * where the eeprom may be wrong or the board simply won't support
6986 * wake on lan on a particular port
6987 */
6988 if (!(adapter->flags & FLAG_HAS_WOL))
6989 adapter->eeprom_wol = 0;
6990
6991 /* initialize the wol settings based on the eeprom settings */
6992 adapter->wol = adapter->eeprom_wol;
66148bab
KK
6993
6994 /* make sure adapter isn't asleep if manageability is enabled */
6995 if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
6996 (hw->mac.ops.check_mng_mode(hw)))
6997 device_wakeup_enable(&pdev->dev);
bc7f75fa 6998
84527590 6999 /* save off EEPROM version number */
491a04d2
DE
7000 rval = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
7001
7002 if (rval) {
7003 e_dbg("NVM read error getting EEPROM version: %d\n", rval);
7004 adapter->eeprom_vers = 0;
7005 }
84527590 7006
bc7f75fa
AK
7007 /* reset the hardware with the new settings */
7008 e1000e_reset(adapter);
7009
e921eb1a 7010 /* If the controller has AMT, do not set DRV_LOAD until the interface
bc7f75fa 7011 * is up. For all other cases, let the f/w know that the h/w is now
ad68076e
BA
7012 * under the control of the driver.
7013 */
c43bc57e 7014 if (!(adapter->flags & FLAG_HAS_AMT))
31dbe5b4 7015 e1000e_get_hw_control(adapter);
bc7f75fa 7016
f2315bf1 7017 strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
bc7f75fa
AK
7018 err = register_netdev(netdev);
7019 if (err)
7020 goto err_register;
7021
9c563d20
JB
7022 /* carrier off reporting is important to ethtool even BEFORE open */
7023 netif_carrier_off(netdev);
7024
d89777bf
BA
7025 /* init PTP hardware clock */
7026 e1000e_ptp_init(adapter);
7027
bc7f75fa
AK
7028 e1000_print_device_info(adapter);
7029
f3ec4f87
AS
7030 if (pci_dev_run_wake(pdev))
7031 pm_runtime_put_noidle(&pdev->dev);
23606cf5 7032
bc7f75fa
AK
7033 return 0;
7034
7035err_register:
c43bc57e 7036 if (!(adapter->flags & FLAG_HAS_AMT))
31dbe5b4 7037 e1000e_release_hw_control(adapter);
bc7f75fa 7038err_eeprom:
470a5420 7039 if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
bc7f75fa 7040 e1000_phy_hw_reset(&adapter->hw);
c43bc57e 7041err_hw_init:
bc7f75fa
AK
7042 kfree(adapter->tx_ring);
7043 kfree(adapter->rx_ring);
7044err_sw_init:
c43bc57e
JB
7045 if (adapter->hw.flash_address)
7046 iounmap(adapter->hw.flash_address);
e82f54ba 7047 e1000e_reset_interrupt_capability(adapter);
c43bc57e 7048err_flashmap:
bc7f75fa
AK
7049 iounmap(adapter->hw.hw_addr);
7050err_ioremap:
7051 free_netdev(netdev);
7052err_alloc_etherdev:
f0f422e5 7053 pci_release_selected_regions(pdev,
f0ff4398 7054 pci_select_bars(pdev, IORESOURCE_MEM));
bc7f75fa
AK
7055err_pci_reg:
7056err_dma:
7057 pci_disable_device(pdev);
7058 return err;
7059}
7060
7061/**
7062 * e1000_remove - Device Removal Routine
7063 * @pdev: PCI device information struct
7064 *
7065 * e1000_remove is called by the PCI subsystem to alert the driver
 7066 * that it should release a PCI device. This could be caused by a
7067 * Hot-Plug event, or because the driver is going to be removed from
7068 * memory.
7069 **/
9f9a12f8 7070static void e1000_remove(struct pci_dev *pdev)
bc7f75fa
AK
7071{
7072 struct net_device *netdev = pci_get_drvdata(pdev);
7073 struct e1000_adapter *adapter = netdev_priv(netdev);
23606cf5
RW
7074 bool down = test_bit(__E1000_DOWN, &adapter->state);
7075
d89777bf
BA
7076 e1000e_ptp_remove(adapter);
7077
e921eb1a 7078 /* The timers may be rescheduled, so explicitly prevent them
23f333a2 7079 * from being rescheduled.
ad68076e 7080 */
23606cf5
RW
7081 if (!down)
7082 set_bit(__E1000_DOWN, &adapter->state);
bc7f75fa
AK
7083 del_timer_sync(&adapter->watchdog_timer);
7084 del_timer_sync(&adapter->phy_info_timer);
7085
41cec6f1
BA
7086 cancel_work_sync(&adapter->reset_task);
7087 cancel_work_sync(&adapter->watchdog_task);
7088 cancel_work_sync(&adapter->downshift_task);
7089 cancel_work_sync(&adapter->update_phy_task);
7090 cancel_work_sync(&adapter->print_hang_task);
bc7f75fa 7091
b67e1913
BA
7092 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
7093 cancel_work_sync(&adapter->tx_hwtstamp_work);
7094 if (adapter->tx_hwtstamp_skb) {
7095 dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
7096 adapter->tx_hwtstamp_skb = NULL;
7097 }
7098 }
7099
23606cf5
RW
7100 /* Don't lie to e1000_close() down the road. */
7101 if (!down)
7102 clear_bit(__E1000_DOWN, &adapter->state);
17f208de
BA
7103 unregister_netdev(netdev);
7104
f3ec4f87
AS
7105 if (pci_dev_run_wake(pdev))
7106 pm_runtime_get_noresume(&pdev->dev);
23606cf5 7107
e921eb1a 7108 /* Release control of h/w to f/w. If f/w is AMT enabled, this
ad68076e
BA
7109 * would have already happened in close and is redundant.
7110 */
31dbe5b4 7111 e1000e_release_hw_control(adapter);
bc7f75fa 7112
4662e82b 7113 e1000e_reset_interrupt_capability(adapter);
bc7f75fa
AK
7114 kfree(adapter->tx_ring);
7115 kfree(adapter->rx_ring);
7116
7117 iounmap(adapter->hw.hw_addr);
7118 if (adapter->hw.flash_address)
7119 iounmap(adapter->hw.flash_address);
f0f422e5 7120 pci_release_selected_regions(pdev,
f0ff4398 7121 pci_select_bars(pdev, IORESOURCE_MEM));
bc7f75fa
AK
7122
7123 free_netdev(netdev);
7124
111b9dc5 7125 /* AER disable */
19d5afd4 7126 pci_disable_pcie_error_reporting(pdev);
111b9dc5 7127
bc7f75fa
AK
7128 pci_disable_device(pdev);
7129}
7130
7131/* PCI Error Recovery (ERS) */
3646f0e5 7132static const struct pci_error_handlers e1000_err_handler = {
bc7f75fa
AK
7133 .error_detected = e1000_io_error_detected,
7134 .slot_reset = e1000_io_slot_reset,
7135 .resume = e1000_io_resume,
7136};
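/* The AER core is expected to walk these callbacks in order on a bus
 * error: error_detected -> slot_reset -> resume (see the kernel's PCI
 * error-recovery documentation for the full state machine).
 */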
7137
0e8e842b 7138static const struct pci_device_id e1000_pci_tbl[] = {
bc7f75fa
AK
7139 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
7140 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
7141 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
c29c3ba5
BA
7142 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP),
7143 board_82571 },
bc7f75fa
AK
7144 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
7145 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
040babf9
AK
7146 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
7147 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
7148 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
ad68076e 7149
bc7f75fa
AK
7150 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
7151 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
7152 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
7153 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
ad68076e 7154
bc7f75fa
AK
7155 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
7156 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
7157 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
ad68076e 7158
4662e82b 7159 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
bef28b11 7160 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
8c81c9c3 7161 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
4662e82b 7162
bc7f75fa
AK
7163 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
7164 board_80003es2lan },
7165 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
7166 board_80003es2lan },
7167 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
7168 board_80003es2lan },
7169 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
7170 board_80003es2lan },
ad68076e 7171
bc7f75fa
AK
7172 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
7173 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
7174 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
7175 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
7176 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
7177 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
7178 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
9e135a2e 7179 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
ad68076e 7180
bc7f75fa
AK
7181 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
7182 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
7183 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
7184 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
7185 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
2f15f9d6 7186 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
97ac8cae
BA
7187 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
7188 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
7189 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
7190
7191 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
7192 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
7193 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
bc7f75fa 7194
f4187b56
BA
7195 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
7196 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
10df0b91 7197 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
f4187b56 7198
a4f58f54
BA
7199 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
7200 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
7201 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
7202 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
7203
d3738bb8
BA
7204 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
7205 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
7206
2fbe4526
BA
7207 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
7208 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
16e310ae
BA
7209 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
7210 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
91a3d82f
BA
7211 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt },
7212 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt },
7213 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt },
7214 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt },
2fbe4526 7215
f36bb6ca 7216 { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
bc7f75fa
AK
7217};
7218MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
7219
23606cf5 7220static const struct dev_pm_ops e1000_pm_ops = {
72f72dcc 7221#ifdef CONFIG_PM_SLEEP
28002099
DE
7222 .suspend = e1000e_pm_suspend,
7223 .resume = e1000e_pm_resume,
7224 .freeze = e1000e_pm_freeze,
7225 .thaw = e1000e_pm_thaw,
7226 .poweroff = e1000e_pm_suspend,
7227 .restore = e1000e_pm_resume,
72f72dcc 7228#endif
63eb48f1
DE
7229 SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
7230 e1000e_pm_runtime_idle)
23606cf5
RW
7231};
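/* .suspend/.resume cover suspend-to-RAM and .freeze/.thaw/.poweroff/.restore
 * cover hibernation; SET_RUNTIME_PM_OPS wires the runtime-PM callbacks that
 * let the PCI core power the NIC down between bursts of activity.
 */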
7232
bc7f75fa
AK
7233/* PCI Device API Driver */
7234static struct pci_driver e1000_driver = {
7235 .name = e1000e_driver_name,
7236 .id_table = e1000_pci_tbl,
7237 .probe = e1000_probe,
9f9a12f8 7238 .remove = e1000_remove,
f36bb6ca
BA
7239 .driver = {
7240 .pm = &e1000_pm_ops,
7241 },
bc7f75fa
AK
7242 .shutdown = e1000_shutdown,
7243 .err_handler = &e1000_err_handler
7244};
7245
7246/**
7247 * e1000_init_module - Driver Registration Routine
7248 *
7249 * e1000_init_module is the first routine called when the driver is
7250 * loaded. All it does is register with the PCI subsystem.
7251 **/
7252static int __init e1000_init_module(void)
7253{
7254 int ret;
6cf08d1c 7255
8544b9f7
BA
7256 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
7257 e1000e_driver_version);
e78b80b1 7258 pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
bc7f75fa 7259 ret = pci_register_driver(&e1000_driver);
53ec5498 7260
bc7f75fa
AK
7261 return ret;
7262}
7263module_init(e1000_init_module);
7264
7265/**
7266 * e1000_exit_module - Driver Exit Cleanup Routine
7267 *
7268 * e1000_exit_module is called just before the driver is removed
7269 * from memory.
7270 **/
7271static void __exit e1000_exit_module(void)
7272{
7273 pci_unregister_driver(&e1000_driver);
7274}
7275module_exit(e1000_exit_module);
7276
bc7f75fa
AK
7277MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
7278MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
7279MODULE_LICENSE("GPL");
7280MODULE_VERSION(DRV_VERSION);
7281
06c24b91 7282/* netdev.c */