e1000e: Correctly include VLAN_HLEN when changing interface MTU
1/* Intel PRO/1000 Linux driver
2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * The full GNU General Public License is included in this distribution in
14 * the file called "COPYING".
15 *
16 * Contact Information:
17 * Linux NICS <linux.nics@intel.com>
18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 */
21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/init.h>
27#include <linux/pci.h>
28#include <linux/vmalloc.h>
29#include <linux/pagemap.h>
30#include <linux/delay.h>
31#include <linux/netdevice.h>
32#include <linux/interrupt.h>
33#include <linux/tcp.h>
34#include <linux/ipv6.h>
35#include <linux/slab.h>
36#include <net/checksum.h>
37#include <net/ip6_checksum.h>
38#include <linux/ethtool.h>
39#include <linux/if_vlan.h>
40#include <linux/cpu.h>
41#include <linux/smp.h>
42#include <linux/pm_qos.h>
43#include <linux/pm_runtime.h>
44#include <linux/aer.h>
45#include <linux/prefetch.h>
46
47#include "e1000.h"
48
49#define DRV_EXTRAVERSION "-k"
50
51#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION
52char e1000e_driver_name[] = "e1000e";
53const char e1000e_driver_version[] = DRV_VERSION;
54
55#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
56static int debug = -1;
57module_param(debug, int, 0);
58MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
59
60static const struct e1000_info *e1000_info_tbl[] = {
61 [board_82571] = &e1000_82571_info,
62 [board_82572] = &e1000_82572_info,
63 [board_82573] = &e1000_82573_info,
64 [board_82574] = &e1000_82574_info,
65 [board_82583] = &e1000_82583_info,
66 [board_80003es2lan] = &e1000_es2_info,
67 [board_ich8lan] = &e1000_ich8_info,
68 [board_ich9lan] = &e1000_ich9_info,
69 [board_ich10lan] = &e1000_ich10_info,
70 [board_pchlan] = &e1000_pch_info,
71 [board_pch2lan] = &e1000_pch2_info,
72 [board_pch_lpt] = &e1000_pch_lpt_info,
73};
74
75struct e1000_reg_info {
76 u32 ofs;
77 char *name;
78};
79
80static const struct e1000_reg_info e1000_reg_info_tbl[] = {
81 /* General Registers */
82 {E1000_CTRL, "CTRL"},
83 {E1000_STATUS, "STATUS"},
84 {E1000_CTRL_EXT, "CTRL_EXT"},
85
86 /* Interrupt Registers */
87 {E1000_ICR, "ICR"},
88
89 /* Rx Registers */
90 {E1000_RCTL, "RCTL"},
91 {E1000_RDLEN(0), "RDLEN"},
92 {E1000_RDH(0), "RDH"},
93 {E1000_RDT(0), "RDT"},
94 {E1000_RDTR, "RDTR"},
95 {E1000_RXDCTL(0), "RXDCTL"},
96 {E1000_ERT, "ERT"},
97 {E1000_RDBAL(0), "RDBAL"},
98 {E1000_RDBAH(0), "RDBAH"},
99 {E1000_RDFH, "RDFH"},
100 {E1000_RDFT, "RDFT"},
101 {E1000_RDFHS, "RDFHS"},
102 {E1000_RDFTS, "RDFTS"},
103 {E1000_RDFPC, "RDFPC"},
104
105 /* Tx Registers */
106 {E1000_TCTL, "TCTL"},
107 {E1000_TDBAL(0), "TDBAL"},
108 {E1000_TDBAH(0), "TDBAH"},
109 {E1000_TDLEN(0), "TDLEN"},
110 {E1000_TDH(0), "TDH"},
111 {E1000_TDT(0), "TDT"},
112 {E1000_TIDV, "TIDV"},
113 {E1000_TXDCTL(0), "TXDCTL"},
114 {E1000_TADV, "TADV"},
115 {E1000_TARC(0), "TARC"},
116 {E1000_TDFH, "TDFH"},
117 {E1000_TDFT, "TDFT"},
118 {E1000_TDFHS, "TDFHS"},
119 {E1000_TDFTS, "TDFTS"},
120 {E1000_TDFPC, "TDFPC"},
121
122 /* List Terminator */
123 {0, NULL}
124};
125
126/**
127 * e1000_regdump - register printout routine
128 * @hw: pointer to the HW structure
129 * @reginfo: pointer to the register info table
130 **/
131static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
132{
133 int n = 0;
134 char rname[16];
135 u32 regs[8];
136
137 switch (reginfo->ofs) {
138 case E1000_RXDCTL(0):
139 for (n = 0; n < 2; n++)
140 regs[n] = __er32(hw, E1000_RXDCTL(n));
141 break;
142 case E1000_TXDCTL(0):
143 for (n = 0; n < 2; n++)
144 regs[n] = __er32(hw, E1000_TXDCTL(n));
145 break;
146 case E1000_TARC(0):
147 for (n = 0; n < 2; n++)
148 regs[n] = __er32(hw, E1000_TARC(n));
149 break;
150 default:
151 pr_info("%-15s %08x\n",
152 reginfo->name, __er32(hw, reginfo->ofs));
153 return;
154 }
155
156 snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
157 pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
158}
159
160static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
161 struct e1000_buffer *bi)
162{
163 int i;
164 struct e1000_ps_page *ps_page;
165
166 for (i = 0; i < adapter->rx_ps_pages; i++) {
167 ps_page = &bi->ps_pages[i];
168
169 if (ps_page->page) {
170 pr_info("packet dump for ps_page %d:\n", i);
171 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
172 16, 1, page_address(ps_page->page),
173 PAGE_SIZE, true);
174 }
175 }
176}
177
178/**
179 * e1000e_dump - Print registers, Tx-ring and Rx-ring
180 * @adapter: board private structure
181 **/
182static void e1000e_dump(struct e1000_adapter *adapter)
183{
184 struct net_device *netdev = adapter->netdev;
185 struct e1000_hw *hw = &adapter->hw;
186 struct e1000_reg_info *reginfo;
187 struct e1000_ring *tx_ring = adapter->tx_ring;
188 struct e1000_tx_desc *tx_desc;
189 struct my_u0 {
190 __le64 a;
191 __le64 b;
192 } *u0;
193 struct e1000_buffer *buffer_info;
194 struct e1000_ring *rx_ring = adapter->rx_ring;
195 union e1000_rx_desc_packet_split *rx_desc_ps;
196 union e1000_rx_desc_extended *rx_desc;
197 struct my_u1 {
198 __le64 a;
199 __le64 b;
200 __le64 c;
201 __le64 d;
202 } *u1;
203 u32 staterr;
204 int i = 0;
205
206 if (!netif_msg_hw(adapter))
207 return;
208
209 /* Print netdevice Info */
210 if (netdev) {
211 dev_info(&adapter->pdev->dev, "Net device Info\n");
212 pr_info("Device Name state trans_start last_rx\n");
213 pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
214 netdev->state, netdev->trans_start, netdev->last_rx);
215 }
216
217 /* Print Registers */
218 dev_info(&adapter->pdev->dev, "Register Dump\n");
219 pr_info(" Register Name Value\n");
220 for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
221 reginfo->name; reginfo++) {
222 e1000_regdump(hw, reginfo);
223 }
224
225 /* Print Tx Ring Summary */
226 if (!netdev || !netif_running(netdev))
227 return;
228
229 dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
230 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
231 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
232 pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
233 0, tx_ring->next_to_use, tx_ring->next_to_clean,
234 (unsigned long long)buffer_info->dma,
235 buffer_info->length,
236 buffer_info->next_to_watch,
237 (unsigned long long)buffer_info->time_stamp);
238
239 /* Print Tx Ring */
240 if (!netif_msg_tx_done(adapter))
241 goto rx_ring_summary;
242
243 dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
244
245 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
246 *
247 * Legacy Transmit Descriptor
248 * +--------------------------------------------------------------+
249 * 0 | Buffer Address [63:0] (Reserved on Write Back) |
250 * +--------------------------------------------------------------+
251 * 8 | Special | CSS | Status | CMD | CSO | Length |
252 * +--------------------------------------------------------------+
253 * 63 48 47 36 35 32 31 24 23 16 15 0
254 *
255 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
256 * 63 48 47 40 39 32 31 16 15 8 7 0
257 * +----------------------------------------------------------------+
258 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
259 * +----------------------------------------------------------------+
260 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
261 * +----------------------------------------------------------------+
262 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
263 *
264 * Extended Data Descriptor (DTYP=0x1)
265 * +----------------------------------------------------------------+
266 * 0 | Buffer Address [63:0] |
267 * +----------------------------------------------------------------+
268 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
269 * +----------------------------------------------------------------+
270 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
271 */
272 pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n");
273 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n");
274 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n");
275 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
276 const char *next_desc;
277 tx_desc = E1000_TX_DESC(*tx_ring, i);
278 buffer_info = &tx_ring->buffer_info[i];
279 u0 = (struct my_u0 *)tx_desc;
280 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
281 next_desc = " NTC/U";
282 else if (i == tx_ring->next_to_use)
283 next_desc = " NTU";
284 else if (i == tx_ring->next_to_clean)
285 next_desc = " NTC";
286 else
287 next_desc = "";
288 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n",
289 (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
290 ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
291 i,
292 (unsigned long long)le64_to_cpu(u0->a),
293 (unsigned long long)le64_to_cpu(u0->b),
294 (unsigned long long)buffer_info->dma,
295 buffer_info->length, buffer_info->next_to_watch,
296 (unsigned long long)buffer_info->time_stamp,
297 buffer_info->skb, next_desc);
298
299 if (netif_msg_pktdata(adapter) && buffer_info->skb)
300 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
301 16, 1, buffer_info->skb->data,
302 buffer_info->skb->len, true);
303 }
304
305 /* Print Rx Ring Summary */
306rx_ring_summary:
307 dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
308 pr_info("Queue [NTU] [NTC]\n");
309 pr_info(" %5d %5X %5X\n",
310 0, rx_ring->next_to_use, rx_ring->next_to_clean);
311
312 /* Print Rx Ring */
313 if (!netif_msg_rx_status(adapter))
314 return;
315
316 dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
317 switch (adapter->rx_ps_pages) {
318 case 1:
319 case 2:
320 case 3:
321 /* [Extended] Packet Split Receive Descriptor Format
322 *
323 * +-----------------------------------------------------+
324 * 0 | Buffer Address 0 [63:0] |
325 * +-----------------------------------------------------+
326 * 8 | Buffer Address 1 [63:0] |
327 * +-----------------------------------------------------+
328 * 16 | Buffer Address 2 [63:0] |
329 * +-----------------------------------------------------+
330 * 24 | Buffer Address 3 [63:0] |
331 * +-----------------------------------------------------+
332 */
333 pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n");
334 /* [Extended] Receive Descriptor (Write-Back) Format
335 *
336 * 63 48 47 32 31 13 12 8 7 4 3 0
337 * +------------------------------------------------------+
338 * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS |
339 * | Checksum | Ident | | Queue | | Type |
340 * +------------------------------------------------------+
341 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
342 * +------------------------------------------------------+
343 * 63 48 47 32 31 20 19 0
344 */
345 pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
346 for (i = 0; i < rx_ring->count; i++) {
347 const char *next_desc;
348 buffer_info = &rx_ring->buffer_info[i];
349 rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
350 u1 = (struct my_u1 *)rx_desc_ps;
351 staterr =
352 le32_to_cpu(rx_desc_ps->wb.middle.status_error);
353
354 if (i == rx_ring->next_to_use)
355 next_desc = " NTU";
356 else if (i == rx_ring->next_to_clean)
357 next_desc = " NTC";
358 else
359 next_desc = "";
360
361 if (staterr & E1000_RXD_STAT_DD) {
362 /* Descriptor Done */
363 pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n",
364 "RWB", i,
365 (unsigned long long)le64_to_cpu(u1->a),
366 (unsigned long long)le64_to_cpu(u1->b),
367 (unsigned long long)le64_to_cpu(u1->c),
368 (unsigned long long)le64_to_cpu(u1->d),
369 buffer_info->skb, next_desc);
370 } else {
371 pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n",
372 "R ", i,
373 (unsigned long long)le64_to_cpu(u1->a),
374 (unsigned long long)le64_to_cpu(u1->b),
375 (unsigned long long)le64_to_cpu(u1->c),
376 (unsigned long long)le64_to_cpu(u1->d),
377 (unsigned long long)buffer_info->dma,
378 buffer_info->skb, next_desc);
379
380 if (netif_msg_pktdata(adapter))
381 e1000e_dump_ps_pages(adapter,
382 buffer_info);
383 }
384 }
385 break;
386 default:
387 case 0:
388 /* Extended Receive Descriptor (Read) Format
389 *
390 * +-----------------------------------------------------+
391 * 0 | Buffer Address [63:0] |
392 * +-----------------------------------------------------+
393 * 8 | Reserved |
394 * +-----------------------------------------------------+
395 */
396 pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n");
397 /* Extended Receive Descriptor (Write-Back) Format
398 *
399 * 63 48 47 32 31 24 23 4 3 0
400 * +------------------------------------------------------+
401 * | RSS Hash | | | |
402 * 0 +-------------------+ Rsvd | Reserved | MRQ RSS |
403 * | Packet | IP | | | Type |
404 * | Checksum | Ident | | | |
405 * +------------------------------------------------------+
406 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
407 * +------------------------------------------------------+
408 * 63 48 47 32 31 20 19 0
409 */
410 pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n");
411
412 for (i = 0; i < rx_ring->count; i++) {
413 const char *next_desc;
414
415 buffer_info = &rx_ring->buffer_info[i];
416 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
417 u1 = (struct my_u1 *)rx_desc;
418 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
419
420 if (i == rx_ring->next_to_use)
421 next_desc = " NTU";
422 else if (i == rx_ring->next_to_clean)
423 next_desc = " NTC";
424 else
425 next_desc = "";
426
427 if (staterr & E1000_RXD_STAT_DD) {
428 /* Descriptor Done */
429 pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n",
430 "RWB", i,
431 (unsigned long long)le64_to_cpu(u1->a),
432 (unsigned long long)le64_to_cpu(u1->b),
433 buffer_info->skb, next_desc);
434 } else {
435 pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n",
436 "R ", i,
437 (unsigned long long)le64_to_cpu(u1->a),
438 (unsigned long long)le64_to_cpu(u1->b),
439 (unsigned long long)buffer_info->dma,
440 buffer_info->skb, next_desc);
441
442 if (netif_msg_pktdata(adapter) &&
443 buffer_info->skb)
444 print_hex_dump(KERN_INFO, "",
445 DUMP_PREFIX_ADDRESS, 16,
446 1,
447 buffer_info->skb->data,
448 adapter->rx_buffer_len,
449 true);
450 }
451 }
452 }
453}
454
455/**
456 * e1000_desc_unused - calculate if we have unused descriptors
457 **/
458static int e1000_desc_unused(struct e1000_ring *ring)
459{
460 if (ring->next_to_clean > ring->next_to_use)
461 return ring->next_to_clean - ring->next_to_use - 1;
462
463 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
464}
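/* Worked example of the ring accounting above (illustration only, values
 * assumed): with ring->count = 256, next_to_clean = 10 and
 * next_to_use = 250, next_to_clean is not greater than next_to_use, so
 * unused = 256 + 10 - 250 - 1 = 15. The "- 1" keeps one descriptor slot
 * permanently empty so a completely full ring can be told apart from an
 * empty one.
 */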
465
466/**
467 * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp
468 * @adapter: board private structure
469 * @hwtstamps: time stamp structure to update
470 * @systim: unsigned 64bit system time value.
471 *
472 * Convert the system time value stored in the RX/TXSTMP registers into a
473 * hwtstamp which can be used by the upper level time stamping functions.
474 *
475 * The 'systim_lock' spinlock is used to protect the consistency of the
476 * system time value. This is needed because reading the 64 bit time
477 * value involves reading two 32 bit registers. The first read latches the
478 * value.
479 **/
480static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter,
481 struct skb_shared_hwtstamps *hwtstamps,
482 u64 systim)
483{
484 u64 ns;
485 unsigned long flags;
486
487 spin_lock_irqsave(&adapter->systim_lock, flags);
488 ns = timecounter_cyc2time(&adapter->tc, systim);
489 spin_unlock_irqrestore(&adapter->systim_lock, flags);
490
491 memset(hwtstamps, 0, sizeof(*hwtstamps));
492 hwtstamps->hwtstamp = ns_to_ktime(ns);
493}
494
495/**
496 * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
497 * @adapter: board private structure
498 * @status: descriptor extended error and status field
499 * @skb: particular skb to include time stamp
500 *
501 * If the time stamp is valid, convert it into the timecounter ns value
502 * and store that result into the shhwtstamps structure which is passed
503 * up the network stack.
504 **/
505static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
506 struct sk_buff *skb)
507{
508 struct e1000_hw *hw = &adapter->hw;
509 u64 rxstmp;
510
511 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) ||
512 !(status & E1000_RXDEXT_STATERR_TST) ||
513 !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
514 return;
515
516 /* The Rx time stamp registers contain the time stamp. No other
517 * received packet will be time stamped until the Rx time stamp
518 * registers are read. Because only one packet can be time stamped
519 * at a time, the register values must belong to this packet and
520 * therefore none of the other additional attributes need to be
521 * compared.
522 */
523 rxstmp = (u64)er32(RXSTMPL);
524 rxstmp |= (u64)er32(RXSTMPH) << 32;
525 e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);
526
527 adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP;
528}
529
530/**
531 * e1000_receive_skb - helper function to handle Rx indications
532 * @adapter: board private structure
533 * @staterr: descriptor extended error and status field as written by hardware
534 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
535 * @skb: pointer to sk_buff to be indicated to stack
536 **/
537static void e1000_receive_skb(struct e1000_adapter *adapter,
538 struct net_device *netdev, struct sk_buff *skb,
539 u32 staterr, __le16 vlan)
540{
541 u16 tag = le16_to_cpu(vlan);
542
543 e1000e_rx_hwtstamp(adapter, staterr, skb);
544
545 skb->protocol = eth_type_trans(skb, netdev);
546
547 if (staterr & E1000_RXD_STAT_VP)
548 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
549
550 napi_gro_receive(&adapter->napi, skb);
551}
552
553/**
554 * e1000_rx_checksum - Receive Checksum Offload
555 * @adapter: board private structure
556 * @status_err: receive descriptor status and error fields
557 * @csum: receive descriptor csum field
558 * @sk_buff: socket buffer with received data
559 **/
560static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
561 struct sk_buff *skb)
562{
563 u16 status = (u16)status_err;
564 u8 errors = (u8)(status_err >> 24);
565
566 skb_checksum_none_assert(skb);
567
568 /* Rx checksum disabled */
569 if (!(adapter->netdev->features & NETIF_F_RXCSUM))
570 return;
571
572 /* Ignore Checksum bit is set */
573 if (status & E1000_RXD_STAT_IXSM)
574 return;
575
576 /* TCP/UDP checksum error bit or IP checksum error bit is set */
577 if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
578 /* let the stack verify checksum errors */
579 adapter->hw_csum_err++;
580 return;
581 }
582
583 /* TCP/UDP Checksum has not been calculated */
584 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
585 return;
586
587 /* It must be a TCP or UDP packet with a valid checksum */
588 skb->ip_summed = CHECKSUM_UNNECESSARY;
589 adapter->hw_csum_good++;
590}
591
592static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
593{
594 struct e1000_adapter *adapter = rx_ring->adapter;
595 struct e1000_hw *hw = &adapter->hw;
596 s32 ret_val = __ew32_prepare(hw);
597
598 writel(i, rx_ring->tail);
599
600 if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
601 u32 rctl = er32(RCTL);
602 ew32(RCTL, rctl & ~E1000_RCTL_EN);
603 e_err("ME firmware caused invalid RDT - resetting\n");
604 schedule_work(&adapter->reset_task);
605 }
606}
607
608static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
609{
610 struct e1000_adapter *adapter = tx_ring->adapter;
611 struct e1000_hw *hw = &adapter->hw;
612 s32 ret_val = __ew32_prepare(hw);
613
614 writel(i, tx_ring->tail);
615
616 if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
617 u32 tctl = er32(TCTL);
618 ew32(TCTL, tctl & ~E1000_TCTL_EN);
619 e_err("ME firmware caused invalid TDT - resetting\n");
620 schedule_work(&adapter->reset_task);
621 }
622}
623
624/**
625 * e1000_alloc_rx_buffers - Replace used receive buffers
626 * @rx_ring: Rx descriptor ring
627 **/
628static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
629 int cleaned_count, gfp_t gfp)
630{
631 struct e1000_adapter *adapter = rx_ring->adapter;
632 struct net_device *netdev = adapter->netdev;
633 struct pci_dev *pdev = adapter->pdev;
634 union e1000_rx_desc_extended *rx_desc;
635 struct e1000_buffer *buffer_info;
636 struct sk_buff *skb;
637 unsigned int i;
638 unsigned int bufsz = adapter->rx_buffer_len;
639
640 i = rx_ring->next_to_use;
641 buffer_info = &rx_ring->buffer_info[i];
642
643 while (cleaned_count--) {
644 skb = buffer_info->skb;
645 if (skb) {
646 skb_trim(skb, 0);
647 goto map_skb;
648 }
649
650 skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
651 if (!skb) {
652 /* Better luck next round */
653 adapter->alloc_rx_buff_failed++;
654 break;
655 }
656
657 buffer_info->skb = skb;
658map_skb:
659 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
660 adapter->rx_buffer_len,
661 DMA_FROM_DEVICE);
662 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
663 dev_err(&pdev->dev, "Rx DMA map failed\n");
664 adapter->rx_dma_failed++;
665 break;
666 }
667
668 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
669 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
670
671 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
672 /* Force memory writes to complete before letting h/w
673 * know there are new descriptors to fetch. (Only
674 * applicable for weak-ordered memory model archs,
675 * such as IA-64).
676 */
677 wmb();
678 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
679 e1000e_update_rdt_wa(rx_ring, i);
680 else
681 writel(i, rx_ring->tail);
682 }
683 i++;
684 if (i == rx_ring->count)
685 i = 0;
686 buffer_info = &rx_ring->buffer_info[i];
687 }
688
689 rx_ring->next_to_use = i;
690}
691
692/**
693 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
694 * @rx_ring: Rx descriptor ring
695 **/
696static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
697 int cleaned_count, gfp_t gfp)
698{
699 struct e1000_adapter *adapter = rx_ring->adapter;
700 struct net_device *netdev = adapter->netdev;
701 struct pci_dev *pdev = adapter->pdev;
702 union e1000_rx_desc_packet_split *rx_desc;
703 struct e1000_buffer *buffer_info;
704 struct e1000_ps_page *ps_page;
705 struct sk_buff *skb;
706 unsigned int i, j;
707
708 i = rx_ring->next_to_use;
709 buffer_info = &rx_ring->buffer_info[i];
710
711 while (cleaned_count--) {
712 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
713
714 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
715 ps_page = &buffer_info->ps_pages[j];
716 if (j >= adapter->rx_ps_pages) {
717 /* all unused desc entries get hw null ptr */
718 rx_desc->read.buffer_addr[j + 1] =
719 ~cpu_to_le64(0);
720 continue;
721 }
722 if (!ps_page->page) {
723 ps_page->page = alloc_page(gfp);
724 if (!ps_page->page) {
725 adapter->alloc_rx_buff_failed++;
726 goto no_buffers;
727 }
728 ps_page->dma = dma_map_page(&pdev->dev,
729 ps_page->page,
730 0, PAGE_SIZE,
731 DMA_FROM_DEVICE);
732 if (dma_mapping_error(&pdev->dev,
733 ps_page->dma)) {
734 dev_err(&adapter->pdev->dev,
735 "Rx DMA page map failed\n");
736 adapter->rx_dma_failed++;
737 goto no_buffers;
738 }
739 }
740 /* Refresh the desc even if buffer_addrs
741 * didn't change because each write-back
742 * erases this info.
743 */
744 rx_desc->read.buffer_addr[j + 1] =
745 cpu_to_le64(ps_page->dma);
746 }
747
748 skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
749 gfp);
750
751 if (!skb) {
752 adapter->alloc_rx_buff_failed++;
753 break;
754 }
755
756 buffer_info->skb = skb;
757 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
758 adapter->rx_ps_bsize0,
759 DMA_FROM_DEVICE);
760 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
761 dev_err(&pdev->dev, "Rx DMA map failed\n");
762 adapter->rx_dma_failed++;
763 /* cleanup skb */
764 dev_kfree_skb_any(skb);
765 buffer_info->skb = NULL;
766 break;
767 }
768
769 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
770
771 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
772 /* Force memory writes to complete before letting h/w
773 * know there are new descriptors to fetch. (Only
774 * applicable for weak-ordered memory model archs,
775 * such as IA-64).
776 */
777 wmb();
778 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
779 e1000e_update_rdt_wa(rx_ring, i << 1);
780 else
781 writel(i << 1, rx_ring->tail);
782 }
783
784 i++;
785 if (i == rx_ring->count)
786 i = 0;
787 buffer_info = &rx_ring->buffer_info[i];
788 }
789
790no_buffers:
791 rx_ring->next_to_use = i;
792}
793
794/**
795 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
796 * @rx_ring: Rx descriptor ring
797 * @cleaned_count: number of buffers to allocate this pass
798 **/
799
800static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
801 int cleaned_count, gfp_t gfp)
802{
803 struct e1000_adapter *adapter = rx_ring->adapter;
804 struct net_device *netdev = adapter->netdev;
805 struct pci_dev *pdev = adapter->pdev;
806 union e1000_rx_desc_extended *rx_desc;
807 struct e1000_buffer *buffer_info;
808 struct sk_buff *skb;
809 unsigned int i;
810 unsigned int bufsz = 256 - 16; /* for skb_reserve */
811
812 i = rx_ring->next_to_use;
813 buffer_info = &rx_ring->buffer_info[i];
814
815 while (cleaned_count--) {
816 skb = buffer_info->skb;
817 if (skb) {
818 skb_trim(skb, 0);
819 goto check_page;
820 }
821
822 skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
823 if (unlikely(!skb)) {
824 /* Better luck next round */
825 adapter->alloc_rx_buff_failed++;
826 break;
827 }
828
829 buffer_info->skb = skb;
830check_page:
831 /* allocate a new page if necessary */
832 if (!buffer_info->page) {
833 buffer_info->page = alloc_page(gfp);
834 if (unlikely(!buffer_info->page)) {
835 adapter->alloc_rx_buff_failed++;
836 break;
837 }
838 }
839
840 if (!buffer_info->dma) {
841 buffer_info->dma = dma_map_page(&pdev->dev,
842 buffer_info->page, 0,
843 PAGE_SIZE,
844 DMA_FROM_DEVICE);
845 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
846 adapter->alloc_rx_buff_failed++;
847 break;
848 }
849 }
850
851 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
852 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
853
854 if (unlikely(++i == rx_ring->count))
855 i = 0;
856 buffer_info = &rx_ring->buffer_info[i];
857 }
858
859 if (likely(rx_ring->next_to_use != i)) {
860 rx_ring->next_to_use = i;
861 if (unlikely(i-- == 0))
862 i = (rx_ring->count - 1);
863
864 /* Force memory writes to complete before letting h/w
865 * know there are new descriptors to fetch. (Only
866 * applicable for weak-ordered memory model archs,
867 * such as IA-64).
868 */
869 wmb();
870 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
871 e1000e_update_rdt_wa(rx_ring, i);
872 else
873 writel(i, rx_ring->tail);
874 }
875}
876
877static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
878 struct sk_buff *skb)
879{
880 if (netdev->features & NETIF_F_RXHASH)
881 skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
882}
883
884/**
885 * e1000_clean_rx_irq - Send received data up the network stack
886 * @rx_ring: Rx descriptor ring
887 *
888 * the return value indicates whether actual cleaning was done, there
889 * is no guarantee that everything was cleaned
890 **/
891static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
892 int work_to_do)
893{
894 struct e1000_adapter *adapter = rx_ring->adapter;
895 struct net_device *netdev = adapter->netdev;
896 struct pci_dev *pdev = adapter->pdev;
897 struct e1000_hw *hw = &adapter->hw;
898 union e1000_rx_desc_extended *rx_desc, *next_rxd;
899 struct e1000_buffer *buffer_info, *next_buffer;
900 u32 length, staterr;
901 unsigned int i;
902 int cleaned_count = 0;
903 bool cleaned = false;
904 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
905
906 i = rx_ring->next_to_clean;
907 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
908 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
909 buffer_info = &rx_ring->buffer_info[i];
910
911 while (staterr & E1000_RXD_STAT_DD) {
912 struct sk_buff *skb;
913
914 if (*work_done >= work_to_do)
915 break;
916 (*work_done)++;
917 rmb(); /* read descriptor and rx_buffer_info after status DD */
918
919 skb = buffer_info->skb;
920 buffer_info->skb = NULL;
921
922 prefetch(skb->data - NET_IP_ALIGN);
923
924 i++;
925 if (i == rx_ring->count)
926 i = 0;
927 next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
928 prefetch(next_rxd);
929
930 next_buffer = &rx_ring->buffer_info[i];
931
932 cleaned = true;
933 cleaned_count++;
934 dma_unmap_single(&pdev->dev, buffer_info->dma,
935 adapter->rx_buffer_len, DMA_FROM_DEVICE);
936 buffer_info->dma = 0;
937
938 length = le16_to_cpu(rx_desc->wb.upper.length);
939
940 /* !EOP means multiple descriptors were used to store a single
941 * packet, if that's the case we need to toss it. In fact, we
942 * need to toss every packet with the EOP bit clear and the
943 * next frame that _does_ have the EOP bit set, as it is by
944 * definition only a frame fragment
945 */
946 if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
947 adapter->flags2 |= FLAG2_IS_DISCARDING;
948
949 if (adapter->flags2 & FLAG2_IS_DISCARDING) {
950 /* All receives must fit into a single buffer */
951 e_dbg("Receive packet consumed multiple buffers\n");
952 /* recycle */
953 buffer_info->skb = skb;
954 if (staterr & E1000_RXD_STAT_EOP)
955 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
956 goto next_desc;
957 }
958
959 if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
960 !(netdev->features & NETIF_F_RXALL))) {
961 /* recycle */
962 buffer_info->skb = skb;
963 goto next_desc;
964 }
965
966 /* adjust length to remove Ethernet CRC */
967 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
968 /* If configured to store CRC, don't subtract FCS,
969 * but keep the FCS bytes out of the total_rx_bytes
970 * counter
971 */
972 if (netdev->features & NETIF_F_RXFCS)
973 total_rx_bytes -= 4;
974 else
975 length -= 4;
976 }
977
978 total_rx_bytes += length;
979 total_rx_packets++;
980
981 /* code added for copybreak, this should improve
982 * performance for small packets with large amounts
983 * of reassembly being done in the stack
984 */
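 /* Illustration of the copybreak path below: copybreak is the driver's
  * small-packet copy threshold. Frames shorter than it are copied into a
  * right-sized skb, and the original full-size skb is stashed back in
  * buffer_info->skb so the receive buffer can be reused as-is.
  */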
985 if (length < copybreak) {
986 struct sk_buff *new_skb =
987 netdev_alloc_skb_ip_align(netdev, length);
988 if (new_skb) {
989 skb_copy_to_linear_data_offset(new_skb,
990 -NET_IP_ALIGN,
991 (skb->data -
992 NET_IP_ALIGN),
993 (length +
994 NET_IP_ALIGN));
995 /* save the skb in buffer_info as good */
996 buffer_info->skb = skb;
997 skb = new_skb;
998 }
999 /* else just continue with the old one */
1000 }
1001 /* end copybreak code */
1002 skb_put(skb, length);
1003
1004 /* Receive Checksum Offload */
1005 e1000_rx_checksum(adapter, staterr, skb);
1006
1007 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1008
1009 e1000_receive_skb(adapter, netdev, skb, staterr,
1010 rx_desc->wb.upper.vlan);
1011
1012next_desc:
1013 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
1014
1015 /* return some buffers to hardware, one at a time is too slow */
1016 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
1017 adapter->alloc_rx_buf(rx_ring, cleaned_count,
1018 GFP_ATOMIC);
1019 cleaned_count = 0;
1020 }
1021
1022 /* use prefetched values */
1023 rx_desc = next_rxd;
1024 buffer_info = next_buffer;
1025
1026 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1027 }
1028 rx_ring->next_to_clean = i;
1029
1030 cleaned_count = e1000_desc_unused(rx_ring);
1031 if (cleaned_count)
1032 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
1033
1034 adapter->total_rx_bytes += total_rx_bytes;
1035 adapter->total_rx_packets += total_rx_packets;
1036 return cleaned;
1037}
1038
1039static void e1000_put_txbuf(struct e1000_ring *tx_ring,
1040 struct e1000_buffer *buffer_info)
1041{
1042 struct e1000_adapter *adapter = tx_ring->adapter;
1043
1044 if (buffer_info->dma) {
1045 if (buffer_info->mapped_as_page)
1046 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1047 buffer_info->length, DMA_TO_DEVICE);
1048 else
1049 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1050 buffer_info->length, DMA_TO_DEVICE);
1051 buffer_info->dma = 0;
1052 }
1053 if (buffer_info->skb) {
1054 dev_kfree_skb_any(buffer_info->skb);
1055 buffer_info->skb = NULL;
1056 }
1057 buffer_info->time_stamp = 0;
1058}
1059
1060static void e1000_print_hw_hang(struct work_struct *work)
1061{
1062 struct e1000_adapter *adapter = container_of(work,
1063 struct e1000_adapter,
1064 print_hang_task);
1065 struct net_device *netdev = adapter->netdev;
1066 struct e1000_ring *tx_ring = adapter->tx_ring;
1067 unsigned int i = tx_ring->next_to_clean;
1068 unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
1069 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
1070 struct e1000_hw *hw = &adapter->hw;
1071 u16 phy_status, phy_1000t_status, phy_ext_status;
1072 u16 pci_status;
1073
1074 if (test_bit(__E1000_DOWN, &adapter->state))
1075 return;
1076
1077 if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) {
1078 /* May be block on write-back, flush and detect again
1079 * flush pending descriptor writebacks to memory
1080 */
1081 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
1082 /* execute the writes immediately */
1083 e1e_flush();
1084 /* Due to rare timing issues, write to TIDV again to ensure
1085 * the write is successful
1086 */
1087 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
1088 /* execute the writes immediately */
1089 e1e_flush();
1090 adapter->tx_hang_recheck = true;
1091 return;
1092 }
1093 adapter->tx_hang_recheck = false;
1094
1095 if (er32(TDH(0)) == er32(TDT(0))) {
1096 e_dbg("false hang detected, ignoring\n");
1097 return;
1098 }
1099
1100 /* Real hang detected */
1101 netif_stop_queue(netdev);
1102
1103 e1e_rphy(hw, MII_BMSR, &phy_status);
1104 e1e_rphy(hw, MII_STAT1000, &phy_1000t_status);
1105 e1e_rphy(hw, MII_ESTATUS, &phy_ext_status);
1106
1107 pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
1108
1109 /* detected Hardware unit hang */
1110 e_err("Detected Hardware Unit Hang:\n"
1111 " TDH <%x>\n"
1112 " TDT <%x>\n"
1113 " next_to_use <%x>\n"
1114 " next_to_clean <%x>\n"
1115 "buffer_info[next_to_clean]:\n"
1116 " time_stamp <%lx>\n"
1117 " next_to_watch <%x>\n"
1118 " jiffies <%lx>\n"
1119 " next_to_watch.status <%x>\n"
1120 "MAC Status <%x>\n"
1121 "PHY Status <%x>\n"
1122 "PHY 1000BASE-T Status <%x>\n"
1123 "PHY Extended Status <%x>\n"
1124 "PCI Status <%x>\n",
1125 readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
1126 tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
1127 eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
1128 phy_status, phy_1000t_status, phy_ext_status, pci_status);
1129
1130 e1000e_dump(adapter);
1131
1132 /* Suggest workaround for known h/w issue */
1133 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
1134 e_err("Try turning off Tx pause (flow control) via ethtool\n");
1135}
1136
1137/**
1138 * e1000e_tx_hwtstamp_work - check for Tx time stamp
1139 * @work: pointer to work struct
1140 *
1141 * This work function polls the TSYNCTXCTL valid bit to determine when a
1142 * timestamp has been taken for the current stored skb. The timestamp must
1143 * be for this skb because only one such packet is allowed in the queue.
1144 */
1145static void e1000e_tx_hwtstamp_work(struct work_struct *work)
1146{
1147 struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
1148 tx_hwtstamp_work);
1149 struct e1000_hw *hw = &adapter->hw;
1150
1151 if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
1152 struct skb_shared_hwtstamps shhwtstamps;
1153 u64 txstmp;
1154
1155 txstmp = er32(TXSTMPL);
1156 txstmp |= (u64)er32(TXSTMPH) << 32;
1157
1158 e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);
1159
1160 skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
1161 dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
1162 adapter->tx_hwtstamp_skb = NULL;
1163 } else if (time_after(jiffies, adapter->tx_hwtstamp_start
1164 + adapter->tx_timeout_factor * HZ)) {
1165 dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
1166 adapter->tx_hwtstamp_skb = NULL;
1167 adapter->tx_hwtstamp_timeouts++;
1168 e_warn("clearing Tx timestamp hang");
1169 } else {
1170 /* reschedule to check later */
1171 schedule_work(&adapter->tx_hwtstamp_work);
1172 }
1173}
1174
1175/**
1176 * e1000_clean_tx_irq - Reclaim resources after transmit completes
1177 * @tx_ring: Tx descriptor ring
1178 *
1179 * the return value indicates whether actual cleaning was done, there
1180 * is no guarantee that everything was cleaned
1181 **/
1182static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
1183{
1184 struct e1000_adapter *adapter = tx_ring->adapter;
1185 struct net_device *netdev = adapter->netdev;
1186 struct e1000_hw *hw = &adapter->hw;
1187 struct e1000_tx_desc *tx_desc, *eop_desc;
1188 struct e1000_buffer *buffer_info;
1189 unsigned int i, eop;
1190 unsigned int count = 0;
1191 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
1192 unsigned int bytes_compl = 0, pkts_compl = 0;
1193
1194 i = tx_ring->next_to_clean;
1195 eop = tx_ring->buffer_info[i].next_to_watch;
1196 eop_desc = E1000_TX_DESC(*tx_ring, eop);
1197
1198 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
1199 (count < tx_ring->count)) {
1200 bool cleaned = false;
1201 rmb(); /* read buffer_info after eop_desc */
1202 for (; !cleaned; count++) {
1203 tx_desc = E1000_TX_DESC(*tx_ring, i);
1204 buffer_info = &tx_ring->buffer_info[i];
1205 cleaned = (i == eop);
1206
1207 if (cleaned) {
1208 total_tx_packets += buffer_info->segs;
1209 total_tx_bytes += buffer_info->bytecount;
1210 if (buffer_info->skb) {
1211 bytes_compl += buffer_info->skb->len;
1212 pkts_compl++;
1213 }
1214 }
1215
1216 e1000_put_txbuf(tx_ring, buffer_info);
1217 tx_desc->upper.data = 0;
1218
1219 i++;
1220 if (i == tx_ring->count)
1221 i = 0;
1222 }
1223
1224 if (i == tx_ring->next_to_use)
1225 break;
1226 eop = tx_ring->buffer_info[i].next_to_watch;
1227 eop_desc = E1000_TX_DESC(*tx_ring, eop);
1228 }
1229
1230 tx_ring->next_to_clean = i;
1231
1232 netdev_completed_queue(netdev, pkts_compl, bytes_compl);
1233
1234#define TX_WAKE_THRESHOLD 32
1235 if (count && netif_carrier_ok(netdev) &&
1236 e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
1237 /* Make sure that anybody stopping the queue after this
1238 * sees the new next_to_clean.
1239 */
1240 smp_mb();
1241
1242 if (netif_queue_stopped(netdev) &&
1243 !(test_bit(__E1000_DOWN, &adapter->state))) {
1244 netif_wake_queue(netdev);
1245 ++adapter->restart_queue;
1246 }
1247 }
1248
1249 if (adapter->detect_tx_hung) {
1250 /* Detect a transmit hang in hardware, this serializes the
1251 * check with the clearing of time_stamp and movement of i
1252 */
1253 adapter->detect_tx_hung = false;
1254 if (tx_ring->buffer_info[i].time_stamp &&
1255 time_after(jiffies, tx_ring->buffer_info[i].time_stamp
1256 + (adapter->tx_timeout_factor * HZ)) &&
1257 !(er32(STATUS) & E1000_STATUS_TXOFF))
1258 schedule_work(&adapter->print_hang_task);
1259 else
1260 adapter->tx_hang_recheck = false;
1261 }
1262 adapter->total_tx_bytes += total_tx_bytes;
1263 adapter->total_tx_packets += total_tx_packets;
1264 return count < tx_ring->count;
1265}
1266
1267/**
1268 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
1269 * @rx_ring: Rx descriptor ring
1270 *
1271 * the return value indicates whether actual cleaning was done, there
1272 * is no guarantee that everything was cleaned
1273 **/
1274static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
1275 int work_to_do)
1276{
1277 struct e1000_adapter *adapter = rx_ring->adapter;
1278 struct e1000_hw *hw = &adapter->hw;
1279 union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
1280 struct net_device *netdev = adapter->netdev;
1281 struct pci_dev *pdev = adapter->pdev;
1282 struct e1000_buffer *buffer_info, *next_buffer;
1283 struct e1000_ps_page *ps_page;
1284 struct sk_buff *skb;
1285 unsigned int i, j;
1286 u32 length, staterr;
1287 int cleaned_count = 0;
1288 bool cleaned = false;
1289 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1290
1291 i = rx_ring->next_to_clean;
1292 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
1293 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
1294 buffer_info = &rx_ring->buffer_info[i];
1295
1296 while (staterr & E1000_RXD_STAT_DD) {
1297 if (*work_done >= work_to_do)
1298 break;
1299 (*work_done)++;
1300 skb = buffer_info->skb;
1301 rmb(); /* read descriptor and rx_buffer_info after status DD */
1302
1303 /* in the packet split case this is header only */
1304 prefetch(skb->data - NET_IP_ALIGN);
1305
1306 i++;
1307 if (i == rx_ring->count)
1308 i = 0;
1309 next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
1310 prefetch(next_rxd);
1311
1312 next_buffer = &rx_ring->buffer_info[i];
1313
1314 cleaned = true;
1315 cleaned_count++;
1316 dma_unmap_single(&pdev->dev, buffer_info->dma,
1317 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
1318 buffer_info->dma = 0;
1319
1320 /* see !EOP comment in other Rx routine */
1321 if (!(staterr & E1000_RXD_STAT_EOP))
1322 adapter->flags2 |= FLAG2_IS_DISCARDING;
1323
1324 if (adapter->flags2 & FLAG2_IS_DISCARDING) {
1325 e_dbg("Packet Split buffers didn't pick up the full packet\n");
1326 dev_kfree_skb_irq(skb);
1327 if (staterr & E1000_RXD_STAT_EOP)
1328 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1329 goto next_desc;
1330 }
1331
1332 if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
1333 !(netdev->features & NETIF_F_RXALL))) {
1334 dev_kfree_skb_irq(skb);
1335 goto next_desc;
1336 }
1337
1338 length = le16_to_cpu(rx_desc->wb.middle.length0);
1339
1340 if (!length) {
1341 e_dbg("Last part of the packet spanning multiple descriptors\n");
1342 dev_kfree_skb_irq(skb);
1343 goto next_desc;
1344 }
1345
1346 /* Good Receive */
1347 skb_put(skb, length);
1348
1349 {
1350 /* this looks ugly, but it seems compiler issues make
1351 * it more efficient than reusing j
1352 */
1353 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
1354
1355 /* page alloc/put takes too long and effects small
1356 * packet throughput, so unsplit small packets and
1357 * save the alloc/put only valid in softirq (napi)
1358 * context to call kmap_*
1359 */
1360 if (l1 && (l1 <= copybreak) &&
1361 ((length + l1) <= adapter->rx_ps_bsize0)) {
1362 u8 *vaddr;
1363
1364 ps_page = &buffer_info->ps_pages[0];
1365
1366 /* there is no documentation about how to call
1367 * kmap_atomic, so we can't hold the mapping
1368 * very long
1369 */
1370 dma_sync_single_for_cpu(&pdev->dev,
1371 ps_page->dma,
1372 PAGE_SIZE,
1373 DMA_FROM_DEVICE);
1374 vaddr = kmap_atomic(ps_page->page);
1375 memcpy(skb_tail_pointer(skb), vaddr, l1);
1376 kunmap_atomic(vaddr);
1377 dma_sync_single_for_device(&pdev->dev,
1378 ps_page->dma,
1379 PAGE_SIZE,
1380 DMA_FROM_DEVICE);
1381
1382 /* remove the CRC */
1383 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
1384 if (!(netdev->features & NETIF_F_RXFCS))
1385 l1 -= 4;
1386 }
1387
1388 skb_put(skb, l1);
1389 goto copydone;
1390 } /* if */
1391 }
1392
1393 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1394 length = le16_to_cpu(rx_desc->wb.upper.length[j]);
1395 if (!length)
1396 break;
1397
1398 ps_page = &buffer_info->ps_pages[j];
1399 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1400 DMA_FROM_DEVICE);
1401 ps_page->dma = 0;
1402 skb_fill_page_desc(skb, j, ps_page->page, 0, length);
1403 ps_page->page = NULL;
1404 skb->len += length;
1405 skb->data_len += length;
1406 skb->truesize += PAGE_SIZE;
1407 }
1408
1409 /* strip the ethernet crc, problem is we're using pages now so
1410 * this whole operation can get a little cpu intensive
1411 */
1412 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
1413 if (!(netdev->features & NETIF_F_RXFCS))
1414 pskb_trim(skb, skb->len - 4);
1415 }
1416
1417copydone:
1418 total_rx_bytes += skb->len;
1419 total_rx_packets++;
1420
1421 e1000_rx_checksum(adapter, staterr, skb);
1422
1423 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1424
1425 if (rx_desc->wb.upper.header_status &
1426 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
1427 adapter->rx_hdr_split++;
1428
1429 e1000_receive_skb(adapter, netdev, skb, staterr,
1430 rx_desc->wb.middle.vlan);
1431
1432next_desc:
1433 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
1434 buffer_info->skb = NULL;
1435
1436 /* return some buffers to hardware, one at a time is too slow */
1437 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
1438 adapter->alloc_rx_buf(rx_ring, cleaned_count,
1439 GFP_ATOMIC);
1440 cleaned_count = 0;
1441 }
1442
1443 /* use prefetched values */
1444 rx_desc = next_rxd;
1445 buffer_info = next_buffer;
1446
1447 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
1448 }
1449 rx_ring->next_to_clean = i;
1450
1451 cleaned_count = e1000_desc_unused(rx_ring);
1452 if (cleaned_count)
1453 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
1454
1455 adapter->total_rx_bytes += total_rx_bytes;
1456 adapter->total_rx_packets += total_rx_packets;
1457 return cleaned;
1458}
1459
1460/**
1461 * e1000_consume_page - helper function
1462 **/
1463static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
1464 u16 length)
1465{
1466 bi->page = NULL;
1467 skb->len += length;
1468 skb->data_len += length;
1469 skb->truesize += PAGE_SIZE;
1470}
1471
1472/**
1473 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
1474 * @adapter: board private structure
1475 *
1476 * the return value indicates whether actual cleaning was done, there
1477 * is no guarantee that everything was cleaned
1478 **/
1479static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
1480 int work_to_do)
1481{
1482 struct e1000_adapter *adapter = rx_ring->adapter;
1483 struct net_device *netdev = adapter->netdev;
1484 struct pci_dev *pdev = adapter->pdev;
1485 union e1000_rx_desc_extended *rx_desc, *next_rxd;
1486 struct e1000_buffer *buffer_info, *next_buffer;
1487 u32 length, staterr;
1488 unsigned int i;
1489 int cleaned_count = 0;
1490 bool cleaned = false;
1491 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1492 struct skb_shared_info *shinfo;
1493
1494 i = rx_ring->next_to_clean;
1495 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
1496 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1497 buffer_info = &rx_ring->buffer_info[i];
1498
1499 while (staterr & E1000_RXD_STAT_DD) {
1500 struct sk_buff *skb;
1501
1502 if (*work_done >= work_to_do)
1503 break;
1504 (*work_done)++;
1505 rmb(); /* read descriptor and rx_buffer_info after status DD */
1506
1507 skb = buffer_info->skb;
1508 buffer_info->skb = NULL;
1509
1510 ++i;
1511 if (i == rx_ring->count)
1512 i = 0;
1513 next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
1514 prefetch(next_rxd);
1515
1516 next_buffer = &rx_ring->buffer_info[i];
1517
1518 cleaned = true;
1519 cleaned_count++;
1520 dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
1521 DMA_FROM_DEVICE);
1522 buffer_info->dma = 0;
1523
1524 length = le16_to_cpu(rx_desc->wb.upper.length);
1525
1526 /* errors is only valid for DD + EOP descriptors */
1527 if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
1528 ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
1529 !(netdev->features & NETIF_F_RXALL)))) {
1530 /* recycle both page and skb */
1531 buffer_info->skb = skb;
1532 /* an error means any chain goes out the window too */
1533 if (rx_ring->rx_skb_top)
1534 dev_kfree_skb_irq(rx_ring->rx_skb_top);
1535 rx_ring->rx_skb_top = NULL;
1536 goto next_desc;
1537 }
1538#define rxtop (rx_ring->rx_skb_top)
1539 if (!(staterr & E1000_RXD_STAT_EOP)) {
1540 /* this descriptor is only the beginning (or middle) */
1541 if (!rxtop) {
1542 /* this is the beginning of a chain */
1543 rxtop = skb;
1544 skb_fill_page_desc(rxtop, 0, buffer_info->page,
1545 0, length);
1546 } else {
1547 /* this is the middle of a chain */
1548 shinfo = skb_shinfo(rxtop);
1549 skb_fill_page_desc(rxtop, shinfo->nr_frags,
1550 buffer_info->page, 0,
1551 length);
1552 /* re-use the skb, only consumed the page */
1553 buffer_info->skb = skb;
1554 }
1555 e1000_consume_page(buffer_info, rxtop, length);
1556 goto next_desc;
1557 } else {
1558 if (rxtop) {
1559 /* end of the chain */
1560 shinfo = skb_shinfo(rxtop);
1561 skb_fill_page_desc(rxtop, shinfo->nr_frags,
1562 buffer_info->page, 0,
1563 length);
1564 /* re-use the current skb, we only consumed the
1565 * page
1566 */
1567 buffer_info->skb = skb;
1568 skb = rxtop;
1569 rxtop = NULL;
1570 e1000_consume_page(buffer_info, skb, length);
1571 } else {
1572 /* no chain, got EOP, this buf is the packet
1573 * copybreak to save the put_page/alloc_page
1574 */
1575 if (length <= copybreak &&
1576 skb_tailroom(skb) >= length) {
1577 u8 *vaddr;
1578 vaddr = kmap_atomic(buffer_info->page);
1579 memcpy(skb_tail_pointer(skb), vaddr,
1580 length);
1581 kunmap_atomic(vaddr);
1582 /* re-use the page, so don't erase
1583 * buffer_info->page
1584 */
1585 skb_put(skb, length);
1586 } else {
1587 skb_fill_page_desc(skb, 0,
1588 buffer_info->page, 0,
1589 length);
1590 e1000_consume_page(buffer_info, skb,
1591 length);
1592 }
1593 }
1594 }
1595
1596 /* Receive Checksum Offload */
1597 e1000_rx_checksum(adapter, staterr, skb);
1598
1599 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1600
1601 /* probably a little skewed due to removing CRC */
1602 total_rx_bytes += skb->len;
1603 total_rx_packets++;
1604
1605 /* eth type trans needs skb->data to point to something */
1606 if (!pskb_may_pull(skb, ETH_HLEN)) {
1607 e_err("pskb_may_pull failed.\n");
1608 dev_kfree_skb_irq(skb);
1609 goto next_desc;
1610 }
1611
1612 e1000_receive_skb(adapter, netdev, skb, staterr,
1613 rx_desc->wb.upper.vlan);
97ac8cae
BA
1614
1615next_desc:
5f450212 1616 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
97ac8cae
BA
1617
1618 /* return some buffers to hardware, one at a time is too slow */
1619 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
55aa6985 1620 adapter->alloc_rx_buf(rx_ring, cleaned_count,
c2fed996 1621 GFP_ATOMIC);
97ac8cae
BA
1622 cleaned_count = 0;
1623 }
1624
1625 /* use prefetched values */
1626 rx_desc = next_rxd;
1627 buffer_info = next_buffer;
5f450212
BA
1628
1629 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
97ac8cae
BA
1630 }
1631 rx_ring->next_to_clean = i;
1632
1633 cleaned_count = e1000_desc_unused(rx_ring);
1634 if (cleaned_count)
55aa6985 1635 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
97ac8cae
BA
1636
1637 adapter->total_rx_bytes += total_rx_bytes;
1638 adapter->total_rx_packets += total_rx_packets;
97ac8cae
BA
1639 return cleaned;
1640}
1641
bc7f75fa
AK
1642/**
1643 * e1000_clean_rx_ring - Free Rx Buffers per Queue
55aa6985 1644 * @rx_ring: Rx descriptor ring
bc7f75fa 1645 **/
55aa6985 1646static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
bc7f75fa 1647{
55aa6985 1648 struct e1000_adapter *adapter = rx_ring->adapter;
bc7f75fa
AK
1649 struct e1000_buffer *buffer_info;
1650 struct e1000_ps_page *ps_page;
1651 struct pci_dev *pdev = adapter->pdev;
bc7f75fa
AK
1652 unsigned int i, j;
1653
1654 /* Free all the Rx ring sk_buffs */
1655 for (i = 0; i < rx_ring->count; i++) {
1656 buffer_info = &rx_ring->buffer_info[i];
1657 if (buffer_info->dma) {
1658 if (adapter->clean_rx == e1000_clean_rx_irq)
0be3f55f 1659 dma_unmap_single(&pdev->dev, buffer_info->dma,
bc7f75fa 1660 adapter->rx_buffer_len,
0be3f55f 1661 DMA_FROM_DEVICE);
97ac8cae 1662 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
0be3f55f 1663 dma_unmap_page(&pdev->dev, buffer_info->dma,
f0ff4398 1664 PAGE_SIZE, DMA_FROM_DEVICE);
bc7f75fa 1665 else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
0be3f55f 1666 dma_unmap_single(&pdev->dev, buffer_info->dma,
bc7f75fa 1667 adapter->rx_ps_bsize0,
0be3f55f 1668 DMA_FROM_DEVICE);
bc7f75fa
AK
1669 buffer_info->dma = 0;
1670 }
1671
97ac8cae
BA
1672 if (buffer_info->page) {
1673 put_page(buffer_info->page);
1674 buffer_info->page = NULL;
1675 }
1676
bc7f75fa
AK
1677 if (buffer_info->skb) {
1678 dev_kfree_skb(buffer_info->skb);
1679 buffer_info->skb = NULL;
1680 }
1681
1682 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
47f44e40 1683 ps_page = &buffer_info->ps_pages[j];
bc7f75fa
AK
1684 if (!ps_page->page)
1685 break;
0be3f55f
NN
1686 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1687 DMA_FROM_DEVICE);
bc7f75fa
AK
1688 ps_page->dma = 0;
1689 put_page(ps_page->page);
1690 ps_page->page = NULL;
1691 }
1692 }
1693
1694 /* there also may be some cached data from a chained receive */
1695 if (rx_ring->rx_skb_top) {
1696 dev_kfree_skb(rx_ring->rx_skb_top);
1697 rx_ring->rx_skb_top = NULL;
1698 }
1699
bc7f75fa
AK
1700 /* Zero out the descriptor ring */
1701 memset(rx_ring->desc, 0, rx_ring->size);
1702
1703 rx_ring->next_to_clean = 0;
1704 rx_ring->next_to_use = 0;
b94b5028 1705 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
bc7f75fa 1706
c5083cf6 1707 writel(0, rx_ring->head);
b485dbae 1708 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
bdc125f7
BA
1709 e1000e_update_rdt_wa(rx_ring, 0);
1710 else
1711 writel(0, rx_ring->tail);
bc7f75fa
AK
1712}
1713
a8f88ff5
JB
1714static void e1000e_downshift_workaround(struct work_struct *work)
1715{
1716 struct e1000_adapter *adapter = container_of(work,
17e813ec
BA
1717 struct e1000_adapter,
1718 downshift_task);
a8f88ff5 1719
615b32af
JB
1720 if (test_bit(__E1000_DOWN, &adapter->state))
1721 return;
1722
a8f88ff5
JB
1723 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
1724}
1725
bc7f75fa
AK
1726/**
1727 * e1000_intr_msi - Interrupt Handler
1728 * @irq: interrupt number
1729 * @data: pointer to a network interface device structure
1730 **/
8bb62869 1731static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
bc7f75fa
AK
1732{
1733 struct net_device *netdev = data;
1734 struct e1000_adapter *adapter = netdev_priv(netdev);
1735 struct e1000_hw *hw = &adapter->hw;
1736 u32 icr = er32(ICR);
1737
e921eb1a 1738 /* read ICR disables interrupts using IAM */
573cca8c 1739 if (icr & E1000_ICR_LSC) {
f92518dd 1740 hw->mac.get_link_status = true;
e921eb1a 1741 /* ICH8 workaround-- Call gig speed drop workaround on cable
ad68076e
BA
1742 * disconnect (LSC) before accessing any PHY registers
1743 */
bc7f75fa
AK
1744 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1745 (!(er32(STATUS) & E1000_STATUS_LU)))
a8f88ff5 1746 schedule_work(&adapter->downshift_task);
bc7f75fa 1747
e921eb1a 1748 /* 80003ES2LAN workaround-- For packet buffer work-around on
bc7f75fa 1749 * link down event; disable receives here in the ISR and reset
ad68076e
BA
1750 * adapter in watchdog
1751 */
bc7f75fa
AK
1752 if (netif_carrier_ok(netdev) &&
1753 adapter->flags & FLAG_RX_NEEDS_RESTART) {
1754 /* disable receives */
1755 u32 rctl = er32(RCTL);
1756 ew32(RCTL, rctl & ~E1000_RCTL_EN);
12d43f7d 1757 adapter->flags |= FLAG_RESTART_NOW;
bc7f75fa
AK
1758 }
1759 /* guard against interrupt when we're going down */
1760 if (!test_bit(__E1000_DOWN, &adapter->state))
1761 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1762 }
1763
94fb848b
BA
1764 /* Reset on uncorrectable ECC error */
1765 if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
1766 u32 pbeccsts = er32(PBECCSTS);
1767
1768 adapter->corr_errors +=
1769 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
1770 adapter->uncorr_errors +=
1771 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
1772 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
1773
1774 /* Do the reset outside of interrupt context */
1775 schedule_work(&adapter->reset_task);
1776
1777 /* return immediately since reset is imminent */
1778 return IRQ_HANDLED;
1779 }
1780
288379f0 1781 if (napi_schedule_prep(&adapter->napi)) {
bc7f75fa
AK
1782 adapter->total_tx_bytes = 0;
1783 adapter->total_tx_packets = 0;
1784 adapter->total_rx_bytes = 0;
1785 adapter->total_rx_packets = 0;
288379f0 1786 __napi_schedule(&adapter->napi);
bc7f75fa
AK
1787 }
1788
1789 return IRQ_HANDLED;
1790}
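
The ECC branch above splits the PBECCSTS register into a correctable-error count in the low byte and an uncorrectable-error count in the byte above it. A minimal userspace sketch of that unpacking; the mask and shift values are assumptions standing in for the driver's E1000_PBECCSTS_* defines and should be checked against the real headers.

/* Illustrative only -- not part of the driver. */
#include <stdio.h>

#define PBECCSTS_CORR_ERR_CNT_MASK    0x000000FFu /* assumed */
#define PBECCSTS_UNCORR_ERR_CNT_MASK  0x0000FF00u /* assumed */
#define PBECCSTS_UNCORR_ERR_CNT_SHIFT 8           /* assumed */

int main(void)
{
	unsigned int pbeccsts = 0x00000302; /* sample register value */
	unsigned int corr = pbeccsts & PBECCSTS_CORR_ERR_CNT_MASK;
	unsigned int uncorr = (pbeccsts & PBECCSTS_UNCORR_ERR_CNT_MASK) >>
			      PBECCSTS_UNCORR_ERR_CNT_SHIFT;

	printf("corr=%u uncorr=%u\n", corr, uncorr); /* prints corr=2 uncorr=3 */
	return 0;
}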
1791
1792/**
1793 * e1000_intr - Interrupt Handler
1794 * @irq: interrupt number
1795 * @data: pointer to a network interface device structure
1796 **/
8bb62869 1797static irqreturn_t e1000_intr(int __always_unused irq, void *data)
bc7f75fa
AK
1798{
1799 struct net_device *netdev = data;
1800 struct e1000_adapter *adapter = netdev_priv(netdev);
1801 struct e1000_hw *hw = &adapter->hw;
bc7f75fa 1802 u32 rctl, icr = er32(ICR);
4662e82b 1803
a68ea775 1804 if (!icr || test_bit(__E1000_DOWN, &adapter->state))
e80bd1d1 1805 return IRQ_NONE; /* Not our interrupt */
bc7f75fa 1806
e921eb1a 1807 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
ad68076e
BA
1808 * not set, then the adapter didn't send an interrupt
1809 */
bc7f75fa
AK
1810 if (!(icr & E1000_ICR_INT_ASSERTED))
1811 return IRQ_NONE;
1812
e921eb1a 1813 /* Interrupt Auto-Mask...upon reading ICR,
ad68076e
BA
1814 * interrupts are masked. No need for the
1815 * IMC write
1816 */
bc7f75fa 1817
573cca8c 1818 if (icr & E1000_ICR_LSC) {
f92518dd 1819 hw->mac.get_link_status = true;
e921eb1a 1820 /* ICH8 workaround-- Call gig speed drop workaround on cable
ad68076e
BA
1821 * disconnect (LSC) before accessing any PHY registers
1822 */
bc7f75fa
AK
1823 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1824 (!(er32(STATUS) & E1000_STATUS_LU)))
a8f88ff5 1825 schedule_work(&adapter->downshift_task);
bc7f75fa 1826
e921eb1a 1827 /* 80003ES2LAN workaround--
bc7f75fa
AK
1828 * For packet buffer work-around on link down event;
1829 * disable receives here in the ISR and
1830 * reset adapter in watchdog
1831 */
1832 if (netif_carrier_ok(netdev) &&
1833 (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
1834 /* disable receives */
1835 rctl = er32(RCTL);
1836 ew32(RCTL, rctl & ~E1000_RCTL_EN);
12d43f7d 1837 adapter->flags |= FLAG_RESTART_NOW;
bc7f75fa
AK
1838 }
1839 /* guard against interrupt when we're going down */
1840 if (!test_bit(__E1000_DOWN, &adapter->state))
1841 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1842 }
1843
94fb848b
BA
1844 /* Reset on uncorrectable ECC error */
1845 if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
1846 u32 pbeccsts = er32(PBECCSTS);
1847
1848 adapter->corr_errors +=
1849 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
1850 adapter->uncorr_errors +=
1851 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
1852 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
1853
1854 /* Do the reset outside of interrupt context */
1855 schedule_work(&adapter->reset_task);
1856
1857 /* return immediately since reset is imminent */
1858 return IRQ_HANDLED;
1859 }
1860
288379f0 1861 if (napi_schedule_prep(&adapter->napi)) {
bc7f75fa
AK
1862 adapter->total_tx_bytes = 0;
1863 adapter->total_tx_packets = 0;
1864 adapter->total_rx_bytes = 0;
1865 adapter->total_rx_packets = 0;
288379f0 1866 __napi_schedule(&adapter->napi);
bc7f75fa
AK
1867 }
1868
1869 return IRQ_HANDLED;
1870}
1871
8bb62869 1872static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
4662e82b
BA
1873{
1874 struct net_device *netdev = data;
1875 struct e1000_adapter *adapter = netdev_priv(netdev);
1876 struct e1000_hw *hw = &adapter->hw;
1877 u32 icr = er32(ICR);
1878
1879 if (!(icr & E1000_ICR_INT_ASSERTED)) {
a3c69fef
JB
1880 if (!test_bit(__E1000_DOWN, &adapter->state))
1881 ew32(IMS, E1000_IMS_OTHER);
4662e82b
BA
1882 return IRQ_NONE;
1883 }
1884
1885 if (icr & adapter->eiac_mask)
1886 ew32(ICS, (icr & adapter->eiac_mask));
1887
1888 if (icr & E1000_ICR_OTHER) {
1889 if (!(icr & E1000_ICR_LSC))
1890 goto no_link_interrupt;
f92518dd 1891 hw->mac.get_link_status = true;
4662e82b
BA
1892 /* guard against interrupt when we're going down */
1893 if (!test_bit(__E1000_DOWN, &adapter->state))
1894 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1895 }
1896
1897no_link_interrupt:
a3c69fef
JB
1898 if (!test_bit(__E1000_DOWN, &adapter->state))
1899 ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
4662e82b
BA
1900
1901 return IRQ_HANDLED;
1902}
1903
8bb62869 1904static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
4662e82b
BA
1905{
1906 struct net_device *netdev = data;
1907 struct e1000_adapter *adapter = netdev_priv(netdev);
1908 struct e1000_hw *hw = &adapter->hw;
1909 struct e1000_ring *tx_ring = adapter->tx_ring;
1910
4662e82b
BA
1911 adapter->total_tx_bytes = 0;
1912 adapter->total_tx_packets = 0;
1913
55aa6985 1914 if (!e1000_clean_tx_irq(tx_ring))
4662e82b
BA
1915 /* Ring was not completely cleaned, so fire another interrupt */
1916 ew32(ICS, tx_ring->ims_val);
1917
1918 return IRQ_HANDLED;
1919}
1920
8bb62869 1921static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
4662e82b
BA
1922{
1923 struct net_device *netdev = data;
1924 struct e1000_adapter *adapter = netdev_priv(netdev);
55aa6985 1925 struct e1000_ring *rx_ring = adapter->rx_ring;
4662e82b
BA
1926
1927 /* Write the ITR value calculated at the end of the
1928 * previous interrupt.
1929 */
55aa6985
BA
1930 if (rx_ring->set_itr) {
1931 writel(1000000000 / (rx_ring->itr_val * 256),
1932 rx_ring->itr_register);
1933 rx_ring->set_itr = 0;
4662e82b
BA
1934 }
1935
288379f0 1936 if (napi_schedule_prep(&adapter->napi)) {
4662e82b
BA
1937 adapter->total_rx_bytes = 0;
1938 adapter->total_rx_packets = 0;
288379f0 1939 __napi_schedule(&adapter->napi);
4662e82b
BA
1940 }
1941 return IRQ_HANDLED;
1942}
1943
1944/**
1945 * e1000_configure_msix - Configure MSI-X hardware
1946 *
1947 * e1000_configure_msix sets up the hardware to properly
1948 * generate MSI-X interrupts.
1949 **/
1950static void e1000_configure_msix(struct e1000_adapter *adapter)
1951{
1952 struct e1000_hw *hw = &adapter->hw;
1953 struct e1000_ring *rx_ring = adapter->rx_ring;
1954 struct e1000_ring *tx_ring = adapter->tx_ring;
1955 int vector = 0;
1956 u32 ctrl_ext, ivar = 0;
1957
1958 adapter->eiac_mask = 0;
1959
1960 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
1961 if (hw->mac.type == e1000_82574) {
1962 u32 rfctl = er32(RFCTL);
1963 rfctl |= E1000_RFCTL_ACK_DIS;
1964 ew32(RFCTL, rfctl);
1965 }
1966
4662e82b
BA
1967 /* Configure Rx vector */
1968 rx_ring->ims_val = E1000_IMS_RXQ0;
1969 adapter->eiac_mask |= rx_ring->ims_val;
1970 if (rx_ring->itr_val)
1971 writel(1000000000 / (rx_ring->itr_val * 256),
c5083cf6 1972 rx_ring->itr_register);
4662e82b 1973 else
c5083cf6 1974 writel(1, rx_ring->itr_register);
4662e82b
BA
1975 ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
1976
1977 /* Configure Tx vector */
1978 tx_ring->ims_val = E1000_IMS_TXQ0;
1979 vector++;
1980 if (tx_ring->itr_val)
1981 writel(1000000000 / (tx_ring->itr_val * 256),
c5083cf6 1982 tx_ring->itr_register);
4662e82b 1983 else
c5083cf6 1984 writel(1, tx_ring->itr_register);
4662e82b
BA
1985 adapter->eiac_mask |= tx_ring->ims_val;
1986 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
1987
1988 /* set vector for Other Causes, e.g. link changes */
1989 vector++;
1990 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
1991 if (rx_ring->itr_val)
1992 writel(1000000000 / (rx_ring->itr_val * 256),
1993 hw->hw_addr + E1000_EITR_82574(vector));
1994 else
1995 writel(1, hw->hw_addr + E1000_EITR_82574(vector));
1996
1997 /* Cause Tx interrupts on every write back */
1998 ivar |= (1 << 31);
1999
2000 ew32(IVAR, ivar);
2001
2002 /* enable MSI-X PBA support */
2003 ctrl_ext = er32(CTRL_EXT);
2004 ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
2005
2006 /* Auto-Mask Other interrupts upon ICR read */
4662e82b
BA
2007 ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
2008 ctrl_ext |= E1000_CTRL_EXT_EIAME;
2009 ew32(CTRL_EXT, ctrl_ext);
2010 e1e_flush();
2011}
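
e1000_configure_msix() packs the three vector assignments into IVAR at bit offsets 0, 8 and 16, plus bit 31 to force a Tx interrupt on every write back. A small sketch of the same packing; the 0x8 valid bit is an assumed stand-in for E1000_IVAR_INT_ALLOC_VALID and should be checked against the driver headers.

/* Illustrative only: reproduce the IVAR packing for
 * RxQ0 -> vector 0, TxQ0 -> vector 1, Other -> vector 2.
 */
#include <stdio.h>

#define IVAR_INT_ALLOC_VALID 0x8u /* assumed value */

int main(void)
{
	unsigned int ivar;

	ivar  =  IVAR_INT_ALLOC_VALID | 0;        /* Rx queue 0, vector 0 */
	ivar |= (IVAR_INT_ALLOC_VALID | 1) << 8;  /* Tx queue 0, vector 1 */
	ivar |= (IVAR_INT_ALLOC_VALID | 2) << 16; /* Other causes, vector 2 */
	ivar |= 1u << 31;                         /* Tx int on every write back */

	printf("IVAR = 0x%08x\n", ivar); /* prints 0x800a0908 */
	return 0;
}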
2012
2013void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
2014{
2015 if (adapter->msix_entries) {
2016 pci_disable_msix(adapter->pdev);
2017 kfree(adapter->msix_entries);
2018 adapter->msix_entries = NULL;
2019 } else if (adapter->flags & FLAG_MSI_ENABLED) {
2020 pci_disable_msi(adapter->pdev);
2021 adapter->flags &= ~FLAG_MSI_ENABLED;
2022 }
4662e82b
BA
2023}
2024
2025/**
2026 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
2027 *
2028 * Attempt to configure interrupts using the best available
2029 * capabilities of the hardware and kernel.
2030 **/
2031void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
2032{
2033 int err;
8e86acd7 2034 int i;
4662e82b
BA
2035
2036 switch (adapter->int_mode) {
2037 case E1000E_INT_MODE_MSIX:
2038 if (adapter->flags & FLAG_HAS_MSIX) {
8e86acd7
JK
2039 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
2040 adapter->msix_entries = kcalloc(adapter->num_vectors,
17e813ec
BA
2041 sizeof(struct
2042 msix_entry),
2043 GFP_KERNEL);
4662e82b 2044 if (adapter->msix_entries) {
0cc7c959
AG
2045 struct e1000_adapter *a = adapter;
2046
8e86acd7 2047 for (i = 0; i < adapter->num_vectors; i++)
4662e82b
BA
2048 adapter->msix_entries[i].entry = i;
2049
0cc7c959
AG
2050 err = pci_enable_msix_range(a->pdev,
2051 a->msix_entries,
2052 a->num_vectors,
2053 a->num_vectors);
2054 if (err > 0)
4662e82b
BA
2055 return;
2056 }
2057 /* MSI-X failed, so fall through and try MSI */
ef456f85 2058 e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n");
4662e82b
BA
2059 e1000e_reset_interrupt_capability(adapter);
2060 }
2061 adapter->int_mode = E1000E_INT_MODE_MSI;
2062 /* Fall through */
2063 case E1000E_INT_MODE_MSI:
2064 if (!pci_enable_msi(adapter->pdev)) {
2065 adapter->flags |= FLAG_MSI_ENABLED;
2066 } else {
2067 adapter->int_mode = E1000E_INT_MODE_LEGACY;
ef456f85 2068 e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n");
4662e82b
BA
2069 }
2070 /* Fall through */
2071 case E1000E_INT_MODE_LEGACY:
2072 /* Don't do anything; this is the system default */
2073 break;
2074 }
8e86acd7
JK
2075
2076 /* store the number of vectors being used */
2077 adapter->num_vectors = 1;
4662e82b
BA
2078}
2079
2080/**
2081 * e1000_request_msix - Initialize MSI-X interrupts
2082 *
2083 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
2084 * kernel.
2085 **/
2086static int e1000_request_msix(struct e1000_adapter *adapter)
2087{
2088 struct net_device *netdev = adapter->netdev;
2089 int err = 0, vector = 0;
2090
2091 if (strlen(netdev->name) < (IFNAMSIZ - 5))
79f5e840
BA
2092 snprintf(adapter->rx_ring->name,
2093 sizeof(adapter->rx_ring->name) - 1,
2094 "%s-rx-0", netdev->name);
4662e82b
BA
2095 else
2096 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
2097 err = request_irq(adapter->msix_entries[vector].vector,
a0607fd3 2098 e1000_intr_msix_rx, 0, adapter->rx_ring->name,
4662e82b
BA
2099 netdev);
2100 if (err)
5015e53a 2101 return err;
c5083cf6
BA
2102 adapter->rx_ring->itr_register = adapter->hw.hw_addr +
2103 E1000_EITR_82574(vector);
4662e82b
BA
2104 adapter->rx_ring->itr_val = adapter->itr;
2105 vector++;
2106
2107 if (strlen(netdev->name) < (IFNAMSIZ - 5))
79f5e840
BA
2108 snprintf(adapter->tx_ring->name,
2109 sizeof(adapter->tx_ring->name) - 1,
2110 "%s-tx-0", netdev->name);
4662e82b
BA
2111 else
2112 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
2113 err = request_irq(adapter->msix_entries[vector].vector,
a0607fd3 2114 e1000_intr_msix_tx, 0, adapter->tx_ring->name,
4662e82b
BA
2115 netdev);
2116 if (err)
5015e53a 2117 return err;
c5083cf6
BA
2118 adapter->tx_ring->itr_register = adapter->hw.hw_addr +
2119 E1000_EITR_82574(vector);
4662e82b
BA
2120 adapter->tx_ring->itr_val = adapter->itr;
2121 vector++;
2122
2123 err = request_irq(adapter->msix_entries[vector].vector,
a0607fd3 2124 e1000_msix_other, 0, netdev->name, netdev);
4662e82b 2125 if (err)
5015e53a 2126 return err;
4662e82b
BA
2127
2128 e1000_configure_msix(adapter);
5015e53a 2129
4662e82b 2130 return 0;
4662e82b
BA
2131}
2132
f8d59f78
BA
2133/**
2134 * e1000_request_irq - initialize interrupts
2135 *
2136 * Attempts to configure interrupts using the best available
2137 * capabilities of the hardware and kernel.
2138 **/
bc7f75fa
AK
2139static int e1000_request_irq(struct e1000_adapter *adapter)
2140{
2141 struct net_device *netdev = adapter->netdev;
bc7f75fa
AK
2142 int err;
2143
4662e82b
BA
2144 if (adapter->msix_entries) {
2145 err = e1000_request_msix(adapter);
2146 if (!err)
2147 return err;
2148 /* fall back to MSI */
2149 e1000e_reset_interrupt_capability(adapter);
2150 adapter->int_mode = E1000E_INT_MODE_MSI;
2151 e1000e_set_interrupt_capability(adapter);
bc7f75fa 2152 }
4662e82b 2153 if (adapter->flags & FLAG_MSI_ENABLED) {
a0607fd3 2154 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
4662e82b
BA
2155 netdev->name, netdev);
2156 if (!err)
2157 return err;
bc7f75fa 2158
4662e82b
BA
2159 /* fall back to legacy interrupt */
2160 e1000e_reset_interrupt_capability(adapter);
2161 adapter->int_mode = E1000E_INT_MODE_LEGACY;
bc7f75fa
AK
2162 }
2163
a0607fd3 2164 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
4662e82b
BA
2165 netdev->name, netdev);
2166 if (err)
2167 e_err("Unable to allocate interrupt, Error: %d\n", err);
2168
bc7f75fa
AK
2169 return err;
2170}
2171
2172static void e1000_free_irq(struct e1000_adapter *adapter)
2173{
2174 struct net_device *netdev = adapter->netdev;
2175
4662e82b
BA
2176 if (adapter->msix_entries) {
2177 int vector = 0;
2178
2179 free_irq(adapter->msix_entries[vector].vector, netdev);
2180 vector++;
2181
2182 free_irq(adapter->msix_entries[vector].vector, netdev);
2183 vector++;
2184
2185 /* Other Causes interrupt vector */
2186 free_irq(adapter->msix_entries[vector].vector, netdev);
2187 return;
bc7f75fa 2188 }
4662e82b
BA
2189
2190 free_irq(adapter->pdev->irq, netdev);
bc7f75fa
AK
2191}
2192
2193/**
2194 * e1000_irq_disable - Mask off interrupt generation on the NIC
2195 **/
2196static void e1000_irq_disable(struct e1000_adapter *adapter)
2197{
2198 struct e1000_hw *hw = &adapter->hw;
2199
bc7f75fa 2200 ew32(IMC, ~0);
4662e82b
BA
2201 if (adapter->msix_entries)
2202 ew32(EIAC_82574, 0);
bc7f75fa 2203 e1e_flush();
8e86acd7
JK
2204
2205 if (adapter->msix_entries) {
2206 int i;
2207 for (i = 0; i < adapter->num_vectors; i++)
2208 synchronize_irq(adapter->msix_entries[i].vector);
2209 } else {
2210 synchronize_irq(adapter->pdev->irq);
2211 }
bc7f75fa
AK
2212}
2213
2214/**
2215 * e1000_irq_enable - Enable default interrupt generation settings
2216 **/
2217static void e1000_irq_enable(struct e1000_adapter *adapter)
2218{
2219 struct e1000_hw *hw = &adapter->hw;
2220
4662e82b
BA
2221 if (adapter->msix_entries) {
2222 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
2223 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
94fb848b
BA
2224 } else if (hw->mac.type == e1000_pch_lpt) {
2225 ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
4662e82b
BA
2226 } else {
2227 ew32(IMS, IMS_ENABLE_MASK);
2228 }
74ef9c39 2229 e1e_flush();
bc7f75fa
AK
2230}
2231
2232/**
31dbe5b4 2233 * e1000e_get_hw_control - get control of the h/w from f/w
bc7f75fa
AK
2234 * @adapter: address of board private structure
2235 *
31dbe5b4 2236 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
bc7f75fa
AK
2237 * For ASF and Pass Through versions of f/w this means that
2238 * the driver is loaded. For AMT version (only with 82573)
2239 * of the f/w this means that the network i/f is open.
2240 **/
31dbe5b4 2241void e1000e_get_hw_control(struct e1000_adapter *adapter)
bc7f75fa
AK
2242{
2243 struct e1000_hw *hw = &adapter->hw;
2244 u32 ctrl_ext;
2245 u32 swsm;
2246
2247 /* Let firmware know the driver has taken over */
2248 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2249 swsm = er32(SWSM);
2250 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
2251 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2252 ctrl_ext = er32(CTRL_EXT);
ad68076e 2253 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
bc7f75fa
AK
2254 }
2255}
2256
2257/**
31dbe5b4 2258 * e1000e_release_hw_control - release control of the h/w to f/w
bc7f75fa
AK
2259 * @adapter: address of board private structure
2260 *
31dbe5b4 2261 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
bc7f75fa
AK
2262 * For ASF and Pass Through versions of f/w this means that the
 2263 * driver is no longer loaded. For AMT version (only with 82573)
2264 * of the f/w this means that the network i/f is closed.
2265 *
2266 **/
31dbe5b4 2267void e1000e_release_hw_control(struct e1000_adapter *adapter)
bc7f75fa
AK
2268{
2269 struct e1000_hw *hw = &adapter->hw;
2270 u32 ctrl_ext;
2271 u32 swsm;
2272
 2273 /* Let firmware take over control of h/w */
2274 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2275 swsm = er32(SWSM);
2276 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
2277 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2278 ctrl_ext = er32(CTRL_EXT);
ad68076e 2279 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
bc7f75fa
AK
2280 }
2281}
2282
bc7f75fa 2283/**
49ce9c2c 2284 * e1000_alloc_ring_dma - allocate memory for a ring structure
bc7f75fa
AK
2285 **/
2286static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2287 struct e1000_ring *ring)
2288{
2289 struct pci_dev *pdev = adapter->pdev;
2290
2291 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
2292 GFP_KERNEL);
2293 if (!ring->desc)
2294 return -ENOMEM;
2295
2296 return 0;
2297}
2298
2299/**
2300 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
55aa6985 2301 * @tx_ring: Tx descriptor ring
bc7f75fa
AK
2302 *
2303 * Return 0 on success, negative on failure
2304 **/
55aa6985 2305int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
bc7f75fa 2306{
55aa6985 2307 struct e1000_adapter *adapter = tx_ring->adapter;
bc7f75fa
AK
2308 int err = -ENOMEM, size;
2309
2310 size = sizeof(struct e1000_buffer) * tx_ring->count;
89bf67f1 2311 tx_ring->buffer_info = vzalloc(size);
bc7f75fa
AK
2312 if (!tx_ring->buffer_info)
2313 goto err;
bc7f75fa
AK
2314
2315 /* round up to nearest 4K */
2316 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
2317 tx_ring->size = ALIGN(tx_ring->size, 4096);
2318
2319 err = e1000_alloc_ring_dma(adapter, tx_ring);
2320 if (err)
2321 goto err;
2322
2323 tx_ring->next_to_use = 0;
2324 tx_ring->next_to_clean = 0;
bc7f75fa
AK
2325
2326 return 0;
2327err:
2328 vfree(tx_ring->buffer_info);
44defeb3 2329 e_err("Unable to allocate memory for the transmit descriptor ring\n");
bc7f75fa
AK
2330 return err;
2331}
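
The ring allocation above sizes the descriptor area as count * descriptor size and rounds it up to a 4 KiB boundary. A quick sketch of that rounding, assuming the 16-byte legacy Tx descriptor; RING_ALIGN() below mirrors the kernel's ALIGN() macro.

#include <stdio.h>
#include <stddef.h>

/* same idea as the kernel ALIGN() macro */
#define RING_ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t desc_size = 16;           /* assumed sizeof(struct e1000_tx_desc) */
	size_t count = 80;               /* example descriptor count */
	size_t size = count * desc_size; /* 1280 bytes */

	printf("rounded ring size = %zu\n", RING_ALIGN(size, 4096)); /* 4096 */
	return 0;
}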
2332
2333/**
2334 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
55aa6985 2335 * @rx_ring: Rx descriptor ring
bc7f75fa
AK
2336 *
2337 * Returns 0 on success, negative on failure
2338 **/
55aa6985 2339int e1000e_setup_rx_resources(struct e1000_ring *rx_ring)
bc7f75fa 2340{
55aa6985 2341 struct e1000_adapter *adapter = rx_ring->adapter;
47f44e40
AK
2342 struct e1000_buffer *buffer_info;
2343 int i, size, desc_len, err = -ENOMEM;
bc7f75fa
AK
2344
2345 size = sizeof(struct e1000_buffer) * rx_ring->count;
89bf67f1 2346 rx_ring->buffer_info = vzalloc(size);
bc7f75fa
AK
2347 if (!rx_ring->buffer_info)
2348 goto err;
bc7f75fa 2349
47f44e40
AK
2350 for (i = 0; i < rx_ring->count; i++) {
2351 buffer_info = &rx_ring->buffer_info[i];
2352 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
2353 sizeof(struct e1000_ps_page),
2354 GFP_KERNEL);
2355 if (!buffer_info->ps_pages)
2356 goto err_pages;
2357 }
bc7f75fa
AK
2358
2359 desc_len = sizeof(union e1000_rx_desc_packet_split);
2360
2361 /* Round up to nearest 4K */
2362 rx_ring->size = rx_ring->count * desc_len;
2363 rx_ring->size = ALIGN(rx_ring->size, 4096);
2364
2365 err = e1000_alloc_ring_dma(adapter, rx_ring);
2366 if (err)
47f44e40 2367 goto err_pages;
bc7f75fa
AK
2368
2369 rx_ring->next_to_clean = 0;
2370 rx_ring->next_to_use = 0;
2371 rx_ring->rx_skb_top = NULL;
2372
2373 return 0;
47f44e40
AK
2374
2375err_pages:
2376 for (i = 0; i < rx_ring->count; i++) {
2377 buffer_info = &rx_ring->buffer_info[i];
2378 kfree(buffer_info->ps_pages);
2379 }
bc7f75fa
AK
2380err:
2381 vfree(rx_ring->buffer_info);
e9262447 2382 e_err("Unable to allocate memory for the receive descriptor ring\n");
bc7f75fa
AK
2383 return err;
2384}
2385
2386/**
2387 * e1000_clean_tx_ring - Free Tx Buffers
55aa6985 2388 * @tx_ring: Tx descriptor ring
bc7f75fa 2389 **/
55aa6985 2390static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
bc7f75fa 2391{
55aa6985 2392 struct e1000_adapter *adapter = tx_ring->adapter;
bc7f75fa
AK
2393 struct e1000_buffer *buffer_info;
2394 unsigned long size;
2395 unsigned int i;
2396
2397 for (i = 0; i < tx_ring->count; i++) {
2398 buffer_info = &tx_ring->buffer_info[i];
55aa6985 2399 e1000_put_txbuf(tx_ring, buffer_info);
bc7f75fa
AK
2400 }
2401
3f0cfa3b 2402 netdev_reset_queue(adapter->netdev);
bc7f75fa
AK
2403 size = sizeof(struct e1000_buffer) * tx_ring->count;
2404 memset(tx_ring->buffer_info, 0, size);
2405
2406 memset(tx_ring->desc, 0, tx_ring->size);
2407
2408 tx_ring->next_to_use = 0;
2409 tx_ring->next_to_clean = 0;
2410
c5083cf6 2411 writel(0, tx_ring->head);
b485dbae 2412 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
bdc125f7
BA
2413 e1000e_update_tdt_wa(tx_ring, 0);
2414 else
2415 writel(0, tx_ring->tail);
bc7f75fa
AK
2416}
2417
2418/**
2419 * e1000e_free_tx_resources - Free Tx Resources per Queue
55aa6985 2420 * @tx_ring: Tx descriptor ring
bc7f75fa
AK
2421 *
2422 * Free all transmit software resources
2423 **/
55aa6985 2424void e1000e_free_tx_resources(struct e1000_ring *tx_ring)
bc7f75fa 2425{
55aa6985 2426 struct e1000_adapter *adapter = tx_ring->adapter;
bc7f75fa 2427 struct pci_dev *pdev = adapter->pdev;
bc7f75fa 2428
55aa6985 2429 e1000_clean_tx_ring(tx_ring);
bc7f75fa
AK
2430
2431 vfree(tx_ring->buffer_info);
2432 tx_ring->buffer_info = NULL;
2433
2434 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2435 tx_ring->dma);
2436 tx_ring->desc = NULL;
2437}
2438
2439/**
2440 * e1000e_free_rx_resources - Free Rx Resources
55aa6985 2441 * @rx_ring: Rx descriptor ring
bc7f75fa
AK
2442 *
2443 * Free all receive software resources
2444 **/
55aa6985 2445void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
bc7f75fa 2446{
55aa6985 2447 struct e1000_adapter *adapter = rx_ring->adapter;
bc7f75fa 2448 struct pci_dev *pdev = adapter->pdev;
47f44e40 2449 int i;
bc7f75fa 2450
55aa6985 2451 e1000_clean_rx_ring(rx_ring);
bc7f75fa 2452
b1cdfead 2453 for (i = 0; i < rx_ring->count; i++)
47f44e40 2454 kfree(rx_ring->buffer_info[i].ps_pages);
47f44e40 2455
bc7f75fa
AK
2456 vfree(rx_ring->buffer_info);
2457 rx_ring->buffer_info = NULL;
2458
bc7f75fa
AK
2459 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2460 rx_ring->dma);
2461 rx_ring->desc = NULL;
2462}
2463
2464/**
2465 * e1000_update_itr - update the dynamic ITR value based on statistics
489815ce
AK
2466 * @adapter: pointer to adapter
2467 * @itr_setting: current adapter->itr
2468 * @packets: the number of packets during this measurement interval
2469 * @bytes: the number of bytes during this measurement interval
2470 *
bc7f75fa
AK
2471 * Stores a new ITR value based on packets and byte
2472 * counts during the last interrupt. The advantage of per interrupt
2473 * computation is faster updates and more accurate ITR for the current
2474 * traffic pattern. Constants in this function were computed
2475 * based on theoretical maximum wire speed and thresholds were set based
2476 * on testing data as well as attempting to minimize response time
4662e82b
BA
2477 * while increasing bulk throughput. This functionality is controlled
2478 * by the InterruptThrottleRate module parameter.
bc7f75fa 2479 **/
8bb62869 2480static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
bc7f75fa
AK
2481{
2482 unsigned int retval = itr_setting;
2483
2484 if (packets == 0)
5015e53a 2485 return itr_setting;
bc7f75fa
AK
2486
2487 switch (itr_setting) {
2488 case lowest_latency:
2489 /* handle TSO and jumbo frames */
362e20ca 2490 if (bytes / packets > 8000)
bc7f75fa 2491 retval = bulk_latency;
b1cdfead 2492 else if ((packets < 5) && (bytes > 512))
bc7f75fa 2493 retval = low_latency;
bc7f75fa 2494 break;
e80bd1d1 2495 case low_latency: /* 50 usec aka 20000 ints/s */
bc7f75fa
AK
2496 if (bytes > 10000) {
2497 /* this if handles the TSO accounting */
362e20ca 2498 if (bytes / packets > 8000)
bc7f75fa 2499 retval = bulk_latency;
362e20ca 2500 else if ((packets < 10) || ((bytes / packets) > 1200))
bc7f75fa 2501 retval = bulk_latency;
b1cdfead 2502 else if ((packets > 35))
bc7f75fa 2503 retval = lowest_latency;
362e20ca 2504 } else if (bytes / packets > 2000) {
bc7f75fa
AK
2505 retval = bulk_latency;
2506 } else if (packets <= 2 && bytes < 512) {
2507 retval = lowest_latency;
2508 }
2509 break;
e80bd1d1 2510 case bulk_latency: /* 250 usec aka 4000 ints/s */
bc7f75fa 2511 if (bytes > 25000) {
b1cdfead 2512 if (packets > 35)
bc7f75fa 2513 retval = low_latency;
bc7f75fa
AK
2514 } else if (bytes < 6000) {
2515 retval = low_latency;
2516 }
2517 break;
2518 }
2519
bc7f75fa
AK
2520 return retval;
2521}
2522
2523static void e1000_set_itr(struct e1000_adapter *adapter)
2524{
bc7f75fa
AK
2525 u16 current_itr;
2526 u32 new_itr = adapter->itr;
2527
2528 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2529 if (adapter->link_speed != SPEED_1000) {
2530 current_itr = 0;
2531 new_itr = 4000;
2532 goto set_itr_now;
2533 }
2534
828bac87
BA
2535 if (adapter->flags2 & FLAG2_DISABLE_AIM) {
2536 new_itr = 0;
2537 goto set_itr_now;
2538 }
2539
8bb62869
BA
2540 adapter->tx_itr = e1000_update_itr(adapter->tx_itr,
2541 adapter->total_tx_packets,
2542 adapter->total_tx_bytes);
bc7f75fa
AK
2543 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2544 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2545 adapter->tx_itr = low_latency;
2546
8bb62869
BA
2547 adapter->rx_itr = e1000_update_itr(adapter->rx_itr,
2548 adapter->total_rx_packets,
2549 adapter->total_rx_bytes);
bc7f75fa
AK
2550 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2551 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2552 adapter->rx_itr = low_latency;
2553
2554 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2555
bc7f75fa 2556 /* counts and packets in update_itr are dependent on these numbers */
33550cec 2557 switch (current_itr) {
bc7f75fa
AK
2558 case lowest_latency:
2559 new_itr = 70000;
2560 break;
2561 case low_latency:
e80bd1d1 2562 new_itr = 20000; /* aka hwitr = ~200 */
bc7f75fa
AK
2563 break;
2564 case bulk_latency:
2565 new_itr = 4000;
2566 break;
2567 default:
2568 break;
2569 }
2570
2571set_itr_now:
2572 if (new_itr != adapter->itr) {
e921eb1a 2573 /* this attempts to bias the interrupt rate towards Bulk
bc7f75fa 2574 * by adding intermediate steps when interrupt rate is
ad68076e
BA
2575 * increasing
2576 */
bc7f75fa 2577 new_itr = new_itr > adapter->itr ?
f0ff4398 2578 min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
bc7f75fa 2579 adapter->itr = new_itr;
4662e82b
BA
2580 adapter->rx_ring->itr_val = new_itr;
2581 if (adapter->msix_entries)
2582 adapter->rx_ring->set_itr = 1;
2583 else
e3d14b08 2584 e1000e_write_itr(adapter, new_itr);
bc7f75fa
AK
2585 }
2586}
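
When the computed rate rises, e1000_set_itr() does not jump straight to the new value; it adds at most a quarter of the target per adjustment to bias toward the bulk setting. A worked sketch of that ramp with hypothetical starting values.

#include <stdio.h>

/* Mirror of the step-up rule above: when raising the rate, move at
 * most a quarter of the new target per adjustment.
 */
static unsigned int step_itr(unsigned int cur, unsigned int target)
{
	if (target <= cur)
		return target;
	return (cur + (target >> 2) < target) ? cur + (target >> 2) : target;
}

int main(void)
{
	unsigned int itr = 4000;     /* bulk_latency rate */

	itr = step_itr(itr, 70000);  /* -> 21500, not 70000 */
	printf("next itr = %u\n", itr);
	itr = step_itr(itr, 70000);  /* -> 39000 on the following pass */
	printf("next itr = %u\n", itr);
	return 0;
}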
2587
22a4cca2
MV
2588/**
2589 * e1000e_write_itr - write the ITR value to the appropriate registers
2590 * @adapter: address of board private structure
2591 * @itr: new ITR value to program
2592 *
2593 * e1000e_write_itr determines if the adapter is in MSI-X mode
2594 * and, if so, writes the EITR registers with the ITR value.
2595 * Otherwise, it writes the ITR value into the ITR register.
2596 **/
2597void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
2598{
2599 struct e1000_hw *hw = &adapter->hw;
2600 u32 new_itr = itr ? 1000000000 / (itr * 256) : 0;
2601
2602 if (adapter->msix_entries) {
2603 int vector;
2604
2605 for (vector = 0; vector < adapter->num_vectors; vector++)
2606 writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector));
2607 } else {
2608 ew32(ITR, new_itr);
2609 }
2610}
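
The value written to ITR/EITR is not the interrupts-per-second figure itself but 1000000000 / (itr * 256), i.e. the interrupt interval expressed in 256 ns units. A quick arithmetic sketch.

#include <stdio.h>

int main(void)
{
	unsigned int itr = 20000;                    /* target interrupts/sec */
	unsigned int reg = 1000000000 / (itr * 256); /* 195: interval in 256 ns units */

	/* 195 * 256 ns ~= 49.9 us between interrupts, i.e. roughly 20000 ints/s */
	printf("ITR/EITR register value = %u\n", reg);
	return 0;
}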
2611
4662e82b
BA
2612/**
2613 * e1000_alloc_queues - Allocate memory for all rings
2614 * @adapter: board private structure to initialize
2615 **/
9f9a12f8 2616static int e1000_alloc_queues(struct e1000_adapter *adapter)
4662e82b 2617{
55aa6985
BA
2618 int size = sizeof(struct e1000_ring);
2619
2620 adapter->tx_ring = kzalloc(size, GFP_KERNEL);
4662e82b
BA
2621 if (!adapter->tx_ring)
2622 goto err;
55aa6985
BA
2623 adapter->tx_ring->count = adapter->tx_ring_count;
2624 adapter->tx_ring->adapter = adapter;
4662e82b 2625
55aa6985 2626 adapter->rx_ring = kzalloc(size, GFP_KERNEL);
4662e82b
BA
2627 if (!adapter->rx_ring)
2628 goto err;
55aa6985
BA
2629 adapter->rx_ring->count = adapter->rx_ring_count;
2630 adapter->rx_ring->adapter = adapter;
4662e82b
BA
2631
2632 return 0;
2633err:
2634 e_err("Unable to allocate memory for queues\n");
2635 kfree(adapter->rx_ring);
2636 kfree(adapter->tx_ring);
2637 return -ENOMEM;
2638}
2639
bc7f75fa 2640/**
c58c8a78 2641 * e1000e_poll - NAPI Rx polling callback
ad68076e 2642 * @napi: struct associated with this polling callback
c58c8a78 2643 * @weight: number of packets driver is allowed to process this poll
bc7f75fa 2644 **/
c58c8a78 2645static int e1000e_poll(struct napi_struct *napi, int weight)
bc7f75fa 2646{
c58c8a78
BA
2647 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
2648 napi);
4662e82b 2649 struct e1000_hw *hw = &adapter->hw;
bc7f75fa 2650 struct net_device *poll_dev = adapter->netdev;
679e8a0f 2651 int tx_cleaned = 1, work_done = 0;
bc7f75fa 2652
4cf1653a 2653 adapter = netdev_priv(poll_dev);
bc7f75fa 2654
c58c8a78
BA
2655 if (!adapter->msix_entries ||
2656 (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2657 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
4662e82b 2658
c58c8a78 2659 adapter->clean_rx(adapter->rx_ring, &work_done, weight);
d2c7ddd6 2660
12d04a3c 2661 if (!tx_cleaned)
c58c8a78 2662 work_done = weight;
bc7f75fa 2663
c58c8a78
BA
2664 /* If weight not fully consumed, exit the polling mode */
2665 if (work_done < weight) {
bc7f75fa
AK
2666 if (adapter->itr_setting & 3)
2667 e1000_set_itr(adapter);
288379f0 2668 napi_complete(napi);
a3c69fef
JB
2669 if (!test_bit(__E1000_DOWN, &adapter->state)) {
2670 if (adapter->msix_entries)
2671 ew32(IMS, adapter->rx_ring->ims_val);
2672 else
2673 e1000_irq_enable(adapter);
2674 }
bc7f75fa
AK
2675 }
2676
2677 return work_done;
2678}
2679
80d5c368 2680static int e1000_vlan_rx_add_vid(struct net_device *netdev,
603cdca9 2681 __always_unused __be16 proto, u16 vid)
bc7f75fa
AK
2682{
2683 struct e1000_adapter *adapter = netdev_priv(netdev);
2684 struct e1000_hw *hw = &adapter->hw;
2685 u32 vfta, index;
2686
2687 /* don't update vlan cookie if already programmed */
2688 if ((adapter->hw.mng_cookie.status &
2689 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2690 (vid == adapter->mng_vlan_id))
8e586137 2691 return 0;
caaddaf8 2692
bc7f75fa 2693 /* add VID to filter table */
caaddaf8
BA
2694 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2695 index = (vid >> 5) & 0x7F;
2696 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2697 vfta |= (1 << (vid & 0x1F));
2698 hw->mac.ops.write_vfta(hw, index, vfta);
2699 }
86d70e53
JK
2700
2701 set_bit(vid, adapter->active_vlans);
8e586137
JP
2702
2703 return 0;
bc7f75fa
AK
2704}
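
The VLAN filter table is addressed as 128 32-bit words, so a VID maps to word (vid >> 5) & 0x7F and bit (vid & 0x1F) within it, exactly as in the add/kill routines above. A small sketch of that mapping.

#include <stdio.h>

int main(void)
{
	unsigned int vid = 100;                 /* example VLAN ID */
	unsigned int index = (vid >> 5) & 0x7F; /* VFTA word: 3 */
	unsigned int bit = vid & 0x1F;          /* bit within the word: 4 */

	printf("VID %u -> VFTA[%u] |= 1 << %u\n", vid, index, bit);
	return 0;
}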
2705
80d5c368 2706static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
603cdca9 2707 __always_unused __be16 proto, u16 vid)
bc7f75fa
AK
2708{
2709 struct e1000_adapter *adapter = netdev_priv(netdev);
2710 struct e1000_hw *hw = &adapter->hw;
2711 u32 vfta, index;
2712
bc7f75fa
AK
2713 if ((adapter->hw.mng_cookie.status &
2714 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2715 (vid == adapter->mng_vlan_id)) {
2716 /* release control to f/w */
31dbe5b4 2717 e1000e_release_hw_control(adapter);
8e586137 2718 return 0;
bc7f75fa
AK
2719 }
2720
2721 /* remove VID from filter table */
caaddaf8
BA
2722 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2723 index = (vid >> 5) & 0x7F;
2724 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2725 vfta &= ~(1 << (vid & 0x1F));
2726 hw->mac.ops.write_vfta(hw, index, vfta);
2727 }
86d70e53
JK
2728
2729 clear_bit(vid, adapter->active_vlans);
8e586137
JP
2730
2731 return 0;
bc7f75fa
AK
2732}
2733
86d70e53
JK
2734/**
2735 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
2736 * @adapter: board private structure to initialize
2737 **/
2738static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
bc7f75fa
AK
2739{
2740 struct net_device *netdev = adapter->netdev;
86d70e53
JK
2741 struct e1000_hw *hw = &adapter->hw;
2742 u32 rctl;
bc7f75fa 2743
86d70e53
JK
2744 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2745 /* disable VLAN receive filtering */
2746 rctl = er32(RCTL);
2747 rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
2748 ew32(RCTL, rctl);
2749
2750 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
80d5c368
PM
2751 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
2752 adapter->mng_vlan_id);
86d70e53 2753 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
bc7f75fa 2754 }
bc7f75fa
AK
2755 }
2756}
2757
86d70e53
JK
2758/**
2759 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
2760 * @adapter: board private structure to initialize
2761 **/
2762static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
2763{
2764 struct e1000_hw *hw = &adapter->hw;
2765 u32 rctl;
2766
2767 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2768 /* enable VLAN receive filtering */
2769 rctl = er32(RCTL);
2770 rctl |= E1000_RCTL_VFE;
2771 rctl &= ~E1000_RCTL_CFIEN;
2772 ew32(RCTL, rctl);
2773 }
2774}
bc7f75fa 2775
86d70e53
JK
2776/**
 2777 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
2778 * @adapter: board private structure to initialize
2779 **/
2780static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
bc7f75fa 2781{
bc7f75fa 2782 struct e1000_hw *hw = &adapter->hw;
86d70e53 2783 u32 ctrl;
bc7f75fa 2784
86d70e53
JK
2785 /* disable VLAN tag insert/strip */
2786 ctrl = er32(CTRL);
2787 ctrl &= ~E1000_CTRL_VME;
2788 ew32(CTRL, ctrl);
2789}
bc7f75fa 2790
86d70e53
JK
2791/**
2792 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
2793 * @adapter: board private structure to initialize
2794 **/
2795static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
2796{
2797 struct e1000_hw *hw = &adapter->hw;
2798 u32 ctrl;
bc7f75fa 2799
86d70e53
JK
2800 /* enable VLAN tag insert/strip */
2801 ctrl = er32(CTRL);
2802 ctrl |= E1000_CTRL_VME;
2803 ew32(CTRL, ctrl);
2804}
bc7f75fa 2805
86d70e53
JK
2806static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2807{
2808 struct net_device *netdev = adapter->netdev;
2809 u16 vid = adapter->hw.mng_cookie.vlan_id;
2810 u16 old_vid = adapter->mng_vlan_id;
2811
e5fe2541 2812 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
80d5c368 2813 e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
86d70e53 2814 adapter->mng_vlan_id = vid;
bc7f75fa
AK
2815 }
2816
86d70e53 2817 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
80d5c368 2818 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid);
bc7f75fa
AK
2819}
2820
2821static void e1000_restore_vlan(struct e1000_adapter *adapter)
2822{
2823 u16 vid;
2824
80d5c368 2825 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
bc7f75fa 2826
86d70e53 2827 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
80d5c368 2828 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
bc7f75fa
AK
2829}
2830
cd791618 2831static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
bc7f75fa
AK
2832{
2833 struct e1000_hw *hw = &adapter->hw;
cd791618 2834 u32 manc, manc2h, mdef, i, j;
bc7f75fa
AK
2835
2836 if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2837 return;
2838
2839 manc = er32(MANC);
2840
e921eb1a 2841 /* enable receiving management packets to the host. this will probably
bc7f75fa 2842 * generate destination unreachable messages from the host OS, but
ad68076e
BA
2843 * the packets will be handled on SMBUS
2844 */
bc7f75fa
AK
2845 manc |= E1000_MANC_EN_MNG2HOST;
2846 manc2h = er32(MANC2H);
cd791618
BA
2847
2848 switch (hw->mac.type) {
2849 default:
2850 manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
2851 break;
2852 case e1000_82574:
2853 case e1000_82583:
e921eb1a 2854 /* Check if IPMI pass-through decision filter already exists;
cd791618
BA
2855 * if so, enable it.
2856 */
2857 for (i = 0, j = 0; i < 8; i++) {
2858 mdef = er32(MDEF(i));
2859
2860 /* Ignore filters with anything other than IPMI ports */
3b21b508 2861 if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
cd791618
BA
2862 continue;
2863
2864 /* Enable this decision filter in MANC2H */
2865 if (mdef)
2866 manc2h |= (1 << i);
2867
2868 j |= mdef;
2869 }
2870
2871 if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2872 break;
2873
2874 /* Create new decision filter in an empty filter */
2875 for (i = 0, j = 0; i < 8; i++)
2876 if (er32(MDEF(i)) == 0) {
2877 ew32(MDEF(i), (E1000_MDEF_PORT_623 |
2878 E1000_MDEF_PORT_664));
2879 manc2h |= (1 << 1);
2880 j++;
2881 break;
2882 }
2883
2884 if (!j)
2885 e_warn("Unable to create IPMI pass-through filter\n");
2886 break;
2887 }
2888
bc7f75fa
AK
2889 ew32(MANC2H, manc2h);
2890 ew32(MANC, manc);
2891}
2892
2893/**
af667a29 2894 * e1000_configure_tx - Configure Transmit Unit after Reset
bc7f75fa
AK
2895 * @adapter: board private structure
2896 *
2897 * Configure the Tx unit of the MAC after a reset.
2898 **/
2899static void e1000_configure_tx(struct e1000_adapter *adapter)
2900{
2901 struct e1000_hw *hw = &adapter->hw;
2902 struct e1000_ring *tx_ring = adapter->tx_ring;
2903 u64 tdba;
e7e834aa 2904 u32 tdlen, tctl, tarc;
bc7f75fa
AK
2905
2906 /* Setup the HW Tx Head and Tail descriptor pointers */
2907 tdba = tx_ring->dma;
2908 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
1e36052e
BA
2909 ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
2910 ew32(TDBAH(0), (tdba >> 32));
2911 ew32(TDLEN(0), tdlen);
2912 ew32(TDH(0), 0);
2913 ew32(TDT(0), 0);
2914 tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
2915 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
bc7f75fa 2916
bc7f75fa
AK
2917 /* Set the Tx Interrupt Delay register */
2918 ew32(TIDV, adapter->tx_int_delay);
ad68076e 2919 /* Tx irq moderation */
bc7f75fa
AK
2920 ew32(TADV, adapter->tx_abs_int_delay);
2921
3a3b7586
JB
2922 if (adapter->flags2 & FLAG2_DMA_BURST) {
2923 u32 txdctl = er32(TXDCTL(0));
2924 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
2925 E1000_TXDCTL_WTHRESH);
e921eb1a 2926 /* set up some performance related parameters to encourage the
3a3b7586
JB
2927 * hardware to use the bus more efficiently in bursts, depends
2928 * on the tx_int_delay to be enabled,
8edc0e62 2929 * wthresh = 1 ==> burst write is disabled to avoid Tx stalls
3a3b7586
JB
2930 * hthresh = 1 ==> prefetch when one or more available
2931 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
2932 * BEWARE: this seems to work but should be considered first if
af667a29 2933 * there are Tx hangs or other Tx related bugs
3a3b7586
JB
2934 */
2935 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
2936 ew32(TXDCTL(0), txdctl);
3a3b7586 2937 }
56032be7
BA
2938 /* erratum work around: set txdctl the same for both queues */
2939 ew32(TXDCTL(1), er32(TXDCTL(0)));
3a3b7586 2940
e7e834aa
DE
2941 /* Program the Transmit Control Register */
2942 tctl = er32(TCTL);
2943 tctl &= ~E1000_TCTL_CT;
2944 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2945 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2946
bc7f75fa 2947 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
e9ec2c0f 2948 tarc = er32(TARC(0));
e921eb1a 2949 /* set the speed mode bit, we'll clear it if we're not at
ad68076e
BA
2950 * gigabit link later
2951 */
bc7f75fa
AK
2952#define SPEED_MODE_BIT (1 << 21)
2953 tarc |= SPEED_MODE_BIT;
e9ec2c0f 2954 ew32(TARC(0), tarc);
bc7f75fa
AK
2955 }
2956
2957 /* errata: program both queues to unweighted RR */
2958 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
e9ec2c0f 2959 tarc = er32(TARC(0));
bc7f75fa 2960 tarc |= 1;
e9ec2c0f
JK
2961 ew32(TARC(0), tarc);
2962 tarc = er32(TARC(1));
bc7f75fa 2963 tarc |= 1;
e9ec2c0f 2964 ew32(TARC(1), tarc);
bc7f75fa
AK
2965 }
2966
bc7f75fa
AK
2967 /* Setup Transmit Descriptor Settings for eop descriptor */
2968 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
2969
2970 /* only set IDE if we are delaying interrupts using the timers */
2971 if (adapter->tx_int_delay)
2972 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2973
2974 /* enable Report Status bit */
2975 adapter->txd_cmd |= E1000_TXD_CMD_RS;
2976
e7e834aa
DE
2977 ew32(TCTL, tctl);
2978
57cde763 2979 hw->mac.ops.config_collision_dist(hw);
bc7f75fa
AK
2980}
2981
2982/**
2983 * e1000_setup_rctl - configure the receive control registers
2984 * @adapter: Board private structure
2985 **/
2986#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
2987 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
2988static void e1000_setup_rctl(struct e1000_adapter *adapter)
2989{
2990 struct e1000_hw *hw = &adapter->hw;
2991 u32 rctl, rfctl;
bc7f75fa
AK
2992 u32 pages = 0;
2993
b20a7744
DE
2994 /* Workaround Si errata on PCHx - configure jumbo frame flow.
2995 * If jumbo frames not set, program related MAC/PHY registers
2996 * to h/w defaults
2997 */
2998 if (hw->mac.type >= e1000_pch2lan) {
2999 s32 ret_val;
3000
3001 if (adapter->netdev->mtu > ETH_DATA_LEN)
3002 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
3003 else
3004 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
3005
3006 if (ret_val)
3007 e_dbg("failed to enable|disable jumbo frame workaround mode\n");
3008 }
a1ce6473 3009
bc7f75fa
AK
3010 /* Program MC offset vector base */
3011 rctl = er32(RCTL);
3012 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3013 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
f0ff4398
BA
3014 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
3015 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
bc7f75fa
AK
3016
3017 /* Do not Store bad packets */
3018 rctl &= ~E1000_RCTL_SBP;
3019
3020 /* Enable Long Packet receive */
3021 if (adapter->netdev->mtu <= ETH_DATA_LEN)
3022 rctl &= ~E1000_RCTL_LPE;
3023 else
3024 rctl |= E1000_RCTL_LPE;
3025
eb7c3adb
JK
3026 /* Some systems expect that the CRC is included in SMBUS traffic. The
3027 * hardware strips the CRC before sending to both SMBUS (BMC) and to
3028 * host memory when this is enabled
3029 */
3030 if (adapter->flags2 & FLAG2_CRC_STRIPPING)
3031 rctl |= E1000_RCTL_SECRC;
5918bd88 3032
a4f58f54
BA
3033 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
3034 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
3035 u16 phy_data;
3036
3037 e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
3038 phy_data &= 0xfff8;
3039 phy_data |= (1 << 2);
3040 e1e_wphy(hw, PHY_REG(770, 26), phy_data);
3041
3042 e1e_rphy(hw, 22, &phy_data);
3043 phy_data &= 0x0fff;
3044 phy_data |= (1 << 14);
3045 e1e_wphy(hw, 0x10, 0x2823);
3046 e1e_wphy(hw, 0x11, 0x0003);
3047 e1e_wphy(hw, 22, phy_data);
3048 }
3049
bc7f75fa
AK
3050 /* Setup buffer sizes */
3051 rctl &= ~E1000_RCTL_SZ_4096;
3052 rctl |= E1000_RCTL_BSEX;
3053 switch (adapter->rx_buffer_len) {
bc7f75fa
AK
3054 case 2048:
3055 default:
3056 rctl |= E1000_RCTL_SZ_2048;
3057 rctl &= ~E1000_RCTL_BSEX;
3058 break;
3059 case 4096:
3060 rctl |= E1000_RCTL_SZ_4096;
3061 break;
3062 case 8192:
3063 rctl |= E1000_RCTL_SZ_8192;
3064 break;
3065 case 16384:
3066 rctl |= E1000_RCTL_SZ_16384;
3067 break;
3068 }
3069
5f450212
BA
3070 /* Enable Extended Status in all Receive Descriptors */
3071 rfctl = er32(RFCTL);
3072 rfctl |= E1000_RFCTL_EXTEN;
f6bd5577 3073 ew32(RFCTL, rfctl);
5f450212 3074
e921eb1a 3075 /* 82571 and greater support packet-split where the protocol
bc7f75fa
AK
3076 * header is placed in skb->data and the packet data is
3077 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
3078 * In the case of a non-split, skb->data is linearly filled,
3079 * followed by the page buffers. Therefore, skb->data is
3080 * sized to hold the largest protocol header.
3081 *
3082 * allocations using alloc_page take too long for regular MTU
3083 * so only enable packet split for jumbo frames
3084 *
3085 * Using pages when the page size is greater than 16k wastes
3086 * a lot of memory, since we allocate 3 pages at all times
3087 * per packet.
3088 */
bc7f75fa 3089 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
79d4e908 3090 if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
bc7f75fa 3091 adapter->rx_ps_pages = pages;
97ac8cae
BA
3092 else
3093 adapter->rx_ps_pages = 0;
bc7f75fa
AK
3094
3095 if (adapter->rx_ps_pages) {
90da0669
BA
3096 u32 psrctl = 0;
3097
140a7480
AK
3098 /* Enable Packet split descriptors */
3099 rctl |= E1000_RCTL_DTYP_PS;
bc7f75fa 3100
e5fe2541 3101 psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT;
bc7f75fa
AK
3102
3103 switch (adapter->rx_ps_pages) {
3104 case 3:
e5fe2541
BA
3105 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
3106 /* fall-through */
bc7f75fa 3107 case 2:
e5fe2541
BA
3108 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
3109 /* fall-through */
bc7f75fa 3110 case 1:
e5fe2541 3111 psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
bc7f75fa
AK
3112 break;
3113 }
3114
3115 ew32(PSRCTL, psrctl);
3116 }
3117
cf955e6c
BG
3118 /* This is useful for sniffing bad packets. */
3119 if (adapter->netdev->features & NETIF_F_RXALL) {
3120 /* UPE and MPE will be handled by normal PROMISC logic
e921eb1a
BA
3121 * in e1000e_set_rx_mode
3122 */
e80bd1d1
BA
3123 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3124 E1000_RCTL_BAM | /* RX All Bcast Pkts */
3125 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
cf955e6c 3126
e80bd1d1
BA
3127 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
3128 E1000_RCTL_DPF | /* Allow filtered pause */
3129 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
cf955e6c
BG
3130 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
3131 * and that breaks VLANs.
3132 */
3133 }
3134
bc7f75fa 3135 ew32(RCTL, rctl);
318a94d6 3136 /* just started the receive unit, no need to restart */
12d43f7d 3137 adapter->flags &= ~FLAG_RESTART_NOW;
bc7f75fa
AK
3138}
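
Packet split is only turned on when the MTU fits in at most three pages; PAGE_USE_COUNT() rounds the page count up. A worked sketch assuming 4 KiB pages.

#include <stdio.h>

#define PAGE_SZ         4096u /* assumed PAGE_SIZE */
#define PAGE_SHIFT_BITS 12
#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT_BITS) + \
			   (((S) & (PAGE_SZ - 1)) ? 1 : 0))

int main(void)
{
	unsigned int mtu = 9000;                  /* jumbo frame example */
	unsigned int pages = PAGE_USE_COUNT(mtu); /* 2 full pages + remainder = 3 */

	/* With LPE set and pages <= 3, rx_ps_pages becomes 3 and the
	 * packet-split receive path is used.
	 */
	printf("pages needed for MTU %u = %u\n", mtu, pages);
	return 0;
}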
3139
3140/**
3141 * e1000_configure_rx - Configure Receive Unit after Reset
3142 * @adapter: board private structure
3143 *
3144 * Configure the Rx unit of the MAC after a reset.
3145 **/
3146static void e1000_configure_rx(struct e1000_adapter *adapter)
3147{
3148 struct e1000_hw *hw = &adapter->hw;
3149 struct e1000_ring *rx_ring = adapter->rx_ring;
3150 u64 rdba;
3151 u32 rdlen, rctl, rxcsum, ctrl_ext;
3152
3153 if (adapter->rx_ps_pages) {
3154 /* this is a 32 byte descriptor */
3155 rdlen = rx_ring->count *
af667a29 3156 sizeof(union e1000_rx_desc_packet_split);
bc7f75fa
AK
3157 adapter->clean_rx = e1000_clean_rx_irq_ps;
3158 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
97ac8cae 3159 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
5f450212 3160 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
97ac8cae
BA
3161 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
3162 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
bc7f75fa 3163 } else {
5f450212 3164 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
bc7f75fa
AK
3165 adapter->clean_rx = e1000_clean_rx_irq;
3166 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
3167 }
3168
3169 /* disable receives while setting up the descriptors */
3170 rctl = er32(RCTL);
7f99ae63
BA
3171 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3172 ew32(RCTL, rctl & ~E1000_RCTL_EN);
bc7f75fa 3173 e1e_flush();
1bba4386 3174 usleep_range(10000, 20000);
bc7f75fa 3175
3a3b7586 3176 if (adapter->flags2 & FLAG2_DMA_BURST) {
e921eb1a 3177 /* set the writeback threshold (only takes effect if the RDTR
3a3b7586 3178 * is set). set GRAN=1 and write back up to 0x4 worth, and
af667a29 3179 * enable prefetching of 0x20 Rx descriptors
3a3b7586
JB
3180 * granularity = 01
3181 * wthresh = 04,
3182 * hthresh = 04,
3183 * pthresh = 0x20
3184 */
3185 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
3186 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
3187
e921eb1a 3188 /* override the delay timers for enabling bursting, only if
3a3b7586
JB
3189 * the value was not set by the user via module options
3190 */
3191 if (adapter->rx_int_delay == DEFAULT_RDTR)
3192 adapter->rx_int_delay = BURST_RDTR;
3193 if (adapter->rx_abs_int_delay == DEFAULT_RADV)
3194 adapter->rx_abs_int_delay = BURST_RADV;
3195 }
3196
bc7f75fa
AK
3197 /* set the Receive Delay Timer Register */
3198 ew32(RDTR, adapter->rx_int_delay);
3199
3200 /* irq moderation */
3201 ew32(RADV, adapter->rx_abs_int_delay);
828bac87 3202 if ((adapter->itr_setting != 0) && (adapter->itr != 0))
22a4cca2 3203 e1000e_write_itr(adapter, adapter->itr);
bc7f75fa
AK
3204
3205 ctrl_ext = er32(CTRL_EXT);
bc7f75fa
AK
3206 /* Auto-Mask interrupts upon ICR access */
3207 ctrl_ext |= E1000_CTRL_EXT_IAME;
3208 ew32(IAM, 0xffffffff);
3209 ew32(CTRL_EXT, ctrl_ext);
3210 e1e_flush();
3211
e921eb1a 3212 /* Setup the HW Rx Head and Tail Descriptor Pointers and
ad68076e
BA
3213 * the Base and Length of the Rx Descriptor Ring
3214 */
bc7f75fa 3215 rdba = rx_ring->dma;
1e36052e
BA
3216 ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
3217 ew32(RDBAH(0), (rdba >> 32));
3218 ew32(RDLEN(0), rdlen);
3219 ew32(RDH(0), 0);
3220 ew32(RDT(0), 0);
3221 rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
3222 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
bc7f75fa
AK
3223
3224 /* Enable Receive Checksum Offload for TCP and UDP */
3225 rxcsum = er32(RXCSUM);
2e1706f2 3226 if (adapter->netdev->features & NETIF_F_RXCSUM)
bc7f75fa 3227 rxcsum |= E1000_RXCSUM_TUOFL;
2e1706f2 3228 else
bc7f75fa 3229 rxcsum &= ~E1000_RXCSUM_TUOFL;
bc7f75fa
AK
3230 ew32(RXCSUM, rxcsum);
3231
3e35d991
BA
3232 /* With jumbo frames, excessive C-state transition latencies result
3233 * in dropped transactions.
3234 */
3235 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3236 u32 lat =
3237 ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 -
3238 adapter->max_frame_size) * 8 / 1000;
3239
3240 if (adapter->flags & FLAG_IS_ICH) {
53ec5498
BA
3241 u32 rxdctl = er32(RXDCTL(0));
3242 ew32(RXDCTL(0), rxdctl | 0x3);
53ec5498 3243 }
3e35d991
BA
3244
3245 pm_qos_update_request(&adapter->netdev->pm_qos_req, lat);
3246 } else {
3247 pm_qos_update_request(&adapter->netdev->pm_qos_req,
3248 PM_QOS_DEFAULT_VALUE);
97ac8cae 3249 }
bc7f75fa
AK
3250
3251 /* Enable Receives */
3252 ew32(RCTL, rctl);
3253}
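
For jumbo MTUs the Rx setup above bounds CPU C-state exit latency to the time needed to fill the remaining Rx packet-buffer space at 1 Gb/s (one bit per nanosecond, so bytes * 8 / 1000 gives microseconds). A worked sketch with hypothetical buffer and frame sizes.

#include <stdio.h>

int main(void)
{
	unsigned int rxa_kb = 20;      /* hypothetical Rx packet buffer allocation, KB */
	unsigned int max_frame = 9018; /* hypothetical jumbo max_frame_size, bytes */
	unsigned int lat_us;

	/* bytes of headroom * 8 bits, one bit per ns at 1 Gb/s, /1000 -> us */
	lat_us = (rxa_kb * 1024 - max_frame) * 8 / 1000;

	printf("pm_qos latency bound = %u us\n", lat_us); /* 91 us */
	return 0;
}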
3254
3255/**
ef9b965a
JB
3256 * e1000e_write_mc_addr_list - write multicast addresses to MTA
3257 * @netdev: network interface device structure
bc7f75fa 3258 *
ef9b965a
JB
3259 * Writes multicast address list to the MTA hash table.
3260 * Returns: -ENOMEM on failure
3261 * 0 on no addresses written
3262 * X on writing X addresses to MTA
3263 */
3264static int e1000e_write_mc_addr_list(struct net_device *netdev)
3265{
3266 struct e1000_adapter *adapter = netdev_priv(netdev);
3267 struct e1000_hw *hw = &adapter->hw;
3268 struct netdev_hw_addr *ha;
3269 u8 *mta_list;
3270 int i;
3271
3272 if (netdev_mc_empty(netdev)) {
3273 /* nothing to program, so clear mc list */
3274 hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
3275 return 0;
3276 }
3277
3278 mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
3279 if (!mta_list)
3280 return -ENOMEM;
3281
3282 /* update_mc_addr_list expects a packed array of only addresses. */
3283 i = 0;
3284 netdev_for_each_mc_addr(ha, netdev)
f0ff4398 3285 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
ef9b965a
JB
3286
3287 hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
3288 kfree(mta_list);
3289
3290 return netdev_mc_count(netdev);
3291}
3292
3293/**
3294 * e1000e_write_uc_addr_list - write unicast addresses to RAR table
3295 * @netdev: network interface device structure
bc7f75fa 3296 *
ef9b965a
JB
3297 * Writes unicast address list to the RAR table.
3298 * Returns: -ENOMEM on failure/insufficient address space
3299 * 0 on no addresses written
3300 * X on writing X addresses to the RAR table
bc7f75fa 3301 **/
ef9b965a 3302static int e1000e_write_uc_addr_list(struct net_device *netdev)
bc7f75fa 3303{
ef9b965a
JB
3304 struct e1000_adapter *adapter = netdev_priv(netdev);
3305 struct e1000_hw *hw = &adapter->hw;
3306 unsigned int rar_entries = hw->mac.rar_entry_count;
3307 int count = 0;
3308
3309 /* save a rar entry for our hardware address */
3310 rar_entries--;
3311
3312 /* save a rar entry for the LAA workaround */
3313 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
3314 rar_entries--;
3315
3316 /* return ENOMEM indicating insufficient memory for addresses */
3317 if (netdev_uc_count(netdev) > rar_entries)
3318 return -ENOMEM;
3319
3320 if (!netdev_uc_empty(netdev) && rar_entries) {
3321 struct netdev_hw_addr *ha;
3322
e921eb1a 3323 /* write the addresses in reverse order to avoid write
ef9b965a
JB
3324 * combining
3325 */
3326 netdev_for_each_uc_addr(ha, netdev) {
3327 if (!rar_entries)
3328 break;
69e1e019 3329 hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
ef9b965a
JB
3330 count++;
3331 }
3332 }
3333
3334 /* zero out the remaining RAR entries not used above */
3335 for (; rar_entries > 0; rar_entries--) {
3336 ew32(RAH(rar_entries), 0);
3337 ew32(RAL(rar_entries), 0);
3338 }
3339 e1e_flush();
3340
3341 return count;
bc7f75fa
AK
3342}
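/* Worked example of the RAR budget above (the entry count is
 * hypothetical): with hw->mac.rar_entry_count = 15, one entry is
 * reserved for the primary MAC address and, when
 * FLAG_RESET_OVERWRITES_LAA is set, one more for the LAA workaround,
 * leaving 13 entries for secondary unicast addresses.  A 14th address
 * makes this function return -ENOMEM and e1000e_set_rx_mode() then
 * falls back to unicast promiscuous mode (UPE).
 */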
3343
3344/**
ef9b965a 3345 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
bc7f75fa
AK
3346 * @netdev: network interface device structure
3347 *
ef9b965a
JB
3348 * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
3349 * address list or the network interface flags are updated. This routine is
3350 * responsible for configuring the hardware for proper unicast, multicast,
bc7f75fa
AK
3351 * promiscuous mode, and all-multi behavior.
3352 **/
ef9b965a 3353static void e1000e_set_rx_mode(struct net_device *netdev)
bc7f75fa
AK
3354{
3355 struct e1000_adapter *adapter = netdev_priv(netdev);
3356 struct e1000_hw *hw = &adapter->hw;
bc7f75fa 3357 u32 rctl;
bc7f75fa 3358
63eb48f1
DE
3359 if (pm_runtime_suspended(netdev->dev.parent))
3360 return;
3361
bc7f75fa 3362 /* Check for Promiscuous and All Multicast modes */
bc7f75fa
AK
3363 rctl = er32(RCTL);
3364
ef9b965a
JB
3365 /* clear the affected bits */
3366 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
3367
bc7f75fa
AK
3368 if (netdev->flags & IFF_PROMISC) {
3369 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
86d70e53
JK
3370 /* Do not hardware filter VLANs in promisc mode */
3371 e1000e_vlan_filter_disable(adapter);
bc7f75fa 3372 } else {
ef9b965a 3373 int count;
3d3a1676 3374
746b9f02
PM
3375 if (netdev->flags & IFF_ALLMULTI) {
3376 rctl |= E1000_RCTL_MPE;
746b9f02 3377 } else {
e921eb1a 3378 /* Write addresses to the MTA, if the attempt fails
ef9b965a
JB
3379 * then we should just turn on promiscuous mode so
3380 * that we can at least receive multicast traffic
3381 */
3382 count = e1000e_write_mc_addr_list(netdev);
3383 if (count < 0)
3384 rctl |= E1000_RCTL_MPE;
746b9f02 3385 }
86d70e53 3386 e1000e_vlan_filter_enable(adapter);
e921eb1a 3387 /* Write addresses to available RAR registers, if there is not
ef9b965a
JB
3388 * sufficient space to store all the addresses then enable
3389 * unicast promiscuous mode
bc7f75fa 3390 */
ef9b965a
JB
3391 count = e1000e_write_uc_addr_list(netdev);
3392 if (count < 0)
3393 rctl |= E1000_RCTL_UPE;
bc7f75fa 3394 }
86d70e53 3395
ef9b965a
JB
3396 ew32(RCTL, rctl);
3397
f646968f 3398 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
86d70e53
JK
3399 e1000e_vlan_strip_enable(adapter);
3400 else
3401 e1000e_vlan_strip_disable(adapter);
bc7f75fa
AK
3402}
3403
70495a50
BA
3404static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
3405{
3406 struct e1000_hw *hw = &adapter->hw;
3407 u32 mrqc, rxcsum;
3408 int i;
3409 static const u32 rsskey[10] = {
3410 0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0,
3411 0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe
3412 };
3413
3414 /* Fill out hash function seed */
3415 for (i = 0; i < 10; i++)
3416 ew32(RSSRK(i), rsskey[i]);
3417
3418 /* Direct all traffic to queue 0 */
3419 for (i = 0; i < 32; i++)
3420 ew32(RETA(i), 0);
3421
e921eb1a 3422 /* Disable raw packet checksumming so that RSS hash is placed in
70495a50
BA
3423 * descriptor on writeback.
3424 */
3425 rxcsum = er32(RXCSUM);
3426 rxcsum |= E1000_RXCSUM_PCSD;
3427
3428 ew32(RXCSUM, rxcsum);
3429
3430 mrqc = (E1000_MRQC_RSS_FIELD_IPV4 |
3431 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3432 E1000_MRQC_RSS_FIELD_IPV6 |
3433 E1000_MRQC_RSS_FIELD_IPV6_TCP |
3434 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
3435
3436 ew32(MRQC, mrqc);
3437}
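/* Note on the RSS setup above: the ten RSSRK registers hold the
 * 40-byte Toeplitz hash key four bytes at a time, and a redirection
 * table of all zeroes maps every hash bucket to queue 0.  RSS is used
 * here only so the hash value is reported in the Rx descriptor
 * (RXCSUM.PCSD must be set for that field to be valid), not to spread
 * traffic across multiple queues.
 */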
3438
b67e1913
BA
3439/**
3440 * e1000e_get_base_timinca - get default SYSTIM time increment attributes
3441 * @adapter: board private structure
3442 * @timinca: pointer to returned time increment attributes
3443 *
3444 * Get attributes for incrementing the System Time Register SYSTIML/H at
3445 * the default base frequency, and set the cyclecounter shift value.
3446 **/
d89777bf 3447s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
b67e1913
BA
3448{
3449 struct e1000_hw *hw = &adapter->hw;
3450 u32 incvalue, incperiod, shift;
3451
3452 /* Make sure clock is enabled on I217 before checking the frequency */
3453 if ((hw->mac.type == e1000_pch_lpt) &&
3454 !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) &&
3455 !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
3456 u32 fextnvm7 = er32(FEXTNVM7);
3457
3458 if (!(fextnvm7 & (1 << 0))) {
3459 ew32(FEXTNVM7, fextnvm7 | (1 << 0));
3460 e1e_flush();
3461 }
3462 }
3463
3464 switch (hw->mac.type) {
3465 case e1000_pch2lan:
3466 case e1000_pch_lpt:
3467 /* On I217, the clock frequency is 25MHz or 96MHz as
3468 * indicated by the System Clock Frequency Indication
3469 */
3470 if ((hw->mac.type != e1000_pch_lpt) ||
3471 (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
3472 /* Stable 96MHz frequency */
3473 incperiod = INCPERIOD_96MHz;
3474 incvalue = INCVALUE_96MHz;
3475 shift = INCVALUE_SHIFT_96MHz;
3476 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
3477 break;
3478 }
3479 /* fall-through */
3480 case e1000_82574:
3481 case e1000_82583:
3482 /* Stable 25MHz frequency */
3483 incperiod = INCPERIOD_25MHz;
3484 incvalue = INCVALUE_25MHz;
3485 shift = INCVALUE_SHIFT_25MHz;
3486 adapter->cc.shift = shift;
3487 break;
3488 default:
3489 return -EINVAL;
3490 }
3491
3492 *timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) |
3493 ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK));
3494
3495 return 0;
3496}
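/* Sketch of how the values above fit together (the symbolic names are
 * placeholders, not the driver's exact INC* definitions): TIMINCA is
 * packed as
 *
 *	TIMINCA = (incperiod << INCPERIOD_SHIFT) | (incvalue << shift)
 *
 * so SYSTIM advances by (incvalue << shift) every incperiod clock
 * cycles.  The constants are chosen so that, with cc.mult = 1 and the
 * cc.shift programmed here, the timecounter converts the raw counter
 * to nanoseconds as ns = (systim * cc.mult) >> cc.shift.
 */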
3497
3498/**
3499 * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
3500 * @adapter: board private structure
3501 *
3502 * Outgoing time stamping can be enabled and disabled. Play nice and
3503 * disable it when requested, although it shouldn't cause any overhead
3504 * when no packet needs it. At most one packet in the queue may be
3505 * marked for time stamping, otherwise it would be impossible to tell
3506 * for sure to which packet the hardware time stamp belongs.
3507 *
3508 * Incoming time stamping has to be configured via the hardware filters.
3509 * Not all combinations are supported, in particular event type has to be
3510 * specified. Matching the kind of event packet is not supported, with the
3511 * exception of "all V2 events regardless of level 2 or 4".
3512 **/
62d7e3a2
BH
3513static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
3514 struct hwtstamp_config *config)
b67e1913
BA
3515{
3516 struct e1000_hw *hw = &adapter->hw;
b67e1913
BA
3517 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
3518 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
d89777bf
BA
3519 u32 rxmtrl = 0;
3520 u16 rxudp = 0;
3521 bool is_l4 = false;
3522 bool is_l2 = false;
b67e1913
BA
3523 u32 regval;
3524 s32 ret_val;
3525
3526 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
3527 return -EINVAL;
3528
3529 /* flags reserved for future extensions - must be zero */
3530 if (config->flags)
3531 return -EINVAL;
3532
3533 switch (config->tx_type) {
3534 case HWTSTAMP_TX_OFF:
3535 tsync_tx_ctl = 0;
3536 break;
3537 case HWTSTAMP_TX_ON:
3538 break;
3539 default:
3540 return -ERANGE;
3541 }
3542
3543 switch (config->rx_filter) {
3544 case HWTSTAMP_FILTER_NONE:
3545 tsync_rx_ctl = 0;
3546 break;
d89777bf
BA
3547 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3548 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
3549 rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE;
3550 is_l4 = true;
3551 break;
3552 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3553 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
3554 rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE;
3555 is_l4 = true;
3556 break;
3557 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3558 /* Also time stamps V2 L2 Path Delay Request/Response */
3559 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
3560 rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
3561 is_l2 = true;
3562 break;
3563 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3564 /* Also time stamps V2 L2 Path Delay Request/Response. */
3565 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
3566 rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
3567 is_l2 = true;
3568 break;
3569 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3570 /* Hardware cannot filter just V2 L4 Sync messages;
3571 * fall-through to V2 (both L2 and L4) Sync.
3572 */
3573 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3574 /* Also time stamps V2 Path Delay Request/Response. */
3575 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
3576 rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
3577 is_l2 = true;
3578 is_l4 = true;
3579 break;
3580 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3581 /* Hardware cannot filter just V2 L4 Delay Request messages;
3582 * fall-through to V2 (both L2 and L4) Delay Request.
3583 */
3584 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3585 /* Also time stamps V2 Path Delay Request/Response. */
3586 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
3587 rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
3588 is_l2 = true;
3589 is_l4 = true;
3590 break;
3591 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3592 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3593 /* Hardware cannot filter just V2 L4 or L2 Event messages;
3594 * fall-through to all V2 (both L2 and L4) Events.
3595 */
3596 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3597 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
3598 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
3599 is_l2 = true;
3600 is_l4 = true;
3601 break;
3602 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3603 /* For V1, the hardware can only filter Sync messages or
3604 * Delay Request messages but not both so fall-through to
3605 * time stamp all packets.
3606 */
b67e1913 3607 case HWTSTAMP_FILTER_ALL:
d89777bf
BA
3608 is_l2 = true;
3609 is_l4 = true;
b67e1913
BA
3610 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
3611 config->rx_filter = HWTSTAMP_FILTER_ALL;
3612 break;
3613 default:
3614 return -ERANGE;
3615 }
3616
62d7e3a2
BH
3617 adapter->hwtstamp_config = *config;
3618
b67e1913
BA
3619 /* enable/disable Tx h/w time stamping */
3620 regval = er32(TSYNCTXCTL);
3621 regval &= ~E1000_TSYNCTXCTL_ENABLED;
3622 regval |= tsync_tx_ctl;
3623 ew32(TSYNCTXCTL, regval);
3624 if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) !=
3625 (regval & E1000_TSYNCTXCTL_ENABLED)) {
3626 e_err("Timesync Tx Control register not set as expected\n");
3627 return -EAGAIN;
3628 }
3629
3630 /* enable/disable Rx h/w time stamping */
3631 regval = er32(TSYNCRXCTL);
3632 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
3633 regval |= tsync_rx_ctl;
3634 ew32(TSYNCRXCTL, regval);
3635 if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED |
3636 E1000_TSYNCRXCTL_TYPE_MASK)) !=
3637 (regval & (E1000_TSYNCRXCTL_ENABLED |
3638 E1000_TSYNCRXCTL_TYPE_MASK))) {
3639 e_err("Timesync Rx Control register not set as expected\n");
3640 return -EAGAIN;
3641 }
3642
d89777bf
BA
3643 /* L2: define ethertype filter for time stamped packets */
3644 if (is_l2)
3645 rxmtrl |= ETH_P_1588;
3646
3647 /* define which PTP packets get time stamped */
3648 ew32(RXMTRL, rxmtrl);
3649
3650 /* Filter by destination port */
3651 if (is_l4) {
3652 rxudp = PTP_EV_PORT;
3653 cpu_to_be16s(&rxudp);
3654 }
3655 ew32(RXUDP, rxudp);
3656
3657 e1e_flush();
3658
b67e1913 3659 /* Clear TSYNCRXCTL_VALID & TSYNCTXCTL_VALID bit */
70806a7f
BA
3660 er32(RXSTMPH);
3661 er32(TXSTMPH);
b67e1913
BA
3662
3663 /* Get and set the System Time Register SYSTIM base frequency */
3664 ret_val = e1000e_get_base_timinca(adapter, &regval);
3665 if (ret_val)
3666 return ret_val;
3667 ew32(TIMINCA, regval);
3668
3669 /* reset the ns time counter */
3670 timecounter_init(&adapter->tc, &adapter->cc,
3671 ktime_to_ns(ktime_get_real()));
3672
3673 return 0;
3674}
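/* Minimal userspace sketch of the request that reaches this function
 * via the SIOCSHWTSTAMP ioctl (socket setup, headers and error
 * handling are assumed; "eth0" is a placeholder):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * With PTP_V2_EVENT the switch above selects TYPE_EVENT_V2, i.e. all
 * V2 event messages are time stamped regardless of L2/L4 transport.
 */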
3675
bc7f75fa 3676/**
ad68076e 3677 * e1000_configure - configure the hardware for Rx and Tx
bc7f75fa
AK
3678 * @adapter: private board structure
3679 **/
3680static void e1000_configure(struct e1000_adapter *adapter)
3681{
55aa6985
BA
3682 struct e1000_ring *rx_ring = adapter->rx_ring;
3683
ef9b965a 3684 e1000e_set_rx_mode(adapter->netdev);
bc7f75fa
AK
3685
3686 e1000_restore_vlan(adapter);
cd791618 3687 e1000_init_manageability_pt(adapter);
bc7f75fa
AK
3688
3689 e1000_configure_tx(adapter);
70495a50
BA
3690
3691 if (adapter->netdev->features & NETIF_F_RXHASH)
3692 e1000e_setup_rss_hash(adapter);
bc7f75fa
AK
3693 e1000_setup_rctl(adapter);
3694 e1000_configure_rx(adapter);
55aa6985 3695 adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL);
bc7f75fa
AK
3696}
3697
3698/**
3699 * e1000e_power_up_phy - restore link in case the phy was powered down
3700 * @adapter: address of board private structure
3701 *
3702 * The phy may be powered down to save power and turn off link when the
3703 * driver is unloaded and wake on lan is not enabled (among others)
3704 * *** this routine MUST be followed by a call to e1000e_reset ***
3705 **/
3706void e1000e_power_up_phy(struct e1000_adapter *adapter)
3707{
17f208de
BA
3708 if (adapter->hw.phy.ops.power_up)
3709 adapter->hw.phy.ops.power_up(&adapter->hw);
bc7f75fa
AK
3710
3711 adapter->hw.mac.ops.setup_link(&adapter->hw);
3712}
3713
3714/**
3715 * e1000_power_down_phy - Power down the PHY
3716 *
17f208de
BA
3717 * Power down the PHY so no link is implied when interface is down.
3718 * The PHY cannot be powered down if management or WoL is active.
bc7f75fa
AK
3719 */
3720static void e1000_power_down_phy(struct e1000_adapter *adapter)
3721{
17f208de
BA
3722 if (adapter->hw.phy.ops.power_down)
3723 adapter->hw.phy.ops.power_down(&adapter->hw);
bc7f75fa
AK
3724}
3725
3726/**
3727 * e1000e_reset - bring the hardware into a known good state
3728 *
3729 * This function boots the hardware and enables some settings that
3730 * require a configuration cycle of the hardware - those cannot be
3731 * set/changed during runtime. After reset the device needs to be
ad68076e 3732 * properly configured for Rx, Tx etc.
bc7f75fa
AK
3733 */
3734void e1000e_reset(struct e1000_adapter *adapter)
3735{
3736 struct e1000_mac_info *mac = &adapter->hw.mac;
318a94d6 3737 struct e1000_fc_info *fc = &adapter->hw.fc;
bc7f75fa
AK
3738 struct e1000_hw *hw = &adapter->hw;
3739 u32 tx_space, min_tx_space, min_rx_space;
318a94d6 3740 u32 pba = adapter->pba;
bc7f75fa
AK
3741 u16 hwm;
3742
ad68076e 3743 /* reset Packet Buffer Allocation to default */
318a94d6 3744 ew32(PBA, pba);
df762464 3745
318a94d6 3746 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
e921eb1a 3747 /* To maintain wire speed transmits, the Tx FIFO should be
bc7f75fa
AK
3748 * large enough to accommodate two full transmit packets,
3749 * rounded up to the next 1KB and expressed in KB. Likewise,
3750 * the Rx FIFO should be large enough to accommodate at least
3751 * one full receive packet and is similarly rounded up and
ad68076e
BA
3752 * expressed in KB.
3753 */
df762464 3754 pba = er32(PBA);
bc7f75fa 3755 /* upper 16 bits has Tx packet buffer allocation size in KB */
df762464 3756 tx_space = pba >> 16;
bc7f75fa 3757 /* lower 16 bits has Rx packet buffer allocation size in KB */
df762464 3758 pba &= 0xffff;
e921eb1a 3759 /* the Tx fifo also stores 16 bytes of information about the Tx
ad68076e 3760 * but don't include ethernet FCS because hardware appends it
318a94d6
JK
3761 */
3762 min_tx_space = (adapter->max_frame_size +
e5fe2541 3763 sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2;
bc7f75fa
AK
3764 min_tx_space = ALIGN(min_tx_space, 1024);
3765 min_tx_space >>= 10;
3766 /* software strips receive CRC, so leave room for it */
318a94d6 3767 min_rx_space = adapter->max_frame_size;
bc7f75fa
AK
3768 min_rx_space = ALIGN(min_rx_space, 1024);
3769 min_rx_space >>= 10;
3770
e921eb1a 3771 /* If current Tx allocation is less than the min Tx FIFO size,
bc7f75fa 3772 * and the min Tx FIFO size is less than the current Rx FIFO
ad68076e
BA
3773 * allocation, take space away from current Rx allocation
3774 */
df762464
AK
3775 if ((tx_space < min_tx_space) &&
3776 ((min_tx_space - tx_space) < pba)) {
3777 pba -= min_tx_space - tx_space;
bc7f75fa 3778
e921eb1a 3779 /* if short on Rx space, Rx wins and must trump Tx
419e551c 3780 * adjustment
ad68076e 3781 */
79d4e908 3782 if (pba < min_rx_space)
df762464 3783 pba = min_rx_space;
bc7f75fa 3784 }
df762464
AK
3785
3786 ew32(PBA, pba);
bc7f75fa
AK
3787 }
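/* Worked example of the rebalancing above, assuming a 9000-byte MTU
 * (max_frame_size = 9018) and the 16-byte legacy Tx descriptor:
 *
 *	min_tx_space = ALIGN((9018 + 16 - 4) * 2, 1024) >> 10 = 18 (KB)
 *	min_rx_space = ALIGN(9018, 1024) >> 10               =  9 (KB)
 *
 * If the Tx allocation read back from PBA is below 18 KB and the
 * shortfall fits in the Rx allocation, that many KB move from Rx to
 * Tx, but the Rx allocation is never reduced below the 9 KB minimum.
 */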
3788
e921eb1a 3789 /* flow control settings
ad68076e 3790 *
38eb394e 3791 * The high water mark must be low enough to fit one full frame
bc7f75fa
AK
3792 * (or the size used for early receive) above it in the Rx FIFO.
3793 * Set it to the lower of:
3794 * - 90% of the Rx FIFO size, and
38eb394e 3795 * - the full Rx FIFO size minus one full frame
ad68076e 3796 */
d3738bb8
BA
3797 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
3798 fc->pause_time = 0xFFFF;
3799 else
3800 fc->pause_time = E1000_FC_PAUSE_TIME;
b20caa80 3801 fc->send_xon = true;
d3738bb8
BA
3802 fc->current_mode = fc->requested_mode;
3803
3804 switch (hw->mac.type) {
79d4e908
BA
3805 case e1000_ich9lan:
3806 case e1000_ich10lan:
3807 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3808 pba = 14;
3809 ew32(PBA, pba);
3810 fc->high_water = 0x2800;
3811 fc->low_water = fc->high_water - 8;
3812 break;
3813 }
3814 /* fall-through */
d3738bb8 3815 default:
79d4e908
BA
3816 hwm = min(((pba << 10) * 9 / 10),
3817 ((pba << 10) - adapter->max_frame_size));
d3738bb8 3818
e80bd1d1 3819 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
d3738bb8
BA
3820 fc->low_water = fc->high_water - 8;
3821 break;
3822 case e1000_pchlan:
e921eb1a 3823 /* Workaround PCH LOM adapter hangs with certain network
38eb394e
BA
3824 * loads. If hangs persist, try disabling Tx flow control.
3825 */
3826 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3827 fc->high_water = 0x3500;
e80bd1d1 3828 fc->low_water = 0x1500;
38eb394e
BA
3829 } else {
3830 fc->high_water = 0x5000;
e80bd1d1 3831 fc->low_water = 0x3000;
38eb394e 3832 }
a305595b 3833 fc->refresh_time = 0x1000;
d3738bb8
BA
3834 break;
3835 case e1000_pch2lan:
2fbe4526 3836 case e1000_pch_lpt:
d3738bb8 3837 fc->refresh_time = 0x0400;
347b5201
BA
3838
3839 if (adapter->netdev->mtu <= ETH_DATA_LEN) {
3840 fc->high_water = 0x05C20;
3841 fc->low_water = 0x05048;
3842 fc->pause_time = 0x0650;
3843 break;
828bac87 3844 }
347b5201 3845
ce345e08
BA
3846 pba = 14;
3847 ew32(PBA, pba);
347b5201
BA
3848 fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH;
3849 fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL;
d3738bb8 3850 break;
38eb394e 3851 }
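/* Illustration of the default high/low water calculation above (the
 * 20 KB Rx allocation is hypothetical): with pba = 20 and 1522-byte
 * frames,
 *
 *	hwm = min(20480 * 9 / 10, 20480 - 1522) = min(18432, 18958)
 *	    = 18432
 *
 * so fc->high_water is 18432 (already a multiple of 8, matching the
 * FCRTH granularity) and fc->low_water is 18424.
 */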
bc7f75fa 3852
e921eb1a 3853 /* Alignment of Tx data is on an arbitrary byte boundary with the
d821a4c4
BA
3854 * maximum size per Tx descriptor limited only to the transmit
3855 * allocation of the packet buffer minus 96 bytes with an upper
3856 * limit of 24KB due to receive synchronization limitations.
3857 */
3858 adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
3859 24 << 10);
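/* For example (the Tx allocation is hypothetical): with 20 KB of Tx
 * packet buffer, (20 << 10) - 96 = 20384 is below the 24 KB cap, so
 * tx_fifo_limit becomes 20384 bytes; the 24 << 10 upper limit only
 * kicks in for allocations larger than roughly 24 KB.
 */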
3860
e921eb1a 3861 /* Disable Adaptive Interrupt Moderation if 2 full packets cannot
79d4e908 3862 * fit in receive buffer.
828bac87
BA
3863 */
3864 if (adapter->itr_setting & 0x3) {
79d4e908 3865 if ((adapter->max_frame_size * 2) > (pba << 10)) {
828bac87
BA
3866 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
3867 dev_info(&adapter->pdev->dev,
17e813ec 3868 "Interrupt Throttle Rate off\n");
828bac87 3869 adapter->flags2 |= FLAG2_DISABLE_AIM;
22a4cca2 3870 e1000e_write_itr(adapter, 0);
828bac87
BA
3871 }
3872 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
3873 dev_info(&adapter->pdev->dev,
17e813ec 3874 "Interrupt Throttle Rate on\n");
828bac87
BA
3875 adapter->flags2 &= ~FLAG2_DISABLE_AIM;
3876 adapter->itr = 20000;
22a4cca2 3877 e1000e_write_itr(adapter, adapter->itr);
828bac87
BA
3878 }
3879 }
3880
bc7f75fa
AK
3881 /* Allow time for pending master requests to run */
3882 mac->ops.reset_hw(hw);
97ac8cae 3883
e921eb1a 3884 /* For parts with AMT enabled, let the firmware know
97ac8cae
BA
3885 * that the network interface is in control
3886 */
c43bc57e 3887 if (adapter->flags & FLAG_HAS_AMT)
31dbe5b4 3888 e1000e_get_hw_control(adapter);
97ac8cae 3889
bc7f75fa
AK
3890 ew32(WUC, 0);
3891
3892 if (mac->ops.init_hw(hw))
44defeb3 3893 e_err("Hardware Error\n");
bc7f75fa
AK
3894
3895 e1000_update_mng_vlan(adapter);
3896
3897 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
3898 ew32(VET, ETH_P_8021Q);
3899
3900 e1000e_reset_adaptive(hw);
31dbe5b4 3901
b67e1913 3902 /* initialize systim and reset the ns time counter */
62d7e3a2 3903 e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
b67e1913 3904
d495bcb8
BA
3905 /* Set EEE advertisement as appropriate */
3906 if (adapter->flags2 & FLAG2_HAS_EEE) {
3907 s32 ret_val;
3908 u16 adv_addr;
3909
3910 switch (hw->phy.type) {
3911 case e1000_phy_82579:
3912 adv_addr = I82579_EEE_ADVERTISEMENT;
3913 break;
3914 case e1000_phy_i217:
3915 adv_addr = I217_EEE_ADVERTISEMENT;
3916 break;
3917 default:
3918 dev_err(&adapter->pdev->dev,
3919 "Invalid PHY type setting EEE advertisement\n");
3920 return;
3921 }
3922
3923 ret_val = hw->phy.ops.acquire(hw);
3924 if (ret_val) {
3925 dev_err(&adapter->pdev->dev,
3926 "EEE advertisement - unable to acquire PHY\n");
3927 return;
3928 }
3929
3930 e1000_write_emi_reg_locked(hw, adv_addr,
3931 hw->dev_spec.ich8lan.eee_disable ?
3932 0 : adapter->eee_advert);
3933
3934 hw->phy.ops.release(hw);
3935 }
3936
31dbe5b4 3937 if (!netif_running(adapter->netdev) &&
28002099 3938 !test_bit(__E1000_TESTING, &adapter->state))
31dbe5b4 3939 e1000_power_down_phy(adapter);
31dbe5b4 3940
bc7f75fa
AK
3941 e1000_get_phy_info(hw);
3942
918d7197
BA
3943 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
3944 !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
bc7f75fa 3945 u16 phy_data = 0;
e921eb1a 3946 /* speed up time to link by disabling smart power down, ignore
bc7f75fa 3947 * the return value of this function because there is nothing
ad68076e
BA
3948 * different we would do if it failed
3949 */
bc7f75fa
AK
3950 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
3951 phy_data &= ~IGP02E1000_PM_SPD;
3952 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
3953 }
bc7f75fa
AK
3954}
3955
3956int e1000e_up(struct e1000_adapter *adapter)
3957{
3958 struct e1000_hw *hw = &adapter->hw;
3959
3960 /* hardware has been reset, we need to reload some things */
3961 e1000_configure(adapter);
3962
3963 clear_bit(__E1000_DOWN, &adapter->state);
3964
4662e82b
BA
3965 if (adapter->msix_entries)
3966 e1000_configure_msix(adapter);
bc7f75fa
AK
3967 e1000_irq_enable(adapter);
3968
400484fa 3969 netif_start_queue(adapter->netdev);
4cb9be7a 3970
bc7f75fa 3971 /* fire a link change interrupt to start the watchdog */
52a9b231
BA
3972 if (adapter->msix_entries)
3973 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3974 else
3975 ew32(ICS, E1000_ICS_LSC);
3976
bc7f75fa
AK
3977 return 0;
3978}
3979
713b3c9e
JB
3980static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
3981{
3982 struct e1000_hw *hw = &adapter->hw;
3983
3984 if (!(adapter->flags2 & FLAG2_DMA_BURST))
3985 return;
3986
3987 /* flush pending descriptor writebacks to memory */
3988 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3989 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3990
3991 /* execute the writes immediately */
3992 e1e_flush();
bf03085f 3993
e921eb1a 3994 /* due to rare timing issues, write to TIDV/RDTR again to ensure the
bf03085f
MV
3995 * write is successful
3996 */
3997 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3998 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
713b3c9e
JB
3999
4000 /* execute the writes immediately */
4001 e1e_flush();
4002}
4003
67fd4fcb
JK
4004static void e1000e_update_stats(struct e1000_adapter *adapter);
4005
28002099
DE
4006/**
4007 * e1000e_down - quiesce the device and optionally reset the hardware
4008 * @adapter: board private structure
4009 * @reset: boolean flag to reset the hardware or not
4010 */
4011void e1000e_down(struct e1000_adapter *adapter, bool reset)
bc7f75fa
AK
4012{
4013 struct net_device *netdev = adapter->netdev;
4014 struct e1000_hw *hw = &adapter->hw;
4015 u32 tctl, rctl;
4016
e921eb1a 4017 /* signal that we're down so the interrupt handler does not
ad68076e
BA
4018 * reschedule our watchdog timer
4019 */
bc7f75fa
AK
4020 set_bit(__E1000_DOWN, &adapter->state);
4021
4022 /* disable receives in the hardware */
4023 rctl = er32(RCTL);
7f99ae63
BA
4024 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
4025 ew32(RCTL, rctl & ~E1000_RCTL_EN);
bc7f75fa
AK
4026 /* flush and sleep below */
4027
4cb9be7a 4028 netif_stop_queue(netdev);
bc7f75fa
AK
4029
4030 /* disable transmits in the hardware */
4031 tctl = er32(TCTL);
4032 tctl &= ~E1000_TCTL_EN;
4033 ew32(TCTL, tctl);
7f99ae63 4034
bc7f75fa
AK
4035 /* flush both disables and wait for them to finish */
4036 e1e_flush();
1bba4386 4037 usleep_range(10000, 20000);
bc7f75fa 4038
bc7f75fa
AK
4039 e1000_irq_disable(adapter);
4040
a3b87a4c
BA
4041 napi_synchronize(&adapter->napi);
4042
bc7f75fa
AK
4043 del_timer_sync(&adapter->watchdog_timer);
4044 del_timer_sync(&adapter->phy_info_timer);
4045
bc7f75fa 4046 netif_carrier_off(netdev);
67fd4fcb
JK
4047
4048 spin_lock(&adapter->stats64_lock);
4049 e1000e_update_stats(adapter);
4050 spin_unlock(&adapter->stats64_lock);
4051
400484fa 4052 e1000e_flush_descriptors(adapter);
55aa6985
BA
4053 e1000_clean_tx_ring(adapter->tx_ring);
4054 e1000_clean_rx_ring(adapter->rx_ring);
400484fa 4055
bc7f75fa
AK
4056 adapter->link_speed = 0;
4057 adapter->link_duplex = 0;
4058
da1e2046
BA
4059 /* Disable Si errata workaround on PCHx for jumbo frame flow */
4060 if ((hw->mac.type >= e1000_pch2lan) &&
4061 (adapter->netdev->mtu > ETH_DATA_LEN) &&
4062 e1000_lv_jumbo_workaround_ich8lan(hw, false))
4063 e_dbg("failed to disable jumbo frame workaround mode\n");
4064
28002099 4065 if (reset && !pci_channel_offline(adapter->pdev))
52cc3086 4066 e1000e_reset(adapter);
bc7f75fa
AK
4067}
4068
4069void e1000e_reinit_locked(struct e1000_adapter *adapter)
4070{
4071 might_sleep();
4072 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
1bba4386 4073 usleep_range(1000, 2000);
28002099 4074 e1000e_down(adapter, true);
bc7f75fa
AK
4075 e1000e_up(adapter);
4076 clear_bit(__E1000_RESETTING, &adapter->state);
4077}
4078
b67e1913
BA
4079/**
4080 * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
4081 * @cc: cyclecounter structure
4082 **/
4083static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
4084{
4085 struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
4086 cc);
4087 struct e1000_hw *hw = &adapter->hw;
4088 cycle_t systim;
4089
4090 /* latch SYSTIMH on read of SYSTIML */
4091 systim = (cycle_t)er32(SYSTIML);
4092 systim |= (cycle_t)er32(SYSTIMH) << 32;
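	/* Reading SYSTIML first latches SYSTIMH, so the two reads form a
	 * coherent 64-bit value; reading in the opposite order could pair a
	 * stale low word with a new high word across a SYSTIML rollover.
	 */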
4093
4094 return systim;
4095}
4096
bc7f75fa
AK
4097/**
4098 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
4099 * @adapter: board private structure to initialize
4100 *
4101 * e1000_sw_init initializes the Adapter private data structure.
4102 * Fields are initialized based on PCI device information and
4103 * OS network device settings (MTU size).
4104 **/
9f9a12f8 4105static int e1000_sw_init(struct e1000_adapter *adapter)
bc7f75fa 4106{
bc7f75fa
AK
4107 struct net_device *netdev = adapter->netdev;
4108
4109 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
4110 adapter->rx_ps_bsize0 = 128;
318a94d6
JK
4111 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4112 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
55aa6985
BA
4113 adapter->tx_ring_count = E1000_DEFAULT_TXD;
4114 adapter->rx_ring_count = E1000_DEFAULT_RXD;
bc7f75fa 4115
67fd4fcb
JK
4116 spin_lock_init(&adapter->stats64_lock);
4117
4662e82b 4118 e1000e_set_interrupt_capability(adapter);
bc7f75fa 4119
4662e82b
BA
4120 if (e1000_alloc_queues(adapter))
4121 return -ENOMEM;
bc7f75fa 4122
b67e1913
BA
4123 /* Setup hardware time stamping cyclecounter */
4124 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
4125 adapter->cc.read = e1000e_cyclecounter_read;
4126 adapter->cc.mask = CLOCKSOURCE_MASK(64);
4127 adapter->cc.mult = 1;
4128 /* cc.shift set in e1000e_get_base_timinca() */
4129
4130 spin_lock_init(&adapter->systim_lock);
4131 INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work);
4132 }
4133
bc7f75fa 4134 /* Explicitly disable IRQ since the NIC can be in any state. */
bc7f75fa
AK
4135 e1000_irq_disable(adapter);
4136
bc7f75fa
AK
4137 set_bit(__E1000_DOWN, &adapter->state);
4138 return 0;
bc7f75fa
AK
4139}
4140
f8d59f78
BA
4141/**
4142 * e1000_intr_msi_test - Interrupt Handler
4143 * @irq: interrupt number
4144 * @data: pointer to a network interface device structure
4145 **/
8bb62869 4146static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data)
f8d59f78
BA
4147{
4148 struct net_device *netdev = data;
4149 struct e1000_adapter *adapter = netdev_priv(netdev);
4150 struct e1000_hw *hw = &adapter->hw;
4151 u32 icr = er32(ICR);
4152
3bb99fe2 4153 e_dbg("icr is %08X\n", icr);
f8d59f78
BA
4154 if (icr & E1000_ICR_RXSEQ) {
4155 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
e921eb1a 4156 /* Force memory writes to complete before acknowledging the
bc76329d
BA
4157 * interrupt is handled.
4158 */
f8d59f78
BA
4159 wmb();
4160 }
4161
4162 return IRQ_HANDLED;
4163}
4164
4165/**
4166 * e1000_test_msi_interrupt - Returns 0 for successful test
4167 * @adapter: board private struct
4168 *
4169 * code flow taken from tg3.c
4170 **/
4171static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
4172{
4173 struct net_device *netdev = adapter->netdev;
4174 struct e1000_hw *hw = &adapter->hw;
4175 int err;
4176
4177 /* poll_enable hasn't been called yet, so don't need disable */
4178 /* clear any pending events */
4179 er32(ICR);
4180
4181 /* free the real vector and request a test handler */
4182 e1000_free_irq(adapter);
4662e82b 4183 e1000e_reset_interrupt_capability(adapter);
f8d59f78
BA
4184
4185 /* Assume that the test fails, if it succeeds then the test
e921eb1a
BA
4186 * MSI irq handler will unset this flag
4187 */
f8d59f78
BA
4188 adapter->flags |= FLAG_MSI_TEST_FAILED;
4189
4190 err = pci_enable_msi(adapter->pdev);
4191 if (err)
4192 goto msi_test_failed;
4193
a0607fd3 4194 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
f8d59f78
BA
4195 netdev->name, netdev);
4196 if (err) {
4197 pci_disable_msi(adapter->pdev);
4198 goto msi_test_failed;
4199 }
4200
e921eb1a 4201 /* Force memory writes to complete before enabling and firing an
bc76329d
BA
4202 * interrupt.
4203 */
f8d59f78
BA
4204 wmb();
4205
4206 e1000_irq_enable(adapter);
4207
4208 /* fire an unusual interrupt on the test handler */
4209 ew32(ICS, E1000_ICS_RXSEQ);
4210 e1e_flush();
569a3aff 4211 msleep(100);
f8d59f78
BA
4212
4213 e1000_irq_disable(adapter);
4214
bc76329d 4215 rmb(); /* read flags after interrupt has been fired */
f8d59f78
BA
4216
4217 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
4662e82b 4218 adapter->int_mode = E1000E_INT_MODE_LEGACY;
068e8a30 4219 e_info("MSI interrupt test failed, using legacy interrupt.\n");
24b706b2 4220 } else {
068e8a30 4221 e_dbg("MSI interrupt test succeeded!\n");
24b706b2 4222 }
f8d59f78
BA
4223
4224 free_irq(adapter->pdev->irq, netdev);
4225 pci_disable_msi(adapter->pdev);
4226
f8d59f78 4227msi_test_failed:
4662e82b 4228 e1000e_set_interrupt_capability(adapter);
068e8a30 4229 return e1000_request_irq(adapter);
f8d59f78
BA
4230}
4231
4232/**
4233 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
4234 * @adapter: board private struct
4235 *
4236 * code flow taken from tg3.c, called with e1000 interrupts disabled.
4237 **/
4238static int e1000_test_msi(struct e1000_adapter *adapter)
4239{
4240 int err;
4241 u16 pci_cmd;
4242
4243 if (!(adapter->flags & FLAG_MSI_ENABLED))
4244 return 0;
4245
4246 /* disable SERR in case the MSI write causes a master abort */
4247 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
36f2407f
DN
4248 if (pci_cmd & PCI_COMMAND_SERR)
4249 pci_write_config_word(adapter->pdev, PCI_COMMAND,
4250 pci_cmd & ~PCI_COMMAND_SERR);
f8d59f78
BA
4251
4252 err = e1000_test_msi_interrupt(adapter);
4253
36f2407f
DN
4254 /* re-enable SERR */
4255 if (pci_cmd & PCI_COMMAND_SERR) {
4256 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
4257 pci_cmd |= PCI_COMMAND_SERR;
4258 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
4259 }
f8d59f78 4260
f8d59f78
BA
4261 return err;
4262}
4263
bc7f75fa
AK
4264/**
4265 * e1000_open - Called when a network interface is made active
4266 * @netdev: network interface device structure
4267 *
4268 * Returns 0 on success, negative value on failure
4269 *
4270 * The open entry point is called when a network interface is made
4271 * active by the system (IFF_UP). At this point all resources needed
4272 * for transmit and receive operations are allocated, the interrupt
4273 * handler is registered with the OS, the watchdog timer is started,
4274 * and the stack is notified that the interface is ready.
4275 **/
4276static int e1000_open(struct net_device *netdev)
4277{
4278 struct e1000_adapter *adapter = netdev_priv(netdev);
4279 struct e1000_hw *hw = &adapter->hw;
23606cf5 4280 struct pci_dev *pdev = adapter->pdev;
bc7f75fa
AK
4281 int err;
4282
4283 /* disallow open during test */
4284 if (test_bit(__E1000_TESTING, &adapter->state))
4285 return -EBUSY;
4286
23606cf5
RW
4287 pm_runtime_get_sync(&pdev->dev);
4288
9c563d20
JB
4289 netif_carrier_off(netdev);
4290
bc7f75fa 4291 /* allocate transmit descriptors */
55aa6985 4292 err = e1000e_setup_tx_resources(adapter->tx_ring);
bc7f75fa
AK
4293 if (err)
4294 goto err_setup_tx;
4295
4296 /* allocate receive descriptors */
55aa6985 4297 err = e1000e_setup_rx_resources(adapter->rx_ring);
bc7f75fa
AK
4298 if (err)
4299 goto err_setup_rx;
4300
e921eb1a 4301 /* If AMT is enabled, let the firmware know that the network
11b08be8
BA
4302 * interface is now open and reset the part to a known state.
4303 */
4304 if (adapter->flags & FLAG_HAS_AMT) {
31dbe5b4 4305 e1000e_get_hw_control(adapter);
11b08be8
BA
4306 e1000e_reset(adapter);
4307 }
4308
bc7f75fa
AK
4309 e1000e_power_up_phy(adapter);
4310
4311 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
e5fe2541 4312 if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
bc7f75fa
AK
4313 e1000_update_mng_vlan(adapter);
4314
79d4e908 4315 /* DMA latency requirement to workaround jumbo issue */
3e35d991
BA
4316 pm_qos_add_request(&adapter->netdev->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
4317 PM_QOS_DEFAULT_VALUE);
c128ec29 4318
e921eb1a 4319 /* before we allocate an interrupt, we must be ready to handle it.
bc7f75fa
AK
4320 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
4321 * as soon as we call pci_request_irq, so we have to setup our
ad68076e
BA
4322 * clean_rx handler before we do so.
4323 */
bc7f75fa
AK
4324 e1000_configure(adapter);
4325
4326 err = e1000_request_irq(adapter);
4327 if (err)
4328 goto err_req_irq;
4329
e921eb1a 4330 /* Work around PCIe errata with MSI interrupts causing some chipsets to
f8d59f78
BA
4331 * ignore e1000e MSI messages, which means we need to test our MSI
4332 * interrupt now
4333 */
4662e82b 4334 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
f8d59f78
BA
4335 err = e1000_test_msi(adapter);
4336 if (err) {
4337 e_err("Interrupt allocation failed\n");
4338 goto err_req_irq;
4339 }
4340 }
4341
bc7f75fa
AK
4342 /* From here on the code is the same as e1000e_up() */
4343 clear_bit(__E1000_DOWN, &adapter->state);
4344
4345 napi_enable(&adapter->napi);
4346
4347 e1000_irq_enable(adapter);
4348
09357b00 4349 adapter->tx_hang_recheck = false;
4cb9be7a 4350 netif_start_queue(netdev);
d55b53ff 4351
66148bab 4352 hw->mac.get_link_status = true;
23606cf5
RW
4353 pm_runtime_put(&pdev->dev);
4354
bc7f75fa 4355 /* fire a link status change interrupt to start the watchdog */
52a9b231
BA
4356 if (adapter->msix_entries)
4357 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
4358 else
4359 ew32(ICS, E1000_ICS_LSC);
bc7f75fa
AK
4360
4361 return 0;
4362
4363err_req_irq:
31dbe5b4 4364 e1000e_release_hw_control(adapter);
bc7f75fa 4365 e1000_power_down_phy(adapter);
55aa6985 4366 e1000e_free_rx_resources(adapter->rx_ring);
bc7f75fa 4367err_setup_rx:
55aa6985 4368 e1000e_free_tx_resources(adapter->tx_ring);
bc7f75fa
AK
4369err_setup_tx:
4370 e1000e_reset(adapter);
23606cf5 4371 pm_runtime_put_sync(&pdev->dev);
bc7f75fa
AK
4372
4373 return err;
4374}
4375
4376/**
4377 * e1000_close - Disables a network interface
4378 * @netdev: network interface device structure
4379 *
4380 * Returns 0, this is not allowed to fail
4381 *
4382 * The close entry point is called when an interface is de-activated
4383 * by the OS. The hardware is still under the drivers control, but
4384 * needs to be disabled. A global MAC reset is issued to stop the
4385 * hardware, and all transmit and receive resources are freed.
4386 **/
4387static int e1000_close(struct net_device *netdev)
4388{
4389 struct e1000_adapter *adapter = netdev_priv(netdev);
23606cf5 4390 struct pci_dev *pdev = adapter->pdev;
bb9e44d0
BA
4391 int count = E1000_CHECK_RESET_COUNT;
4392
4393 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
4394 usleep_range(10000, 20000);
bc7f75fa
AK
4395
4396 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
23606cf5
RW
4397
4398 pm_runtime_get_sync(&pdev->dev);
4399
4400 if (!test_bit(__E1000_DOWN, &adapter->state)) {
28002099 4401 e1000e_down(adapter, true);
23606cf5 4402 e1000_free_irq(adapter);
63eb48f1
DE
4403
4404 /* Link status message must follow this format */
4405 pr_info("%s NIC Link is Down\n", adapter->netdev->name);
23606cf5 4406 }
a3b87a4c
BA
4407
4408 napi_disable(&adapter->napi);
4409
55aa6985
BA
4410 e1000e_free_tx_resources(adapter->tx_ring);
4411 e1000e_free_rx_resources(adapter->rx_ring);
bc7f75fa 4412
e921eb1a 4413 /* kill manageability vlan ID if supported, but not if a vlan with
ad68076e
BA
4414 * the same ID is registered on the host OS (let 8021q kill it)
4415 */
e5fe2541 4416 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
80d5c368
PM
4417 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
4418 adapter->mng_vlan_id);
bc7f75fa 4419
e921eb1a 4420 /* If AMT is enabled, let the firmware know that the network
ad68076e
BA
4421 * interface is now closed
4422 */
31dbe5b4
BA
4423 if ((adapter->flags & FLAG_HAS_AMT) &&
4424 !test_bit(__E1000_TESTING, &adapter->state))
4425 e1000e_release_hw_control(adapter);
bc7f75fa 4426
3e35d991 4427 pm_qos_remove_request(&adapter->netdev->pm_qos_req);
c128ec29 4428
23606cf5
RW
4429 pm_runtime_put_sync(&pdev->dev);
4430
bc7f75fa
AK
4431 return 0;
4432}
fc830b78 4433
bc7f75fa
AK
4434/**
4435 * e1000_set_mac - Change the Ethernet Address of the NIC
4436 * @netdev: network interface device structure
4437 * @p: pointer to an address structure
4438 *
4439 * Returns 0 on success, negative on failure
4440 **/
4441static int e1000_set_mac(struct net_device *netdev, void *p)
4442{
4443 struct e1000_adapter *adapter = netdev_priv(netdev);
69e1e019 4444 struct e1000_hw *hw = &adapter->hw;
bc7f75fa
AK
4445 struct sockaddr *addr = p;
4446
4447 if (!is_valid_ether_addr(addr->sa_data))
4448 return -EADDRNOTAVAIL;
4449
4450 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4451 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
4452
69e1e019 4453 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
bc7f75fa
AK
4454
4455 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
4456 /* activate the work around */
4457 e1000e_set_laa_state_82571(&adapter->hw, 1);
4458
e921eb1a 4459 /* Hold a copy of the LAA in RAR[14]. This is done so that
bc7f75fa
AK
4460 * between the time RAR[0] gets clobbered and the time it
4461 * gets fixed (in e1000_watchdog), the actual LAA is in one
4462 * of the RARs and no incoming packets directed to this port
4463 * are dropped. Eventually the LAA will be in RAR[0] and
ad68076e
BA
4464 * RAR[14]
4465 */
69e1e019
BA
4466 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr,
4467 adapter->hw.mac.rar_entry_count - 1);
bc7f75fa
AK
4468 }
4469
4470 return 0;
4471}
4472
a8f88ff5
JB
4473/**
4474 * e1000e_update_phy_task - work thread to update phy
4475 * @work: pointer to our work struct
4476 *
4477 * This worker thread exists because we must acquire a
4478 * semaphore to read the phy; we may msleep while waiting for it,
4479 * and we cannot msleep in timer context.
4480 **/
4481static void e1000e_update_phy_task(struct work_struct *work)
4482{
4483 struct e1000_adapter *adapter = container_of(work,
17e813ec
BA
4484 struct e1000_adapter,
4485 update_phy_task);
a03206ed 4486 struct e1000_hw *hw = &adapter->hw;
615b32af
JB
4487
4488 if (test_bit(__E1000_DOWN, &adapter->state))
4489 return;
4490
a03206ed
DE
4491 e1000_get_phy_info(hw);
4492
4493 /* Enable EEE on 82579 after link up */
4494 if (hw->phy.type == e1000_phy_82579)
4495 e1000_set_eee_pchlan(hw);
a8f88ff5
JB
4496}
4497
e921eb1a
BA
4498/**
4499 * e1000_update_phy_info - timer call-back to update PHY info
4500 * @data: pointer to adapter cast into an unsigned long
4501 *
ad68076e
BA
4502 * Need to wait a few seconds after link up to get diagnostic information from
4503 * the phy
e921eb1a 4504 **/
bc7f75fa
AK
4505static void e1000_update_phy_info(unsigned long data)
4506{
53aa82da 4507 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
615b32af
JB
4508
4509 if (test_bit(__E1000_DOWN, &adapter->state))
4510 return;
4511
a8f88ff5 4512 schedule_work(&adapter->update_phy_task);
bc7f75fa
AK
4513}
4514
8c7bbb92
BA
4515/**
4516 * e1000e_update_phy_stats - Update the PHY statistics counters
4517 * @adapter: board private structure
2b6b168d
BA
4518 *
4519 * Read/clear the upper 16-bit PHY registers and read/accumulate the lower 16-bit PHY registers
8c7bbb92
BA
4520 **/
4521static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
4522{
4523 struct e1000_hw *hw = &adapter->hw;
4524 s32 ret_val;
4525 u16 phy_data;
4526
4527 ret_val = hw->phy.ops.acquire(hw);
4528 if (ret_val)
4529 return;
4530
e921eb1a 4531 /* A page set is expensive so check if already on desired page.
8c7bbb92
BA
4532 * If not, set to the page with the PHY status registers.
4533 */
2b6b168d 4534 hw->phy.addr = 1;
8c7bbb92
BA
4535 ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4536 &phy_data);
4537 if (ret_val)
4538 goto release;
2b6b168d
BA
4539 if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
4540 ret_val = hw->phy.ops.set_page(hw,
4541 HV_STATS_PAGE << IGP_PAGE_SHIFT);
8c7bbb92
BA
4542 if (ret_val)
4543 goto release;
4544 }
4545
8c7bbb92 4546 /* Single Collision Count */
2b6b168d
BA
4547 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4548 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
8c7bbb92
BA
4549 if (!ret_val)
4550 adapter->stats.scc += phy_data;
4551
4552 /* Excessive Collision Count */
2b6b168d
BA
4553 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4554 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
8c7bbb92
BA
4555 if (!ret_val)
4556 adapter->stats.ecol += phy_data;
4557
4558 /* Multiple Collision Count */
2b6b168d
BA
4559 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4560 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
8c7bbb92
BA
4561 if (!ret_val)
4562 adapter->stats.mcc += phy_data;
4563
4564 /* Late Collision Count */
2b6b168d
BA
4565 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4566 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
8c7bbb92
BA
4567 if (!ret_val)
4568 adapter->stats.latecol += phy_data;
4569
4570 /* Collision Count - also used for adaptive IFS */
2b6b168d
BA
4571 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4572 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
8c7bbb92
BA
4573 if (!ret_val)
4574 hw->mac.collision_delta = phy_data;
4575
4576 /* Defer Count */
2b6b168d
BA
4577 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4578 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
8c7bbb92
BA
4579 if (!ret_val)
4580 adapter->stats.dc += phy_data;
4581
4582 /* Transmit with no CRS */
2b6b168d
BA
4583 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4584 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
8c7bbb92
BA
4585 if (!ret_val)
4586 adapter->stats.tncrs += phy_data;
4587
4588release:
4589 hw->phy.ops.release(hw);
4590}
4591
bc7f75fa
AK
4592/**
4593 * e1000e_update_stats - Update the board statistics counters
4594 * @adapter: board private structure
4595 **/
67fd4fcb 4596static void e1000e_update_stats(struct e1000_adapter *adapter)
bc7f75fa 4597{
7274c20f 4598 struct net_device *netdev = adapter->netdev;
bc7f75fa
AK
4599 struct e1000_hw *hw = &adapter->hw;
4600 struct pci_dev *pdev = adapter->pdev;
bc7f75fa 4601
e921eb1a 4602 /* Prevent stats update while adapter is being reset, or if the pci
bc7f75fa
AK
4603 * connection is down.
4604 */
4605 if (adapter->link_speed == 0)
4606 return;
4607 if (pci_channel_offline(pdev))
4608 return;
4609
bc7f75fa
AK
4610 adapter->stats.crcerrs += er32(CRCERRS);
4611 adapter->stats.gprc += er32(GPRC);
7c25769f 4612 adapter->stats.gorc += er32(GORCL);
e80bd1d1 4613 er32(GORCH); /* Clear gorc */
bc7f75fa
AK
4614 adapter->stats.bprc += er32(BPRC);
4615 adapter->stats.mprc += er32(MPRC);
4616 adapter->stats.roc += er32(ROC);
4617
bc7f75fa 4618 adapter->stats.mpc += er32(MPC);
8c7bbb92
BA
4619
4620 /* Half-duplex statistics */
4621 if (adapter->link_duplex == HALF_DUPLEX) {
4622 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
4623 e1000e_update_phy_stats(adapter);
4624 } else {
4625 adapter->stats.scc += er32(SCC);
4626 adapter->stats.ecol += er32(ECOL);
4627 adapter->stats.mcc += er32(MCC);
4628 adapter->stats.latecol += er32(LATECOL);
4629 adapter->stats.dc += er32(DC);
4630
4631 hw->mac.collision_delta = er32(COLC);
4632
4633 if ((hw->mac.type != e1000_82574) &&
4634 (hw->mac.type != e1000_82583))
4635 adapter->stats.tncrs += er32(TNCRS);
4636 }
4637 adapter->stats.colc += hw->mac.collision_delta;
a4f58f54 4638 }
8c7bbb92 4639
bc7f75fa
AK
4640 adapter->stats.xonrxc += er32(XONRXC);
4641 adapter->stats.xontxc += er32(XONTXC);
4642 adapter->stats.xoffrxc += er32(XOFFRXC);
4643 adapter->stats.xofftxc += er32(XOFFTXC);
bc7f75fa 4644 adapter->stats.gptc += er32(GPTC);
7c25769f 4645 adapter->stats.gotc += er32(GOTCL);
e80bd1d1 4646 er32(GOTCH); /* Clear gotc */
bc7f75fa
AK
4647 adapter->stats.rnbc += er32(RNBC);
4648 adapter->stats.ruc += er32(RUC);
bc7f75fa
AK
4649
4650 adapter->stats.mptc += er32(MPTC);
4651 adapter->stats.bptc += er32(BPTC);
4652
4653 /* used for adaptive IFS */
4654
4655 hw->mac.tx_packet_delta = er32(TPT);
4656 adapter->stats.tpt += hw->mac.tx_packet_delta;
bc7f75fa
AK
4657
4658 adapter->stats.algnerrc += er32(ALGNERRC);
4659 adapter->stats.rxerrc += er32(RXERRC);
bc7f75fa
AK
4660 adapter->stats.cexterr += er32(CEXTERR);
4661 adapter->stats.tsctc += er32(TSCTC);
4662 adapter->stats.tsctfc += er32(TSCTFC);
4663
bc7f75fa 4664 /* Fill out the OS statistics structure */
7274c20f
AK
4665 netdev->stats.multicast = adapter->stats.mprc;
4666 netdev->stats.collisions = adapter->stats.colc;
bc7f75fa
AK
4667
4668 /* Rx Errors */
4669
e921eb1a 4670 /* RLEC on some newer hardware can be incorrect so build
ad68076e
BA
4671 * our own version based on RUC and ROC
4672 */
7274c20f 4673 netdev->stats.rx_errors = adapter->stats.rxerrc +
f0ff4398
BA
4674 adapter->stats.crcerrs + adapter->stats.algnerrc +
4675 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
7274c20f 4676 netdev->stats.rx_length_errors = adapter->stats.ruc +
f0ff4398 4677 adapter->stats.roc;
7274c20f
AK
4678 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
4679 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
4680 netdev->stats.rx_missed_errors = adapter->stats.mpc;
bc7f75fa
AK
4681
4682 /* Tx Errors */
f0ff4398 4683 netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol;
7274c20f
AK
4684 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
4685 netdev->stats.tx_window_errors = adapter->stats.latecol;
4686 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
bc7f75fa
AK
4687
4688 /* Tx Dropped needs to be maintained elsewhere */
4689
bc7f75fa
AK
4690 /* Management Stats */
4691 adapter->stats.mgptc += er32(MGTPTC);
4692 adapter->stats.mgprc += er32(MGTPRC);
4693 adapter->stats.mgpdc += er32(MGTPDC);
94fb848b
BA
4694
4695 /* Correctable ECC Errors */
4696 if (hw->mac.type == e1000_pch_lpt) {
4697 u32 pbeccsts = er32(PBECCSTS);
4698 adapter->corr_errors +=
4699 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
4700 adapter->uncorr_errors +=
4701 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
4702 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
4703 }
bc7f75fa
AK
4704}
4705
7c25769f
BA
4706/**
4707 * e1000_phy_read_status - Update the PHY register status snapshot
4708 * @adapter: board private structure
4709 **/
4710static void e1000_phy_read_status(struct e1000_adapter *adapter)
4711{
4712 struct e1000_hw *hw = &adapter->hw;
4713 struct e1000_phy_regs *phy = &adapter->phy_regs;
7c25769f 4714
97390ab8
BA
4715 if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) &&
4716 (er32(STATUS) & E1000_STATUS_LU) &&
7c25769f 4717 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
90da0669
BA
4718 int ret_val;
4719
c2ade1a4
BA
4720 ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
4721 ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
4722 ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
4723 ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa);
4724 ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion);
4725 ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000);
4726 ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000);
4727 ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
7c25769f 4728 if (ret_val)
44defeb3 4729 e_warn("Error reading PHY register\n");
7c25769f 4730 } else {
e921eb1a 4731 /* Do not read PHY registers if link is not up
7c25769f
BA
4732 * Set values to typical power-on defaults
4733 */
4734 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
4735 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
4736 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
4737 BMSR_ERCAP);
4738 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
4739 ADVERTISE_ALL | ADVERTISE_CSMA);
4740 phy->lpa = 0;
4741 phy->expansion = EXPANSION_ENABLENPAGE;
4742 phy->ctrl1000 = ADVERTISE_1000FULL;
4743 phy->stat1000 = 0;
4744 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
4745 }
7c25769f
BA
4746}
4747
bc7f75fa
AK
4748static void e1000_print_link_info(struct e1000_adapter *adapter)
4749{
bc7f75fa
AK
4750 struct e1000_hw *hw = &adapter->hw;
4751 u32 ctrl = er32(CTRL);
4752
8f12fe86 4753 /* Link status message must follow this format for user tools */
7dbc1672
BA
4754 pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4755 adapter->netdev->name, adapter->link_speed,
ef456f85
JK
4756 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
4757 (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
4758 (ctrl & E1000_CTRL_RFCE) ? "Rx" :
4759 (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
bc7f75fa
AK
4760}
4761
0c6bdb30 4762static bool e1000e_has_link(struct e1000_adapter *adapter)
318a94d6
JK
4763{
4764 struct e1000_hw *hw = &adapter->hw;
3db1cd5c 4765 bool link_active = false;
318a94d6
JK
4766 s32 ret_val = 0;
4767
e921eb1a 4768 /* get_link_status is set on LSC (link status) interrupt or
318a94d6
JK
4769 * Rx sequence error interrupt. get_link_status will stay
4770 * false until the check_for_link establishes link
4771 * for copper adapters ONLY
4772 */
4773 switch (hw->phy.media_type) {
4774 case e1000_media_type_copper:
4775 if (hw->mac.get_link_status) {
4776 ret_val = hw->mac.ops.check_for_link(hw);
4777 link_active = !hw->mac.get_link_status;
4778 } else {
3db1cd5c 4779 link_active = true;
318a94d6
JK
4780 }
4781 break;
4782 case e1000_media_type_fiber:
4783 ret_val = hw->mac.ops.check_for_link(hw);
4784 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
4785 break;
4786 case e1000_media_type_internal_serdes:
4787 ret_val = hw->mac.ops.check_for_link(hw);
4788 link_active = adapter->hw.mac.serdes_has_link;
4789 break;
4790 default:
4791 case e1000_media_type_unknown:
4792 break;
4793 }
4794
4795 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
4796 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
4797 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
44defeb3 4798 e_info("Gigabit has been disabled, downgrading speed\n");
318a94d6
JK
4799 }
4800
4801 return link_active;
4802}
4803
4804static void e1000e_enable_receives(struct e1000_adapter *adapter)
4805{
4806 /* make sure the receive unit is started */
4807 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
12d43f7d 4808 (adapter->flags & FLAG_RESTART_NOW)) {
318a94d6
JK
4809 struct e1000_hw *hw = &adapter->hw;
4810 u32 rctl = er32(RCTL);
4811 ew32(RCTL, rctl | E1000_RCTL_EN);
12d43f7d 4812 adapter->flags &= ~FLAG_RESTART_NOW;
318a94d6
JK
4813 }
4814}
4815
ff10e13c
CW
4816static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
4817{
4818 struct e1000_hw *hw = &adapter->hw;
4819
e921eb1a 4820 /* With 82574 controllers, PHY needs to be checked periodically
ff10e13c
CW
4821 * for hung state and reset, if two calls return true
4822 */
4823 if (e1000_check_phy_82574(hw))
4824 adapter->phy_hang_count++;
4825 else
4826 adapter->phy_hang_count = 0;
4827
4828 if (adapter->phy_hang_count > 1) {
4829 adapter->phy_hang_count = 0;
d9554e96 4830 e_dbg("PHY appears hung - resetting\n");
ff10e13c
CW
4831 schedule_work(&adapter->reset_task);
4832 }
4833}
4834
bc7f75fa
AK
4835/**
4836 * e1000_watchdog - Timer Call-back
4837 * @data: pointer to adapter cast into an unsigned long
4838 **/
4839static void e1000_watchdog(unsigned long data)
4840{
53aa82da 4841 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
bc7f75fa
AK
4842
4843 /* Do the rest outside of interrupt context */
4844 schedule_work(&adapter->watchdog_task);
4845
4846 /* TODO: make this use queue_delayed_work() */
4847}
4848
4849static void e1000_watchdog_task(struct work_struct *work)
4850{
4851 struct e1000_adapter *adapter = container_of(work,
17e813ec
BA
4852 struct e1000_adapter,
4853 watchdog_task);
bc7f75fa
AK
4854 struct net_device *netdev = adapter->netdev;
4855 struct e1000_mac_info *mac = &adapter->hw.mac;
75eb0fad 4856 struct e1000_phy_info *phy = &adapter->hw.phy;
bc7f75fa
AK
4857 struct e1000_ring *tx_ring = adapter->tx_ring;
4858 struct e1000_hw *hw = &adapter->hw;
4859 u32 link, tctl;
bc7f75fa 4860
615b32af
JB
4861 if (test_bit(__E1000_DOWN, &adapter->state))
4862 return;
4863
b405e8df 4864 link = e1000e_has_link(adapter);
318a94d6 4865 if ((netif_carrier_ok(netdev)) && link) {
23606cf5
RW
4866 /* Cancel scheduled suspend requests. */
4867 pm_runtime_resume(netdev->dev.parent);
4868
318a94d6 4869 e1000e_enable_receives(adapter);
bc7f75fa 4870 goto link_up;
bc7f75fa
AK
4871 }
4872
4873 if ((e1000e_enable_tx_pkt_filtering(hw)) &&
4874 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
4875 e1000_update_mng_vlan(adapter);
4876
bc7f75fa
AK
4877 if (link) {
4878 if (!netif_carrier_ok(netdev)) {
3db1cd5c 4879 bool txb2b = true;
23606cf5
RW
4880
4881 /* Cancel scheduled suspend requests. */
4882 pm_runtime_resume(netdev->dev.parent);
4883
318a94d6 4884 /* update snapshot of PHY registers on LSC */
7c25769f 4885 e1000_phy_read_status(adapter);
bc7f75fa 4886 mac->ops.get_link_up_info(&adapter->hw,
17e813ec
BA
4887 &adapter->link_speed,
4888 &adapter->link_duplex);
bc7f75fa 4889 e1000_print_link_info(adapter);
e792cd91
KS
4890
4891 /* check if SmartSpeed worked */
4892 e1000e_check_downshift(hw);
4893 if (phy->speed_downgraded)
4894 netdev_warn(netdev,
4895 "Link Speed was downgraded by SmartSpeed\n");
4896
e921eb1a 4897 /* On supported PHYs, check for duplex mismatch only
f4187b56
BA
4898 * if link has autonegotiated at 10/100 half
4899 */
4900 if ((hw->phy.type == e1000_phy_igp_3 ||
4901 hw->phy.type == e1000_phy_bm) &&
138953bb 4902 hw->mac.autoneg &&
f4187b56
BA
4903 (adapter->link_speed == SPEED_10 ||
4904 adapter->link_speed == SPEED_100) &&
4905 (adapter->link_duplex == HALF_DUPLEX)) {
4906 u16 autoneg_exp;
4907
c2ade1a4 4908 e1e_rphy(hw, MII_EXPANSION, &autoneg_exp);
f4187b56 4909
c2ade1a4 4910 if (!(autoneg_exp & EXPANSION_NWAY))
ef456f85 4911 e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n");
f4187b56
BA
4912 }
4913
f49c57e1 4914 /* adjust timeout factor according to speed/duplex */
bc7f75fa
AK
4915 adapter->tx_timeout_factor = 1;
4916 switch (adapter->link_speed) {
4917 case SPEED_10:
3db1cd5c 4918 txb2b = false;
10f1b492 4919 adapter->tx_timeout_factor = 16;
bc7f75fa
AK
4920 break;
4921 case SPEED_100:
3db1cd5c 4922 txb2b = false;
4c86e0b9 4923 adapter->tx_timeout_factor = 10;
bc7f75fa
AK
4924 break;
4925 }
4926
e921eb1a 4927 /* workaround: re-program speed mode bit after
ad68076e
BA
4928 * link-up event
4929 */
bc7f75fa
AK
4930 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
4931 !txb2b) {
4932 u32 tarc0;
e9ec2c0f 4933 tarc0 = er32(TARC(0));
bc7f75fa 4934 tarc0 &= ~SPEED_MODE_BIT;
e9ec2c0f 4935 ew32(TARC(0), tarc0);
bc7f75fa
AK
4936 }
4937
e921eb1a 4938 /* disable TSO for PCIe and 10/100 speeds, to avoid
ad68076e
BA
4939 * some hardware issues
4940 */
bc7f75fa
AK
4941 if (!(adapter->flags & FLAG_TSO_FORCE)) {
4942 switch (adapter->link_speed) {
4943 case SPEED_10:
4944 case SPEED_100:
44defeb3 4945 e_info("10/100 speed: disabling TSO\n");
bc7f75fa
AK
4946 netdev->features &= ~NETIF_F_TSO;
4947 netdev->features &= ~NETIF_F_TSO6;
4948 break;
4949 case SPEED_1000:
4950 netdev->features |= NETIF_F_TSO;
4951 netdev->features |= NETIF_F_TSO6;
4952 break;
4953 default:
4954 /* oops */
4955 break;
4956 }
4957 }
4958
e921eb1a 4959 /* enable transmits in the hardware; we need to do this
ad68076e
BA
4960 * after setting TARC(0)
4961 */
bc7f75fa
AK
4962 tctl = er32(TCTL);
4963 tctl |= E1000_TCTL_EN;
4964 ew32(TCTL, tctl);
4965
e921eb1a 4966 /* Perform any post-link-up configuration before
75eb0fad
BA
4967 * reporting link up.
4968 */
4969 if (phy->ops.cfg_on_link_up)
4970 phy->ops.cfg_on_link_up(hw);
4971
bc7f75fa 4972 netif_carrier_on(netdev);
bc7f75fa
AK
4973
4974 if (!test_bit(__E1000_DOWN, &adapter->state))
4975 mod_timer(&adapter->phy_info_timer,
4976 round_jiffies(jiffies + 2 * HZ));
bc7f75fa
AK
4977 }
4978 } else {
4979 if (netif_carrier_ok(netdev)) {
4980 adapter->link_speed = 0;
4981 adapter->link_duplex = 0;
8f12fe86 4982 /* Link status message must follow this format */
7dbc1672 4983 pr_info("%s NIC Link is Down\n", adapter->netdev->name);
bc7f75fa 4984 netif_carrier_off(netdev);
bc7f75fa
AK
4985 if (!test_bit(__E1000_DOWN, &adapter->state))
4986 mod_timer(&adapter->phy_info_timer,
4987 round_jiffies(jiffies + 2 * HZ));
4988
d9554e96
DE
4989 /* 80003ES2LAN requires a Rx packet buffer work-around
4990 * on link down event; reset the controller to flush
4991 * the Rx packet buffer.
12d43f7d 4992 */
d9554e96 4993 if (adapter->flags & FLAG_RX_NEEDS_RESTART)
12d43f7d 4994 adapter->flags |= FLAG_RESTART_NOW;
23606cf5
RW
4995 else
4996 pm_schedule_suspend(netdev->dev.parent,
17e813ec 4997 LINK_TIMEOUT);
bc7f75fa
AK
4998 }
4999 }
5000
5001link_up:
67fd4fcb 5002 spin_lock(&adapter->stats64_lock);
bc7f75fa
AK
5003 e1000e_update_stats(adapter);
5004
5005 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
5006 adapter->tpt_old = adapter->stats.tpt;
5007 mac->collision_delta = adapter->stats.colc - adapter->colc_old;
5008 adapter->colc_old = adapter->stats.colc;
5009
7c25769f
BA
5010 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
5011 adapter->gorc_old = adapter->stats.gorc;
5012 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
5013 adapter->gotc_old = adapter->stats.gotc;
2084b114 5014 spin_unlock(&adapter->stats64_lock);
bc7f75fa 5015
d9554e96
DE
5016 /* If the link is lost the controller stops DMA, but
5017 * if there is queued Tx work it cannot be done. So
5018 * reset the controller to flush the Tx packet buffers.
5019 */
5020 if (!netif_carrier_ok(netdev) &&
5021 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
5022 adapter->flags |= FLAG_RESTART_NOW;
5023
5024 /* If reset is necessary, do it outside of interrupt context. */
12d43f7d 5025 if (adapter->flags & FLAG_RESTART_NOW) {
90da0669
BA
5026 schedule_work(&adapter->reset_task);
5027 /* return immediately since reset is imminent */
5028 return;
bc7f75fa
AK
5029 }
5030
12d43f7d
BA
5031 e1000e_update_adaptive(&adapter->hw);
5032
eab2abf5
JB
5033 /* Simple mode for Interrupt Throttle Rate (ITR) */
5034 if (adapter->itr_setting == 4) {
e921eb1a 5035 /* Symmetric Tx/Rx gets a reduced ITR=2000;
eab2abf5
JB
5036 * Total asymmetrical Tx or Rx gets ITR=8000;
5037 * everyone else is between 2000-8000.
5038 */
5039 u32 goc = (adapter->gotc + adapter->gorc) / 10000;
5040 u32 dif = (adapter->gotc > adapter->gorc ?
17e813ec
BA
5041 adapter->gotc - adapter->gorc :
5042 adapter->gorc - adapter->gotc) / 10000;
eab2abf5
JB
5043 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
5044
22a4cca2 5045 e1000e_write_itr(adapter, itr);
eab2abf5
JB
5046 }
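/* Illustrative arithmetic for the simple-mode ITR formula above (editorial
 * note, made-up numbers): with ~50,000,000 bytes transmitted and ~10,000,000
 * bytes received since the previous watchdog run, goc = 60,000,000 / 10000 =
 * 6000 and dif = 40,000,000 / 10000 = 4000, so itr = 4000 * 6000 / 6000 +
 * 2000 = 6000. Symmetric traffic (dif == 0) hits the 2000 floor; completely
 * one-sided traffic (dif == goc) hits the 8000 ceiling.
 */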
5047
ad68076e 5048 /* Cause software interrupt to ensure Rx ring is cleaned */
4662e82b
BA
5049 if (adapter->msix_entries)
5050 ew32(ICS, adapter->rx_ring->ims_val);
5051 else
5052 ew32(ICS, E1000_ICS_RXDMT0);
bc7f75fa 5053
713b3c9e
JB
5054 /* flush pending descriptors to memory before detecting Tx hang */
5055 e1000e_flush_descriptors(adapter);
5056
bc7f75fa 5057 /* Force detection of hung controller every watchdog period */
3db1cd5c 5058 adapter->detect_tx_hung = true;
bc7f75fa 5059
e921eb1a 5060 /* With 82571 controllers, LAA may be overwritten due to controller
ad68076e
BA
5061 * reset from the other port. Set the appropriate LAA in RAR[0]
5062 */
bc7f75fa 5063 if (e1000e_get_laa_state_82571(hw))
69e1e019 5064 hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0);
bc7f75fa 5065
ff10e13c
CW
5066 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
5067 e1000e_check_82574_phy_workaround(adapter);
5068
b67e1913
BA
5069 /* Clear valid timestamp stuck in RXSTMPL/H due to a Rx error */
5070 if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
5071 if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) &&
5072 (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) {
5073 er32(RXSTMPH);
5074 adapter->rx_hwtstamp_cleared++;
5075 } else {
5076 adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP;
5077 }
5078 }
5079
bc7f75fa
AK
5080 /* Reset the timer */
5081 if (!test_bit(__E1000_DOWN, &adapter->state))
5082 mod_timer(&adapter->watchdog_timer,
5083 round_jiffies(jiffies + 2 * HZ));
5084}
5085
5086#define E1000_TX_FLAGS_CSUM 0x00000001
5087#define E1000_TX_FLAGS_VLAN 0x00000002
5088#define E1000_TX_FLAGS_TSO 0x00000004
5089#define E1000_TX_FLAGS_IPV4 0x00000008
943146de 5090#define E1000_TX_FLAGS_NO_FCS 0x00000010
b67e1913 5091#define E1000_TX_FLAGS_HWTSTAMP 0x00000020
bc7f75fa
AK
5092#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
5093#define E1000_TX_FLAGS_VLAN_SHIFT 16
5094
55aa6985 5095static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
bc7f75fa 5096{
bc7f75fa
AK
5097 struct e1000_context_desc *context_desc;
5098 struct e1000_buffer *buffer_info;
5099 unsigned int i;
5100 u32 cmd_length = 0;
70443ae9 5101 u16 ipcse = 0, mss;
bc7f75fa 5102 u8 ipcss, ipcso, tucss, tucso, hdr_len;
bcf1f57f 5103 int err;
bc7f75fa 5104
3d5e33c9
BA
5105 if (!skb_is_gso(skb))
5106 return 0;
bc7f75fa 5107
bcf1f57f
FR
5108 err = skb_cow_head(skb, 0);
5109 if (err < 0)
5110 return err;
bc7f75fa 5111
3d5e33c9
BA
5112 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
5113 mss = skb_shinfo(skb)->gso_size;
5114 if (skb->protocol == htons(ETH_P_IP)) {
5115 struct iphdr *iph = ip_hdr(skb);
5116 iph->tot_len = 0;
5117 iph->check = 0;
5118 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
f0ff4398 5119 0, IPPROTO_TCP, 0);
3d5e33c9
BA
5120 cmd_length = E1000_TXD_CMD_IP;
5121 ipcse = skb_transport_offset(skb) - 1;
8e1e8a47 5122 } else if (skb_is_gso_v6(skb)) {
3d5e33c9
BA
5123 ipv6_hdr(skb)->payload_len = 0;
5124 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
f0ff4398
BA
5125 &ipv6_hdr(skb)->daddr,
5126 0, IPPROTO_TCP, 0);
3d5e33c9
BA
5127 ipcse = 0;
5128 }
5129 ipcss = skb_network_offset(skb);
5130 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
5131 tucss = skb_transport_offset(skb);
5132 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
3d5e33c9
BA
5133
5134 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
f0ff4398 5135 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
3d5e33c9
BA
5136
5137 i = tx_ring->next_to_use;
5138 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
5139 buffer_info = &tx_ring->buffer_info[i];
5140
e80bd1d1
BA
5141 context_desc->lower_setup.ip_fields.ipcss = ipcss;
5142 context_desc->lower_setup.ip_fields.ipcso = ipcso;
5143 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
3d5e33c9
BA
5144 context_desc->upper_setup.tcp_fields.tucss = tucss;
5145 context_desc->upper_setup.tcp_fields.tucso = tucso;
70443ae9 5146 context_desc->upper_setup.tcp_fields.tucse = 0;
e80bd1d1 5147 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
3d5e33c9
BA
5148 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
5149 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
5150
5151 buffer_info->time_stamp = jiffies;
5152 buffer_info->next_to_watch = i;
5153
5154 i++;
5155 if (i == tx_ring->count)
5156 i = 0;
5157 tx_ring->next_to_use = i;
5158
5159 return 1;
bc7f75fa
AK
5160}
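/* Editorial example (not part of netdev.c) of the context-descriptor offsets
 * programmed above for a plain untagged IPv4/TCP TSO frame with no IP or TCP
 * options: ipcss = 14 (start of the IP header), ipcso = 14 + 10 = 24 (IP
 * checksum field), ipcse = 34 - 1 = 33, tucss = 34 (start of the TCP header),
 * tucso = 34 + 16 = 50 (TCP checksum field), hdr_len = 34 + 20 = 54.
 */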
5161
55aa6985 5162static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
bc7f75fa 5163{
55aa6985 5164 struct e1000_adapter *adapter = tx_ring->adapter;
bc7f75fa
AK
5165 struct e1000_context_desc *context_desc;
5166 struct e1000_buffer *buffer_info;
5167 unsigned int i;
5168 u8 css;
af807c82 5169 u32 cmd_len = E1000_TXD_CMD_DEXT;
5f66f208 5170 __be16 protocol;
bc7f75fa 5171
af807c82
DG
5172 if (skb->ip_summed != CHECKSUM_PARTIAL)
5173 return 0;
bc7f75fa 5174
5f66f208
AJ
5175 if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
5176 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
5177 else
5178 protocol = skb->protocol;
5179
3f518390 5180 switch (protocol) {
09640e63 5181 case cpu_to_be16(ETH_P_IP):
af807c82
DG
5182 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
5183 cmd_len |= E1000_TXD_CMD_TCP;
5184 break;
09640e63 5185 case cpu_to_be16(ETH_P_IPV6):
af807c82
DG
5186 /* XXX not handling all IPV6 headers */
5187 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
5188 cmd_len |= E1000_TXD_CMD_TCP;
5189 break;
5190 default:
5191 if (unlikely(net_ratelimit()))
5f66f208
AJ
5192 e_warn("checksum_partial proto=%x!\n",
5193 be16_to_cpu(protocol));
af807c82 5194 break;
bc7f75fa
AK
5195 }
5196
0d0b1672 5197 css = skb_checksum_start_offset(skb);
af807c82
DG
5198
5199 i = tx_ring->next_to_use;
5200 buffer_info = &tx_ring->buffer_info[i];
5201 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
5202
5203 context_desc->lower_setup.ip_config = 0;
5204 context_desc->upper_setup.tcp_fields.tucss = css;
f0ff4398 5205 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
af807c82
DG
5206 context_desc->upper_setup.tcp_fields.tucse = 0;
5207 context_desc->tcp_seg_setup.data = 0;
5208 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
5209
5210 buffer_info->time_stamp = jiffies;
5211 buffer_info->next_to_watch = i;
5212
5213 i++;
5214 if (i == tx_ring->count)
5215 i = 0;
5216 tx_ring->next_to_use = i;
5217
5218 return 1;
bc7f75fa
AK
5219}
5220
55aa6985
BA
5221static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
5222 unsigned int first, unsigned int max_per_txd,
d821a4c4 5223 unsigned int nr_frags)
bc7f75fa 5224{
55aa6985 5225 struct e1000_adapter *adapter = tx_ring->adapter;
03b1320d 5226 struct pci_dev *pdev = adapter->pdev;
1b7719c4 5227 struct e1000_buffer *buffer_info;
8ddc951c 5228 unsigned int len = skb_headlen(skb);
03b1320d 5229 unsigned int offset = 0, size, count = 0, i;
9ed318d5 5230 unsigned int f, bytecount, segs;
bc7f75fa
AK
5231
5232 i = tx_ring->next_to_use;
5233
5234 while (len) {
1b7719c4 5235 buffer_info = &tx_ring->buffer_info[i];
bc7f75fa
AK
5236 size = min(len, max_per_txd);
5237
bc7f75fa 5238 buffer_info->length = size;
bc7f75fa 5239 buffer_info->time_stamp = jiffies;
bc7f75fa 5240 buffer_info->next_to_watch = i;
0be3f55f
NN
5241 buffer_info->dma = dma_map_single(&pdev->dev,
5242 skb->data + offset,
af667a29 5243 size, DMA_TO_DEVICE);
03b1320d 5244 buffer_info->mapped_as_page = false;
0be3f55f 5245 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
03b1320d 5246 goto dma_error;
bc7f75fa
AK
5247
5248 len -= size;
5249 offset += size;
03b1320d 5250 count++;
1b7719c4
AD
5251
5252 if (len) {
5253 i++;
5254 if (i == tx_ring->count)
5255 i = 0;
5256 }
bc7f75fa
AK
5257 }
5258
5259 for (f = 0; f < nr_frags; f++) {
9e903e08 5260 const struct skb_frag_struct *frag;
bc7f75fa
AK
5261
5262 frag = &skb_shinfo(skb)->frags[f];
9e903e08 5263 len = skb_frag_size(frag);
877749bf 5264 offset = 0;
bc7f75fa
AK
5265
5266 while (len) {
1b7719c4
AD
5267 i++;
5268 if (i == tx_ring->count)
5269 i = 0;
5270
bc7f75fa
AK
5271 buffer_info = &tx_ring->buffer_info[i];
5272 size = min(len, max_per_txd);
bc7f75fa
AK
5273
5274 buffer_info->length = size;
5275 buffer_info->time_stamp = jiffies;
bc7f75fa 5276 buffer_info->next_to_watch = i;
877749bf 5277 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
17e813ec
BA
5278 offset, size,
5279 DMA_TO_DEVICE);
03b1320d 5280 buffer_info->mapped_as_page = true;
0be3f55f 5281 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
03b1320d 5282 goto dma_error;
bc7f75fa
AK
5283
5284 len -= size;
5285 offset += size;
5286 count++;
bc7f75fa
AK
5287 }
5288 }
5289
af667a29 5290 segs = skb_shinfo(skb)->gso_segs ? : 1;
9ed318d5
TH
5291 /* multiply data chunks by size of headers */
5292 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
5293
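/* Editorial example of the bytecount formula (illustrative numbers only):
 * a TSO skb with gso_size 1448, three segments and a 54-byte header has
 * skb_headlen = 54 and skb->len = 54 + 3 * 1448 = 4398, so
 * bytecount = (3 - 1) * 54 + 4398 = 4506, i.e. three on-wire frames of
 * 1502 bytes each once the replicated headers are counted.
 */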
bc7f75fa 5294 tx_ring->buffer_info[i].skb = skb;
9ed318d5
TH
5295 tx_ring->buffer_info[i].segs = segs;
5296 tx_ring->buffer_info[i].bytecount = bytecount;
bc7f75fa
AK
5297 tx_ring->buffer_info[first].next_to_watch = i;
5298
5299 return count;
03b1320d
AD
5300
5301dma_error:
af667a29 5302 dev_err(&pdev->dev, "Tx DMA map failed\n");
03b1320d 5303 buffer_info->dma = 0;
c1fa347f 5304 if (count)
03b1320d 5305 count--;
c1fa347f
RK
5306
5307 while (count--) {
af667a29 5308 if (i == 0)
03b1320d 5309 i += tx_ring->count;
c1fa347f 5310 i--;
03b1320d 5311 buffer_info = &tx_ring->buffer_info[i];
55aa6985 5312 e1000_put_txbuf(tx_ring, buffer_info);
03b1320d
AD
5313 }
5314
5315 return 0;
bc7f75fa
AK
5316}
5317
55aa6985 5318static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
bc7f75fa 5319{
55aa6985 5320 struct e1000_adapter *adapter = tx_ring->adapter;
bc7f75fa
AK
5321 struct e1000_tx_desc *tx_desc = NULL;
5322 struct e1000_buffer *buffer_info;
5323 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
5324 unsigned int i;
5325
5326 if (tx_flags & E1000_TX_FLAGS_TSO) {
5327 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
f0ff4398 5328 E1000_TXD_CMD_TSE;
bc7f75fa
AK
5329 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
5330
5331 if (tx_flags & E1000_TX_FLAGS_IPV4)
5332 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
5333 }
5334
5335 if (tx_flags & E1000_TX_FLAGS_CSUM) {
5336 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
5337 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
5338 }
5339
5340 if (tx_flags & E1000_TX_FLAGS_VLAN) {
5341 txd_lower |= E1000_TXD_CMD_VLE;
5342 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
5343 }
5344
943146de
BG
5345 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
5346 txd_lower &= ~(E1000_TXD_CMD_IFCS);
5347
b67e1913
BA
5348 if (unlikely(tx_flags & E1000_TX_FLAGS_HWTSTAMP)) {
5349 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
5350 txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
5351 }
5352
bc7f75fa
AK
5353 i = tx_ring->next_to_use;
5354
36b973df 5355 do {
bc7f75fa
AK
5356 buffer_info = &tx_ring->buffer_info[i];
5357 tx_desc = E1000_TX_DESC(*tx_ring, i);
5358 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
f0ff4398
BA
5359 tx_desc->lower.data = cpu_to_le32(txd_lower |
5360 buffer_info->length);
bc7f75fa
AK
5361 tx_desc->upper.data = cpu_to_le32(txd_upper);
5362
5363 i++;
5364 if (i == tx_ring->count)
5365 i = 0;
36b973df 5366 } while (--count > 0);
bc7f75fa
AK
5367
5368 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
5369
943146de
BG
5370 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
5371 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
5372 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
5373
e921eb1a 5374 /* Force memory writes to complete before letting h/w
bc7f75fa
AK
5375 * know there are new descriptors to fetch. (Only
5376 * applicable for weak-ordered memory model archs,
ad68076e
BA
5377 * such as IA-64).
5378 */
bc7f75fa
AK
5379 wmb();
5380
5381 tx_ring->next_to_use = i;
c6e7f51e
BA
5382
5383 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
55aa6985 5384 e1000e_update_tdt_wa(tx_ring, i);
c6e7f51e 5385 else
c5083cf6 5386 writel(i, tx_ring->tail);
c6e7f51e 5387
e921eb1a 5388 /* we need this if more than one processor can write to our tail
ad68076e
BA
5389 * at a time; it synchronizes IO on IA64/Altix systems
5390 */
bc7f75fa
AK
5391 mmiowb();
5392}
5393
5394#define MINIMUM_DHCP_PACKET_SIZE 282
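/* Editorial note: 282 is consistent with the smallest DHCP request on
 * Ethernet: 14 (Ethernet header) + 20 (minimal IPv4 header) + 8 (UDP header)
 * + 240 (fixed BOOTP fields plus the 4-byte magic cookie) = 282 bytes.
 */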
5395static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
5396 struct sk_buff *skb)
5397{
e80bd1d1 5398 struct e1000_hw *hw = &adapter->hw;
bc7f75fa
AK
5399 u16 length, offset;
5400
d60923c4
BA
5401 if (vlan_tx_tag_present(skb) &&
5402 !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
5403 (adapter->hw.mng_cookie.status &
5404 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
5405 return 0;
bc7f75fa
AK
5406
5407 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
5408 return 0;
5409
53aa82da 5410 if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP))
bc7f75fa
AK
5411 return 0;
5412
5413 {
362e20ca 5414 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);
bc7f75fa
AK
5415 struct udphdr *udp;
5416
5417 if (ip->protocol != IPPROTO_UDP)
5418 return 0;
5419
5420 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
5421 if (ntohs(udp->dest) != 67)
5422 return 0;
5423
5424 offset = (u8 *)udp + 8 - skb->data;
5425 length = skb->len - offset;
5426 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
5427 }
5428
5429 return 0;
5430}
5431
55aa6985 5432static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
bc7f75fa 5433{
55aa6985 5434 struct e1000_adapter *adapter = tx_ring->adapter;
bc7f75fa 5435
55aa6985 5436 netif_stop_queue(adapter->netdev);
e921eb1a 5437 /* Herbert's original patch had:
bc7f75fa 5438 * smp_mb__after_netif_stop_queue();
ad68076e
BA
5439 * but since that doesn't exist yet, just open code it.
5440 */
bc7f75fa
AK
5441 smp_mb();
5442
e921eb1a 5443 /* We need to check again in case another CPU has just
ad68076e
BA
5444 * made room available.
5445 */
55aa6985 5446 if (e1000_desc_unused(tx_ring) < size)
bc7f75fa
AK
5447 return -EBUSY;
5448
5449 /* A reprieve! */
55aa6985 5450 netif_start_queue(adapter->netdev);
bc7f75fa
AK
5451 ++adapter->restart_queue;
5452 return 0;
5453}
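/* Editorial note on the barrier above (the usual reasoning for this pattern,
 * stated here for illustration): the Tx cleanup path only wakes a queue it
 * observes as stopped.  Without the smp_mb() and the re-check of
 * e1000_desc_unused(), descriptors could be freed after this CPU's last
 * check but before the queue-stop became visible, leaving the queue stopped
 * even though room is available.
 */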
5454
55aa6985 5455static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
bc7f75fa 5456{
d821a4c4
BA
5457 BUG_ON(size > tx_ring->count);
5458
55aa6985 5459 if (e1000_desc_unused(tx_ring) >= size)
bc7f75fa 5460 return 0;
55aa6985 5461 return __e1000_maybe_stop_tx(tx_ring, size);
bc7f75fa
AK
5462}
5463
3b29a56d
SH
5464static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5465 struct net_device *netdev)
bc7f75fa
AK
5466{
5467 struct e1000_adapter *adapter = netdev_priv(netdev);
5468 struct e1000_ring *tx_ring = adapter->tx_ring;
5469 unsigned int first;
bc7f75fa 5470 unsigned int tx_flags = 0;
e743d313 5471 unsigned int len = skb_headlen(skb);
4e6c709c
AK
5472 unsigned int nr_frags;
5473 unsigned int mss;
bc7f75fa
AK
5474 int count = 0;
5475 int tso;
5476 unsigned int f;
bc7f75fa
AK
5477
5478 if (test_bit(__E1000_DOWN, &adapter->state)) {
5479 dev_kfree_skb_any(skb);
5480 return NETDEV_TX_OK;
5481 }
5482
5483 if (skb->len <= 0) {
5484 dev_kfree_skb_any(skb);
5485 return NETDEV_TX_OK;
5486 }
5487
e921eb1a 5488 /* The minimum packet size with TCTL.PSP set is 17 bytes so
6e97c170
TD
5489 * pad skb in order to meet this minimum size requirement
5490 */
5491 if (unlikely(skb->len < 17)) {
5492 if (skb_pad(skb, 17 - skb->len))
5493 return NETDEV_TX_OK;
5494 skb->len = 17;
5495 skb_set_tail_pointer(skb, 17);
5496 }
5497
bc7f75fa 5498 mss = skb_shinfo(skb)->gso_size;
bc7f75fa
AK
5499 if (mss) {
5500 u8 hdr_len;
bc7f75fa 5501
e921eb1a 5502 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
ad68076e
BA
5503 * points to just header, pull a few bytes of payload from
5504 * frags into skb->data
5505 */
bc7f75fa 5506 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
e921eb1a 5507 /* we do this workaround for ES2LAN, but it is unnecessary;
ad68076e
BA
5508 * avoiding it could save a lot of cycles
5509 */
4e6c709c 5510 if (skb->data_len && (hdr_len == len)) {
bc7f75fa
AK
5511 unsigned int pull_size;
5512
a2a5b323 5513 pull_size = min_t(unsigned int, 4, skb->data_len);
bc7f75fa 5514 if (!__pskb_pull_tail(skb, pull_size)) {
44defeb3 5515 e_err("__pskb_pull_tail failed.\n");
bc7f75fa
AK
5516 dev_kfree_skb_any(skb);
5517 return NETDEV_TX_OK;
5518 }
e743d313 5519 len = skb_headlen(skb);
bc7f75fa
AK
5520 }
5521 }
5522
5523 /* reserve a descriptor for the offload context */
5524 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
5525 count++;
5526 count++;
5527
d821a4c4 5528 count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
bc7f75fa
AK
5529
5530 nr_frags = skb_shinfo(skb)->nr_frags;
5531 for (f = 0; f < nr_frags; f++)
d821a4c4
BA
5532 count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
5533 adapter->tx_fifo_limit);
bc7f75fa
AK
5534
5535 if (adapter->hw.mac.tx_pkt_filtering)
5536 e1000_transfer_dhcp_info(adapter, skb);
5537
e921eb1a 5538 /* need: count + 2 desc gap to keep tail from touching
ad68076e
BA
5539 * head, otherwise try next time
5540 */
55aa6985 5541 if (e1000_maybe_stop_tx(tx_ring, count + 2))
bc7f75fa 5542 return NETDEV_TX_BUSY;
bc7f75fa 5543
eab6d18d 5544 if (vlan_tx_tag_present(skb)) {
bc7f75fa
AK
5545 tx_flags |= E1000_TX_FLAGS_VLAN;
5546 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
5547 }
5548
5549 first = tx_ring->next_to_use;
5550
55aa6985 5551 tso = e1000_tso(tx_ring, skb);
bc7f75fa
AK
5552 if (tso < 0) {
5553 dev_kfree_skb_any(skb);
bc7f75fa
AK
5554 return NETDEV_TX_OK;
5555 }
5556
5557 if (tso)
5558 tx_flags |= E1000_TX_FLAGS_TSO;
55aa6985 5559 else if (e1000_tx_csum(tx_ring, skb))
bc7f75fa
AK
5560 tx_flags |= E1000_TX_FLAGS_CSUM;
5561
e921eb1a 5562 /* Old method was to assume IPv4 packet by default if TSO was enabled.
bc7f75fa 5563 * 82571 hardware supports TSO capabilities for IPv6 as well...
ad68076e
BA
5564 * so we must no longer assume IPv4 by default.
5565 */
bc7f75fa
AK
5566 if (skb->protocol == htons(ETH_P_IP))
5567 tx_flags |= E1000_TX_FLAGS_IPV4;
5568
943146de
BG
5569 if (unlikely(skb->no_fcs))
5570 tx_flags |= E1000_TX_FLAGS_NO_FCS;
5571
25985edc 5572 /* if count is 0 then a mapping error has occurred */
d821a4c4
BA
5573 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
5574 nr_frags);
1b7719c4 5575 if (count) {
b67e1913
BA
5576 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
5577 !adapter->tx_hwtstamp_skb)) {
5578 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
5579 tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
5580 adapter->tx_hwtstamp_skb = skb_get(skb);
59c871c5 5581 adapter->tx_hwtstamp_start = jiffies;
b67e1913
BA
5582 schedule_work(&adapter->tx_hwtstamp_work);
5583 } else {
5584 skb_tx_timestamp(skb);
5585 }
80be3129 5586
3f0cfa3b 5587 netdev_sent_queue(netdev, skb->len);
55aa6985 5588 e1000_tx_queue(tx_ring, tx_flags, count);
1b7719c4 5589 /* Make sure there is space in the ring for the next send. */
d821a4c4
BA
5590 e1000_maybe_stop_tx(tx_ring,
5591 (MAX_SKB_FRAGS *
5592 DIV_ROUND_UP(PAGE_SIZE,
5593 adapter->tx_fifo_limit) + 2));
1b7719c4 5594 } else {
bc7f75fa 5595 dev_kfree_skb_any(skb);
1b7719c4
AD
5596 tx_ring->buffer_info[first].time_stamp = 0;
5597 tx_ring->next_to_use = first;
bc7f75fa
AK
5598 }
5599
bc7f75fa
AK
5600 return NETDEV_TX_OK;
5601}
5602
5603/**
5604 * e1000_tx_timeout - Respond to a Tx Hang
5605 * @netdev: network interface device structure
5606 **/
5607static void e1000_tx_timeout(struct net_device *netdev)
5608{
5609 struct e1000_adapter *adapter = netdev_priv(netdev);
5610
5611 /* Do the reset outside of interrupt context */
5612 adapter->tx_timeout_count++;
5613 schedule_work(&adapter->reset_task);
5614}
5615
5616static void e1000_reset_task(struct work_struct *work)
5617{
5618 struct e1000_adapter *adapter;
5619 adapter = container_of(work, struct e1000_adapter, reset_task);
5620
615b32af
JB
5621 /* don't run the task if already down */
5622 if (test_bit(__E1000_DOWN, &adapter->state))
5623 return;
5624
12d43f7d 5625 if (!(adapter->flags & FLAG_RESTART_NOW)) {
affa9dfb 5626 e1000e_dump(adapter);
12d43f7d 5627 e_err("Reset adapter unexpectedly\n");
affa9dfb 5628 }
bc7f75fa
AK
5629 e1000e_reinit_locked(adapter);
5630}
5631
5632/**
67fd4fcb 5633 * e1000_get_stats64 - Get System Network Statistics
bc7f75fa 5634 * @netdev: network interface device structure
67fd4fcb 5635 * @stats: rtnl_link_stats64 pointer
bc7f75fa
AK
5636 *
5637 * Returns the address of the device statistics structure.
bc7f75fa 5638 **/
67fd4fcb 5639struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
66501f56 5640 struct rtnl_link_stats64 *stats)
bc7f75fa 5641{
67fd4fcb
JK
5642 struct e1000_adapter *adapter = netdev_priv(netdev);
5643
5644 memset(stats, 0, sizeof(struct rtnl_link_stats64));
5645 spin_lock(&adapter->stats64_lock);
5646 e1000e_update_stats(adapter);
5647 /* Fill out the OS statistics structure */
5648 stats->rx_bytes = adapter->stats.gorc;
5649 stats->rx_packets = adapter->stats.gprc;
5650 stats->tx_bytes = adapter->stats.gotc;
5651 stats->tx_packets = adapter->stats.gptc;
5652 stats->multicast = adapter->stats.mprc;
5653 stats->collisions = adapter->stats.colc;
5654
5655 /* Rx Errors */
5656
e921eb1a 5657 /* RLEC on some newer hardware can be incorrect so build
67fd4fcb
JK
5658 * our own version based on RUC and ROC
5659 */
5660 stats->rx_errors = adapter->stats.rxerrc +
f0ff4398
BA
5661 adapter->stats.crcerrs + adapter->stats.algnerrc +
5662 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
5663 stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc;
67fd4fcb
JK
5664 stats->rx_crc_errors = adapter->stats.crcerrs;
5665 stats->rx_frame_errors = adapter->stats.algnerrc;
5666 stats->rx_missed_errors = adapter->stats.mpc;
5667
5668 /* Tx Errors */
f0ff4398 5669 stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol;
67fd4fcb
JK
5670 stats->tx_aborted_errors = adapter->stats.ecol;
5671 stats->tx_window_errors = adapter->stats.latecol;
5672 stats->tx_carrier_errors = adapter->stats.tncrs;
5673
5674 /* Tx Dropped needs to be maintained elsewhere */
5675
5676 spin_unlock(&adapter->stats64_lock);
5677 return stats;
bc7f75fa
AK
5678}
5679
5680/**
5681 * e1000_change_mtu - Change the Maximum Transfer Unit
5682 * @netdev: network interface device structure
5683 * @new_mtu: new value for maximum frame size
5684 *
5685 * Returns 0 on success, negative on failure
5686 **/
5687static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5688{
5689 struct e1000_adapter *adapter = netdev_priv(netdev);
c751a3d5 5690 int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;
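/* Editorial example: for the default MTU of 1500 this evaluates to
 * max_frame = 1500 + 4 (VLAN_HLEN) + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1522,
 * the largest single VLAN-tagged frame on the wire.
 */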
bc7f75fa 5691
2adc55c9 5692 /* Jumbo frame support */
2e1706f2
BA
5693 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
5694 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
5695 e_err("Jumbo Frames not supported.\n");
5696 return -EINVAL;
bc7f75fa
AK
5697 }
5698
2adc55c9
BA
5699 /* Supported frame sizes */
5700 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
5701 (max_frame > adapter->max_hw_frame_size)) {
5702 e_err("Unsupported MTU setting\n");
bc7f75fa
AK
5703 return -EINVAL;
5704 }
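/* Editorial example: with ETH_ZLEN = 60, the check above rejects any MTU
 * below 60 + 4 + 4 = 68 bytes.
 */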
5705
2fbe4526
BA
5706 /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
5707 if ((adapter->hw.mac.type >= e1000_pch2lan) &&
a1ce6473
BA
5708 !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
5709 (new_mtu > ETH_DATA_LEN)) {
2fbe4526 5710 e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n");
a1ce6473
BA
5711 return -EINVAL;
5712 }
5713
bc7f75fa 5714 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
1bba4386 5715 usleep_range(1000, 2000);
610c9928 5716 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
318a94d6 5717 adapter->max_frame_size = max_frame;
610c9928
BA
5718 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5719 netdev->mtu = new_mtu;
63eb48f1
DE
5720
5721 pm_runtime_get_sync(netdev->dev.parent);
5722
bc7f75fa 5723 if (netif_running(netdev))
28002099 5724 e1000e_down(adapter, true);
bc7f75fa 5725
e921eb1a 5726 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
bc7f75fa
AK
5727 * means we reserve 2 more; this pushes us to allocate from the next
5728 * larger slab size.
ad68076e 5729 * i.e. RXBUFFER_2048 --> size-4096 slab
97ac8cae
BA
5730 * However with the new *_jumbo_rx* routines, jumbo receives will use
5731 * fragmented skbs
ad68076e 5732 */
bc7f75fa 5733
9926146b 5734 if (max_frame <= 2048)
bc7f75fa
AK
5735 adapter->rx_buffer_len = 2048;
5736 else
5737 adapter->rx_buffer_len = 4096;
5738
5739 /* adjust allocation if LPE protects us, and we aren't using SBP */
5740 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
17e813ec 5741 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
bc7f75fa 5742 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
17e813ec 5743 + ETH_FCS_LEN;
bc7f75fa 5744
bc7f75fa
AK
5745 if (netif_running(netdev))
5746 e1000e_up(adapter);
5747 else
5748 e1000e_reset(adapter);
5749
63eb48f1
DE
5750 pm_runtime_put_sync(netdev->dev.parent);
5751
bc7f75fa
AK
5752 clear_bit(__E1000_RESETTING, &adapter->state);
5753
5754 return 0;
5755}
5756
5757static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
5758 int cmd)
5759{
5760 struct e1000_adapter *adapter = netdev_priv(netdev);
5761 struct mii_ioctl_data *data = if_mii(ifr);
bc7f75fa 5762
318a94d6 5763 if (adapter->hw.phy.media_type != e1000_media_type_copper)
bc7f75fa
AK
5764 return -EOPNOTSUPP;
5765
5766 switch (cmd) {
5767 case SIOCGMIIPHY:
5768 data->phy_id = adapter->hw.phy.addr;
5769 break;
5770 case SIOCGMIIREG:
b16a002e
BA
5771 e1000_phy_read_status(adapter);
5772
7c25769f
BA
5773 switch (data->reg_num & 0x1F) {
5774 case MII_BMCR:
5775 data->val_out = adapter->phy_regs.bmcr;
5776 break;
5777 case MII_BMSR:
5778 data->val_out = adapter->phy_regs.bmsr;
5779 break;
5780 case MII_PHYSID1:
5781 data->val_out = (adapter->hw.phy.id >> 16);
5782 break;
5783 case MII_PHYSID2:
5784 data->val_out = (adapter->hw.phy.id & 0xFFFF);
5785 break;
5786 case MII_ADVERTISE:
5787 data->val_out = adapter->phy_regs.advertise;
5788 break;
5789 case MII_LPA:
5790 data->val_out = adapter->phy_regs.lpa;
5791 break;
5792 case MII_EXPANSION:
5793 data->val_out = adapter->phy_regs.expansion;
5794 break;
5795 case MII_CTRL1000:
5796 data->val_out = adapter->phy_regs.ctrl1000;
5797 break;
5798 case MII_STAT1000:
5799 data->val_out = adapter->phy_regs.stat1000;
5800 break;
5801 case MII_ESTATUS:
5802 data->val_out = adapter->phy_regs.estatus;
5803 break;
5804 default:
bc7f75fa
AK
5805 return -EIO;
5806 }
bc7f75fa
AK
5807 break;
5808 case SIOCSMIIREG:
5809 default:
5810 return -EOPNOTSUPP;
5811 }
5812 return 0;
5813}
5814
b67e1913
BA
5815/**
5816 * e1000e_hwtstamp_ioctl - control hardware time stamping
5817 * @netdev: network interface device structure
5818 * @ifreq: interface request
5819 *
5820 * Outgoing time stamping can be enabled and disabled. Play nice and
5821 * disable it when requested, although it shouldn't cause any overhead
5822 * when no packet needs it. At most one packet in the queue may be
5823 * marked for time stamping, otherwise it would be impossible to tell
5824 * for sure to which packet the hardware time stamp belongs.
5825 *
5826 * Incoming time stamping has to be configured via the hardware filters.
5827 * Not all combinations are supported, in particular event type has to be
5828 * specified. Matching the kind of event packet is not supported, with the
5829 * exception of "all V2 events regardless of level 2 or 4".
5830 **/
4e8cff64 5831static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
b67e1913
BA
5832{
5833 struct e1000_adapter *adapter = netdev_priv(netdev);
5834 struct hwtstamp_config config;
5835 int ret_val;
5836
5837 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
5838 return -EFAULT;
5839
62d7e3a2 5840 ret_val = e1000e_config_hwtstamp(adapter, &config);
b67e1913
BA
5841 if (ret_val)
5842 return ret_val;
5843
d89777bf
BA
5844 switch (config.rx_filter) {
5845 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
5846 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
5847 case HWTSTAMP_FILTER_PTP_V2_SYNC:
5848 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
5849 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
5850 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
5851 /* With V2 type filters which specify a Sync or Delay Request,
5852 * Path Delay Request/Response messages are also time stamped
5853 * by hardware, so notify the caller that the requested packets plus
5854 * some others are time stamped.
5855 */
5856 config.rx_filter = HWTSTAMP_FILTER_SOME;
5857 break;
5858 default:
5859 break;
5860 }
5861
b67e1913
BA
5862 return copy_to_user(ifr->ifr_data, &config,
5863 sizeof(config)) ? -EFAULT : 0;
5864}
5865
4e8cff64
BH
5866static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
5867{
5868 struct e1000_adapter *adapter = netdev_priv(netdev);
5869
5870 return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
5871 sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
5872}
5873
bc7f75fa
AK
5874static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5875{
5876 switch (cmd) {
5877 case SIOCGMIIPHY:
5878 case SIOCGMIIREG:
5879 case SIOCSMIIREG:
5880 return e1000_mii_ioctl(netdev, ifr, cmd);
b67e1913 5881 case SIOCSHWTSTAMP:
4e8cff64
BH
5882 return e1000e_hwtstamp_set(netdev, ifr);
5883 case SIOCGHWTSTAMP:
5884 return e1000e_hwtstamp_get(netdev, ifr);
bc7f75fa
AK
5885 default:
5886 return -EOPNOTSUPP;
5887 }
5888}
5889
a4f58f54
BA
5890static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
5891{
5892 struct e1000_hw *hw = &adapter->hw;
74f350ee 5893 u32 i, mac_reg, wuc;
2b6b168d 5894 u16 phy_reg, wuc_enable;
70806a7f 5895 int retval;
a4f58f54
BA
5896
5897 /* copy MAC RARs to PHY RARs */
d3738bb8 5898 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
a4f58f54 5899
2b6b168d
BA
5900 retval = hw->phy.ops.acquire(hw);
5901 if (retval) {
5902 e_err("Could not acquire PHY\n");
5903 return retval;
5904 }
5905
5906 /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */
5907 retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
5908 if (retval)
75ce1532 5909 goto release;
2b6b168d
BA
5910
5911 /* copy MAC MTA to PHY MTA - only needed for pchlan */
a4f58f54
BA
5912 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
5913 mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
2b6b168d
BA
5914 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
5915 (u16)(mac_reg & 0xFFFF));
5916 hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
5917 (u16)((mac_reg >> 16) & 0xFFFF));
a4f58f54
BA
5918 }
5919
5920 /* configure PHY Rx Control register */
2b6b168d 5921 hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
a4f58f54
BA
5922 mac_reg = er32(RCTL);
5923 if (mac_reg & E1000_RCTL_UPE)
5924 phy_reg |= BM_RCTL_UPE;
5925 if (mac_reg & E1000_RCTL_MPE)
5926 phy_reg |= BM_RCTL_MPE;
5927 phy_reg &= ~(BM_RCTL_MO_MASK);
5928 if (mac_reg & E1000_RCTL_MO_3)
5929 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
17e813ec 5930 << BM_RCTL_MO_SHIFT);
a4f58f54
BA
5931 if (mac_reg & E1000_RCTL_BAM)
5932 phy_reg |= BM_RCTL_BAM;
5933 if (mac_reg & E1000_RCTL_PMCF)
5934 phy_reg |= BM_RCTL_PMCF;
5935 mac_reg = er32(CTRL);
5936 if (mac_reg & E1000_CTRL_RFCE)
5937 phy_reg |= BM_RCTL_RFCE;
2b6b168d 5938 hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
a4f58f54 5939
74f350ee
DE
5940 wuc = E1000_WUC_PME_EN;
5941 if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC))
5942 wuc |= E1000_WUC_APME;
5943
a4f58f54
BA
5944 /* enable PHY wakeup in MAC register */
5945 ew32(WUFC, wufc);
74f350ee
DE
5946 ew32(WUC, (E1000_WUC_PHY_WAKE | E1000_WUC_APMPME |
5947 E1000_WUC_PME_STATUS | wuc));
a4f58f54
BA
5948
5949 /* configure and enable PHY wakeup in PHY registers */
2b6b168d 5950 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
74f350ee 5951 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc);
a4f58f54
BA
5952
5953 /* activate PHY wakeup */
2b6b168d
BA
5954 wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
5955 retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
a4f58f54
BA
5956 if (retval)
5957 e_err("Could not set PHY Host Wakeup bit\n");
75ce1532 5958release:
94d8186a 5959 hw->phy.ops.release(hw);
a4f58f54
BA
5960
5961 return retval;
5962}
5963
28002099 5964static int e1000e_pm_freeze(struct device *dev)
bc7f75fa 5965{
28002099 5966 struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
bc7f75fa 5967 struct e1000_adapter *adapter = netdev_priv(netdev);
bc7f75fa
AK
5968
5969 netif_device_detach(netdev);
5970
5971 if (netif_running(netdev)) {
bb9e44d0
BA
5972 int count = E1000_CHECK_RESET_COUNT;
5973
5974 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
5975 usleep_range(10000, 20000);
5976
bc7f75fa 5977 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
28002099
DE
5978
5979 /* Quiesce the device without resetting the hardware */
5980 e1000e_down(adapter, false);
bc7f75fa
AK
5981 e1000_free_irq(adapter);
5982 }
4662e82b 5983 e1000e_reset_interrupt_capability(adapter);
bc7f75fa 5984
28002099
DE
5985 /* Allow time for pending master requests to run */
5986 e1000e_disable_pcie_master(&adapter->hw);
5987
5988 return 0;
5989}
5990
5991static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
5992{
5993 struct net_device *netdev = pci_get_drvdata(pdev);
5994 struct e1000_adapter *adapter = netdev_priv(netdev);
5995 struct e1000_hw *hw = &adapter->hw;
5996 u32 ctrl, ctrl_ext, rctl, status;
5997 /* Runtime suspend should only enable wakeup for link changes */
5998 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
5999 int retval = 0;
6000
bc7f75fa
AK
6001 status = er32(STATUS);
6002 if (status & E1000_STATUS_LU)
6003 wufc &= ~E1000_WUFC_LNKC;
6004
6005 if (wufc) {
6006 e1000_setup_rctl(adapter);
ef9b965a 6007 e1000e_set_rx_mode(netdev);
bc7f75fa
AK
6008
6009 /* turn on all-multi mode if wake on multicast is enabled */
6010 if (wufc & E1000_WUFC_MC) {
6011 rctl = er32(RCTL);
6012 rctl |= E1000_RCTL_MPE;
6013 ew32(RCTL, rctl);
6014 }
6015
6016 ctrl = er32(CTRL);
a4f58f54
BA
6017 ctrl |= E1000_CTRL_ADVD3WUC;
6018 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
6019 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
bc7f75fa
AK
6020 ew32(CTRL, ctrl);
6021
318a94d6
JK
6022 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
6023 adapter->hw.phy.media_type ==
6024 e1000_media_type_internal_serdes) {
bc7f75fa
AK
6025 /* keep the laser running in D3 */
6026 ctrl_ext = er32(CTRL_EXT);
93a23f48 6027 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
bc7f75fa
AK
6028 ew32(CTRL_EXT, ctrl_ext);
6029 }
6030
63eb48f1
DE
6031 if (!runtime)
6032 e1000e_power_up_phy(adapter);
6033
97ac8cae 6034 if (adapter->flags & FLAG_IS_ICH)
99730e4c 6035 e1000_suspend_workarounds_ich8lan(&adapter->hw);
97ac8cae 6036
82776a4b 6037 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
a4f58f54
BA
6038 /* enable wakeup by the PHY */
6039 retval = e1000_init_phy_wakeup(adapter, wufc);
6040 if (retval)
6041 return retval;
6042 } else {
6043 /* enable wakeup by the MAC */
6044 ew32(WUFC, wufc);
6045 ew32(WUC, E1000_WUC_PME_EN);
6046 }
bc7f75fa
AK
6047 } else {
6048 ew32(WUC, 0);
6049 ew32(WUFC, 0);
28002099
DE
6050
6051 e1000_power_down_phy(adapter);
bc7f75fa
AK
6052 }
6053
74f350ee 6054 if (adapter->hw.phy.type == e1000_phy_igp_3) {
bc7f75fa 6055 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
74f350ee
DE
6056 } else if (hw->mac.type == e1000_pch_lpt) {
6057 if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
6058 /* ULP does not support wake from unicast, multicast
6059 * or broadcast.
6060 */
6061 retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
6062
6063 if (retval)
6064 return retval;
6065 }
6066
bc7f75fa 6067
e921eb1a 6068 /* Release control of h/w to f/w. If f/w is AMT enabled, this
ad68076e
BA
6069 * would have already happened in close and is redundant.
6070 */
31dbe5b4 6071 e1000e_release_hw_control(adapter);
bc7f75fa 6072
24b41c97
DN
6073 pci_clear_master(pdev);
6074
e921eb1a 6075 /* The pci-e switch on some quad port adapters will report a
005cbdfc
AD
6076 * correctable error when the MAC transitions from D0 to D3. To
6077 * prevent this we need to mask off the correctable errors on the
6078 * downstream port of the pci-e switch.
e8c254c5
LZ
6079 *
6080 * We don't have the associated upstream bridge while assigning
6081 * the PCI device into a guest. For example, KVM on POWER is
6082 * one such case.
005cbdfc
AD
6083 */
6084 if (adapter->flags & FLAG_IS_QUAD_PORT) {
6085 struct pci_dev *us_dev = pdev->bus->self;
005cbdfc
AD
6086 u16 devctl;
6087
e8c254c5
LZ
6088 if (!us_dev)
6089 return 0;
6090
f8c0fcac
JL
6091 pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
6092 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
6093 (devctl & ~PCI_EXP_DEVCTL_CERE));
005cbdfc 6094
66148bab
KK
6095 pci_save_state(pdev);
6096 pci_prepare_to_sleep(pdev);
005cbdfc 6097
f8c0fcac 6098 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
005cbdfc 6099 }
66148bab
KK
6100
6101 return 0;
bc7f75fa
AK
6102}
6103
13129d9b
CW
6104/**
6105 * e1000e_disable_aspm - Disable ASPM states
6106 * @pdev: pointer to PCI device struct
6107 * @state: bit-mask of ASPM states to disable
6108 *
6109 * Some devices *must* have certain ASPM states disabled per hardware errata.
6110 **/
6111static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
6f461f6c 6112{
13129d9b
CW
6113 struct pci_dev *parent = pdev->bus->self;
6114 u16 aspm_dis_mask = 0;
6115 u16 pdev_aspmc, parent_aspmc;
6116
6117 switch (state) {
6118 case PCIE_LINK_STATE_L0S:
6119 case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1:
6120 aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S;
6121 /* fall-through - can't have L1 without L0s */
6122 case PCIE_LINK_STATE_L1:
6123 aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1;
6124 break;
6125 default:
6126 return;
6127 }
6128
6129 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
6130 pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6131
6132 if (parent) {
6133 pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
6134 &parent_aspmc);
6135 parent_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6136 }
6137
6138 /* Nothing to do if the ASPM states to be disabled already are */
6139 if (!(pdev_aspmc & aspm_dis_mask) &&
6140 (!parent || !(parent_aspmc & aspm_dis_mask)))
6141 return;
6142
6143 dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
6144 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ?
6145 "L0s" : "",
6146 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ?
6147 "L1" : "");
6148
6149#ifdef CONFIG_PCIEASPM
9f728f53 6150 pci_disable_link_state_locked(pdev, state);
ffe0b2ff 6151
13129d9b
CW
6152 /* Double-check ASPM control. If not disabled by the above, the
6153 * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
6154 * not enabled); override by writing PCI config space directly.
6155 */
6156 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
6157 pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6158
6159 if (!(aspm_dis_mask & pdev_aspmc))
6160 return;
6161#endif
ffe0b2ff 6162
e921eb1a 6163 /* Both device and parent should have the same ASPM setting.
6f461f6c 6164 * Disable ASPM in downstream component first and then upstream.
1eae4eb2 6165 */
13129d9b 6166 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask);
6f461f6c 6167
13129d9b
CW
6168 if (parent)
6169 pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
6170 aspm_dis_mask);
1eae4eb2
AK
6171}
6172
aa338601 6173#ifdef CONFIG_PM
23606cf5 6174static int __e1000_resume(struct pci_dev *pdev)
bc7f75fa
AK
6175{
6176 struct net_device *netdev = pci_get_drvdata(pdev);
6177 struct e1000_adapter *adapter = netdev_priv(netdev);
6178 struct e1000_hw *hw = &adapter->hw;
78cd29d5 6179 u16 aspm_disable_flag = 0;
bc7f75fa 6180
78cd29d5
BA
6181 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
6182 aspm_disable_flag = PCIE_LINK_STATE_L0S;
6183 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
6184 aspm_disable_flag |= PCIE_LINK_STATE_L1;
6185 if (aspm_disable_flag)
6186 e1000e_disable_aspm(pdev, aspm_disable_flag);
6187
66148bab 6188 pci_set_master(pdev);
6e4f6f6b 6189
2fbe4526 6190 if (hw->mac.type >= e1000_pch2lan)
99730e4c
BA
6191 e1000_resume_workarounds_pchlan(&adapter->hw);
6192
bc7f75fa 6193 e1000e_power_up_phy(adapter);
a4f58f54
BA
6194
6195 /* report the system wakeup cause from S3/S4 */
6196 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
6197 u16 phy_data;
6198
6199 e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
6200 if (phy_data) {
6201 e_info("PHY Wakeup cause - %s\n",
17e813ec
BA
6202 phy_data & E1000_WUS_EX ? "Unicast Packet" :
6203 phy_data & E1000_WUS_MC ? "Multicast Packet" :
6204 phy_data & E1000_WUS_BC ? "Broadcast Packet" :
6205 phy_data & E1000_WUS_MAG ? "Magic Packet" :
6206 phy_data & E1000_WUS_LNKC ?
6207 "Link Status Change" : "other");
a4f58f54
BA
6208 }
6209 e1e_wphy(&adapter->hw, BM_WUS, ~0);
6210 } else {
6211 u32 wus = er32(WUS);
6212 if (wus) {
6213 e_info("MAC Wakeup cause - %s\n",
17e813ec
BA
6214 wus & E1000_WUS_EX ? "Unicast Packet" :
6215 wus & E1000_WUS_MC ? "Multicast Packet" :
6216 wus & E1000_WUS_BC ? "Broadcast Packet" :
6217 wus & E1000_WUS_MAG ? "Magic Packet" :
6218 wus & E1000_WUS_LNKC ? "Link Status Change" :
6219 "other");
a4f58f54
BA
6220 }
6221 ew32(WUS, ~0);
6222 }
6223
bc7f75fa 6224 e1000e_reset(adapter);
bc7f75fa 6225
cd791618 6226 e1000_init_manageability_pt(adapter);
bc7f75fa 6227
e921eb1a 6228 /* If the controller has AMT, do not set DRV_LOAD until the interface
bc7f75fa 6229 * is up. For all other cases, let the f/w know that the h/w is now
ad68076e
BA
6230 * under the control of the driver.
6231 */
c43bc57e 6232 if (!(adapter->flags & FLAG_HAS_AMT))
31dbe5b4 6233 e1000e_get_hw_control(adapter);
bc7f75fa
AK
6234
6235 return 0;
6236}
23606cf5 6237
28002099
DE
6238static int e1000e_pm_thaw(struct device *dev)
6239{
6240 struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
6241 struct e1000_adapter *adapter = netdev_priv(netdev);
6242
6243 e1000e_set_interrupt_capability(adapter);
6244 if (netif_running(netdev)) {
6245 u32 err = e1000_request_irq(adapter);
6246
6247 if (err)
6248 return err;
6249
6250 e1000e_up(adapter);
6251 }
6252
6253 netif_device_attach(netdev);
6254
6255 return 0;
6256}
6257
38a529b5 6258#ifdef CONFIG_PM_SLEEP
28002099 6259static int e1000e_pm_suspend(struct device *dev)
a0340162
RW
6260{
6261 struct pci_dev *pdev = to_pci_dev(dev);
a0340162 6262
28002099
DE
6263 e1000e_pm_freeze(dev);
6264
66148bab 6265 return __e1000_shutdown(pdev, false);
a0340162
RW
6266}
6267
28002099 6268static int e1000e_pm_resume(struct device *dev)
23606cf5
RW
6269{
6270 struct pci_dev *pdev = to_pci_dev(dev);
28002099 6271 int rc;
23606cf5 6272
28002099
DE
6273 rc = __e1000_resume(pdev);
6274 if (rc)
6275 return rc;
23606cf5 6276
28002099 6277 return e1000e_pm_thaw(dev);
23606cf5 6278}
38a529b5 6279#endif /* CONFIG_PM_SLEEP */
a0340162
RW
6280
6281#ifdef CONFIG_PM_RUNTIME
63eb48f1 6282static int e1000e_pm_runtime_idle(struct device *dev)
a0340162
RW
6283{
6284 struct pci_dev *pdev = to_pci_dev(dev);
6285 struct net_device *netdev = pci_get_drvdata(pdev);
6286 struct e1000_adapter *adapter = netdev_priv(netdev);
6287
63eb48f1
DE
6288 if (!e1000e_has_link(adapter))
6289 pm_schedule_suspend(dev, 5 * MSEC_PER_SEC);
a0340162 6290
63eb48f1 6291 return -EBUSY;
a0340162
RW
6292}
6293
63eb48f1 6294static int e1000e_pm_runtime_resume(struct device *dev)
a0340162
RW
6295{
6296 struct pci_dev *pdev = to_pci_dev(dev);
6297 struct net_device *netdev = pci_get_drvdata(pdev);
6298 struct e1000_adapter *adapter = netdev_priv(netdev);
63eb48f1 6299 int rc;
a0340162 6300
63eb48f1
DE
6301 rc = __e1000_resume(pdev);
6302 if (rc)
6303 return rc;
a0340162 6304
63eb48f1
DE
6305 if (netdev->flags & IFF_UP)
6306 rc = e1000e_up(adapter);
a0340162 6307
63eb48f1 6308 return rc;
a0340162 6309}
23606cf5 6310
63eb48f1 6311static int e1000e_pm_runtime_suspend(struct device *dev)
23606cf5
RW
6312{
6313 struct pci_dev *pdev = to_pci_dev(dev);
6314 struct net_device *netdev = pci_get_drvdata(pdev);
6315 struct e1000_adapter *adapter = netdev_priv(netdev);
6316
63eb48f1
DE
6317 if (netdev->flags & IFF_UP) {
6318 int count = E1000_CHECK_RESET_COUNT;
6319
6320 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
6321 usleep_range(10000, 20000);
23606cf5 6322
63eb48f1
DE
6323 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
6324
6325 /* Down the device without resetting the hardware */
6326 e1000e_down(adapter, false);
6327 }
6328
6329 if (__e1000_shutdown(pdev, true)) {
6330 e1000e_pm_runtime_resume(dev);
6331 return -EBUSY;
6332 }
6333
6334 return 0;
23606cf5 6335}
a0340162 6336#endif /* CONFIG_PM_RUNTIME */
aa338601 6337#endif /* CONFIG_PM */
bc7f75fa
AK
6338
6339static void e1000_shutdown(struct pci_dev *pdev)
6340{
28002099
DE
6341 e1000e_pm_freeze(&pdev->dev);
6342
66148bab 6343 __e1000_shutdown(pdev, false);
bc7f75fa
AK
6344}
6345
6346#ifdef CONFIG_NET_POLL_CONTROLLER
147b2c8c 6347
8bb62869 6348static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data)
147b2c8c
DD
6349{
6350 struct net_device *netdev = data;
6351 struct e1000_adapter *adapter = netdev_priv(netdev);
147b2c8c
DD
6352
6353 if (adapter->msix_entries) {
90da0669
BA
6354 int vector, msix_irq;
6355
147b2c8c
DD
6356 vector = 0;
6357 msix_irq = adapter->msix_entries[vector].vector;
6358 disable_irq(msix_irq);
6359 e1000_intr_msix_rx(msix_irq, netdev);
6360 enable_irq(msix_irq);
6361
6362 vector++;
6363 msix_irq = adapter->msix_entries[vector].vector;
6364 disable_irq(msix_irq);
6365 e1000_intr_msix_tx(msix_irq, netdev);
6366 enable_irq(msix_irq);
6367
6368 vector++;
6369 msix_irq = adapter->msix_entries[vector].vector;
6370 disable_irq(msix_irq);
6371 e1000_msix_other(msix_irq, netdev);
6372 enable_irq(msix_irq);
6373 }
6374
6375 return IRQ_HANDLED;
6376}
6377
e921eb1a
BA
6378/**
6379 * e1000_netpoll
6380 * @netdev: network interface device structure
6381 *
bc7f75fa
AK
6382 * Polling 'interrupt' - used by things like netconsole to send skbs
6383 * without having to re-enable interrupts. It's not called while
6384 * the interrupt routine is executing.
6385 */
6386static void e1000_netpoll(struct net_device *netdev)
6387{
6388 struct e1000_adapter *adapter = netdev_priv(netdev);
6389
147b2c8c
DD
6390 switch (adapter->int_mode) {
6391 case E1000E_INT_MODE_MSIX:
6392 e1000_intr_msix(adapter->pdev->irq, netdev);
6393 break;
6394 case E1000E_INT_MODE_MSI:
6395 disable_irq(adapter->pdev->irq);
6396 e1000_intr_msi(adapter->pdev->irq, netdev);
6397 enable_irq(adapter->pdev->irq);
6398 break;
e80bd1d1 6399 default: /* E1000E_INT_MODE_LEGACY */
147b2c8c
DD
6400 disable_irq(adapter->pdev->irq);
6401 e1000_intr(adapter->pdev->irq, netdev);
6402 enable_irq(adapter->pdev->irq);
6403 break;
6404 }
bc7f75fa
AK
6405}
6406#endif
6407
6408/**
6409 * e1000_io_error_detected - called when PCI error is detected
6410 * @pdev: Pointer to PCI device
6411 * @state: The current pci connection state
6412 *
6413 * This function is called after a PCI bus error affecting
6414 * this device has been detected.
6415 */
6416static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
6417 pci_channel_state_t state)
6418{
6419 struct net_device *netdev = pci_get_drvdata(pdev);
6420 struct e1000_adapter *adapter = netdev_priv(netdev);
6421
6422 netif_device_detach(netdev);
6423
c93b5a76
MM
6424 if (state == pci_channel_io_perm_failure)
6425 return PCI_ERS_RESULT_DISCONNECT;
6426
bc7f75fa 6427 if (netif_running(netdev))
28002099 6428 e1000e_down(adapter, true);
bc7f75fa
AK
6429 pci_disable_device(pdev);
6430
6431 /* Request a slot reset. */
6432 return PCI_ERS_RESULT_NEED_RESET;
6433}
6434
6435/**
6436 * e1000_io_slot_reset - called after the pci bus has been reset.
6437 * @pdev: Pointer to PCI device
6438 *
6439 * Restart the card from scratch, as if from a cold boot. Implementation
28002099 6440 * resembles the first half of the e1000e_pm_resume routine.
bc7f75fa
AK
6441 */
6442static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
6443{
6444 struct net_device *netdev = pci_get_drvdata(pdev);
6445 struct e1000_adapter *adapter = netdev_priv(netdev);
6446 struct e1000_hw *hw = &adapter->hw;
78cd29d5 6447 u16 aspm_disable_flag = 0;
6e4f6f6b 6448 int err;
111b9dc5 6449 pci_ers_result_t result;
bc7f75fa 6450
78cd29d5
BA
6451 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
6452 aspm_disable_flag = PCIE_LINK_STATE_L0S;
6f461f6c 6453 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
78cd29d5
BA
6454 aspm_disable_flag |= PCIE_LINK_STATE_L1;
6455 if (aspm_disable_flag)
6456 e1000e_disable_aspm(pdev, aspm_disable_flag);
6457
f0f422e5 6458 err = pci_enable_device_mem(pdev);
6e4f6f6b 6459 if (err) {
bc7f75fa
AK
6460 dev_err(&pdev->dev,
6461 "Cannot re-enable PCI device after reset.\n");
111b9dc5
JB
6462 result = PCI_ERS_RESULT_DISCONNECT;
6463 } else {
23606cf5 6464 pdev->state_saved = true;
111b9dc5 6465 pci_restore_state(pdev);
66148bab 6466 pci_set_master(pdev);
bc7f75fa 6467
111b9dc5
JB
6468 pci_enable_wake(pdev, PCI_D3hot, 0);
6469 pci_enable_wake(pdev, PCI_D3cold, 0);
bc7f75fa 6470
111b9dc5
JB
6471 e1000e_reset(adapter);
6472 ew32(WUS, ~0);
6473 result = PCI_ERS_RESULT_RECOVERED;
6474 }
bc7f75fa 6475
111b9dc5
JB
6476 pci_cleanup_aer_uncorrect_error_status(pdev);
6477
6478 return result;
bc7f75fa
AK
6479}
6480
6481/**
6482 * e1000_io_resume - called when traffic can start flowing again.
6483 * @pdev: Pointer to PCI device
6484 *
6485 * This callback is called when the error recovery driver tells us that
6486 * it's OK to resume normal operation. Implementation resembles the
28002099 6487 * second half of the e1000e_pm_resume routine.
bc7f75fa
AK
6488 */
6489static void e1000_io_resume(struct pci_dev *pdev)
6490{
6491 struct net_device *netdev = pci_get_drvdata(pdev);
6492 struct e1000_adapter *adapter = netdev_priv(netdev);
6493
cd791618 6494 e1000_init_manageability_pt(adapter);
bc7f75fa
AK
6495
6496 if (netif_running(netdev)) {
6497 if (e1000e_up(adapter)) {
6498 dev_err(&pdev->dev,
6499 "can't bring device back up after reset\n");
6500 return;
6501 }
6502 }
6503
6504 netif_device_attach(netdev);
6505
e921eb1a 6506 /* If the controller has AMT, do not set DRV_LOAD until the interface
bc7f75fa 6507 * is up. For all other cases, let the f/w know that the h/w is now
ad68076e
BA
6508 * under the control of the driver.
6509 */
c43bc57e 6510 if (!(adapter->flags & FLAG_HAS_AMT))
31dbe5b4 6511 e1000e_get_hw_control(adapter);
bc7f75fa
AK
6512}
6513
6514static void e1000_print_device_info(struct e1000_adapter *adapter)
6515{
6516 struct e1000_hw *hw = &adapter->hw;
6517 struct net_device *netdev = adapter->netdev;
073287c0
BA
6518 u32 ret_val;
6519 u8 pba_str[E1000_PBANUM_LENGTH];
bc7f75fa
AK
6520
6521 /* print bus type/speed/width info */
a5cc7642 6522 e_info("(PCI Express:2.5GT/s:%s) %pM\n",
44defeb3
JK
6523 /* bus width */
6524 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
f0ff4398 6525 "Width x1"),
44defeb3 6526 /* MAC address */
7c510e4b 6527 netdev->dev_addr);
44defeb3
JK
6528 e_info("Intel(R) PRO/%s Network Connection\n",
6529 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
073287c0
BA
6530 ret_val = e1000_read_pba_string_generic(hw, pba_str,
6531 E1000_PBANUM_LENGTH);
6532 if (ret_val)
f2315bf1 6533 strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
073287c0
BA
6534 e_info("MAC: %d, PHY: %d, PBA No: %s\n",
6535 hw->mac.type, hw->phy.type, pba_str);
bc7f75fa
AK
6536}
6537
10aa4c04
AK
6538static void e1000_eeprom_checks(struct e1000_adapter *adapter)
6539{
6540 struct e1000_hw *hw = &adapter->hw;
6541 int ret_val;
6542 u16 buf = 0;
6543
6544 if (hw->mac.type != e1000_82573)
6545 return;
6546
6547 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
e885d762
BA
6548 le16_to_cpus(&buf);
6549 if (!ret_val && (!(buf & (1 << 0)))) {
10aa4c04 6550 /* Deep Smart Power Down (DSPD) */
6c2a9efa
FP
6551 dev_warn(&adapter->pdev->dev,
6552 "Warning: detected DSPD enabled in EEPROM\n");
10aa4c04 6553 }
10aa4c04
AK
6554}
6555
c8f44aff 6556static int e1000_set_features(struct net_device *netdev,
70495a50 6557 netdev_features_t features)
dc221294
BA
6558{
6559 struct e1000_adapter *adapter = netdev_priv(netdev);
c8f44aff 6560 netdev_features_t changed = features ^ netdev->features;
dc221294
BA
6561
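	/* Remember any explicit TSO change made by the user. */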
6562 if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
6563 adapter->flags |= FLAG_TSO_FORCE;
6564
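	/* Only the features checked below require the hardware to be
	 * reconfigured; any other change can take effect without a reset.
	 */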
f646968f 6565 if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
cf955e6c
BG
6566 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
6567 NETIF_F_RXALL)))
dc221294
BA
6568 return 0;
6569
0184039a
BG
6570 if (changed & NETIF_F_RXFCS) {
6571 if (features & NETIF_F_RXFCS) {
6572 adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
6573 } else {
6574 /* We need to take it back to defaults, which might mean
6575 * stripping is still disabled at the adapter level.
6576 */
6577 if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
6578 adapter->flags2 |= FLAG2_CRC_STRIPPING;
6579 else
6580 adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
6581 }
6582 }
6583
70495a50
BA
6584 netdev->features = features;
6585
dc221294
BA
6586 if (netif_running(netdev))
6587 e1000e_reinit_locked(adapter);
6588 else
6589 e1000e_reset(adapter);
6590
6591 return 0;
6592}
6593
651c2466
SH
6594static const struct net_device_ops e1000e_netdev_ops = {
6595 .ndo_open = e1000_open,
6596 .ndo_stop = e1000_close,
00829823 6597 .ndo_start_xmit = e1000_xmit_frame,
67fd4fcb 6598 .ndo_get_stats64 = e1000e_get_stats64,
ef9b965a 6599 .ndo_set_rx_mode = e1000e_set_rx_mode,
651c2466
SH
6600 .ndo_set_mac_address = e1000_set_mac,
6601 .ndo_change_mtu = e1000_change_mtu,
6602 .ndo_do_ioctl = e1000_ioctl,
6603 .ndo_tx_timeout = e1000_tx_timeout,
6604 .ndo_validate_addr = eth_validate_addr,
6605
651c2466
SH
6606 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
6607 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
6608#ifdef CONFIG_NET_POLL_CONTROLLER
6609 .ndo_poll_controller = e1000_netpoll,
6610#endif
dc221294 6611 .ndo_set_features = e1000_set_features,
651c2466
SH
6612};
6613
bc7f75fa
AK
6614/**
6615 * e1000_probe - Device Initialization Routine
6616 * @pdev: PCI device information struct
6617 * @ent: entry in e1000_pci_tbl
6618 *
6619 * Returns 0 on success, negative on failure
6620 *
6621 * e1000_probe initializes an adapter identified by a pci_dev structure.
6622 * The OS initialization, configuring of the adapter private structure,
6623 * and a hardware reset occur.
6624 **/
1dd06ae8 6625static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bc7f75fa
AK
6626{
6627 struct net_device *netdev;
6628 struct e1000_adapter *adapter;
6629 struct e1000_hw *hw;
6630 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
f47e81fc
BB
6631 resource_size_t mmio_start, mmio_len;
6632 resource_size_t flash_start, flash_len;
bc7f75fa 6633 static int cards_found;
78cd29d5 6634 u16 aspm_disable_flag = 0;
17e813ec 6635 int bars, i, err, pci_using_dac;
bc7f75fa
AK
6636 u16 eeprom_data = 0;
6637 u16 eeprom_apme_mask = E1000_EEPROM_APME;
6638
78cd29d5
BA
6639 if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
6640 aspm_disable_flag = PCIE_LINK_STATE_L0S;
6f461f6c 6641 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
78cd29d5
BA
6642 aspm_disable_flag |= PCIE_LINK_STATE_L1;
6643 if (aspm_disable_flag)
6644 e1000e_disable_aspm(pdev, aspm_disable_flag);
6e4f6f6b 6645
f0f422e5 6646 err = pci_enable_device_mem(pdev);
bc7f75fa
AK
6647 if (err)
6648 return err;
6649
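	/* Prefer a 64-bit DMA mask and fall back to 32-bit if the platform
	 * cannot support it.
	 */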
6650 pci_using_dac = 0;
718a39eb 6651 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
bc7f75fa 6652 if (!err) {
718a39eb 6653 pci_using_dac = 1;
bc7f75fa 6654 } else {
718a39eb 6655 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
bc7f75fa 6656 if (err) {
718a39eb
RK
6657 dev_err(&pdev->dev,
6658 "No usable DMA configuration, aborting\n");
6659 goto err_dma;
bc7f75fa
AK
6660 }
6661 }
6662
17e813ec
BA
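	/* Claim all memory BARs for exclusive use by this driver. */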
6663 bars = pci_select_bars(pdev, IORESOURCE_MEM);
6664 err = pci_request_selected_regions_exclusive(pdev, bars,
6665 e1000e_driver_name);
bc7f75fa
AK
6666 if (err)
6667 goto err_pci_reg;
6668
68eac460 6669 /* AER (Advanced Error Reporting) hooks */
19d5afd4 6670 pci_enable_pcie_error_reporting(pdev);
68eac460 6671
bc7f75fa 6672 pci_set_master(pdev);
438b365a
BA
6673 /* PCI config space info */
6674 err = pci_save_state(pdev);
6675 if (err)
6676 goto err_alloc_etherdev;
bc7f75fa
AK
6677
6678 err = -ENOMEM;
6679 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
6680 if (!netdev)
6681 goto err_alloc_etherdev;
6682
bc7f75fa
AK
6683 SET_NETDEV_DEV(netdev, &pdev->dev);
6684
f85e4dfa
TH
6685 netdev->irq = pdev->irq;
6686
bc7f75fa
AK
6687 pci_set_drvdata(pdev, netdev);
6688 adapter = netdev_priv(netdev);
6689 hw = &adapter->hw;
6690 adapter->netdev = netdev;
6691 adapter->pdev = pdev;
6692 adapter->ei = ei;
6693 adapter->pba = ei->pba;
6694 adapter->flags = ei->flags;
eb7c3adb 6695 adapter->flags2 = ei->flags2;
bc7f75fa
AK
6696 adapter->hw.adapter = adapter;
6697 adapter->hw.mac.type = ei->mac;
2adc55c9 6698 adapter->max_hw_frame_size = ei->max_hw_frame_size;
b3f4d599 6699 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
bc7f75fa
AK
6700
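	/* Map BAR 0, the device register space, into kernel virtual memory. */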
6701 mmio_start = pci_resource_start(pdev, 0);
6702 mmio_len = pci_resource_len(pdev, 0);
6703
6704 err = -EIO;
6705 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
6706 if (!adapter->hw.hw_addr)
6707 goto err_ioremap;
6708
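	/* Some ICH/PCH parts expose the NVM through a separate flash BAR
	 * (BAR 1); map it when present.
	 */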
6709 if ((adapter->flags & FLAG_HAS_FLASH) &&
6710 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
6711 flash_start = pci_resource_start(pdev, 1);
6712 flash_len = pci_resource_len(pdev, 1);
6713 adapter->hw.flash_address = ioremap(flash_start, flash_len);
6714 if (!adapter->hw.flash_address)
6715 goto err_flashmap;
6716 }
6717
d495bcb8
BA
6718 /* Set default EEE advertisement */
6719 if (adapter->flags2 & FLAG2_HAS_EEE)
6720 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
6721
bc7f75fa 6722 /* construct the net_device struct */
e80bd1d1 6723 netdev->netdev_ops = &e1000e_netdev_ops;
bc7f75fa 6724 e1000e_set_ethtool_ops(netdev);
e80bd1d1 6725 netdev->watchdog_timeo = 5 * HZ;
c58c8a78 6726 netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
f2315bf1 6727 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
bc7f75fa
AK
6728
6729 netdev->mem_start = mmio_start;
6730 netdev->mem_end = mmio_start + mmio_len;
6731
6732 adapter->bd_number = cards_found++;
6733
4662e82b
BA
6734 e1000e_check_options(adapter);
6735
bc7f75fa
AK
6736 /* setup adapter struct */
6737 err = e1000_sw_init(adapter);
6738 if (err)
6739 goto err_sw_init;
6740
bc7f75fa
AK
6741 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
6742 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
6743 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
6744
69e3fd8c 6745 err = ei->get_variants(adapter);
bc7f75fa
AK
6746 if (err)
6747 goto err_hw_init;
6748
4a770358
BA
6749 if ((adapter->flags & FLAG_IS_ICH) &&
6750 (adapter->flags & FLAG_READ_ONLY_NVM))
6751 e1000e_write_protect_nvm_ich8lan(&adapter->hw);
6752
bc7f75fa
AK
6753 hw->mac.ops.get_bus_info(&adapter->hw);
6754
318a94d6 6755 adapter->hw.phy.autoneg_wait_to_complete = 0;
bc7f75fa
AK
6756
6757 /* Copper options */
318a94d6 6758 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
bc7f75fa
AK
6759 adapter->hw.phy.mdix = AUTO_ALL_MODES;
6760 adapter->hw.phy.disable_polarity_correction = 0;
6761 adapter->hw.phy.ms_type = e1000_ms_hw_default;
6762 }
6763
470a5420 6764 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
185095fb
BA
6765 dev_info(&pdev->dev,
6766 "PHY reset is blocked due to SOL/IDER session.\n");
bc7f75fa 6767
dc221294
BA
6768 /* Set initial default active device features */
6769 netdev->features = (NETIF_F_SG |
f646968f
PM
6770 NETIF_F_HW_VLAN_CTAG_RX |
6771 NETIF_F_HW_VLAN_CTAG_TX |
dc221294
BA
6772 NETIF_F_TSO |
6773 NETIF_F_TSO6 |
70495a50 6774 NETIF_F_RXHASH |
dc221294
BA
6775 NETIF_F_RXCSUM |
6776 NETIF_F_HW_CSUM);
6777
6778 /* Set user-changeable features (subset of all device features) */
6779 netdev->hw_features = netdev->features;
0184039a 6780 netdev->hw_features |= NETIF_F_RXFCS;
943146de 6781 netdev->priv_flags |= IFF_SUPP_NOFCS;
cf955e6c 6782 netdev->hw_features |= NETIF_F_RXALL;
bc7f75fa
AK
6783
6784 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
f646968f 6785 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
bc7f75fa 6786
dc221294
BA
6787 netdev->vlan_features |= (NETIF_F_SG |
6788 NETIF_F_TSO |
6789 NETIF_F_TSO6 |
6790 NETIF_F_HW_CSUM);
a5136e23 6791
ef9b965a
JB
6792 netdev->priv_flags |= IFF_UNICAST_FLT;
6793
7b872a55 6794 if (pci_using_dac) {
bc7f75fa 6795 netdev->features |= NETIF_F_HIGHDMA;
7b872a55
YZ
6796 netdev->vlan_features |= NETIF_F_HIGHDMA;
6797 }
bc7f75fa 6798
bc7f75fa
AK
6799 if (e1000e_enable_mng_pass_thru(&adapter->hw))
6800 adapter->flags |= FLAG_MNG_PT_ENABLED;
6801
e921eb1a 6802 /* before reading the NVM, reset the controller to
ad68076e
BA
6803 * put the device in a known good starting state
6804 */
bc7f75fa
AK
6805 adapter->hw.mac.ops.reset_hw(&adapter->hw);
6806
e921eb1a 6807 /* systems with ASPM and others may see the checksum fail on the first
bc7f75fa
AK
6808 * attempt. Let's give it a few tries
6809 */
6810 for (i = 0;; i++) {
6811 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
6812 break;
6813 if (i == 2) {
185095fb 6814 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
bc7f75fa
AK
6815 err = -EIO;
6816 goto err_eeprom;
6817 }
6818 }
6819
10aa4c04
AK
6820 e1000_eeprom_checks(adapter);
6821
608f8a0d 6822 /* copy the MAC address */
bc7f75fa 6823 if (e1000e_read_mac_addr(&adapter->hw))
185095fb
BA
6824 dev_err(&pdev->dev,
6825 "NVM Read Error while reading MAC address\n");
bc7f75fa
AK
6826
6827 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
bc7f75fa 6828
aaeb6cdf 6829 if (!is_valid_ether_addr(netdev->dev_addr)) {
185095fb 6830 dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
aaeb6cdf 6831 netdev->dev_addr);
bc7f75fa
AK
6832 err = -EIO;
6833 goto err_eeprom;
6834 }
6835
6836 init_timer(&adapter->watchdog_timer);
c061b18d 6837 adapter->watchdog_timer.function = e1000_watchdog;
53aa82da 6838 adapter->watchdog_timer.data = (unsigned long)adapter;
bc7f75fa
AK
6839
6840 init_timer(&adapter->phy_info_timer);
c061b18d 6841 adapter->phy_info_timer.function = e1000_update_phy_info;
53aa82da 6842 adapter->phy_info_timer.data = (unsigned long)adapter;
bc7f75fa
AK
6843
6844 INIT_WORK(&adapter->reset_task, e1000_reset_task);
6845 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
a8f88ff5
JB
6846 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
6847 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
41cec6f1 6848 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
bc7f75fa 6849
bc7f75fa
AK
6850 /* Initialize link parameters. User can change them with ethtool */
6851 adapter->hw.mac.autoneg = 1;
3db1cd5c 6852 adapter->fc_autoneg = true;
5c48ef3e
BA
6853 adapter->hw.fc.requested_mode = e1000_fc_default;
6854 adapter->hw.fc.current_mode = e1000_fc_default;
bc7f75fa
AK
6855 adapter->hw.phy.autoneg_advertised = 0x2f;
6856
e921eb1a 6857 /* Initial Wake on LAN setting - If APM wake is enabled in
bc7f75fa
AK
6858 * the EEPROM, enable the ACPI Magic Packet filter
6859 */
6860 if (adapter->flags & FLAG_APME_IN_WUC) {
6861 /* APME bit in EEPROM is mapped to WUC.APME */
6862 eeprom_data = er32(WUC);
6863 eeprom_apme_mask = E1000_WUC_APME;
4def99bb
BA
6864 if ((hw->mac.type > e1000_ich10lan) &&
6865 (eeprom_data & E1000_WUC_PHY_WAKE))
a4f58f54 6866 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
bc7f75fa
AK
6867 } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
6868 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
6869 (adapter->hw.bus.func == 1))
3d3a1676
BA
6870 e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B,
6871 1, &eeprom_data);
bc7f75fa 6872 else
3d3a1676
BA
6873 e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A,
6874 1, &eeprom_data);
bc7f75fa
AK
6875 }
6876
6877 /* fetch WoL from EEPROM */
6878 if (eeprom_data & eeprom_apme_mask)
6879 adapter->eeprom_wol |= E1000_WUFC_MAG;
6880
e921eb1a 6881 /* now that we have the eeprom settings, apply the special cases
bc7f75fa
AK
6882 * where the eeprom may be wrong or the board simply won't support
6883 * wake on lan on a particular port
6884 */
6885 if (!(adapter->flags & FLAG_HAS_WOL))
6886 adapter->eeprom_wol = 0;
6887
6888 /* initialize the wol settings based on the eeprom settings */
6889 adapter->wol = adapter->eeprom_wol;
66148bab
KK
6890
6891 /* make sure adapter isn't asleep if manageability is enabled */
6892 if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
6893 (hw->mac.ops.check_mng_mode(hw)))
6894 device_wakeup_enable(&pdev->dev);
bc7f75fa 6895
84527590
BA
6896 /* save off EEPROM version number */
6897 e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
6898
bc7f75fa
AK
6899 /* reset the hardware with the new settings */
6900 e1000e_reset(adapter);
6901
e921eb1a 6902 /* If the controller has AMT, do not set DRV_LOAD until the interface
bc7f75fa 6903 * is up. For all other cases, let the f/w know that the h/w is now
ad68076e
BA
6904 * under the control of the driver.
6905 */
c43bc57e 6906 if (!(adapter->flags & FLAG_HAS_AMT))
31dbe5b4 6907 e1000e_get_hw_control(adapter);
bc7f75fa 6908
f2315bf1 6909 strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
bc7f75fa
AK
6910 err = register_netdev(netdev);
6911 if (err)
6912 goto err_register;
6913
9c563d20
JB
6914 /* carrier off reporting is important to ethtool even BEFORE open */
6915 netif_carrier_off(netdev);
6916
d89777bf
BA
6917 /* init PTP hardware clock */
6918 e1000e_ptp_init(adapter);
6919
bc7f75fa
AK
6920 e1000_print_device_info(adapter);
6921
f3ec4f87
AS
6922 if (pci_dev_run_wake(pdev))
6923 pm_runtime_put_noidle(&pdev->dev);
23606cf5 6924
bc7f75fa
AK
6925 return 0;
6926
6927err_register:
c43bc57e 6928 if (!(adapter->flags & FLAG_HAS_AMT))
31dbe5b4 6929 e1000e_release_hw_control(adapter);
bc7f75fa 6930err_eeprom:
470a5420 6931 if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
bc7f75fa 6932 e1000_phy_hw_reset(&adapter->hw);
c43bc57e 6933err_hw_init:
bc7f75fa
AK
6934 kfree(adapter->tx_ring);
6935 kfree(adapter->rx_ring);
6936err_sw_init:
c43bc57e
JB
6937 if (adapter->hw.flash_address)
6938 iounmap(adapter->hw.flash_address);
e82f54ba 6939 e1000e_reset_interrupt_capability(adapter);
c43bc57e 6940err_flashmap:
bc7f75fa
AK
6941 iounmap(adapter->hw.hw_addr);
6942err_ioremap:
6943 free_netdev(netdev);
6944err_alloc_etherdev:
f0f422e5 6945 pci_release_selected_regions(pdev,
f0ff4398 6946 pci_select_bars(pdev, IORESOURCE_MEM));
bc7f75fa
AK
6947err_pci_reg:
6948err_dma:
6949 pci_disable_device(pdev);
6950 return err;
6951}
6952
6953/**
6954 * e1000_remove - Device Removal Routine
6955 * @pdev: PCI device information struct
6956 *
6957 * e1000_remove is called by the PCI subsystem to alert the driver
6958 * that it should release a PCI device. This could be caused by a
6959 * Hot-Plug event, or because the driver is going to be removed from
6960 * memory.
6961 **/
9f9a12f8 6962static void e1000_remove(struct pci_dev *pdev)
bc7f75fa
AK
6963{
6964 struct net_device *netdev = pci_get_drvdata(pdev);
6965 struct e1000_adapter *adapter = netdev_priv(netdev);
23606cf5
RW
6966 bool down = test_bit(__E1000_DOWN, &adapter->state);
6967
d89777bf
BA
6968 e1000e_ptp_remove(adapter);
6969
e921eb1a 6970 /* The timers may be rescheduled, so explicitly prevent them
23f333a2 6971 * from being rescheduled.
ad68076e 6972 */
23606cf5
RW
6973 if (!down)
6974 set_bit(__E1000_DOWN, &adapter->state);
bc7f75fa
AK
6975 del_timer_sync(&adapter->watchdog_timer);
6976 del_timer_sync(&adapter->phy_info_timer);
6977
41cec6f1
BA
6978 cancel_work_sync(&adapter->reset_task);
6979 cancel_work_sync(&adapter->watchdog_task);
6980 cancel_work_sync(&adapter->downshift_task);
6981 cancel_work_sync(&adapter->update_phy_task);
6982 cancel_work_sync(&adapter->print_hang_task);
bc7f75fa 6983
b67e1913
BA
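	/* Stop any pending Tx timestamp work and free an skb still waiting
	 * for its hardware timestamp.
	 */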
6984 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
6985 cancel_work_sync(&adapter->tx_hwtstamp_work);
6986 if (adapter->tx_hwtstamp_skb) {
6987 dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
6988 adapter->tx_hwtstamp_skb = NULL;
6989 }
6990 }
6991
23606cf5
RW
6992 /* Don't lie to e1000_close() down the road. */
6993 if (!down)
6994 clear_bit(__E1000_DOWN, &adapter->state);
17f208de
BA
6995 unregister_netdev(netdev);
6996
f3ec4f87
AS
6997 if (pci_dev_run_wake(pdev))
6998 pm_runtime_get_noresume(&pdev->dev);
23606cf5 6999
e921eb1a 7000 /* Release control of h/w to f/w. If f/w is AMT enabled, this
ad68076e
BA
7001 * would have already happened in close and is redundant.
7002 */
31dbe5b4 7003 e1000e_release_hw_control(adapter);
bc7f75fa 7004
4662e82b 7005 e1000e_reset_interrupt_capability(adapter);
bc7f75fa
AK
7006 kfree(adapter->tx_ring);
7007 kfree(adapter->rx_ring);
7008
7009 iounmap(adapter->hw.hw_addr);
7010 if (adapter->hw.flash_address)
7011 iounmap(adapter->hw.flash_address);
f0f422e5 7012 pci_release_selected_regions(pdev,
f0ff4398 7013 pci_select_bars(pdev, IORESOURCE_MEM));
bc7f75fa
AK
7014
7015 free_netdev(netdev);
7016
111b9dc5 7017 /* AER disable */
19d5afd4 7018 pci_disable_pcie_error_reporting(pdev);
111b9dc5 7019
bc7f75fa
AK
7020 pci_disable_device(pdev);
7021}
7022
7023/* PCI Error Recovery (ERS) */
3646f0e5 7024static const struct pci_error_handlers e1000_err_handler = {
bc7f75fa
AK
7025 .error_detected = e1000_io_error_detected,
7026 .slot_reset = e1000_io_slot_reset,
7027 .resume = e1000_io_resume,
7028};
7029
a3aa1884 7030static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
bc7f75fa
AK
7031 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
7032 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
7033 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
c29c3ba5
BA
7034 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP),
7035 board_82571 },
bc7f75fa
AK
7036 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
7037 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
040babf9
AK
7038 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
7039 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
7040 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
ad68076e 7041
bc7f75fa
AK
7042 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
7043 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
7044 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
7045 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
ad68076e 7046
bc7f75fa
AK
7047 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
7048 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
7049 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
ad68076e 7050
4662e82b 7051 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
bef28b11 7052 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
8c81c9c3 7053 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
4662e82b 7054
bc7f75fa
AK
7055 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
7056 board_80003es2lan },
7057 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
7058 board_80003es2lan },
7059 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
7060 board_80003es2lan },
7061 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
7062 board_80003es2lan },
ad68076e 7063
bc7f75fa
AK
7064 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
7065 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
7066 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
7067 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
7068 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
7069 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
7070 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
9e135a2e 7071 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
ad68076e 7072
bc7f75fa
AK
7073 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
7074 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
7075 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
7076 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
7077 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
2f15f9d6 7078 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
97ac8cae
BA
7079 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
7080 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
7081 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
7082
7083 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
7084 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
7085 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
bc7f75fa 7086
f4187b56
BA
7087 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
7088 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
10df0b91 7089 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
f4187b56 7090
a4f58f54
BA
7091 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
7092 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
7093 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
7094 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
7095
d3738bb8
BA
7096 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
7097 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
7098
2fbe4526
BA
7099 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
7100 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
16e310ae
BA
7101 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
7102 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
91a3d82f
BA
7103 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt },
7104 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt },
7105 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt },
7106 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt },
2fbe4526 7107
f36bb6ca 7108 { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
bc7f75fa
AK
7109};
7110MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
7111
23606cf5 7112static const struct dev_pm_ops e1000_pm_ops = {
72f72dcc 7113#ifdef CONFIG_PM_SLEEP
28002099
DE
7114 .suspend = e1000e_pm_suspend,
7115 .resume = e1000e_pm_resume,
7116 .freeze = e1000e_pm_freeze,
7117 .thaw = e1000e_pm_thaw,
7118 .poweroff = e1000e_pm_suspend,
7119 .restore = e1000e_pm_resume,
72f72dcc 7120#endif
63eb48f1
DE
7121 SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
7122 e1000e_pm_runtime_idle)
23606cf5
RW
7123};
7124
bc7f75fa
AK
7125/* PCI Device API Driver */
7126static struct pci_driver e1000_driver = {
7127 .name = e1000e_driver_name,
7128 .id_table = e1000_pci_tbl,
7129 .probe = e1000_probe,
9f9a12f8 7130 .remove = e1000_remove,
f36bb6ca
BA
7131 .driver = {
7132 .pm = &e1000_pm_ops,
7133 },
bc7f75fa
AK
7134 .shutdown = e1000_shutdown,
7135 .err_handler = &e1000_err_handler
7136};
7137
7138/**
7139 * e1000_init_module - Driver Registration Routine
7140 *
7141 * e1000_init_module is the first routine called when the driver is
7142 * loaded. All it does is register with the PCI subsystem.
7143 **/
7144static int __init e1000_init_module(void)
7145{
7146 int ret;
8544b9f7
BA
7147 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
7148 e1000e_driver_version);
e78b80b1 7149 pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
bc7f75fa 7150 ret = pci_register_driver(&e1000_driver);
53ec5498 7151
bc7f75fa
AK
7152 return ret;
7153}
7154module_init(e1000_init_module);
7155
7156/**
7157 * e1000_exit_module - Driver Exit Cleanup Routine
7158 *
7159 * e1000_exit_module is called just before the driver is removed
7160 * from memory.
7161 **/
7162static void __exit e1000_exit_module(void)
7163{
7164 pci_unregister_driver(&e1000_driver);
7165}
7166module_exit(e1000_exit_module);
7167
bc7f75fa
AK
7168MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
7169MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
7170MODULE_LICENSE("GPL");
7171MODULE_VERSION(DRV_VERSION);
7172
06c24b91 7173/* netdev.c */