/* drivers/net/e1000/e1000_main.c */
/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "e1000.h"
#include <net/ip6_checksum.h>

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#ifndef CONFIG_E1000_NAPI
#define DRIVERNAPI
#else
#define DRIVERNAPI "-NAPI"
#endif
#define DRV_VERSION "7.3.20-k2" DRIVERNAPI
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info(unsigned long data);
static void e1000_watchdog(unsigned long data);
static void e1000_82547_tx_fifo_stall(unsigned long data);
static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static irqreturn_t e1000_intr_msi(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
#ifdef CONFIG_E1000_NAPI
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				  struct e1000_rx_ring *rx_ring,
				  int *work_done, int work_to_do);
#else
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring);
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				  struct e1000_rx_ring *rx_ring);
#endif
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      struct e1000_rx_ring *rx_ring,
				      int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static void e1000_vlan_rx_register(struct net_device *netdev,
				   struct vlan_group *grp);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
#ifdef CONFIG_PM
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif

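/* Packets no larger than copybreak bytes are copied into a small, freshly
 * allocated skb on receive so the original DMA buffer can be recycled;
 * this trades a memcpy on small frames for better memory utilization. */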
#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static struct pci_driver e1000_driver = {
	.name     = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = e1000_suspend,
	.resume   = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init e1000_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       e1000_driver_string, e1000_driver_version);

	printk(KERN_INFO "%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			printk(KERN_INFO "e1000: copybreak disabled\n");
		else
			printk(KERN_INFO "e1000: copybreak enabled for "
			       "packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

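/**
 * e1000_request_irq - attach the interrupt handler
 * @adapter: board private structure
 *
 * Tries MSI on controllers that support it (82571 and newer) and falls
 * back to a shared legacy interrupt if MSI cannot be enabled.
 **/
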
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	if (hw->mac_type >= e1000_82571) {
		adapter->have_msi = !pci_enable_msi(adapter->pdev);
		if (adapter->have_msi) {
			handler = e1000_intr_msi;
			irq_flags = 0;
		}
	}

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err) {
		if (adapter->have_msi)
			pci_disable_msi(adapter->pdev);
		DPRINTK(PROBE, ERR,
			"Unable to allocate interrupt Error: %d\n", err);
	}

	return err;
}

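/**
 * e1000_free_irq - detach the interrupt handler
 * @adapter: board private structure
 *
 * Releases the IRQ and disables MSI again if it was in use.
 **/
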
static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);

	if (adapter->have_msi)
		pci_disable_msi(adapter->pdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}

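/**
 * e1000_update_mng_vlan - keep the firmware's management VLAN registered
 * @adapter: board private structure
 *
 * Registers the VLAN id from the firmware's DHCP cookie if the stack has
 * not done so already, and unregisters a stale previous management VLAN.
 **/
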
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;
	if (adapter->vlgrp) {
		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
			if (hw->mng_cookie.status &
			    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
				e1000_vlan_rx_add_vid(netdev, vid);
				adapter->mng_vlan_id = vid;
			} else
				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;

			if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
			    (vid != old_vid) &&
			    !vlan_group_get_device(adapter->vlgrp, old_vid))
				e1000_vlan_rx_kill_vid(netdev, old_vid);
		} else
			adapter->mng_vlan_id = vid;
	}
}

/**
 * e1000_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is closed.
 *
 **/

static void e1000_release_hw_control(struct e1000_adapter *adapter)
{
	u32 ctrl_ext;
	u32 swsm;
	struct e1000_hw *hw = &adapter->hw;

	/* Let firmware take over control of h/w */
	switch (hw->mac_type) {
	case e1000_82573:
		swsm = er32(SWSM);
		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
	case e1000_ich8lan:
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
		break;
	default:
		break;
	}
}

/**
 * e1000_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 *
 **/

static void e1000_get_hw_control(struct e1000_adapter *adapter)
{
	u32 ctrl_ext;
	u32 swsm;
	struct e1000_hw *hw = &adapter->hw;

	/* Let firmware know the driver has taken over */
	switch (hw->mac_type) {
	case e1000_82573:
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
	case e1000_ich8lan:
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
		break;
	default:
		break;
	}
}

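/**
 * e1000_init_manageability - configure management pass-through
 * @adapter: board private structure
 *
 * Stops the hardware from intercepting ARP on its own and, where MANC2H
 * exists, routes management packets (ports 623/664) up to the host.
 **/
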
static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		/* enable receiving management packets to the host */
		/* this will probably generate destination unreachable messages
		 * from the host OS, but the packets will be handled on SMBUS */
		if (hw->has_manc2h) {
			u32 manc2h = er32(MANC2H);

			manc |= E1000_MANC_EN_MNG2HOST;
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
			manc2h |= E1000_MNG2HOST_PORT_623;
			manc2h |= E1000_MNG2HOST_PORT_664;
			ew32(MANC2H, manc2h);
		}

		ew32(MANC, manc);
	}
}

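/**
 * e1000_release_manageability - hand management traffic back to firmware
 * @adapter: board private structure
 *
 * Re-enables hardware ARP interception and stops forwarding management
 * packets to the host, undoing e1000_init_manageability().
 **/
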
static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		if (hw->has_manc2h)
			manc &= ~E1000_MANC_EN_MNG2HOST;

		/* don't explicitly have to mess with MANC2H since
		 * MANC has an enable disable that gates MANC2H */

		ew32(MANC, manc);
	}
}

/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}

	adapter->tx_queue_len = netdev->tx_queue_len;
}

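/**
 * e1000_up - restore an interface after the hardware has been reset
 * @adapter: board private structure
 *
 * Reprograms the hardware, re-enables NAPI and interrupts, and fires a
 * link change interrupt so the watchdog picks the link state back up.
 **/
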
int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

#ifdef CONFIG_E1000_NAPI
	napi_enable(&adapter->napi);
#endif
	e1000_irq_enable(adapter);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 *
 **/

void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}

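/**
 * e1000_power_down_phy - turn off the phy when the interface goes down
 * @adapter: address of board private structure
 *
 * Skipped when WoL, manageability (SMBUS/AMT) or an SoL/IDER session
 * still needs the link, as detailed in the comment below.
 **/
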
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down *
	 * The PHY cannot be powered down if any of the following is true *
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		case e1000_82571:
		case e1000_82572:
		case e1000_82573:
		case e1000_80003es2lan:
		case e1000_ich8lan:
			if (e1000_check_mng_mode(hw) ||
			    e1000_check_phy_reset_block(hw))
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		mdelay(1);
	}
out:
	return;
}

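/**
 * e1000_down - quiesce the device and detach it from the stack
 * @adapter: board private structure
 *
 * Stops the timers, disables NAPI and interrupts, resets the hardware,
 * and reclaims all buffers still sitting on the Tx and Rx rings.
 **/
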
void e1000_down(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__E1000_DOWN, &adapter->flags);

#ifdef CONFIG_E1000_NAPI
	napi_disable(&adapter->napi);
#endif
	e1000_irq_disable(adapter);

	del_timer_sync(&adapter->tx_fifo_stall_timer);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}

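/**
 * e1000_reinit_locked - bounce the interface with a full down/up cycle
 * @adapter: board private structure
 *
 * Serializes against other resets by holding the __E1000_RESETTING flag
 * for the duration of the down/up sequence.
 **/
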
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}

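/**
 * e1000_reset - bring the hardware into a known good state
 * @adapter: board private structure
 *
 * Repartitions the packet buffer between the Tx and Rx FIFOs based on
 * MTU, programs the flow control watermarks, resets and re-initializes
 * the MAC, and reapplies PHY workarounds.
 **/
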
void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 fc_high_water_mark = E1000_FC_HIGH_DIFF;
	bool legacy_pba_adjust = false;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		pba = E1000_PBA_38K;
		break;
	case e1000_82573:
		pba = E1000_PBA_20K;
		break;
	case e1000_ich8lan:
		pba = E1000_PBA_8K;
		/* fall through */
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* don't include ethernet FCS because hardware appends/strips */
		min_rx_space = adapter->netdev->mtu + ENET_HEADER_SIZE +
			       VLAN_TAG_SIZE;
		min_tx_space = min_rx_space;
		min_tx_space *= 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on rx space, rx wins and must trump tx
			 * adjustment or use Early Receive if available */
			if (pba < min_rx_space) {
				switch (hw->mac_type) {
				case e1000_82573:
					/* ERT enabled in e1000_configure_rx */
					break;
				default:
					pba = min_rx_space;
					break;
				}
			}
		}
	}

	ew32(PBA, pba);

	/* flow control settings */
	/* Set the FC high water mark to 90% of the FIFO size.
	 * Required to clear last 3 LSB */
	fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
	/* We can't use 90% on small FIFOs because the remainder
	 * would be less than 1 full frame. In this case, we size
	 * it to allow at least a full frame above the high water
	 * mark. */
	if (pba < E1000_PBA_16K)
		fc_high_water_mark = (pba * 1024) - 1600;

	hw->fc_high_water = fc_high_water_mark;
	hw->fc_low_water = fc_high_water_mark - 8;
	if (hw->mac_type == e1000_80003es2lan)
		hw->fc_pause_time = 0xFFFF;
	else
		hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		DPRINTK(PROBE, ERR, "Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->mac_type <= e1000_82547_rev_2 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	if (!adapter->smart_power_down &&
	    (hw->mac_type == e1000_82571 ||
	     hw->mac_type == e1000_82572)) {
		u16 phy_data = 0;
		/* speed up time to link by disabling smart power down, ignore
		 * the return value of this function because there is nothing
		 * different we would do if it failed */
		e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
				   &phy_data);
		phy_data &= ~IGP02E1000_PM_SPD;
		e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
				    phy_data);
	}

	e1000_release_manageability(adapter);
}

/**
 * e1000_dump_eeprom - dump the eeprom for users having checksum issues
 * @adapter: board private structure
 **/
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data) {
		printk(KERN_ERR "Unable to allocate memory to dump EEPROM"
		       " data\n");
		return;
	}

	ops->get_eeprom(netdev, &eeprom, data);

	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	printk(KERN_ERR "/*********************/\n");
	printk(KERN_ERR "Current EEPROM Checksum : 0x%04x\n", csum_old);
	printk(KERN_ERR "Calculated              : 0x%04x\n", csum_new);

	printk(KERN_ERR "Offset    Values\n");
	printk(KERN_ERR "========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	printk(KERN_ERR "Include this output when contacting your support "
	       "provider.\n");
	printk(KERN_ERR "This is not a software error! Something bad "
	       "happened to your hardware or\n");
	printk(KERN_ERR "EEPROM image. Ignoring this "
	       "problem could result in further problems,\n");
	printk(KERN_ERR "possibly loss of data, corruption or system hangs!\n");
	printk(KERN_ERR "The MAC Address will be reset to 00:00:00:00:00:00, "
	       "which is invalid\n");
	printk(KERN_ERR "and requires you to set the proper MAC "
	       "address manually before continuing\n");
	printk(KERN_ERR "to enable this network device.\n");
	printk(KERN_ERR "Please inspect the EEPROM dump and report the issue "
	       "to your hardware vendor\n");
	printk(KERN_ERR "or Intel Customer Support: linux-nics@intel.com\n");
	printk(KERN_ERR "/*********************/\n");

	kfree(data);
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/

static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;

	static int cards_found = 0;
	static int global_quad_port_a = 0; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	DECLARE_MAC_BUF(mac);

	if ((err = pci_enable_device(pdev)))
		return err;

	if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
	    !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
		pci_using_dac = 1;
	} else {
		if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
		    (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
			E1000_ERR("No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	if ((err = pci_request_regions(pdev, e1000_driver_name)))
		goto err_pci_reg;

	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = (1 << debug) - 1;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
			      pci_resource_len(pdev, BAR_0));
	if (!hw->hw_addr)
		goto err_ioremap;

	for (i = BAR_1; i <= BAR_5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			hw->io_base = pci_resource_start(pdev, i);
			break;
		}
	}

	netdev->open = &e1000_open;
	netdev->stop = &e1000_close;
	netdev->hard_start_xmit = &e1000_xmit_frame;
	netdev->get_stats = &e1000_get_stats;
	netdev->set_rx_mode = &e1000_set_rx_mode;
	netdev->set_mac_address = &e1000_set_mac;
	netdev->change_mtu = &e1000_change_mtu;
	netdev->do_ioctl = &e1000_ioctl;
	e1000_set_ethtool_ops(netdev);
	netdev->tx_timeout = &e1000_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_E1000_NAPI
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
#endif
	netdev->vlan_rx_register = e1000_vlan_rx_register;
	netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = e1000_netpoll;
#endif
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* setup the private structure */

	if ((err = e1000_sw_init(adapter)))
		goto err_sw_init;

	err = -EIO;
	/* Flash BAR mapping must happen after e1000_sw_init
	 * because it depends on mac_type */
	if ((hw->mac_type == e1000_ich8lan) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		hw->flash_address =
			ioremap(pci_resource_start(pdev, 1),
				pci_resource_len(pdev, 1));
		if (!hw->flash_address)
			goto err_flashmap;
	}

	if (e1000_check_phy_reset_block(hw))
		DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");

	if (hw->mac_type >= e1000_82543) {
		netdev->features = NETIF_F_SG |
				   NETIF_F_HW_CSUM |
				   NETIF_F_HW_VLAN_TX |
				   NETIF_F_HW_VLAN_RX |
				   NETIF_F_HW_VLAN_FILTER;
		if (hw->mac_type == e1000_ich8lan)
			netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->features |= NETIF_F_TSO;

	if (hw->mac_type > e1000_82547_rev_2)
		netdev->features |= NETIF_F_TSO6;
	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->features |= NETIF_F_LLTX;

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		E1000_ERR("EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state */

	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/*
		 * set MAC address to all zeroes to invalidate and temporarily
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to bad MAC address */
	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr))
		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");

	e1000_get_bus_info(hw);

	init_timer(&adapter->tx_fifo_stall_timer);
	adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
	adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &e1000_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = &e1000_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_ich8lan:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
	case e1000_82571:
	case e1000_80003es2lan:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
	case E1000_DEV_ID_82571EB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
	case E1000_DEV_ID_82571EB_QUAD_COPPER:
	case E1000_DEV_ID_82571EB_QUAD_FIBER:
	case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
	case E1000_DEV_ID_82571PT_QUAD_COPPER:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = 1;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;

	/* print bus type/speed/width info */
	DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
		((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
		 (hw->bus_type == e1000_bus_type_pci_express ? " Express" : "")),
		((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
		 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
		 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
		 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
		 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
		((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
		 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
		 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
		 "32-bit"));

	printk("%s\n", print_mac(mac, netdev->dev_addr));

	if (hw->bus_type == e1000_bus_type_pci_express) {
		DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no "
			"longer be supported by this driver in the future.\n",
			pdev->vendor, pdev->device);
		DPRINTK(PROBE, WARNING, "please use the \"e1000e\" "
			"driver instead.\n");
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	/* If the controller is 82573 and f/w is AMT, do not set
	 * DRV_LOAD until the interface is up. For all other cases,
	 * let the f/w know that the h/w is now under the control
	 * of the driver. */
	if (hw->mac_type != e1000_82573 ||
	    !e1000_check_mng_mode(hw))
		e1000_get_hw_control(adapter);

	/* tell the stack to leave us alone until e1000_open() is called */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	strcpy(netdev->name, "eth%d");
	if ((err = register_netdev(netdev)))
		goto err_register;

	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
	e1000_release_hw_control(adapter);
err_eeprom:
	if (!e1000_check_phy_reset_block(hw))
		e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_flashmap:
#ifdef CONFIG_E1000_NAPI
	for (i = 0; i < adapter->num_rx_queues; i++)
		dev_put(&adapter->polling_netdev[i]);
#endif

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
#ifdef CONFIG_E1000_NAPI
	kfree(adapter->polling_netdev);
#endif
err_sw_init:
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. That could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
#ifdef CONFIG_E1000_NAPI
	int i;
#endif

	cancel_work_sync(&adapter->reset_task);

	e1000_release_manageability(adapter);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	e1000_release_hw_control(adapter);

#ifdef CONFIG_E1000_NAPI
	for (i = 0; i < adapter->num_rx_queues; i++)
		dev_put(&adapter->polling_netdev[i]);
#endif

	unregister_netdev(netdev);

	if (!e1000_check_phy_reset_block(hw))
		e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
#ifdef CONFIG_E1000_NAPI
	kfree(adapter->polling_netdev);
#endif

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
#ifdef CONFIG_E1000_NAPI
	int i;
#endif

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
	hw->max_frame_size = netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */

	if (e1000_set_mac_type(hw)) {
		DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

#ifdef CONFIG_E1000_NAPI
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->polling_netdev[i].priv = adapter;
		dev_hold(&adapter->polling_netdev[i]);
		set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
	}
	spin_lock_init(&adapter->tx_queue_lock);
#endif

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time. The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/

static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
{
	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

#ifdef CONFIG_E1000_NAPI
	adapter->polling_netdev = kcalloc(adapter->num_rx_queues,
					  sizeof(struct net_device),
					  GFP_KERNEL);
	if (!adapter->polling_netdev) {
		kfree(adapter->tx_ring);
		kfree(adapter->rx_ring);
		return -ENOMEM;
	}
#endif

	return E1000_SUCCESS;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* If AMT is enabled, let the firmware know that the network
	 * interface is now open */
	if (hw->mac_type == e1000_82573 &&
	    e1000_check_mng_mode(hw))
		e1000_get_hw_control(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

#ifdef CONFIG_E1000_NAPI
	napi_enable(&adapter->napi);
#endif

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

err_req_irq:
	e1000_release_hw_control(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !(adapter->vlgrp &&
	      vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}

	/* If AMT is enabled, let the firmware know that the network
	 * interface is now closed */
	if (hw->mac_type == e1000_82573 &&
	    e1000_check_mng_mode(hw))
		e1000_release_hw_control(adapter);

	return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
				  unsigned long len)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23 */
	if (hw->mac_type == e1000_82545 ||
	    hw->mac_type == e1000_82546) {
		/* the XOR catches any address bit above bit 15 that differs
		 * between the first and last byte, i.e. a 64kB crossing */
		return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
	}

	return true;
}

/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/

static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vmalloc(size);
	if (!txdr->buffer_info) {
		DPRINTK(PROBE, ERR,
			"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}
	memset(txdr->buffer_info, 0, size);

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		DPRINTK(PROBE, ERR,
			"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
			"at %p\n", txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
		txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			pci_free_consistent(pdev, txdr->size, txdr->desc,
					    txdr->dma);
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate aligned memory "
				"for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;
	spin_lock_init(&txdr->tx_lock);

	return 0;
}

581d708e
MC
1633/**
1634 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1635 * (Descriptors) for all queues
1636 * @adapter: board private structure
1637 *
581d708e
MC
1638 * Return 0 on success, negative on failure
1639 **/
1640
64798845 1641int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
581d708e
MC
1642{
1643 int i, err = 0;
1644
f56799ea 1645 for (i = 0; i < adapter->num_tx_queues; i++) {
581d708e
MC
1646 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1647 if (err) {
1648 DPRINTK(PROBE, ERR,
1649 "Allocation for Tx Queue %u failed\n", i);
3fbbc72e
VA
1650 for (i--; i >= 0; i--)
1651 e1000_free_tx_resources(adapter,
1652 &adapter->tx_ring[i]);
581d708e
MC
1653 break;
1654 }
1655 }
1656
1657 return err;
1658}
1659
1da177e4
LT
1660/**
1661 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1662 * @adapter: board private structure
1663 *
1664 * Configure the Tx unit of the MAC after a reset.
1665 **/
1666
64798845 1667static void e1000_configure_tx(struct e1000_adapter *adapter)
1da177e4 1668{
406874a7 1669 u64 tdba;
581d708e 1670 struct e1000_hw *hw = &adapter->hw;
406874a7
JP
1671 u32 tdlen, tctl, tipg, tarc;
1672 u32 ipgr1, ipgr2;
1da177e4
LT
1673
1674 /* Setup the HW Tx Head and Tail descriptor pointers */
1675
f56799ea 1676 switch (adapter->num_tx_queues) {
24025e4e
MC
1677 case 1:
1678 default:
581d708e
MC
1679 tdba = adapter->tx_ring[0].dma;
1680 tdlen = adapter->tx_ring[0].count *
1681 sizeof(struct e1000_tx_desc);
1dc32918
JP
1682 ew32(TDLEN, tdlen);
1683 ew32(TDBAH, (tdba >> 32));
1684 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1685 ew32(TDT, 0);
1686 ew32(TDH, 0);
6a951698
AK
1687 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
1688 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
24025e4e
MC
1689 break;
1690 }
1da177e4
LT
1691
1692 /* Set the default values for the Tx Inter Packet Gap timer */
1dc32918 1693 if (hw->mac_type <= e1000_82547_rev_2 &&
d89b6c67
JB
1694 (hw->media_type == e1000_media_type_fiber ||
1695 hw->media_type == e1000_media_type_internal_serdes))
0fadb059
JK
1696 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1697 else
1698 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1699
581d708e 1700 switch (hw->mac_type) {
1da177e4
LT
1701 case e1000_82542_rev2_0:
1702 case e1000_82542_rev2_1:
1703 tipg = DEFAULT_82542_TIPG_IPGT;
0fadb059
JK
1704 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1705 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1da177e4 1706 break;
87041639
JK
1707 case e1000_80003es2lan:
1708 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1709 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
1710 break;
1da177e4 1711 default:
0fadb059
JK
1712 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1713 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1714 break;
1da177e4 1715 }
0fadb059
JK
1716 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1717 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1dc32918 1718 ew32(TIPG, tipg);
1da177e4
LT
1719
1720 /* Set the Tx Interrupt Delay register */
1721
1dc32918 1722 ew32(TIDV, adapter->tx_int_delay);
581d708e 1723 if (hw->mac_type >= e1000_82540)
1dc32918 1724 ew32(TADV, adapter->tx_abs_int_delay);
1da177e4
LT
1725
1726 /* Program the Transmit Control Register */
1727
1dc32918 1728 tctl = er32(TCTL);
1da177e4 1729 tctl &= ~E1000_TCTL_CT;
7e6c9861 1730 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1da177e4
LT
1731 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1732
2ae76d98 1733 if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
1dc32918 1734 tarc = er32(TARC0);
90fb5135
AK
1735 /* set the speed mode bit; we'll clear it later if we're
1736 * not at gigabit link */
09ae3e88 1737 tarc |= (1 << 21);
1dc32918 1738 ew32(TARC0, tarc);
87041639 1739 } else if (hw->mac_type == e1000_80003es2lan) {
1dc32918 1740 tarc = er32(TARC0);
87041639 1741 tarc |= 1;
1dc32918
JP
1742 ew32(TARC0, tarc);
1743 tarc = er32(TARC1);
87041639 1744 tarc |= 1;
1dc32918 1745 ew32(TARC1, tarc);
2ae76d98
MC
1746 }
1747
581d708e 1748 e1000_config_collision_dist(hw);
1da177e4
LT
1749
1750 /* Setup Transmit Descriptor Settings for eop descriptor */
6a042dab
JB
1751 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1752
1753 /* only set IDE if we are delaying interrupts using the timers */
1754 if (adapter->tx_int_delay)
1755 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1da177e4 1756
581d708e 1757 if (hw->mac_type < e1000_82543)
1da177e4
LT
1758 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1759 else
1760 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1761
1762 /* Cache whether we're an 82544 running in PCI-X, because we'll
1763 * need this to apply a workaround later in the send path. */
581d708e
MC
1764 if (hw->mac_type == e1000_82544 &&
1765 hw->bus_type == e1000_bus_type_pcix)
1da177e4 1766 adapter->pcix_82544 = 1;
7e6c9861 1767
1dc32918 1768 ew32(TCTL, tctl);
7e6c9861 1769
1da177e4
LT
1770}
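/* Worked example of the TIPG packing above, assuming the usual copper
 * defaults from e1000_hw.h (IPGT = 8, IPGR1 = 8, IPGR2 = 6, with
 * IPGR1/IPGR2 field shifts of 10 and 20 -- values assumed here for
 * illustration only):
 *
 * tipg = 8 | (8 << 10) | (6 << 20) = 0x00602008
 *
 * so IPGT, IPGR1 and IPGR2 occupy bits 0-9, 10-19 and 20-29 of TIPG. */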
1771
1772/**
1773 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1774 * @adapter: board private structure
581d708e 1775 * @rxdr: rx descriptor ring (for a specific queue) to setup
1da177e4
LT
1776 *
1777 * Returns 0 on success, negative on failure
1778 **/
1779
64798845
JP
1780static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1781 struct e1000_rx_ring *rxdr)
1da177e4 1782{
1dc32918 1783 struct e1000_hw *hw = &adapter->hw;
1da177e4 1784 struct pci_dev *pdev = adapter->pdev;
2d7edb92 1785 int size, desc_len;
1da177e4
LT
1786
1787 size = sizeof(struct e1000_buffer) * rxdr->count;
cd94dd0b 1788 rxdr->buffer_info = vmalloc(size);
581d708e 1789 if (!rxdr->buffer_info) {
2648345f
MC
1790 DPRINTK(PROBE, ERR,
1791 "Unable to allocate memory for the receive descriptor ring\n");
1da177e4
LT
1792 return -ENOMEM;
1793 }
1794 memset(rxdr->buffer_info, 0, size);
1795
1c7e5b12
YB
1796 rxdr->ps_page = kcalloc(rxdr->count, sizeof(struct e1000_ps_page),
1797 GFP_KERNEL);
96838a40 1798 if (!rxdr->ps_page) {
2d7edb92
MC
1799 vfree(rxdr->buffer_info);
1800 DPRINTK(PROBE, ERR,
1801 "Unable to allocate memory for the receive descriptor ring\n");
1802 return -ENOMEM;
1803 }
2d7edb92 1804
1c7e5b12
YB
1805 rxdr->ps_page_dma = kcalloc(rxdr->count,
1806 sizeof(struct e1000_ps_page_dma),
1807 GFP_KERNEL);
96838a40 1808 if (!rxdr->ps_page_dma) {
2d7edb92
MC
1809 vfree(rxdr->buffer_info);
1810 kfree(rxdr->ps_page);
1811 DPRINTK(PROBE, ERR,
1812 "Unable to allocate memory for the receive descriptor ring\n");
1813 return -ENOMEM;
1814 }
2d7edb92 1815
1dc32918 1816 if (hw->mac_type <= e1000_82547_rev_2)
2d7edb92
MC
1817 desc_len = sizeof(struct e1000_rx_desc);
1818 else
1819 desc_len = sizeof(union e1000_rx_desc_packet_split);
1820
1da177e4
LT
1821 /* Round up to nearest 4K */
1822
2d7edb92 1823 rxdr->size = rxdr->count * desc_len;
9099cfb9 1824 rxdr->size = ALIGN(rxdr->size, 4096);
1da177e4
LT
1825
1826 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
1827
581d708e
MC
1828 if (!rxdr->desc) {
1829 DPRINTK(PROBE, ERR,
1830 "Unable to allocate memory for the receive descriptor ring\n");
1da177e4 1831setup_rx_desc_die:
1da177e4 1832 vfree(rxdr->buffer_info);
2d7edb92
MC
1833 kfree(rxdr->ps_page);
1834 kfree(rxdr->ps_page_dma);
1da177e4
LT
1835 return -ENOMEM;
1836 }
1837
2648345f 1838 /* Fix for errata 23, can't cross 64kB boundary */
1da177e4
LT
1839 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1840 void *olddesc = rxdr->desc;
1841 dma_addr_t olddma = rxdr->dma;
2648345f
MC
1842 DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
1843 "at %p\n", rxdr->size, rxdr->desc);
1844 /* Try again, without freeing the previous */
1da177e4 1845 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
2648345f 1846 /* Failed allocation, critical failure */
581d708e 1847 if (!rxdr->desc) {
1da177e4 1848 pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
581d708e
MC
1849 DPRINTK(PROBE, ERR,
1850 "Unable to allocate memory "
1851 "for the receive descriptor ring\n");
1da177e4
LT
1852 goto setup_rx_desc_die;
1853 }
1854
1855 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1856 /* give up */
2648345f
MC
1857 pci_free_consistent(pdev, rxdr->size, rxdr->desc,
1858 rxdr->dma);
1da177e4 1859 pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
2648345f
MC
1860 DPRINTK(PROBE, ERR,
1861 "Unable to allocate aligned memory "
1862 "for the receive descriptor ring\n");
581d708e 1863 goto setup_rx_desc_die;
1da177e4 1864 } else {
2648345f 1865 /* Free old allocation, new allocation was successful */
1da177e4
LT
1866 pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1867 }
1868 }
1869 memset(rxdr->desc, 0, rxdr->size);
1870
1871 rxdr->next_to_clean = 0;
1872 rxdr->next_to_use = 0;
1873
1874 return 0;
1875}
1876
581d708e
MC
1877/**
1878 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1879 * (Descriptors) for all queues
1880 * @adapter: board private structure
1881 *
581d708e
MC
1882 * Return 0 on success, negative on failure
1883 **/
1884
64798845 1885int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
581d708e
MC
1886{
1887 int i, err = 0;
1888
f56799ea 1889 for (i = 0; i < adapter->num_rx_queues; i++) {
581d708e
MC
1890 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1891 if (err) {
1892 DPRINTK(PROBE, ERR,
1893 "Allocation for Rx Queue %u failed\n", i);
3fbbc72e
VA
1894 for (i--; i >= 0; i--)
1895 e1000_free_rx_resources(adapter,
1896 &adapter->rx_ring[i]);
581d708e
MC
1897 break;
1898 }
1899 }
1900
1901 return err;
1902}
1903
1da177e4 1904/**
2648345f 1905 * e1000_setup_rctl - configure the receive control registers
1da177e4
LT
1906 * @adapter: Board private structure
1907 **/
e4c811c9
MC
1908#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
1909 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
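/* Worked example: with 4kB pages (PAGE_SHIFT = 12) and a jumbo MTU of
 * 9000, PAGE_USE_COUNT(9000) = (9000 >> 12) + 1 = 3, which still
 * passes the "pages <= 3" test below, so packet split can be enabled
 * for that MTU on 82571 and newer parts. */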
64798845 1910static void e1000_setup_rctl(struct e1000_adapter *adapter)
1da177e4 1911{
1dc32918 1912 struct e1000_hw *hw = &adapter->hw;
406874a7
JP
1913 u32 rctl, rfctl;
1914 u32 psrctl = 0;
35ec56bb 1915#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
406874a7 1916 u32 pages = 0;
e4c811c9 1917#endif
1da177e4 1918
1dc32918 1919 rctl = er32(RCTL);
1da177e4
LT
1920
1921 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1922
1923 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
1924 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1dc32918 1925 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1da177e4 1926
1dc32918 1927 if (hw->tbi_compatibility_on == 1)
1da177e4
LT
1928 rctl |= E1000_RCTL_SBP;
1929 else
1930 rctl &= ~E1000_RCTL_SBP;
1931
2d7edb92
MC
1932 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1933 rctl &= ~E1000_RCTL_LPE;
1934 else
1935 rctl |= E1000_RCTL_LPE;
1936
1da177e4 1937 /* Setup buffer sizes */
9e2feace
AK
1938 rctl &= ~E1000_RCTL_SZ_4096;
1939 rctl |= E1000_RCTL_BSEX;
1940 switch (adapter->rx_buffer_len) {
1941 case E1000_RXBUFFER_256:
1942 rctl |= E1000_RCTL_SZ_256;
1943 rctl &= ~E1000_RCTL_BSEX;
1944 break;
1945 case E1000_RXBUFFER_512:
1946 rctl |= E1000_RCTL_SZ_512;
1947 rctl &= ~E1000_RCTL_BSEX;
1948 break;
1949 case E1000_RXBUFFER_1024:
1950 rctl |= E1000_RCTL_SZ_1024;
1951 rctl &= ~E1000_RCTL_BSEX;
1952 break;
a1415ee6
JK
1953 case E1000_RXBUFFER_2048:
1954 default:
1955 rctl |= E1000_RCTL_SZ_2048;
1956 rctl &= ~E1000_RCTL_BSEX;
1957 break;
1958 case E1000_RXBUFFER_4096:
1959 rctl |= E1000_RCTL_SZ_4096;
1960 break;
1961 case E1000_RXBUFFER_8192:
1962 rctl |= E1000_RCTL_SZ_8192;
1963 break;
1964 case E1000_RXBUFFER_16384:
1965 rctl |= E1000_RCTL_SZ_16384;
1966 break;
2d7edb92
MC
1967 }
1968
35ec56bb 1969#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
2d7edb92
MC
1970 /* 82571 and greater support packet-split where the protocol
1971 * header is placed in skb->data and the packet data is
1972 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
1973 * In the case of a non-split, skb->data is linearly filled,
1974 * followed by the page buffers. Therefore, skb->data is
1975 * sized to hold the largest protocol header.
1976 */
e64d7d02
JB
1977 /* allocations using alloc_page take too long for regular MTU
1978 * so only enable packet split for jumbo frames */
e4c811c9 1979 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
1dc32918 1980 if ((hw->mac_type >= e1000_82571) && (pages <= 3) &&
e64d7d02 1981 PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE))
e4c811c9
MC
1982 adapter->rx_ps_pages = pages;
1983 else
1984 adapter->rx_ps_pages = 0;
2d7edb92 1985#endif
e4c811c9 1986 if (adapter->rx_ps_pages) {
2d7edb92 1987 /* Configure extra packet-split registers */
1dc32918 1988 rfctl = er32(RFCTL);
2d7edb92 1989 rfctl |= E1000_RFCTL_EXTEN;
87ca4e5b
AK
1990 /* disable packet split support for IPv6 extension headers,
1991 * because some malformed IPv6 headers can hang the RX */
1992 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
1993 E1000_RFCTL_NEW_IPV6_EXT_DIS);
1994
1dc32918 1995 ew32(RFCTL, rfctl);
2d7edb92 1996
7dfee0cb 1997 rctl |= E1000_RCTL_DTYP_PS;
96838a40 1998
2d7edb92
MC
1999 psrctl |= adapter->rx_ps_bsize0 >>
2000 E1000_PSRCTL_BSIZE0_SHIFT;
e4c811c9
MC
2001
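 /* Note: the switch below relies on deliberate fall-through; each
 * larger rx_ps_pages value programs its own BSIZE field and then
 * falls into the smaller cases, so three pages set BSIZE3, BSIZE2
 * and BSIZE1 in one cascade (there is no break until case 1). */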
2002 switch (adapter->rx_ps_pages) {
2003 case 3:
2004 psrctl |= PAGE_SIZE <<
2005 E1000_PSRCTL_BSIZE3_SHIFT;
2006 case 2:
2007 psrctl |= PAGE_SIZE <<
2008 E1000_PSRCTL_BSIZE2_SHIFT;
2009 case 1:
2010 psrctl |= PAGE_SIZE >>
2011 E1000_PSRCTL_BSIZE1_SHIFT;
2012 break;
2013 }
2d7edb92 2014
1dc32918 2015 ew32(PSRCTL, psrctl);
1da177e4
LT
2016 }
2017
1dc32918 2018 ew32(RCTL, rctl);
1da177e4
LT
2019}
2020
2021/**
2022 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
2023 * @adapter: board private structure
2024 *
2025 * Configure the Rx unit of the MAC after a reset.
2026 **/
2027
64798845 2028static void e1000_configure_rx(struct e1000_adapter *adapter)
1da177e4 2029{
406874a7 2030 u64 rdba;
581d708e 2031 struct e1000_hw *hw = &adapter->hw;
406874a7 2032 u32 rdlen, rctl, rxcsum, ctrl_ext;
2d7edb92 2033
e4c811c9 2034 if (adapter->rx_ps_pages) {
0f15a8fa 2035 /* this is a 32 byte descriptor */
581d708e 2036 rdlen = adapter->rx_ring[0].count *
2d7edb92
MC
2037 sizeof(union e1000_rx_desc_packet_split);
2038 adapter->clean_rx = e1000_clean_rx_irq_ps;
2039 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
2040 } else {
581d708e
MC
2041 rdlen = adapter->rx_ring[0].count *
2042 sizeof(struct e1000_rx_desc);
2d7edb92
MC
2043 adapter->clean_rx = e1000_clean_rx_irq;
2044 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
2045 }
1da177e4
LT
2046
2047 /* disable receives while setting up the descriptors */
1dc32918
JP
2048 rctl = er32(RCTL);
2049 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1da177e4
LT
2050
2051 /* set the Receive Delay Timer Register */
1dc32918 2052 ew32(RDTR, adapter->rx_int_delay);
1da177e4 2053
581d708e 2054 if (hw->mac_type >= e1000_82540) {
1dc32918 2055 ew32(RADV, adapter->rx_abs_int_delay);
835bb129 2056 if (adapter->itr_setting != 0)
1dc32918 2057 ew32(ITR, 1000000000 / (adapter->itr * 256));
1da177e4
LT
2058 }
2059
2ae76d98 2060 if (hw->mac_type >= e1000_82571) {
1dc32918 2061 ctrl_ext = er32(CTRL_EXT);
1e613fd9 2062 /* Reset delay timers after every interrupt */
6fc7a7ec 2063 ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
1e613fd9 2064#ifdef CONFIG_E1000_NAPI
835bb129 2065 /* Auto-Mask interrupts upon ICR access */
1e613fd9 2066 ctrl_ext |= E1000_CTRL_EXT_IAME;
1dc32918 2067 ew32(IAM, 0xffffffff);
1e613fd9 2068#endif
1dc32918
JP
2069 ew32(CTRL_EXT, ctrl_ext);
2070 E1000_WRITE_FLUSH();
2ae76d98
MC
2071 }
2072
581d708e
MC
2073 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2074 * the Base and Length of the Rx Descriptor Ring */
f56799ea 2075 switch (adapter->num_rx_queues) {
24025e4e
MC
2076 case 1:
2077 default:
581d708e 2078 rdba = adapter->rx_ring[0].dma;
1dc32918
JP
2079 ew32(RDLEN, rdlen);
2080 ew32(RDBAH, (rdba >> 32));
2081 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
2082 ew32(RDT, 0);
2083 ew32(RDH, 0);
6a951698
AK
2084 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
2085 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
581d708e 2086 break;
24025e4e
MC
2087 }
2088
1da177e4 2089 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
581d708e 2090 if (hw->mac_type >= e1000_82543) {
1dc32918 2091 rxcsum = er32(RXCSUM);
c3033b01 2092 if (adapter->rx_csum) {
2d7edb92
MC
2093 rxcsum |= E1000_RXCSUM_TUOFL;
2094
868d5309 2095 /* Enable 82571 IPv4 payload checksum for UDP fragments
2d7edb92 2096 * Must be used in conjunction with packet-split. */
96838a40
JB
2097 if ((hw->mac_type >= e1000_82571) &&
2098 (adapter->rx_ps_pages)) {
2d7edb92
MC
2099 rxcsum |= E1000_RXCSUM_IPPCSE;
2100 }
2101 } else {
2102 rxcsum &= ~E1000_RXCSUM_TUOFL;
2103 /* don't need to clear IPPCSE as it defaults to 0 */
2104 }
1dc32918 2105 ew32(RXCSUM, rxcsum);
1da177e4
LT
2106 }
2107
21c4d5e0
AK
2108 /* enable early receives on 82573; this only takes effect with a
2109 * total frame size > 2048 bytes, i.e. only for jumbo frames */
2110#define E1000_ERT_2048 0x100
2111 if (hw->mac_type == e1000_82573)
1dc32918 2112 ew32(ERT, E1000_ERT_2048);
21c4d5e0 2113
1da177e4 2114 /* Enable Receives */
1dc32918 2115 ew32(RCTL, rctl);
1da177e4
LT
2116}
2117
2118/**
581d708e 2119 * e1000_free_tx_resources - Free Tx Resources per Queue
1da177e4 2120 * @adapter: board private structure
581d708e 2121 * @tx_ring: Tx descriptor ring for a specific queue
1da177e4
LT
2122 *
2123 * Free all transmit software resources
2124 **/
2125
64798845
JP
2126static void e1000_free_tx_resources(struct e1000_adapter *adapter,
2127 struct e1000_tx_ring *tx_ring)
1da177e4
LT
2128{
2129 struct pci_dev *pdev = adapter->pdev;
2130
581d708e 2131 e1000_clean_tx_ring(adapter, tx_ring);
1da177e4 2132
581d708e
MC
2133 vfree(tx_ring->buffer_info);
2134 tx_ring->buffer_info = NULL;
1da177e4 2135
581d708e 2136 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
1da177e4 2137
581d708e
MC
2138 tx_ring->desc = NULL;
2139}
2140
2141/**
2142 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
2143 * @adapter: board private structure
2144 *
2145 * Free all transmit software resources
2146 **/
2147
64798845 2148void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
581d708e
MC
2149{
2150 int i;
2151
f56799ea 2152 for (i = 0; i < adapter->num_tx_queues; i++)
581d708e 2153 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1da177e4
LT
2154}
2155
64798845
JP
2156static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
2157 struct e1000_buffer *buffer_info)
1da177e4 2158{
96838a40 2159 if (buffer_info->dma) {
2648345f
MC
2160 pci_unmap_page(adapter->pdev,
2161 buffer_info->dma,
2162 buffer_info->length,
2163 PCI_DMA_TODEVICE);
a9ebadd6 2164 buffer_info->dma = 0;
1da177e4 2165 }
a9ebadd6 2166 if (buffer_info->skb) {
1da177e4 2167 dev_kfree_skb_any(buffer_info->skb);
a9ebadd6
JB
2168 buffer_info->skb = NULL;
2169 }
2170 /* buffer_info must be completely set up in the transmit path */
1da177e4
LT
2171}
2172
2173/**
2174 * e1000_clean_tx_ring - Free Tx Buffers
2175 * @adapter: board private structure
581d708e 2176 * @tx_ring: ring to be cleaned
1da177e4
LT
2177 **/
2178
64798845
JP
2179static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
2180 struct e1000_tx_ring *tx_ring)
1da177e4 2181{
1dc32918 2182 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
2183 struct e1000_buffer *buffer_info;
2184 unsigned long size;
2185 unsigned int i;
2186
2187 /* Free all the Tx ring sk_buffs */
2188
96838a40 2189 for (i = 0; i < tx_ring->count; i++) {
1da177e4
LT
2190 buffer_info = &tx_ring->buffer_info[i];
2191 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2192 }
2193
2194 size = sizeof(struct e1000_buffer) * tx_ring->count;
2195 memset(tx_ring->buffer_info, 0, size);
2196
2197 /* Zero out the descriptor ring */
2198
2199 memset(tx_ring->desc, 0, tx_ring->size);
2200
2201 tx_ring->next_to_use = 0;
2202 tx_ring->next_to_clean = 0;
fd803241 2203 tx_ring->last_tx_tso = 0;
1da177e4 2204
1dc32918
JP
2205 writel(0, hw->hw_addr + tx_ring->tdh);
2206 writel(0, hw->hw_addr + tx_ring->tdt);
581d708e
MC
2207}
2208
2209/**
2210 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2211 * @adapter: board private structure
2212 **/
2213
64798845 2214static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
581d708e
MC
2215{
2216 int i;
2217
f56799ea 2218 for (i = 0; i < adapter->num_tx_queues; i++)
581d708e 2219 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1da177e4
LT
2220}
2221
2222/**
2223 * e1000_free_rx_resources - Free Rx Resources
2224 * @adapter: board private structure
581d708e 2225 * @rx_ring: ring to clean the resources from
1da177e4
LT
2226 *
2227 * Free all receive software resources
2228 **/
2229
64798845
JP
2230static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2231 struct e1000_rx_ring *rx_ring)
1da177e4 2232{
1da177e4
LT
2233 struct pci_dev *pdev = adapter->pdev;
2234
581d708e 2235 e1000_clean_rx_ring(adapter, rx_ring);
1da177e4
LT
2236
2237 vfree(rx_ring->buffer_info);
2238 rx_ring->buffer_info = NULL;
2d7edb92
MC
2239 kfree(rx_ring->ps_page);
2240 rx_ring->ps_page = NULL;
2241 kfree(rx_ring->ps_page_dma);
2242 rx_ring->ps_page_dma = NULL;
1da177e4
LT
2243
2244 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2245
2246 rx_ring->desc = NULL;
2247}
2248
2249/**
581d708e 2250 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
1da177e4 2251 * @adapter: board private structure
581d708e
MC
2252 *
2253 * Free all receive software resources
2254 **/
2255
64798845 2256void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
581d708e
MC
2257{
2258 int i;
2259
f56799ea 2260 for (i = 0; i < adapter->num_rx_queues; i++)
581d708e
MC
2261 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2262}
2263
2264/**
2265 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2266 * @adapter: board private structure
2267 * @rx_ring: ring to free buffers from
1da177e4
LT
2268 **/
2269
64798845
JP
2270static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2271 struct e1000_rx_ring *rx_ring)
1da177e4 2272{
1dc32918 2273 struct e1000_hw *hw = &adapter->hw;
1da177e4 2274 struct e1000_buffer *buffer_info;
2d7edb92
MC
2275 struct e1000_ps_page *ps_page;
2276 struct e1000_ps_page_dma *ps_page_dma;
1da177e4
LT
2277 struct pci_dev *pdev = adapter->pdev;
2278 unsigned long size;
2d7edb92 2279 unsigned int i, j;
1da177e4
LT
2280
2281 /* Free all the Rx ring sk_buffs */
96838a40 2282 for (i = 0; i < rx_ring->count; i++) {
1da177e4 2283 buffer_info = &rx_ring->buffer_info[i];
96838a40 2284 if (buffer_info->skb) {
1da177e4
LT
2285 pci_unmap_single(pdev,
2286 buffer_info->dma,
2287 buffer_info->length,
2288 PCI_DMA_FROMDEVICE);
2289
2290 dev_kfree_skb(buffer_info->skb);
2291 buffer_info->skb = NULL;
997f5cbd
JK
2292 }
2293 ps_page = &rx_ring->ps_page[i];
2294 ps_page_dma = &rx_ring->ps_page_dma[i];
2295 for (j = 0; j < adapter->rx_ps_pages; j++) {
2296 if (!ps_page->ps_page[j]) break;
2297 pci_unmap_page(pdev,
2298 ps_page_dma->ps_page_dma[j],
2299 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2300 ps_page_dma->ps_page_dma[j] = 0;
2301 put_page(ps_page->ps_page[j]);
2302 ps_page->ps_page[j] = NULL;
1da177e4
LT
2303 }
2304 }
2305
2306 size = sizeof(struct e1000_buffer) * rx_ring->count;
2307 memset(rx_ring->buffer_info, 0, size);
2d7edb92
MC
2308 size = sizeof(struct e1000_ps_page) * rx_ring->count;
2309 memset(rx_ring->ps_page, 0, size);
2310 size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
2311 memset(rx_ring->ps_page_dma, 0, size);
1da177e4
LT
2312
2313 /* Zero out the descriptor ring */
2314
2315 memset(rx_ring->desc, 0, rx_ring->size);
2316
2317 rx_ring->next_to_clean = 0;
2318 rx_ring->next_to_use = 0;
2319
1dc32918
JP
2320 writel(0, hw->hw_addr + rx_ring->rdh);
2321 writel(0, hw->hw_addr + rx_ring->rdt);
581d708e
MC
2322}
2323
2324/**
2325 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2326 * @adapter: board private structure
2327 **/
2328
64798845 2329static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
581d708e
MC
2330{
2331 int i;
2332
f56799ea 2333 for (i = 0; i < adapter->num_rx_queues; i++)
581d708e 2334 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1da177e4
LT
2335}
2336
2337/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2338 * and memory write and invalidate disabled for certain operations
2339 */
64798845 2340static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
1da177e4 2341{
1dc32918 2342 struct e1000_hw *hw = &adapter->hw;
1da177e4 2343 struct net_device *netdev = adapter->netdev;
406874a7 2344 u32 rctl;
1da177e4 2345
1dc32918 2346 e1000_pci_clear_mwi(hw);
1da177e4 2347
1dc32918 2348 rctl = er32(RCTL);
1da177e4 2349 rctl |= E1000_RCTL_RST;
1dc32918
JP
2350 ew32(RCTL, rctl);
2351 E1000_WRITE_FLUSH();
1da177e4
LT
2352 mdelay(5);
2353
96838a40 2354 if (netif_running(netdev))
581d708e 2355 e1000_clean_all_rx_rings(adapter);
1da177e4
LT
2356}
2357
64798845 2358static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
1da177e4 2359{
1dc32918 2360 struct e1000_hw *hw = &adapter->hw;
1da177e4 2361 struct net_device *netdev = adapter->netdev;
406874a7 2362 u32 rctl;
1da177e4 2363
1dc32918 2364 rctl = er32(RCTL);
1da177e4 2365 rctl &= ~E1000_RCTL_RST;
1dc32918
JP
2366 ew32(RCTL, rctl);
2367 E1000_WRITE_FLUSH();
1da177e4
LT
2368 mdelay(5);
2369
1dc32918
JP
2370 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2371 e1000_pci_set_mwi(hw);
1da177e4 2372
96838a40 2373 if (netif_running(netdev)) {
72d64a43
JK
2374 /* No need to loop, because 82542 supports only 1 queue */
2375 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
7c4d3367 2376 e1000_configure_rx(adapter);
72d64a43 2377 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
1da177e4
LT
2378 }
2379}
2380
2381/**
2382 * e1000_set_mac - Change the Ethernet Address of the NIC
2383 * @netdev: network interface device structure
2384 * @p: pointer to an address structure
2385 *
2386 * Returns 0 on success, negative on failure
2387 **/
2388
64798845 2389static int e1000_set_mac(struct net_device *netdev, void *p)
1da177e4 2390{
60490fe0 2391 struct e1000_adapter *adapter = netdev_priv(netdev);
1dc32918 2392 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
2393 struct sockaddr *addr = p;
2394
96838a40 2395 if (!is_valid_ether_addr(addr->sa_data))
1da177e4
LT
2396 return -EADDRNOTAVAIL;
2397
2398 /* 82542 2.0 needs to be in reset to write receive address registers */
2399
1dc32918 2400 if (hw->mac_type == e1000_82542_rev2_0)
1da177e4
LT
2401 e1000_enter_82542_rst(adapter);
2402
2403 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1dc32918 2404 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
1da177e4 2405
1dc32918 2406 e1000_rar_set(hw, hw->mac_addr, 0);
1da177e4 2407
868d5309
MC
2408 /* With 82571 controllers, LAA may be overwritten (with the default)
2409 * due to controller reset from the other port. */
1dc32918 2410 if (hw->mac_type == e1000_82571) {
868d5309 2411 /* activate the workaround */
1dc32918 2412 hw->laa_is_present = 1;
868d5309 2413
96838a40
JB
2414 /* Hold a copy of the LAA in RAR[14]. This is done so that
2415 * between the time RAR[0] gets clobbered and the time it
2416 * gets fixed (in e1000_watchdog), the actual LAA is in one
868d5309 2417 * of the RARs and no incoming packets directed to this port
96838a40 2418 * are dropped. Eventually the LAA will be in RAR[0] and
868d5309 2419 * RAR[14] */
1dc32918 2420 e1000_rar_set(hw, hw->mac_addr,
868d5309
MC
2421 E1000_RAR_ENTRIES - 1);
2422 }
2423
1dc32918 2424 if (hw->mac_type == e1000_82542_rev2_0)
1da177e4
LT
2425 e1000_leave_82542_rst(adapter);
2426
2427 return 0;
2428}
2429
2430/**
db0ce50d 2431 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
1da177e4
LT
2432 * @netdev: network interface device structure
2433 *
db0ce50d
PM
2434 * The set_rx_mode entry point is called whenever the unicast or multicast
2435 * address lists or the network interface flags are updated. This routine is
2436 * responsible for configuring the hardware for proper unicast, multicast,
1da177e4
LT
2437 * promiscuous mode, and all-multi behavior.
2438 **/
2439
64798845 2440static void e1000_set_rx_mode(struct net_device *netdev)
1da177e4 2441{
60490fe0 2442 struct e1000_adapter *adapter = netdev_priv(netdev);
1da177e4 2443 struct e1000_hw *hw = &adapter->hw;
db0ce50d
PM
2444 struct dev_addr_list *uc_ptr;
2445 struct dev_addr_list *mc_ptr;
406874a7
JP
2446 u32 rctl;
2447 u32 hash_value;
868d5309 2448 int i, rar_entries = E1000_RAR_ENTRIES;
cd94dd0b
AK
2449 int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
2450 E1000_NUM_MTA_REGISTERS_ICH8LAN :
2451 E1000_NUM_MTA_REGISTERS;
2452
1dc32918 2453 if (hw->mac_type == e1000_ich8lan)
cd94dd0b 2454 rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
1da177e4 2455
868d5309 2456 /* reserve RAR[14] for LAA over-write work-around */
1dc32918 2457 if (hw->mac_type == e1000_82571)
868d5309 2458 rar_entries--;
1da177e4 2459
2648345f
MC
2460 /* Check for Promiscuous and All Multicast modes */
2461
1dc32918 2462 rctl = er32(RCTL);
1da177e4 2463
96838a40 2464 if (netdev->flags & IFF_PROMISC) {
1da177e4 2465 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
746b9f02 2466 rctl &= ~E1000_RCTL_VFE;
1da177e4 2467 } else {
746b9f02
PM
2468 if (netdev->flags & IFF_ALLMULTI) {
2469 rctl |= E1000_RCTL_MPE;
2470 } else {
2471 rctl &= ~E1000_RCTL_MPE;
2472 }
78ed11a5 2473 if (adapter->hw.mac_type != e1000_ich8lan)
746b9f02 2474 rctl |= E1000_RCTL_VFE;
db0ce50d
PM
2475 }
2476
2477 uc_ptr = NULL;
2478 if (netdev->uc_count > rar_entries - 1) {
2479 rctl |= E1000_RCTL_UPE;
2480 } else if (!(netdev->flags & IFF_PROMISC)) {
2481 rctl &= ~E1000_RCTL_UPE;
2482 uc_ptr = netdev->uc_list;
1da177e4
LT
2483 }
2484
1dc32918 2485 ew32(RCTL, rctl);
1da177e4
LT
2486
2487 /* 82542 2.0 needs to be in reset to write receive address registers */
2488
96838a40 2489 if (hw->mac_type == e1000_82542_rev2_0)
1da177e4
LT
2490 e1000_enter_82542_rst(adapter);
2491
db0ce50d
PM
2492 /* load the first 14 addresses into the exact filters 1-14. Unicast
2493 * addresses take precedence to avoid disabling unicast filtering
2494 * when possible.
2495 *
1da177e4
LT
2496 * RAR 0 is used for the station MAC address
2497 * if there are not 14 addresses, go ahead and clear the filters
868d5309 2498 * -- with 82571 controllers only 0-13 entries are filled here
1da177e4
LT
2499 */
2500 mc_ptr = netdev->mc_list;
2501
96838a40 2502 for (i = 1; i < rar_entries; i++) {
db0ce50d
PM
2503 if (uc_ptr) {
2504 e1000_rar_set(hw, uc_ptr->da_addr, i);
2505 uc_ptr = uc_ptr->next;
2506 } else if (mc_ptr) {
2507 e1000_rar_set(hw, mc_ptr->da_addr, i);
1da177e4
LT
2508 mc_ptr = mc_ptr->next;
2509 } else {
2510 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
1dc32918 2511 E1000_WRITE_FLUSH();
1da177e4 2512 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
1dc32918 2513 E1000_WRITE_FLUSH();
1da177e4
LT
2514 }
2515 }
db0ce50d 2516 WARN_ON(uc_ptr != NULL);
1da177e4
LT
2517
2518 /* clear the old settings from the multicast hash table */
2519
cd94dd0b 2520 for (i = 0; i < mta_reg_count; i++) {
1da177e4 2521 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
1dc32918 2522 E1000_WRITE_FLUSH();
4ca213a6 2523 }
1da177e4
LT
2524
2525 /* load any remaining addresses into the hash table */
2526
96838a40 2527 for (; mc_ptr; mc_ptr = mc_ptr->next) {
db0ce50d 2528 hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
1da177e4
LT
2529 e1000_mta_set(hw, hash_value);
2530 }
2531
96838a40 2532 if (hw->mac_type == e1000_82542_rev2_0)
1da177e4 2533 e1000_leave_82542_rst(adapter);
1da177e4
LT
2534}
2535
2536/* Need to wait a few seconds after link up to get diagnostic information from
2537 * the phy */
2538
64798845 2539static void e1000_update_phy_info(unsigned long data)
1da177e4 2540{
e982f17c 2541 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
1dc32918
JP
2542 struct e1000_hw *hw = &adapter->hw;
2543 e1000_phy_get_info(hw, &adapter->phy_info);
1da177e4
LT
2544}
2545
2546/**
2547 * e1000_82547_tx_fifo_stall - Timer Call-back
2548 * @data: pointer to adapter cast into an unsigned long
2549 **/
2550
64798845 2551static void e1000_82547_tx_fifo_stall(unsigned long data)
1da177e4 2552{
e982f17c 2553 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
1dc32918 2554 struct e1000_hw *hw = &adapter->hw;
1da177e4 2555 struct net_device *netdev = adapter->netdev;
406874a7 2556 u32 tctl;
1da177e4 2557
96838a40 2558 if (atomic_read(&adapter->tx_fifo_stall)) {
1dc32918
JP
2559 if ((er32(TDT) == er32(TDH)) &&
2560 (er32(TDFT) == er32(TDFH)) &&
2561 (er32(TDFTS) == er32(TDFHS))) {
2562 tctl = er32(TCTL);
2563 ew32(TCTL, tctl & ~E1000_TCTL_EN);
2564 ew32(TDFT, adapter->tx_head_addr);
2565 ew32(TDFH, adapter->tx_head_addr);
2566 ew32(TDFTS, adapter->tx_head_addr);
2567 ew32(TDFHS, adapter->tx_head_addr);
2568 ew32(TCTL, tctl);
2569 E1000_WRITE_FLUSH();
1da177e4
LT
2570
2571 adapter->tx_fifo_head = 0;
2572 atomic_set(&adapter->tx_fifo_stall, 0);
2573 netif_wake_queue(netdev);
2574 } else {
2575 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
2576 }
2577 }
2578}
2579
2580/**
2581 * e1000_watchdog - Timer Call-back
2582 * @data: pointer to adapter cast into an unsigned long
2583 **/
64798845 2584static void e1000_watchdog(unsigned long data)
1da177e4 2585{
e982f17c 2586 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
1dc32918 2587 struct e1000_hw *hw = &adapter->hw;
1da177e4 2588 struct net_device *netdev = adapter->netdev;
545c67c0 2589 struct e1000_tx_ring *txdr = adapter->tx_ring;
406874a7
JP
2590 u32 link, tctl;
2591 s32 ret_val;
cd94dd0b 2592
1dc32918 2593 ret_val = e1000_check_for_link(hw);
cd94dd0b 2594 if ((ret_val == E1000_ERR_PHY) &&
1dc32918
JP
2595 (hw->phy_type == e1000_phy_igp_3) &&
2596 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
cd94dd0b
AK
2597 /* See e1000_kumeran_lock_loss_workaround() */
2598 DPRINTK(LINK, INFO,
2599 "Gigabit has been disabled, downgrading speed\n");
2600 }
90fb5135 2601
1dc32918
JP
2602 if (hw->mac_type == e1000_82573) {
2603 e1000_enable_tx_pkt_filtering(hw);
2604 if (adapter->mng_vlan_id != hw->mng_cookie.vlan_id)
2d7edb92 2605 e1000_update_mng_vlan(adapter);
96838a40 2606 }
1da177e4 2607
1dc32918
JP
2608 if ((hw->media_type == e1000_media_type_internal_serdes) &&
2609 !(er32(TXCW) & E1000_TXCW_ANE))
2610 link = !hw->serdes_link_down;
1da177e4 2611 else
1dc32918 2612 link = er32(STATUS) & E1000_STATUS_LU;
1da177e4 2613
96838a40
JB
2614 if (link) {
2615 if (!netif_carrier_ok(netdev)) {
406874a7 2616 u32 ctrl;
c3033b01 2617 bool txb2b = true;
1dc32918 2618 e1000_get_speed_and_duplex(hw,
1da177e4
LT
2619 &adapter->link_speed,
2620 &adapter->link_duplex);
2621
1dc32918 2622 ctrl = er32(CTRL);
9669f53b
AK
2623 DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
2624 "Flow Control: %s\n",
2625 adapter->link_speed,
2626 adapter->link_duplex == FULL_DUPLEX ?
2627 "Full Duplex" : "Half Duplex",
2628 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2629 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2630 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2631 E1000_CTRL_TFCE) ? "TX" : "None")));
1da177e4 2632
7e6c9861
JK
2633 /* tweak tx_queue_len according to speed/duplex
2634 * and adjust the timeout factor */
66a2b0a3
JK
2635 netdev->tx_queue_len = adapter->tx_queue_len;
2636 adapter->tx_timeout_factor = 1;
7e6c9861
JK
2637 switch (adapter->link_speed) {
2638 case SPEED_10:
c3033b01 2639 txb2b = false;
7e6c9861
JK
2640 netdev->tx_queue_len = 10;
2641 adapter->tx_timeout_factor = 8;
2642 break;
2643 case SPEED_100:
c3033b01 2644 txb2b = false;
7e6c9861
JK
2645 netdev->tx_queue_len = 100;
2646 /* maybe add some timeout factor ? */
2647 break;
2648 }
2649
1dc32918
JP
2650 if ((hw->mac_type == e1000_82571 ||
2651 hw->mac_type == e1000_82572) &&
c3033b01 2652 !txb2b) {
406874a7 2653 u32 tarc0;
1dc32918 2654 tarc0 = er32(TARC0);
90fb5135 2655 tarc0 &= ~(1 << 21);
1dc32918 2656 ew32(TARC0, tarc0);
7e6c9861 2657 }
90fb5135 2658
7e6c9861
JK
2659 /* disable TSO for pcie and 10/100 speeds, to avoid
2660 * some hardware issues */
2661 if (!adapter->tso_force &&
1dc32918 2662 hw->bus_type == e1000_bus_type_pci_express){
66a2b0a3
JK
2663 switch (adapter->link_speed) {
2664 case SPEED_10:
66a2b0a3 2665 case SPEED_100:
7e6c9861
JK
2666 DPRINTK(PROBE, INFO,
2667 "10/100 speed: disabling TSO\n");
2668 netdev->features &= ~NETIF_F_TSO;
87ca4e5b 2669 netdev->features &= ~NETIF_F_TSO6;
7e6c9861
JK
2670 break;
2671 case SPEED_1000:
2672 netdev->features |= NETIF_F_TSO;
87ca4e5b 2673 netdev->features |= NETIF_F_TSO6;
7e6c9861
JK
2674 break;
2675 default:
2676 /* oops */
66a2b0a3
JK
2677 break;
2678 }
2679 }
7e6c9861
JK
2680
2681 /* enable transmits in the hardware, need to do this
2682 * after setting TARC0 */
1dc32918 2683 tctl = er32(TCTL);
7e6c9861 2684 tctl |= E1000_TCTL_EN;
1dc32918 2685 ew32(TCTL, tctl);
66a2b0a3 2686
1da177e4
LT
2687 netif_carrier_on(netdev);
2688 netif_wake_queue(netdev);
56e1393f 2689 mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
1da177e4 2690 adapter->smartspeed = 0;
bb8e3311
JG
2691 } else {
2692 /* make sure the receive unit is started */
1dc32918
JP
2693 if (hw->rx_needs_kicking) {
2694 u32 rctl = er32(RCTL);
2695 ew32(RCTL, rctl | E1000_RCTL_EN);
bb8e3311 2696 }
1da177e4
LT
2697 }
2698 } else {
96838a40 2699 if (netif_carrier_ok(netdev)) {
1da177e4
LT
2700 adapter->link_speed = 0;
2701 adapter->link_duplex = 0;
2702 DPRINTK(LINK, INFO, "NIC Link is Down\n");
2703 netif_carrier_off(netdev);
2704 netif_stop_queue(netdev);
56e1393f 2705 mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
87041639
JK
2706
2707 /* 80003ES2LAN workaround --
2708 * as a packet-buffer work-around on a link-down event,
2709 * disable receives in the ISR and
2710 * reset the device here in the watchdog
2711 */
1dc32918 2712 if (hw->mac_type == e1000_80003es2lan)
87041639
JK
2713 /* reset device */
2714 schedule_work(&adapter->reset_task);
1da177e4
LT
2715 }
2716
2717 e1000_smartspeed(adapter);
2718 }
2719
2720 e1000_update_stats(adapter);
2721
1dc32918 2722 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
1da177e4 2723 adapter->tpt_old = adapter->stats.tpt;
1dc32918 2724 hw->collision_delta = adapter->stats.colc - adapter->colc_old;
1da177e4
LT
2725 adapter->colc_old = adapter->stats.colc;
2726
2727 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2728 adapter->gorcl_old = adapter->stats.gorcl;
2729 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2730 adapter->gotcl_old = adapter->stats.gotcl;
2731
1dc32918 2732 e1000_update_adaptive(hw);
1da177e4 2733
f56799ea 2734 if (!netif_carrier_ok(netdev)) {
581d708e 2735 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
1da177e4
LT
2736 /* We've lost link, so the controller stops DMA,
2737 * but we've got queued Tx work that's never going
2738 * to get done, so reset controller to flush Tx.
2739 * (Do the reset outside of interrupt context). */
87041639
JK
2740 adapter->tx_timeout_count++;
2741 schedule_work(&adapter->reset_task);
1da177e4
LT
2742 }
2743 }
2744
1da177e4 2745 /* Cause software interrupt to ensure rx ring is cleaned */
1dc32918 2746 ew32(ICS, E1000_ICS_RXDMT0);
1da177e4 2747
2648345f 2748 /* Force detection of hung controller every watchdog period */
c3033b01 2749 adapter->detect_tx_hung = true;
1da177e4 2750
96838a40 2751 /* With 82571 controllers, LAA may be overwritten due to controller
868d5309 2752 * reset from the other port. Set the appropriate LAA in RAR[0] */
1dc32918
JP
2753 if (hw->mac_type == e1000_82571 && hw->laa_is_present)
2754 e1000_rar_set(hw, hw->mac_addr, 0);
868d5309 2755
1da177e4 2756 /* Reset the timer */
56e1393f 2757 mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
1da177e4
LT
2758}
2759
835bb129
JB
2760enum latency_range {
2761 lowest_latency = 0,
2762 low_latency = 1,
2763 bulk_latency = 2,
2764 latency_invalid = 255
2765};
2766
2767/**
2768 * e1000_update_itr - update the dynamic ITR value based on statistics
2769 * @adapter: pointer to adapter
2770 * @itr_setting: current adapter->itr
2771 * @packets: the number of packets during this measurement interval
2772 * @bytes: the number of bytes during this measurement interval
2773 *
2774 * Stores a new ITR value based on packet and byte
2775 * counts during the last interrupt. The advantage of per-interrupt
2776 * computation is faster updates and a more accurate ITR for the current
2777 * traffic pattern. Constants in this function were computed
2778 * based on theoretical maximum wire speed, and thresholds were set based
2779 * on testing data as well as attempting to minimize response time
2780 * while increasing bulk throughput.
2781 * This functionality is controlled by the InterruptThrottleRate module
2782 * parameter (see e1000_param.c).
2782 **/
2783static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
64798845 2784 u16 itr_setting, int packets, int bytes)
835bb129
JB
2785{
2786 unsigned int retval = itr_setting;
2787 struct e1000_hw *hw = &adapter->hw;
2788
2789 if (unlikely(hw->mac_type < e1000_82540))
2790 goto update_itr_done;
2791
2792 if (packets == 0)
2793 goto update_itr_done;
2794
835bb129
JB
2795 switch (itr_setting) {
2796 case lowest_latency:
2b65326e
JB
2797 /* jumbo frames get bulk treatment*/
2798 if (bytes/packets > 8000)
2799 retval = bulk_latency;
2800 else if ((packets < 5) && (bytes > 512))
835bb129
JB
2801 retval = low_latency;
2802 break;
2803 case low_latency: /* 50 usec aka 20000 ints/s */
2804 if (bytes > 10000) {
2b65326e
JB
2805 /* jumbo frames need bulk latency setting */
2806 if (bytes/packets > 8000)
2807 retval = bulk_latency;
2808 else if ((packets < 10) || ((bytes/packets) > 1200))
835bb129
JB
2809 retval = bulk_latency;
2810 else if ((packets > 35))
2811 retval = lowest_latency;
2b65326e
JB
2812 } else if (bytes/packets > 2000)
2813 retval = bulk_latency;
2814 else if (packets <= 2 && bytes < 512)
835bb129
JB
2815 retval = lowest_latency;
2816 break;
2817 case bulk_latency: /* 250 usec aka 4000 ints/s */
2818 if (bytes > 25000) {
2819 if (packets > 35)
2820 retval = low_latency;
2b65326e
JB
2821 } else if (bytes < 6000) {
2822 retval = low_latency;
835bb129
JB
2823 }
2824 break;
2825 }
2826
2827update_itr_done:
2828 return retval;
2829}
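/* Worked example of the classification above: an interrupt that
 * handled 4 packets totalling 4000 bytes while in lowest_latency has
 * bytes/packets = 1000 (not jumbo), and packets < 5 with bytes > 512,
 * so the ring is reclassified as low_latency; the same interval
 * measured in low_latency mode would stay there (bytes <= 10000,
 * bytes/packets <= 2000, packets > 2). */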
2830
2831static void e1000_set_itr(struct e1000_adapter *adapter)
2832{
2833 struct e1000_hw *hw = &adapter->hw;
406874a7
JP
2834 u16 current_itr;
2835 u32 new_itr = adapter->itr;
835bb129
JB
2836
2837 if (unlikely(hw->mac_type < e1000_82540))
2838 return;
2839
2840 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2841 if (unlikely(adapter->link_speed != SPEED_1000)) {
2842 current_itr = 0;
2843 new_itr = 4000;
2844 goto set_itr_now;
2845 }
2846
2847 adapter->tx_itr = e1000_update_itr(adapter,
2848 adapter->tx_itr,
2849 adapter->total_tx_packets,
2850 adapter->total_tx_bytes);
2b65326e
JB
2851 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2852 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2853 adapter->tx_itr = low_latency;
2854
835bb129
JB
2855 adapter->rx_itr = e1000_update_itr(adapter,
2856 adapter->rx_itr,
2857 adapter->total_rx_packets,
2858 adapter->total_rx_bytes);
2b65326e
JB
2859 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2860 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2861 adapter->rx_itr = low_latency;
835bb129
JB
2862
2863 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2864
835bb129
JB
2865 switch (current_itr) {
2866 /* counts and packets in update_itr are dependent on these numbers */
2867 case lowest_latency:
2868 new_itr = 70000;
2869 break;
2870 case low_latency:
2871 new_itr = 20000; /* aka hwitr = ~200 */
2872 break;
2873 case bulk_latency:
2874 new_itr = 4000;
2875 break;
2876 default:
2877 break;
2878 }
2879
2880set_itr_now:
2881 if (new_itr != adapter->itr) {
2882 /* this attempts to bias the interrupt rate towards Bulk
2883 * by adding intermediate steps when interrupt rate is
2884 * increasing */
2885 new_itr = new_itr > adapter->itr ?
2886 min(adapter->itr + (new_itr >> 2), new_itr) :
2887 new_itr;
2888 adapter->itr = new_itr;
1dc32918 2889 ew32(ITR, 1000000000 / (new_itr * 256));
835bb129
JB
2890 }
2891
2892 return;
2893}
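/* The ew32(ITR, ...) write above converts an interrupts/sec target
 * into register units; the ITR interval is counted in 256 ns
 * increments, hence the factor of 256. Worked example: new_itr =
 * 20000 ints/s programs 1000000000 / (20000 * 256) = 195, and
 * 195 * 256 ns is roughly 50 usec between interrupts -- matching the
 * "50 usec aka 20000 ints/s" note in e1000_update_itr(). */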
2894
1da177e4
LT
2895#define E1000_TX_FLAGS_CSUM 0x00000001
2896#define E1000_TX_FLAGS_VLAN 0x00000002
2897#define E1000_TX_FLAGS_TSO 0x00000004
2d7edb92 2898#define E1000_TX_FLAGS_IPV4 0x00000008
1da177e4
LT
2899#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2900#define E1000_TX_FLAGS_VLAN_SHIFT 16
2901
64798845
JP
2902static int e1000_tso(struct e1000_adapter *adapter,
2903 struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
1da177e4 2904{
1da177e4 2905 struct e1000_context_desc *context_desc;
545c67c0 2906 struct e1000_buffer *buffer_info;
1da177e4 2907 unsigned int i;
406874a7
JP
2908 u32 cmd_length = 0;
2909 u16 ipcse = 0, tucse, mss;
2910 u8 ipcss, ipcso, tucss, tucso, hdr_len;
1da177e4
LT
2911 int err;
2912
89114afd 2913 if (skb_is_gso(skb)) {
1da177e4
LT
2914 if (skb_header_cloned(skb)) {
2915 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2916 if (err)
2917 return err;
2918 }
2919
ab6a5bb6 2920 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
7967168c 2921 mss = skb_shinfo(skb)->gso_size;
60828236 2922 if (skb->protocol == htons(ETH_P_IP)) {
eddc9ec5
ACM
2923 struct iphdr *iph = ip_hdr(skb);
2924 iph->tot_len = 0;
2925 iph->check = 0;
aa8223c7
ACM
2926 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2927 iph->daddr, 0,
2928 IPPROTO_TCP,
2929 0);
2d7edb92 2930 cmd_length = E1000_TXD_CMD_IP;
ea2ae17d 2931 ipcse = skb_transport_offset(skb) - 1;
e15fdd03 2932 } else if (skb->protocol == htons(ETH_P_IPV6)) {
0660e03f 2933 ipv6_hdr(skb)->payload_len = 0;
aa8223c7 2934 tcp_hdr(skb)->check =
0660e03f
ACM
2935 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2936 &ipv6_hdr(skb)->daddr,
2937 0, IPPROTO_TCP, 0);
2d7edb92 2938 ipcse = 0;
2d7edb92 2939 }
bbe735e4 2940 ipcss = skb_network_offset(skb);
eddc9ec5 2941 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
ea2ae17d 2942 tucss = skb_transport_offset(skb);
aa8223c7 2943 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
1da177e4
LT
2944 tucse = 0;
2945
2946 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2d7edb92 2947 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
1da177e4 2948
581d708e
MC
2949 i = tx_ring->next_to_use;
2950 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
545c67c0 2951 buffer_info = &tx_ring->buffer_info[i];
1da177e4
LT
2952
2953 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2954 context_desc->lower_setup.ip_fields.ipcso = ipcso;
2955 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
2956 context_desc->upper_setup.tcp_fields.tucss = tucss;
2957 context_desc->upper_setup.tcp_fields.tucso = tucso;
2958 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2959 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
2960 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2961 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2962
545c67c0 2963 buffer_info->time_stamp = jiffies;
a9ebadd6 2964 buffer_info->next_to_watch = i;
545c67c0 2965
581d708e
MC
2966 if (++i == tx_ring->count) i = 0;
2967 tx_ring->next_to_use = i;
1da177e4 2968
c3033b01 2969 return true;
1da177e4 2970 }
c3033b01 2971 return false;
1da177e4
LT
2972}
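/* Worked example for a plain TCP/IPv4 frame (14-byte Ethernet header,
 * 20-byte IP header, 20-byte TCP header): ipcss = 14, ipcso = 24 (the
 * IP checksum field), ipcse = 33, tucss = 34, tucso = 50 (the TCP
 * checksum field) and hdr_len = 54; with mss = 1460 the hardware then
 * prepends those 54 header bytes to every segment it cuts. */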
2973
64798845
JP
2974static bool e1000_tx_csum(struct e1000_adapter *adapter,
2975 struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
1da177e4
LT
2976{
2977 struct e1000_context_desc *context_desc;
545c67c0 2978 struct e1000_buffer *buffer_info;
1da177e4 2979 unsigned int i;
406874a7 2980 u8 css;
1da177e4 2981
84fa7933 2982 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
ea2ae17d 2983 css = skb_transport_offset(skb);
1da177e4 2984
581d708e 2985 i = tx_ring->next_to_use;
545c67c0 2986 buffer_info = &tx_ring->buffer_info[i];
581d708e 2987 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
1da177e4 2988
f6c57baf 2989 context_desc->lower_setup.ip_config = 0;
1da177e4 2990 context_desc->upper_setup.tcp_fields.tucss = css;
628592cc
HX
2991 context_desc->upper_setup.tcp_fields.tucso =
2992 css + skb->csum_offset;
1da177e4
LT
2993 context_desc->upper_setup.tcp_fields.tucse = 0;
2994 context_desc->tcp_seg_setup.data = 0;
2995 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
2996
545c67c0 2997 buffer_info->time_stamp = jiffies;
a9ebadd6 2998 buffer_info->next_to_watch = i;
545c67c0 2999
581d708e
MC
3000 if (unlikely(++i == tx_ring->count)) i = 0;
3001 tx_ring->next_to_use = i;
1da177e4 3002
c3033b01 3003 return true;
1da177e4
LT
3004 }
3005
c3033b01 3006 return false;
1da177e4
LT
3007}
3008
3009#define E1000_MAX_TXD_PWR 12
3010#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
3011
64798845
JP
3012static int e1000_tx_map(struct e1000_adapter *adapter,
3013 struct e1000_tx_ring *tx_ring,
3014 struct sk_buff *skb, unsigned int first,
3015 unsigned int max_per_txd, unsigned int nr_frags,
3016 unsigned int mss)
1da177e4 3017{
1dc32918 3018 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
3019 struct e1000_buffer *buffer_info;
3020 unsigned int len = skb->len;
3021 unsigned int offset = 0, size, count = 0, i;
3022 unsigned int f;
3023 len -= skb->data_len;
3024
3025 i = tx_ring->next_to_use;
3026
96838a40 3027 while (len) {
1da177e4
LT
3028 buffer_info = &tx_ring->buffer_info[i];
3029 size = min(len, max_per_txd);
fd803241
JK
3030 /* Workaround for Controller erratum --
3031 * descriptor for non-tso packet in a linear SKB that follows a
3032 * tso gets written back prematurely before the data is fully
0f15a8fa 3033 * DMA'd to the controller */
fd803241 3034 if (!skb->data_len && tx_ring->last_tx_tso &&
89114afd 3035 !skb_is_gso(skb)) {
fd803241
JK
3036 tx_ring->last_tx_tso = 0;
3037 size -= 4;
3038 }
3039
1da177e4
LT
3040 /* Workaround for premature desc write-backs
3041 * in TSO mode. Append 4-byte sentinel desc */
96838a40 3042 if (unlikely(mss && !nr_frags && size == len && size > 8))
1da177e4 3043 size -= 4;
97338bde
MC
3044 /* work-around for errata 10, which applies
3045 * to all controllers in PCI-X mode.
3046 * The fix is to make sure that the first descriptor of a
3047 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
3048 */
1dc32918 3049 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
97338bde
MC
3050 (size > 2015) && count == 0))
3051 size = 2015;
96838a40 3052
1da177e4
LT
3053 /* Workaround for potential 82544 hang in PCI-X. Avoid
3054 * terminating buffers within evenly-aligned dwords. */
96838a40 3055 if (unlikely(adapter->pcix_82544 &&
1da177e4
LT
3056 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
3057 size > 4))
3058 size -= 4;
3059
3060 buffer_info->length = size;
3061 buffer_info->dma =
3062 pci_map_single(adapter->pdev,
3063 skb->data + offset,
3064 size,
3065 PCI_DMA_TODEVICE);
3066 buffer_info->time_stamp = jiffies;
a9ebadd6 3067 buffer_info->next_to_watch = i;
1da177e4
LT
3068
3069 len -= size;
3070 offset += size;
3071 count++;
96838a40 3072 if (unlikely(++i == tx_ring->count)) i = 0;
1da177e4
LT
3073 }
3074
96838a40 3075 for (f = 0; f < nr_frags; f++) {
1da177e4
LT
3076 struct skb_frag_struct *frag;
3077
3078 frag = &skb_shinfo(skb)->frags[f];
3079 len = frag->size;
3080 offset = frag->page_offset;
3081
96838a40 3082 while (len) {
1da177e4
LT
3083 buffer_info = &tx_ring->buffer_info[i];
3084 size = min(len, max_per_txd);
1da177e4
LT
3085 /* Workaround for premature desc write-backs
3086 * in TSO mode. Append 4-byte sentinel desc */
96838a40 3087 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
1da177e4 3088 size -= 4;
1da177e4
LT
3089 /* Workaround for potential 82544 hang in PCI-X.
3090 * Avoid terminating buffers within evenly-aligned
3091 * dwords. */
96838a40 3092 if (unlikely(adapter->pcix_82544 &&
1da177e4
LT
3093 !((unsigned long)(frag->page+offset+size-1) & 4) &&
3094 size > 4))
3095 size -= 4;
3096
3097 buffer_info->length = size;
3098 buffer_info->dma =
3099 pci_map_page(adapter->pdev,
3100 frag->page,
3101 offset,
3102 size,
3103 PCI_DMA_TODEVICE);
3104 buffer_info->time_stamp = jiffies;
a9ebadd6 3105 buffer_info->next_to_watch = i;
1da177e4
LT
3106
3107 len -= size;
3108 offset += size;
3109 count++;
96838a40 3110 if (unlikely(++i == tx_ring->count)) i = 0;
1da177e4
LT
3111 }
3112 }
3113
3114 i = (i == 0) ? tx_ring->count - 1 : i - 1;
3115 tx_ring->buffer_info[i].skb = skb;
3116 tx_ring->buffer_info[first].next_to_watch = i;
3117
3118 return count;
3119}
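/* Worked example of the mapping above: a 5000-byte linear skb with
 * max_per_txd = 4096 (E1000_MAX_DATA_PER_TXD) is split into two
 * descriptors of 4096 and 904 bytes; the TSO sentinel and the PCI-X
 * alignment work-arounds may then shave a further 4 bytes off
 * individual descriptors when they apply. */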
3120
64798845
JP
3121static void e1000_tx_queue(struct e1000_adapter *adapter,
3122 struct e1000_tx_ring *tx_ring, int tx_flags,
3123 int count)
1da177e4 3124{
1dc32918 3125 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
3126 struct e1000_tx_desc *tx_desc = NULL;
3127 struct e1000_buffer *buffer_info;
406874a7 3128 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
1da177e4
LT
3129 unsigned int i;
3130
96838a40 3131 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
1da177e4
LT
3132 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
3133 E1000_TXD_CMD_TSE;
2d7edb92
MC
3134 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3135
96838a40 3136 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2d7edb92 3137 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
1da177e4
LT
3138 }
3139
96838a40 3140 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
1da177e4
LT
3141 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3142 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3143 }
3144
96838a40 3145 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
1da177e4
LT
3146 txd_lower |= E1000_TXD_CMD_VLE;
3147 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3148 }
3149
3150 i = tx_ring->next_to_use;
3151
96838a40 3152 while (count--) {
1da177e4
LT
3153 buffer_info = &tx_ring->buffer_info[i];
3154 tx_desc = E1000_TX_DESC(*tx_ring, i);
3155 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3156 tx_desc->lower.data =
3157 cpu_to_le32(txd_lower | buffer_info->length);
3158 tx_desc->upper.data = cpu_to_le32(txd_upper);
96838a40 3159 if (unlikely(++i == tx_ring->count)) i = 0;
1da177e4
LT
3160 }
3161
3162 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3163
3164 /* Force memory writes to complete before letting h/w
3165 * know there are new descriptors to fetch. (Only
3166 * applicable for weak-ordered memory model archs,
3167 * such as IA-64). */
3168 wmb();
3169
3170 tx_ring->next_to_use = i;
1dc32918 3171 writel(i, hw->hw_addr + tx_ring->tdt);
2ce9047f
JB
3172 /* we need this if more than one processor can write to our tail
3173 * at a time; it synchronizes IO on IA64/Altix systems */
3174 mmiowb();
1da177e4
LT
3175}
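
/* Ordering sketch for the tail update above (illustrative only):
 *
 *	...descriptor stores...		// fill buffer_addr/lower/upper
 *	wmb();				// make descriptors visible first
 *	writel(i, hw_addr + tdt);	// doorbell: h/w may fetch up to i
 *	mmiowb();			// keep MMIO ordered across CPUs
 *
 * Without the wmb(), a weakly-ordered CPU could let the tail write reach
 * the NIC before the descriptor contents, letting it DMA stale data. */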

/**
 * 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary by notifying the stack to resend
 * the packet at a later time. This gives the Tx FIFO an opportunity to
 * flush all packets. When that occurs, we reset the Tx FIFO pointers
 * to the beginning of the Tx FIFO.
 **/

#define E1000_FIFO_HDR			0x10
#define E1000_82547_PAD_LEN		0x3E0

static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb)
{
	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;

	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);

	if (adapter->link_duplex != HALF_DUPLEX)
		goto no_fifo_stall_required;

	if (atomic_read(&adapter->tx_fifo_stall))
		return 1;

	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
		atomic_set(&adapter->tx_fifo_stall, 1);
		return 1;
	}

no_fifo_stall_required:
	adapter->tx_fifo_head += skb_fifo_len;
	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
		adapter->tx_fifo_head -= adapter->tx_fifo_size;
	return 0;
}
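
/* Worked example for the FIFO accounting above (illustrative): the FIFO
 * cost of a frame is its length plus the 16-byte FIFO header, rounded up
 * to E1000_FIFO_HDR granularity, e.g. a 60-byte frame consumes
 * ALIGN(60 + 0x10, 0x10) = 0x50 bytes.  With tx_fifo_size = 0x1000 and
 * tx_fifo_head = 0xFE0, queuing that frame advances the head to
 * 0xFE0 + 0x50 = 0x1030, which wraps to 0x030. */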

#define MINIMUM_DHCP_PACKET_SIZE 282
static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
				    struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 length, offset;

	if (vlan_tx_tag_present(skb)) {
		if (!((vlan_tx_tag_get(skb) == hw->mng_cookie.vlan_id) &&
		      (hw->mng_cookie.status &
		       E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)))
			return 0;
	}
	if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
		struct ethhdr *eth = (struct ethhdr *)skb->data;
		if ((htons(ETH_P_IP) == eth->h_proto)) {
			const struct iphdr *ip =
				(struct iphdr *)((u8 *)skb->data+14);
			if (IPPROTO_UDP == ip->protocol) {
				struct udphdr *udp =
					(struct udphdr *)((u8 *)ip +
						(ip->ihl << 2));
				if (ntohs(udp->dest) == 67) {
					offset = (u8 *)udp + 8 - skb->data;
					length = skb->len - offset;

					return e1000_mng_write_dhcp_info(hw,
							(u8 *)udp + 8,
							length);
				}
			}
		}
	}
	return 0;
}
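
/* Worked example for the header walk above (illustrative): with a
 * minimal IPv4 header (ihl = 5), the UDP header starts at
 * skb->data + 14 + (5 << 2) = skb->data + 34, and the DHCP payload
 * handed to e1000_mng_write_dhcp_info() starts at udp + 8, i.e. the
 * classic offset 42 of an Ethernet/IPv4/UDP frame. */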

static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int e1000_maybe_stop_tx(struct net_device *netdev,
			       struct e1000_tx_ring *tx_ring, int size)
{
	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __e1000_maybe_stop_tx(netdev, size);
}
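
/* Usage sketch (illustrative): e1000_xmit_frame() below calls this pair
 * twice per packet --
 *
 *	if (e1000_maybe_stop_tx(netdev, tx_ring, count + 2))
 *		return NETDEV_TX_BUSY;	// not enough descriptors, back off
 *	...map and queue descriptors...
 *	e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
 *
 * The smp_mb() in the slow path pairs with the barrier in
 * e1000_clean_tx_irq(), so a cleanup running on another CPU cannot
 * observe the stopped queue without also seeing the updated ring state. */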

#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_ring *tx_ring;
	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb->len - skb->data_len;
	unsigned long flags;
	unsigned int nr_frags;
	unsigned int mss;
	int count = 0;
	int tso;
	unsigned int f;

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
	tx_ring = adapter->tx_ring;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* 82571 and newer don't need the workaround that limited descriptor
	 * length to 4kB */
	if (hw->mac_type >= e1000_82571)
		max_per_txd = 8192;

	mss = skb_shinfo(skb)->gso_size;
	/* The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer.  The calc is:
	 * 4 = ceil(buffer len/mss).  To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops. */
	if (mss) {
		u8 hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;

		/* TSO Workaround for 82571/2/3 Controllers -- if skb->data
		 * points to just header, pull a few bytes of payload from
		 * frags into skb->data */
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb->data_len && hdr_len == len) {
			switch (hw->mac_type) {
				unsigned int pull_size;
			case e1000_82544:
				/* Make sure we have room to chop off 4 bytes,
				 * and that the end alignment will work out to
				 * this hardware's requirements
				 * NOTE: this is a TSO only workaround
				 * if end byte alignment not correct move us
				 * into the next dword */
				if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
					break;
				/* fall through */
			case e1000_82571:
			case e1000_82572:
			case e1000_82573:
			case e1000_ich8lan:
				pull_size = min((unsigned int)4, skb->data_len);
				if (!__pskb_pull_tail(skb, pull_size)) {
					DPRINTK(DRV, ERR,
						"__pskb_pull_tail failed.\n");
					dev_kfree_skb_any(skb);
					return NETDEV_TX_OK;
				}
				len = skb->len - skb->data_len;
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
		count++;
	count++;

	/* Controller Erratum workaround */
	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
		count++;

	count += TXD_USE_COUNT(len, max_txd_pwr);

	if (adapter->pcix_82544)
		count++;

	/* work-around for errata 10 and it applies to all controllers
	 * in PCI-X mode, so add one more descriptor to the count
	 */
	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
		     (len > 2015)))
		count++;

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
				       max_txd_pwr);
	if (adapter->pcix_82544)
		count += nr_frags;

	if (hw->tx_pkt_filtering &&
	    (hw->mac_type == e1000_82573))
		e1000_transfer_dhcp_info(adapter, skb);

	if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags))
		/* Collision - tell upper layer to requeue */
		return NETDEV_TX_LOCKED;

	/* need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time */
	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) {
		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	if (unlikely(hw->mac_type == e1000_82547)) {
		if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
			netif_stop_queue(netdev);
			mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
			spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}

	if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
	}

	first = tx_ring->next_to_use;

	tso = e1000_tso(adapter, tx_ring, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
		return NETDEV_TX_OK;
	}

	if (likely(tso)) {
		tx_ring->last_tx_tso = 1;
		tx_flags |= E1000_TX_FLAGS_TSO;
	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	/* Old method was to assume IPv4 packet by default if TSO was enabled.
	 * 82571 hardware supports TSO capabilities for IPv6 as well...
	 * no longer assume, we must. */
	if (likely(skb->protocol == htons(ETH_P_IP)))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	e1000_tx_queue(adapter, tx_ring, tx_flags,
		       e1000_tx_map(adapter, tx_ring, skb, first,
				    max_per_txd, nr_frags, mss));

	netdev->trans_start = jiffies;

	/* Make sure there is space in the ring for the next send. */
	e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);

	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
	return NETDEV_TX_OK;
}
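
/* Worked example for the descriptor budgeting above (illustrative):
 * TXD_USE_COUNT(S, X) charges one descriptor per max_per_txd-sized chunk,
 * where X = fls(max_per_txd) - 1.  With max_per_txd = 4096 (X = 12), a
 * 9000-byte linear area costs (9000 >> 12) + 1 = 3 descriptors; the
 * PCI-X and TSO erratum adjustments above then pad that estimate. */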

/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/

static void e1000_tx_timeout(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

static void e1000_reset_task(struct work_struct *work)
{
	struct e1000_adapter *adapter =
		container_of(work, struct e1000_adapter, reset_task);

	e1000_reinit_locked(adapter);
}

/**
 * e1000_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/

static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}

/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/

static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	u16 eeprom_data = 0;

	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
		return -EINVAL;
	}

	/* Adapter-specific max frame size limits. */
	switch (hw->mac_type) {
	case e1000_undefined ... e1000_82542_rev2_1:
	case e1000_ich8lan:
		if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
			DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
			return -EINVAL;
		}
		break;
	case e1000_82573:
		/* Jumbo Frames not supported if:
		 * - this is not an 82573L device
		 * - ASPM is enabled in any way (0x1A bits 3:2) */
		e1000_read_eeprom(hw, EEPROM_INIT_3GIO_3, 1,
				  &eeprom_data);
		if ((hw->device_id != E1000_DEV_ID_82573L) ||
		    (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) {
			if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
				DPRINTK(PROBE, ERR,
					"Jumbo Frames not supported.\n");
				return -EINVAL;
			}
			break;
		}
		/* ERT will be enabled later to enable wire speed receives */

		/* fall through to get support */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
#define MAX_STD_JUMBO_FRAME_SIZE 9234
		if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
			DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
			return -EINVAL;
		}
		break;
	default:
		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
		break;
	}

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size
	 * i.e. RXBUFFER_2048 --> size-4096 slab */

	if (max_frame <= E1000_RXBUFFER_256)
		adapter->rx_buffer_len = E1000_RXBUFFER_256;
	else if (max_frame <= E1000_RXBUFFER_512)
		adapter->rx_buffer_len = E1000_RXBUFFER_512;
	else if (max_frame <= E1000_RXBUFFER_1024)
		adapter->rx_buffer_len = E1000_RXBUFFER_1024;
	else if (max_frame <= E1000_RXBUFFER_2048)
		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
	else if (max_frame <= E1000_RXBUFFER_4096)
		adapter->rx_buffer_len = E1000_RXBUFFER_4096;
	else if (max_frame <= E1000_RXBUFFER_8192)
		adapter->rx_buffer_len = E1000_RXBUFFER_8192;
	else if (max_frame <= E1000_RXBUFFER_16384)
		adapter->rx_buffer_len = E1000_RXBUFFER_16384;

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if (!hw->tbi_compatibility_on &&
	    ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	netdev->mtu = new_mtu;
	hw->max_frame_size = max_frame;

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);

	return 0;
}
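
/* Worked example for the buffer sizing above (illustrative): a standard
 * 1500-byte MTU gives max_frame = 1500 + ENET_HEADER_SIZE(14) +
 * ETHERNET_FCS_SIZE(4) = 1518, which falls into the E1000_RXBUFFER_2048
 * bucket and is then trimmed to MAXIMUM_ETHERNET_VLAN_SIZE when TBI
 * compatibility is off. */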

/**
 * e1000_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/

void e1000_update_stats(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long flags;
	u16 phy_tmp;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	spin_lock_irqsave(&adapter->stats_lock, flags);

	/* these counters are modified from e1000_tbi_adjust_stats,
	 * called from the interrupt context, so they must only
	 * be written while holding adapter->stats_lock
	 */

	adapter->stats.crcerrs += er32(CRCERRS);
	adapter->stats.gprc += er32(GPRC);
	adapter->stats.gorcl += er32(GORCL);
	adapter->stats.gorch += er32(GORCH);
	adapter->stats.bprc += er32(BPRC);
	adapter->stats.mprc += er32(MPRC);
	adapter->stats.roc += er32(ROC);

	if (hw->mac_type != e1000_ich8lan) {
		adapter->stats.prc64 += er32(PRC64);
		adapter->stats.prc127 += er32(PRC127);
		adapter->stats.prc255 += er32(PRC255);
		adapter->stats.prc511 += er32(PRC511);
		adapter->stats.prc1023 += er32(PRC1023);
		adapter->stats.prc1522 += er32(PRC1522);
	}

	adapter->stats.symerrs += er32(SYMERRS);
	adapter->stats.mpc += er32(MPC);
	adapter->stats.scc += er32(SCC);
	adapter->stats.ecol += er32(ECOL);
	adapter->stats.mcc += er32(MCC);
	adapter->stats.latecol += er32(LATECOL);
	adapter->stats.dc += er32(DC);
	adapter->stats.sec += er32(SEC);
	adapter->stats.rlec += er32(RLEC);
	adapter->stats.xonrxc += er32(XONRXC);
	adapter->stats.xontxc += er32(XONTXC);
	adapter->stats.xoffrxc += er32(XOFFRXC);
	adapter->stats.xofftxc += er32(XOFFTXC);
	adapter->stats.fcruc += er32(FCRUC);
	adapter->stats.gptc += er32(GPTC);
	adapter->stats.gotcl += er32(GOTCL);
	adapter->stats.gotch += er32(GOTCH);
	adapter->stats.rnbc += er32(RNBC);
	adapter->stats.ruc += er32(RUC);
	adapter->stats.rfc += er32(RFC);
	adapter->stats.rjc += er32(RJC);
	adapter->stats.torl += er32(TORL);
	adapter->stats.torh += er32(TORH);
	adapter->stats.totl += er32(TOTL);
	adapter->stats.toth += er32(TOTH);
	adapter->stats.tpr += er32(TPR);

	if (hw->mac_type != e1000_ich8lan) {
		adapter->stats.ptc64 += er32(PTC64);
		adapter->stats.ptc127 += er32(PTC127);
		adapter->stats.ptc255 += er32(PTC255);
		adapter->stats.ptc511 += er32(PTC511);
		adapter->stats.ptc1023 += er32(PTC1023);
		adapter->stats.ptc1522 += er32(PTC1522);
	}

	adapter->stats.mptc += er32(MPTC);
	adapter->stats.bptc += er32(BPTC);

	/* used for adaptive IFS */

	hw->tx_packet_delta = er32(TPT);
	adapter->stats.tpt += hw->tx_packet_delta;
	hw->collision_delta = er32(COLC);
	adapter->stats.colc += hw->collision_delta;

	if (hw->mac_type >= e1000_82543) {
		adapter->stats.algnerrc += er32(ALGNERRC);
		adapter->stats.rxerrc += er32(RXERRC);
		adapter->stats.tncrs += er32(TNCRS);
		adapter->stats.cexterr += er32(CEXTERR);
		adapter->stats.tsctc += er32(TSCTC);
		adapter->stats.tsctfc += er32(TSCTFC);
	}
	if (hw->mac_type > e1000_82547_rev_2) {
		adapter->stats.iac += er32(IAC);
		adapter->stats.icrxoc += er32(ICRXOC);

		if (hw->mac_type != e1000_ich8lan) {
			adapter->stats.icrxptc += er32(ICRXPTC);
			adapter->stats.icrxatc += er32(ICRXATC);
			adapter->stats.ictxptc += er32(ICTXPTC);
			adapter->stats.ictxatc += er32(ICTXATC);
			adapter->stats.ictxqec += er32(ICTXQEC);
			adapter->stats.ictxqmtc += er32(ICTXQMTC);
			adapter->stats.icrxdmtc += er32(ICRXDMTC);
		}
	}

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;
	adapter->net_stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
	adapter->net_stats.rx_length_errors = adapter->stats.rlerrc;
	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
	adapter->net_stats.tx_errors = adapter->stats.txerrc;
	adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
	adapter->net_stats.tx_window_errors = adapter->stats.latecol;
	adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
	if (hw->bad_tx_carr_stats_fd &&
	    adapter->link_duplex == FULL_DUPLEX) {
		adapter->net_stats.tx_carrier_errors = 0;
		adapter->stats.tncrs = 0;
	}

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}

		if ((hw->mac_type <= e1000_82546) &&
		    (hw->phy_type == e1000_phy_m88) &&
		    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
			adapter->phy_stats.receive_errors += phy_tmp;
	}

	/* Management Stats */
	if (hw->has_smbus) {
		adapter->stats.mgptc += er32(MGTPTC);
		adapter->stats.mgprc += er32(MGTPRC);
		adapter->stats.mgpdc += er32(MGTPDC);
	}

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}

/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/

static irqreturn_t e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
#ifndef CONFIG_E1000_NAPI
	int i;
#endif
	u32 icr = er32(ICR);

	/* in NAPI mode read ICR disables interrupts using IAM */

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->get_link_status = 1;
		/* 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog */
		if (netif_carrier_ok(netdev) &&
		    (hw->mac_type == e1000_80003es2lan)) {
			/* disable receives */
			u32 rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

#ifdef CONFIG_E1000_NAPI
	if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__netif_rx_schedule(netdev, &adapter->napi);
	} else
		e1000_irq_enable(adapter);
#else
	adapter->total_tx_bytes = 0;
	adapter->total_rx_bytes = 0;
	adapter->total_tx_packets = 0;
	adapter->total_rx_packets = 0;

	for (i = 0; i < E1000_MAX_INTR; i++)
		if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
		   !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
			break;

	if (likely(adapter->itr_setting & 3))
		e1000_set_itr(adapter);
#endif

	return IRQ_HANDLED;
}

/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/

static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, icr = er32(ICR);
#ifndef CONFIG_E1000_NAPI
	int i;
#endif
	if (unlikely(!icr))
		return IRQ_NONE;  /* Not our interrupt */

#ifdef CONFIG_E1000_NAPI
	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (unlikely(hw->mac_type >= e1000_82571 &&
		     !(icr & E1000_ICR_INT_ASSERTED)))
		return IRQ_NONE;

	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
#endif

	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
		hw->get_link_status = 1;
		/* 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (hw->mac_type == e1000_80003es2lan)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

#ifdef CONFIG_E1000_NAPI
	if (unlikely(hw->mac_type < e1000_82571)) {
		/* disable interrupts, without the synchronize_irq bit */
		ew32(IMC, ~0);
		E1000_WRITE_FLUSH();
	}
	if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__netif_rx_schedule(netdev, &adapter->napi);
	} else
		/* this really should not happen! if it does it is basically a
		 * bug, but not a hard error, so enable ints and continue */
		e1000_irq_enable(adapter);
#else
	/* Writing IMC and IMS is needed for 82547.
	 * Due to Hub Link bus being occupied, an interrupt
	 * de-assertion message is not able to be sent.
	 * When an interrupt assertion message is generated later,
	 * two messages are re-ordered and sent out.
	 * That causes APIC to think 82547 is in de-assertion
	 * state, while 82547 is in assertion state, resulting
	 * in deadlock. Writing IMC forces 82547 into
	 * de-assertion state.
	 */
	if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
		ew32(IMC, ~0);

	adapter->total_tx_bytes = 0;
	adapter->total_rx_bytes = 0;
	adapter->total_tx_packets = 0;
	adapter->total_rx_packets = 0;

	for (i = 0; i < E1000_MAX_INTR; i++)
		if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
		   !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
			break;

	if (likely(adapter->itr_setting & 3))
		e1000_set_itr(adapter);

	if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
		e1000_irq_enable(adapter);

#endif
	return IRQ_HANDLED;
}
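
/* Sketch of the NAPI hand-off shared by both ISRs above (illustrative):
 *
 *	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
 *		// we own this poll cycle: zero the byte/packet counters
 *		__netif_rx_schedule(netdev, &adapter->napi);
 *	} else {
 *		// poll already scheduled or disabled; re-enable IRQs
 *		e1000_irq_enable(adapter);
 *	}
 *
 * prep/schedule are split so the counter reset happens exactly once per
 * poll cycle, even if the interrupt fires again before e1000_clean() runs. */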

#ifdef CONFIG_E1000_NAPI
/**
 * e1000_clean - NAPI Rx polling callback
 * @adapter: board private structure
 **/

static int e1000_clean(struct napi_struct *napi, int budget)
{
	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
	struct net_device *poll_dev = adapter->netdev;
	int tx_cleaned = 0, work_done = 0;

	/* Must NOT use netdev_priv macro here. */
	adapter = poll_dev->priv;

	/* e1000_clean is called per-cpu.  This lock protects
	 * tx_ring[0] from being cleaned by multiple cpus
	 * simultaneously.  A failure obtaining the lock means
	 * tx_ring[0] is currently being cleaned anyway. */
	if (spin_trylock(&adapter->tx_queue_lock)) {
		tx_cleaned = e1000_clean_tx_irq(adapter,
						&adapter->tx_ring[0]);
		spin_unlock(&adapter->tx_queue_lock);
	}

	adapter->clean_rx(adapter, &adapter->rx_ring[0],
			  &work_done, budget);

	if (tx_cleaned)
		work_done = budget;

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		if (likely(adapter->itr_setting & 3))
			e1000_set_itr(adapter);
		netif_rx_complete(poll_dev, napi);
		e1000_irq_enable(adapter);
	}

	return work_done;
}

#endif
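
/* Note on the budget contract above (illustrative): returning `budget`
 * keeps the adapter on the poll list, while returning less -- after
 * netif_rx_complete() -- re-enables interrupts.  Tx work has no budget
 * of its own, so a pass that cleaned the Tx ring reports a full budget
 * to force another poll. */
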
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/

static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
#ifdef CONFIG_E1000_NAPI
	unsigned int count = 0;
#endif
	bool cleaned = false;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
		for (cleaned = false; !cleaned; ) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				struct sk_buff *skb = buffer_info->skb;
				unsigned int segs, bytecount;
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_tx_packets += segs;
				total_tx_bytes += bytecount;
			}
			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
			tx_desc->upper.data = 0;

			if (unlikely(++i == tx_ring->count)) i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
#ifdef CONFIG_E1000_NAPI
#define E1000_TX_WEIGHT 64
		/* weight of a sort for tx, to avoid endless transmit cleanup */
		if (count++ == E1000_TX_WEIGHT) break;
#endif
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	if (unlikely(cleaned && netif_carrier_ok(netdev) &&
		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_queue_stopped(netdev)) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[eop].dma &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
			       (adapter->tx_timeout_factor * HZ))
		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
				"  Tx Queue             <%lu>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  next_to_watch.status <%x>\n",
				(unsigned long)((tx_ring - adapter->tx_ring) /
						sizeof(struct e1000_tx_ring)),
				readl(hw->hw_addr + tx_ring->tdh),
				readl(hw->hw_addr + tx_ring->tdt),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->upper.fields.status);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	adapter->net_stats.tx_bytes += total_tx_bytes;
	adapter->net_stats.tx_packets += total_tx_packets;
	return cleaned;
}

/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @sk_buff: socket buffer with received data
 **/

static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);
	skb->ip_summed = CHECKSUM_NONE;

	/* 82543 or newer only */
	if (unlikely(hw->mac_type < e1000_82543)) return;
	/* Ignore Checksum bit is set */
	if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
	/* TCP/UDP checksum error bit is set */
	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}
	/* TCP/UDP Checksum has not been calculated */
	if (hw->mac_type <= e1000_82547_rev_2) {
		if (!(status & E1000_RXD_STAT_TCPCS))
			return;
	} else {
		if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
			return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (likely(status & E1000_RXD_STAT_TCPCS)) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (hw->mac_type > e1000_82547_rev_2) {
		/* IP fragment with UDP payload */
		/* Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		__sum16 sum = (__force __sum16)htons(csum);
		skb->csum = csum_unfold(~sum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	adapter->hw_csum_good++;
}
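
/* Worked example for the CHECKSUM_COMPLETE path above (illustrative):
 * the hardware reports the one's complement of the payload sum, so for
 * a reported csum of 0x1234 the driver stores
 * csum_unfold(~(__force __sum16)htons(0x1234)), recovering the raw
 * 16-bit sum that the stack then folds and verifies itself. */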

/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 **/
#ifdef CONFIG_E1000_NAPI
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do)
#else
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring)
#endif
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long flags;
	u32 length;
	u8 last_byte;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

#ifdef CONFIG_E1000_NAPI
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
#endif
		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		if (++i == rx_ring->count) i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		pci_unmap_single(pdev,
				 buffer_info->dma,
				 buffer_info->length,
				 PCI_DMA_FROMDEVICE);

		length = le16_to_cpu(rx_desc->length);

		if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
			/* All receives must fit into a single buffer */
			E1000_DBG("%s: Receive packet consumed multiple"
				  " buffers\n", netdev->name);
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			last_byte = *(skb->data + length - 1);
			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock, flags);
				e1000_tbi_adjust_stats(hw, &adapter->stats,
						       length, skb->data);
				spin_unlock_irqrestore(&adapter->stats_lock,
						       flags);
				length--;
			} else {
				/* recycle */
				buffer_info->skb = skb;
				goto next_desc;
			}
		}

		/* adjust length to remove Ethernet CRC, this must be
		 * done after the TBI_ACCEPT workaround above */
		length -= 4;

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += length;
		total_rx_packets++;

		/* code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack */
		if (length < copybreak) {
			struct sk_buff *new_skb =
				netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
			if (new_skb) {
				skb_reserve(new_skb, NET_IP_ALIGN);
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		skb->protocol = eth_type_trans(skb, netdev);
#ifdef CONFIG_E1000_NAPI
		if (unlikely(adapter->vlgrp &&
			     (status & E1000_RXD_STAT_VP))) {
			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
						 le16_to_cpu(rx_desc->special));
		} else {
			netif_receive_skb(skb);
		}
#else /* CONFIG_E1000_NAPI */
		if (unlikely(adapter->vlgrp &&
			     (status & E1000_RXD_STAT_VP))) {
			vlan_hwaccel_rx(skb, adapter->vlgrp,
					le16_to_cpu(rx_desc->special));
		} else {
			netif_rx(skb);
		}
#endif /* CONFIG_E1000_NAPI */
		netdev->last_rx = jiffies;

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}
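
/* Sketch of the copybreak trade-off above (illustrative): for short
 * frames it is cheaper to memcpy into a freshly allocated skb and
 * recycle the DMA-mapped receive buffer than to hand the full-size
 * buffer up and reallocate it:
 *
 *	if (length < copybreak) {
 *		new_skb = netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
 *		// copy payload; keep the old skb in buffer_info for reuse
 *	}
 *
 * `copybreak` is a driver-wide threshold; frames at or above it take the
 * zero-copy path. */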

/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @adapter: board private structure
 **/

#ifdef CONFIG_E1000_NAPI
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				  struct e1000_rx_ring *rx_ring,
				  int *work_done, int work_to_do)
#else
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				  struct e1000_rx_ring *rx_ring)
#endif
{
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct e1000_ps_page_dma *ps_page_dma;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		ps_page = &rx_ring->ps_page[i];
		ps_page_dma = &rx_ring->ps_page_dma[i];
#ifdef CONFIG_E1000_NAPI
		if (unlikely(*work_done >= work_to_do))
			break;
		(*work_done)++;
#endif
		skb = buffer_info->skb;

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		if (++i == rx_ring->count) i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		pci_unmap_single(pdev, buffer_info->dma,
				 buffer_info->length,
				 PCI_DMA_FROMDEVICE);

		if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
			E1000_DBG("%s: Packet Split buffers didn't pick up"
				  " the full packet\n", netdev->name);
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (unlikely(!length)) {
			E1000_DBG("%s: Last part of the packet spanning"
				  " multiple descriptors\n", netdev->name);
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
			/* this looks ugly, but it seems compiler issues make
			 * it more efficient than reusing j */
			int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

			/* page alloc/put takes too long and affects small
			 * packet throughput, so unsplit small packets and
			 * save the alloc/put */
			if (l1 && (l1 <= copybreak) && ((length + l1) <= adapter->rx_ps_bsize0)) {
				u8 *vaddr;
				/* there is no documentation about how to call
				 * kmap_atomic, so we can't hold the mapping
				 * very long */
				pci_dma_sync_single_for_cpu(pdev,
					ps_page_dma->ps_page_dma[0],
					PAGE_SIZE,
					PCI_DMA_FROMDEVICE);
				vaddr = kmap_atomic(ps_page->ps_page[0],
						    KM_SKB_DATA_SOFTIRQ);
				memcpy(skb_tail_pointer(skb), vaddr, l1);
				kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
				pci_dma_sync_single_for_device(pdev,
					ps_page_dma->ps_page_dma[0],
					PAGE_SIZE, PCI_DMA_FROMDEVICE);
				/* remove the CRC */
				l1 -= 4;
				skb_put(skb, l1);
				goto copydone;
			} /* if */
		}

		for (j = 0; j < adapter->rx_ps_pages; j++) {
			if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
				break;
			pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
			ps_page_dma->ps_page_dma[j] = 0;
			skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0,
					   length);
			ps_page->ps_page[j] = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		/* strip the ethernet crc, problem is we're using pages now so
		 * this whole operation can get a little cpu intensive */
		pskb_trim(skb, skb->len - 4);

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr,
				  le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (likely(rx_desc->wb.upper.header_status &
			   cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)))
			adapter->rx_hdr_split++;
#ifdef CONFIG_E1000_NAPI
		if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
						 le16_to_cpu(rx_desc->wb.middle.vlan));
		} else {
			netif_receive_skb(skb);
		}
#else /* CONFIG_E1000_NAPI */
		if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
			vlan_hwaccel_rx(skb, adapter->vlgrp,
					le16_to_cpu(rx_desc->wb.middle.vlan));
		} else {
			netif_rx(skb);
		}
#endif /* CONFIG_E1000_NAPI */
		netdev->last_rx = jiffies;

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 **/

static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
			struct sk_buff *oldskb = skb;
			DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
				"at %p\n", bufsz, skb->data);
			/* Try again, without freeing the previous */
			skb = netdev_alloc_skb(netdev, bufsz);
			/* Failed allocation, critical failure */
			if (!skb) {
				dev_kfree_skb(oldskb);
				break;
			}

			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
				/* give up */
				dev_kfree_skb(skb);
				dev_kfree_skb(oldskb);
				break; /* while !buffer_info->skb */
			}

			/* Use new allocation */
			dev_kfree_skb(oldskb);
		}
		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = pci_map_single(pdev,
						  skb->data,
						  adapter->rx_buffer_len,
						  PCI_DMA_FROMDEVICE);

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter,
					   (void *)(unsigned long)buffer_info->dma,
					   adapter->rx_buffer_len)) {
			DPRINTK(RX_ERR, ERR,
				"dma align check failed: %u bytes at %p\n",
				adapter->rx_buffer_len,
				(void *)(unsigned long)buffer_info->dma);
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;

			pci_unmap_single(pdev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 PCI_DMA_FROMDEVICE);

			break; /* while !buffer_info->skb */
		}
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, hw->hw_addr + rx_ring->rdt);
	}
}
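
/* Worked example for the errata 23 check above (illustrative):
 * e1000_check_64k_bound() rejects buffers that straddle a 64kB physical
 * boundary.  A 2048-byte buffer mapped at 0xFC00 would end at 0x10400,
 * crossing the 64kB mark at 0x10000, so the driver allocates a
 * replacement and only then frees the misaligned skb. */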

/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/

static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      struct e1000_rx_ring *rx_ring,
				      int cleaned_count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct e1000_ps_page_dma *ps_page_dma;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];
	ps_page = &rx_ring->ps_page[i];
	ps_page_dma = &rx_ring->ps_page_dma[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			if (j < adapter->rx_ps_pages) {
				if (likely(!ps_page->ps_page[j])) {
					ps_page->ps_page[j] =
						alloc_page(GFP_ATOMIC);
					if (unlikely(!ps_page->ps_page[j])) {
						adapter->alloc_rx_buff_failed++;
						goto no_buffers;
					}
					ps_page_dma->ps_page_dma[j] =
						pci_map_page(pdev,
							     ps_page->ps_page[j],
							     0, PAGE_SIZE,
							     PCI_DMA_FROMDEVICE);
				}
				/* Refresh the desc even if buffer_addrs didn't
				 * change because each write-back erases
				 * this info.
				 */
				rx_desc->read.buffer_addr[j+1] =
					cpu_to_le64(ps_page_dma->ps_page_dma[j]);
			} else
				rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
		}

		skb = netdev_alloc_skb(netdev,
				       adapter->rx_ps_bsize0 + NET_IP_ALIGN);

		if (unlikely(!skb)) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_ps_bsize0;
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_ps_bsize0,
						  PCI_DMA_FROMDEVICE);

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count)) i = 0;
		buffer_info = &rx_ring->buffer_info[i];
		ps_page = &rx_ring->ps_page[i];
		ps_page_dma = &rx_ring->ps_page_dma[i];
	}

no_buffers:
	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0)) i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		/* Hardware increments by 16 bytes, but packet split
		 * descriptors are 32 bytes...so we increment tail
		 * twice as much.
		 */
		writel(i<<1, hw->hw_addr + rx_ring->rdt);
	}
}

/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: board private structure
 **/

static void e1000_smartspeed(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status;
	u16 phy_ctrl;

	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
	    !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
					    phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(hw) &&
			    !e1000_read_phy_reg(hw, PHY_CTRL,
						&phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(hw, PHY_CTRL,
						    phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(hw) &&
		    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
4740
4741/**
4742 * e1000_ioctl -
4743 * @netdev:
4744 * @ifreq:
4745 * @cmd:
4746 **/
4747
64798845 4748static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1da177e4
LT
4749{
4750 switch (cmd) {
4751 case SIOCGMIIPHY:
4752 case SIOCGMIIREG:
4753 case SIOCSMIIREG:
4754 return e1000_mii_ioctl(netdev, ifr, cmd);
4755 default:
4756 return -EOPNOTSUPP;
4757 }
4758}
4759
4760/**
4761 * e1000_mii_ioctl -
4762 * @netdev:
4763 * @ifreq:
4764 * @cmd:
4765 **/
4766
64798845
JP
4767static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4768 int cmd)
1da177e4 4769{
60490fe0 4770 struct e1000_adapter *adapter = netdev_priv(netdev);
1dc32918 4771 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
4772 struct mii_ioctl_data *data = if_mii(ifr);
4773 int retval;
406874a7
JP
4774 u16 mii_reg;
4775 u16 spddplx;
97876fc6 4776 unsigned long flags;
1da177e4 4777
1dc32918 4778 if (hw->media_type != e1000_media_type_copper)
1da177e4
LT
4779 return -EOPNOTSUPP;
4780
4781 switch (cmd) {
4782 case SIOCGMIIPHY:
1dc32918 4783 data->phy_id = hw->phy_addr;
1da177e4
LT
4784 break;
4785 case SIOCGMIIREG:
96838a40 4786 if (!capable(CAP_NET_ADMIN))
1da177e4 4787 return -EPERM;
97876fc6 4788 spin_lock_irqsave(&adapter->stats_lock, flags);
1dc32918 4789 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
97876fc6
MC
4790 &data->val_out)) {
4791 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1da177e4 4792 return -EIO;
97876fc6
MC
4793 }
4794 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1da177e4
LT
4795 break;
4796 case SIOCSMIIREG:
96838a40 4797 if (!capable(CAP_NET_ADMIN))
1da177e4 4798 return -EPERM;
96838a40 4799 if (data->reg_num & ~(0x1F))
1da177e4
LT
4800 return -EFAULT;
4801 mii_reg = data->val_in;
97876fc6 4802 spin_lock_irqsave(&adapter->stats_lock, flags);
1dc32918 4803 if (e1000_write_phy_reg(hw, data->reg_num,
97876fc6
MC
4804 mii_reg)) {
4805 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1da177e4 4806 return -EIO;
97876fc6 4807 }
f0163ac4 4808 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1dc32918 4809 if (hw->media_type == e1000_media_type_copper) {
1da177e4
LT
4810 switch (data->reg_num) {
4811 case PHY_CTRL:
96838a40 4812 if (mii_reg & MII_CR_POWER_DOWN)
1da177e4 4813 break;
96838a40 4814 if (mii_reg & MII_CR_AUTO_NEG_EN) {
1dc32918
JP
4815 hw->autoneg = 1;
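					/* 0x2F = advertise 10/100 at both
					 * duplexes plus 1000 full; 1000
					 * half is not supported.
					 */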
					hw->autoneg_advertised = 0x2F;
				} else {
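					/* Forced mode: decode the standard
					 * MII control register bits -
					 * 0x40 is the speed MSB (1000),
					 * 0x2000 the speed LSB (100),
					 * 0x100 full duplex.
					 */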
					if (mii_reg & 0x40)
						spddplx = SPEED_1000;
					else if (mii_reg & 0x2000)
						spddplx = SPEED_100;
					else
						spddplx = SPEED_10;
					spddplx += (mii_reg & 0x100)
						   ? DUPLEX_FULL :
						   DUPLEX_HALF;
					retval = e1000_set_spd_dplx(adapter,
								    spddplx);
					if (retval)
						return retval;
				}
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				if (e1000_phy_reset(hw))
					return -EIO;
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}

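/* MWI (Memory Write and Invalidate) lets the device use whole-cacheline
 * PCI write cycles.  These wrappers exist so the bus-independent
 * e1000_hw code can reach the PCI layer through its hw->back pointer.
 */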
void e1000_pci_set_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	int ret_val = pci_set_mwi(adapter->pdev);

	if (ret_val)
		DPRINTK(PROBE, ERR, "Error in setting MWI\n");
}

void e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}

int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return pcix_get_mmrbc(adapter->pdev);
}

void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
{
	struct e1000_adapter *adapter = hw->back;
	pcix_set_mmrbc(adapter->pdev, mmrbc);
}

s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct e1000_adapter *adapter = hw->back;
	u16 cap_offset;

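	/* Locate the PCI Express capability in config space; reg is an
	 * offset into that capability structure.
	 */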
	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return E1000_SUCCESS;
}

void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
	outl(value, port);
}

static void e1000_vlan_rx_register(struct net_device *netdev,
				   struct vlan_group *grp)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = er32(CTRL);
		ctrl |= E1000_CTRL_VME;
		ew32(CTRL, ctrl);

		if (adapter->hw.mac_type != e1000_ich8lan) {
			/* enable VLAN receive filtering */
			rctl = er32(RCTL);
			rctl &= ~E1000_RCTL_CFIEN;
			ew32(RCTL, rctl);
			e1000_update_mng_vlan(adapter);
		}
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = er32(CTRL);
		ctrl &= ~E1000_CTRL_VME;
		ew32(CTRL, ctrl);

		if (adapter->hw.mac_type != e1000_ich8lan) {
			if (adapter->mng_vlan_id !=
			    (u16)E1000_MNG_VLAN_NONE) {
				e1000_vlan_rx_kill_vid(netdev,
						       adapter->mng_vlan_id);
				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
			}
		}
	}

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}

static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return;
	/* add VID to filter table */
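	/* The VFTA is a 4096-bit bitmap spread across 128 32-bit
	 * registers: VID bits [11:5] select the register, bits [4:0]
	 * the bit within it.
	 */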
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);
}

static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		e1000_release_hw_control(adapter);
		return;
	}

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);
}

static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			e1000_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->autoneg = 0;

	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((hw->media_type == e1000_media_type_fiber) &&
	    spddplx != (SPEED_1000 + DUPLEX_FULL)) {
		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}

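	/* spddplx encodes the request as SPEED_* + DUPLEX_* (e.g.
	 * 100 + DUPLEX_FULL), matching what e1000_mii_ioctl builds.
	 */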
	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		hw->autoneg = 1;
		hw->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}

static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = er32(STATUS);
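	/* If the link is up at suspend time, a link-status-change wake
	 * event is not useful, so drop that filter bit.
	 */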
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_MPE;
			ew32(RCTL, rctl);
		}

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		/* Allow time for pending master requests to run */
		e1000_disable_pciex_master(hw);

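		/* WUC.PME_EN arms PME assertion; WUFC selects which wake
		 * events (link change, magic packet, multicast, ...) may
		 * trigger it.
		 */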
		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}

	e1000_release_manageability(adapter);

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt) {
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	}

	if (hw->phy_type == e1000_phy_igp_3)
		e1000_phy_powerdown_workaround(hw);

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	e1000_release_hw_control(adapter);

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	if ((err = pci_enable_device(pdev))) {
		printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev) && (err = e1000_request_irq(adapter)))
		return err;

	e1000_power_up_phy(adapter);
	e1000_reset(adapter);
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	/* If the controller is 82573 and f/w is AMT, do not set
	 * DRV_LOAD until the interface is up.  For all other cases,
	 * let the f/w know that the h/w is now under the control
	 * of the driver. */
	if (hw->mac_type != e1000_82573 ||
	    !e1000_check_mng_mode(hw))
		e1000_get_hw_control(adapter);

	return 0;
}
#endif

static void e1000_shutdown(struct pci_dev *pdev)
{
	e1000_suspend(pdev, PMSG_SUSPEND);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);
#ifndef CONFIG_E1000_NAPI
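	/* Without NAPI there is no poll handler to clean the ring, so
	 * clean it directly here.
	 */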
	adapter->clean_rx(adapter, adapter->rx_ring);
#endif
	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		e1000_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	ew32(WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.  Implementation resembles the
 * second-half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			printk(KERN_ERR "e1000: can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* If the controller is 82573 and f/w is AMT, do not set
	 * DRV_LOAD until the interface is up.  For all other cases,
	 * let the f/w know that the h/w is now under the control
	 * of the driver. */
	if (hw->mac_type != e1000_82573 ||
	    !e1000_check_mng_mode(hw))
		e1000_get_hw_control(adapter);
}

/* e1000_main.c */