/*******************************************************************************

  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of the GNU General Public License as published by the Free
  Software Foundation; either version 2 of the License, or (at your option)
  any later version.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA  02111-1307, USA.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include "e1000.h"

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#ifndef CONFIG_E1000_NAPI
#define DRIVERNAPI
#else
#define DRIVERNAPI "-NAPI"
#endif
#define DRV_VERSION "7.1.9-k4"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1049),
	INTEL_E1000_ETHERNET_DEVICE(0x104A),
	INTEL_E1000_ETHERNET_DEVICE(0x104B),
	INTEL_E1000_ETHERNET_DEVICE(0x104C),
	INTEL_E1000_ETHERNET_DEVICE(0x104D),
	INTEL_E1000_ETHERNET_DEVICE(0x105E),
	INTEL_E1000_ETHERNET_DEVICE(0x105F),
	INTEL_E1000_ETHERNET_DEVICE(0x1060),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x107D),
	INTEL_E1000_ETHERNET_DEVICE(0x107E),
	INTEL_E1000_ETHERNET_DEVICE(0x107F),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x108B),
	INTEL_E1000_ETHERNET_DEVICE(0x108C),
	INTEL_E1000_ETHERNET_DEVICE(0x1096),
	INTEL_E1000_ETHERNET_DEVICE(0x1098),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x109A),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x10B9),
	INTEL_E1000_ETHERNET_DEVICE(0x10BA),
	INTEL_E1000_ETHERNET_DEVICE(0x10BB),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
/* Local Function Prototypes */

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_multi(struct net_device *netdev);
static void e1000_update_phy_info(unsigned long data);
static void e1000_watchdog(unsigned long data);
static void e1000_82547_tx_fifo_stall(unsigned long data);
static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
#ifdef CONFIG_E1000_NAPI
static int e1000_clean(struct net_device *poll_dev, int *budget);
static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring,
				    int *work_done, int work_to_do);
static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				       struct e1000_rx_ring *rx_ring,
				       int *work_done, int work_to_do);
#else
static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				       struct e1000_rx_ring *rx_ring);
#endif
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      struct e1000_rx_ring *rx_ring,
				      int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct net_device *dev);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
static void e1000_shutdown(struct pci_dev *pdev);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif

static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);
static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
static struct pci_driver e1000_driver = {
	.name = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe = e1000_probe,
	.remove = __devexit_p(e1000_remove),
	/* Power Management Hooks */
	.suspend = e1000_suspend,
	.resume = e1000_resume,
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init
e1000_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       e1000_driver_string, e1000_driver_version);

	printk(KERN_INFO "%s\n", e1000_copyright);

	ret = pci_module_init(&e1000_driver);

	return ret;
}

module_init(e1000_init_module);
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit
e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int flags, err = 0;

	flags = IRQF_SHARED;
#ifdef CONFIG_PCI_MSI
	if (adapter->hw.mac_type > e1000_82547_rev_2) {
		adapter->have_msi = TRUE;
		if ((err = pci_enable_msi(adapter->pdev))) {
			DPRINTK(PROBE, ERR,
				"Unable to allocate MSI interrupt Error: %d\n", err);
			adapter->have_msi = FALSE;
		}
	}
	if (adapter->have_msi)
		flags &= ~IRQF_SHARED;
#endif
	if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
			       netdev->name, netdev)))
		DPRINTK(PROBE, ERR,
			"Unable to allocate interrupt Error: %d\n", err);

	return err;
}
static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);

#ifdef CONFIG_PCI_MSI
	if (adapter->have_msi)
		pci_disable_msi(adapter->pdev);
#endif
}
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void
e1000_irq_disable(struct e1000_adapter *adapter)
{
	atomic_inc(&adapter->irq_sem);
	E1000_WRITE_REG(&adapter->hw, IMC, ~0);
	E1000_WRITE_FLUSH(&adapter->hw);
	synchronize_irq(adapter->pdev->irq);
}
/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void
e1000_irq_enable(struct e1000_adapter *adapter)
{
	if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
		E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
		E1000_WRITE_FLUSH(&adapter->hw);
	}
}
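
/* Note: adapter->irq_sem acts as a disable-depth count.  e1000_irq_disable()
 * bumps the count and masks every interrupt cause via IMC, while
 * e1000_irq_enable() rewrites the default IMS mask only once the count has
 * dropped back to zero, so nested disable/enable pairs balance out. */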
static void
e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	uint16_t vid = adapter->hw.mng_cookie.vlan_id;
	uint16_t old_vid = adapter->mng_vlan_id;
	if (adapter->vlgrp) {
		if (!adapter->vlgrp->vlan_devices[vid]) {
			if (adapter->hw.mng_cookie.status &
			    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
				e1000_vlan_rx_add_vid(netdev, vid);
				adapter->mng_vlan_id = vid;
			} else
				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;

			if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
			    (vid != old_vid) &&
			    !adapter->vlgrp->vlan_devices[old_vid])
				e1000_vlan_rx_kill_vid(netdev, old_vid);
		} else
			adapter->mng_vlan_id = vid;
	}
}
/**
 * e1000_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is closed.
 **/

static void
e1000_release_hw_control(struct e1000_adapter *adapter)
{
	uint32_t ctrl_ext;
	uint32_t swsm;
	uint32_t extcnf;

	/* Let firmware take over control of h/w */
	switch (adapter->hw.mac_type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
				ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
		break;
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, SWSM);
		E1000_WRITE_REG(&adapter->hw, SWSM,
				swsm & ~E1000_SWSM_DRV_LOAD);
	case e1000_ich8lan:
		extcnf = E1000_READ_REG(&adapter->hw, CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
				extcnf & ~E1000_CTRL_EXT_DRV_LOAD);
		break;
	default:
		break;
	}
}
/**
 * e1000_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 **/

static void
e1000_get_hw_control(struct e1000_adapter *adapter)
{
	uint32_t ctrl_ext;
	uint32_t swsm;
	uint32_t extcnf;

	/* Let firmware know the driver has taken over */
	switch (adapter->hw.mac_type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
				ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
		break;
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, SWSM);
		E1000_WRITE_REG(&adapter->hw, SWSM,
				swsm | E1000_SWSM_DRV_LOAD);
		break;
	case e1000_ich8lan:
		extcnf = E1000_READ_REG(&adapter->hw, EXTCNF_CTRL);
		E1000_WRITE_REG(&adapter->hw, EXTCNF_CTRL,
				extcnf | E1000_EXTCNF_CTRL_SWFLAG);
		break;
	default:
		break;
	}
}
int
e1000_up(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	/* hardware has been reset, we need to reload some things */

	e1000_set_multi(netdev);

	e1000_restore_vlan(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}

	adapter->tx_queue_len = netdev->tx_queue_len;

	mod_timer(&adapter->watchdog_timer, jiffies);

#ifdef CONFIG_E1000_NAPI
	netif_poll_enable(netdev);
#endif
	e1000_irq_enable(adapter);

	return 0;
}
/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 **/

static void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	uint16_t mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (adapter->hw.media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle */
		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
	}
}
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
				     e1000_check_mng_mode(&adapter->hw);
	/* Power down the PHY so no link is implied when interface is down
	 * The PHY cannot be powered down if any of the following is TRUE
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active */
	if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
	    adapter->hw.mac_type != e1000_ich8lan &&
	    adapter->hw.media_type == e1000_media_type_copper &&
	    !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
	    !mng_mode_enabled &&
	    !e1000_check_phy_reset_block(&adapter->hw)) {
		uint16_t mii_reg = 0;
		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
		mdelay(1);
	}
}
void
e1000_down(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	e1000_irq_disable(adapter);

	del_timer_sync(&adapter->tx_fifo_stall_timer);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

#ifdef CONFIG_E1000_NAPI
	netif_poll_disable(netdev);
#endif
	netdev->tx_queue_len = adapter->tx_queue_len;
	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}
void
e1000_reinit_locked(struct e1000_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}
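
/* Note: __E1000_RESETTING is used above as a simple lock -- the caller spins
 * (sleeping a millisecond at a time) until any reset already in progress
 * clears the bit, holds it for the down/up sequence, then releases it. */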
void
e1000_reset(struct e1000_adapter *adapter)
{
	uint32_t pba, manc;
	uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	switch (adapter->hw.mac_type) {
	case e1000_82547:
	case e1000_82547_rev_2:
		pba = E1000_PBA_30K;
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		pba = E1000_PBA_38K;
		break;
	case e1000_82573:
		pba = E1000_PBA_12K;
		break;
	case e1000_ich8lan:
		pba = E1000_PBA_8K;
		break;
	default:
		pba = E1000_PBA_48K;
		break;
	}

	if ((adapter->hw.mac_type != e1000_82573) &&
	    (adapter->netdev->mtu > E1000_RXBUFFER_8192))
		pba -= 8; /* allocate more FIFO for Tx */
	if (adapter->hw.mac_type == e1000_82547) {
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
			(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
		atomic_set(&adapter->tx_fifo_stall, 0);
	}

	E1000_WRITE_REG(&adapter->hw, PBA, pba);

	/* flow control settings */
	/* Set the FC high water mark to 90% of the FIFO size.
	 * Required to clear last 3 LSB */
	fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
	/* We can't use 90% on small FIFOs because the remainder
	 * would be less than 1 full frame.  In this case, we size
	 * it to allow at least a full frame above the high water
	 * mark. */
	if (pba < E1000_PBA_16K)
		fc_high_water_mark = (pba * 1024) - 1600;

	adapter->hw.fc_high_water = fc_high_water_mark;
	adapter->hw.fc_low_water = fc_high_water_mark - 8;
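	/* Note on the watermark math above: pba counts the packet buffer in
	 * 1 KB units, so pba * 9216 / 10 is 90% of the buffer expressed in
	 * bytes, and masking with 0xFFF8 keeps the value 8-byte aligned
	 * (the hardware requires the low 3 bits to stay clear). */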
	if (adapter->hw.mac_type == e1000_80003es2lan)
		adapter->hw.fc_pause_time = 0xFFFF;
	else
		adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
	adapter->hw.fc_send_xon = 1;
	adapter->hw.fc = adapter->hw.original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(&adapter->hw);
	if (adapter->hw.mac_type >= e1000_82544)
		E1000_WRITE_REG(&adapter->hw, WUC, 0);
	if (e1000_init_hw(&adapter->hw))
		DPRINTK(PROBE, ERR, "Hardware Error\n");
	e1000_update_mng_vlan(adapter);
	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(&adapter->hw);
	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);

	if (!adapter->smart_power_down &&
	    (adapter->hw.mac_type == e1000_82571 ||
	     adapter->hw.mac_type == e1000_82572)) {
		uint16_t phy_data = 0;
		/* speed up time to link by disabling smart power down, ignore
		 * the return value of this function because there is nothing
		 * different we would do if it failed */
		e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
				   &phy_data);
		phy_data &= ~IGP02E1000_PM_SPD;
		e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
				    phy_data);
	}

	if (adapter->hw.mac_type < e1000_ich8lan)
	/* FIXME: this code is duplicate and wrong for PCI Express */
	if (adapter->en_mng_pt) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
		E1000_WRITE_REG(&adapter->hw, MANC, manc);
	}
}
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/

static int __devinit
e1000_probe(struct pci_dev *pdev,
	    const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	unsigned long mmio_start, mmio_len;
	unsigned long flash_start, flash_len;

	static int cards_found = 0;
	static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	uint16_t eeprom_data;
	uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
	if ((err = pci_enable_device(pdev)))
		return err;

	if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
	    !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
		pci_using_dac = 1;
	} else {
		if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
		    (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
			E1000_ERR("No usable DMA configuration, aborting\n");
			return err;
		}
		pci_using_dac = 0;
	}

	if ((err = pci_request_regions(pdev, e1000_driver_name)))
		return err;

	pci_set_master(pdev);
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->msg_enable = (1 << debug) - 1;
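	/* debug is a message level (0..16); shifting and subtracting one
	 * turns it into a netif_msg mask with all bits below that level set */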
	mmio_start = pci_resource_start(pdev, BAR_0);
	mmio_len = pci_resource_len(pdev, BAR_0);

	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = BAR_1; i <= BAR_5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			adapter->hw.io_base = pci_resource_start(pdev, i);
			break;
		}
	}

	netdev->open = &e1000_open;
	netdev->stop = &e1000_close;
	netdev->hard_start_xmit = &e1000_xmit_frame;
	netdev->get_stats = &e1000_get_stats;
	netdev->set_multicast_list = &e1000_set_multi;
	netdev->set_mac_address = &e1000_set_mac;
	netdev->change_mtu = &e1000_change_mtu;
	netdev->do_ioctl = &e1000_ioctl;
	e1000_set_ethtool_ops(netdev);
	netdev->tx_timeout = &e1000_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_E1000_NAPI
	netdev->poll = &e1000_clean;
	netdev->weight = 64;
#endif
	netdev->vlan_rx_register = e1000_vlan_rx_register;
	netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = e1000_netpoll;
#endif
	strcpy(netdev->name, pci_name(pdev));

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;
	netdev->base_addr = adapter->hw.io_base;

	adapter->bd_number = cards_found;
	/* setup the private structure */

	if ((err = e1000_sw_init(adapter)))
		goto err_sw_init;

	/* Flash BAR mapping must happen after e1000_sw_init
	 * because it depends on mac_type */
	if ((adapter->hw.mac_type == e1000_ich8lan) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address) {
			err = -EIO;
			goto err_flashmap;
		}
	}

	if ((err = e1000_check_phy_reset_block(&adapter->hw)))
		DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");

	/* if ksp3, indicate if it's port a being setup */
	if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 &&
	    e1000_ksp3_port_a == 0)
		adapter->ksp3_port_a = 1;
	e1000_ksp3_port_a++;
	/* Reset for multiple KP3 adapters */
	if (e1000_ksp3_port_a == 4)
		e1000_ksp3_port_a = 0;

	if (adapter->hw.mac_type >= e1000_82543) {
		netdev->features = NETIF_F_SG |
				   NETIF_F_HW_CSUM |
				   NETIF_F_HW_VLAN_TX |
				   NETIF_F_HW_VLAN_RX |
				   NETIF_F_HW_VLAN_FILTER;
		if (adapter->hw.mac_type == e1000_ich8lan)
			netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
	}

#ifdef NETIF_F_TSO
	if ((adapter->hw.mac_type >= e1000_82544) &&
	    (adapter->hw.mac_type != e1000_82547))
		netdev->features |= NETIF_F_TSO;

#ifdef NETIF_F_TSO_IPV6
	if (adapter->hw.mac_type > e1000_82547_rev_2)
		netdev->features |= NETIF_F_TSO_IPV6;
#endif
#endif
	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->features |= NETIF_F_LLTX;

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);

	/* initialize eeprom parameters */

	if (e1000_init_eeprom_params(&adapter->hw)) {
		E1000_ERR("EEPROM initialization failed\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state */

	e1000_reset_hw(&adapter->hw);

	/* make sure the EEPROM is good */

	if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the EEPROM */

	if (e1000_read_mac_addr(&adapter->hw))
		DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	e1000_read_part_num(&adapter->hw, &(adapter->part_num));

	e1000_get_bus_info(&adapter->hw);
	init_timer(&adapter->tx_fifo_stall_timer);
	adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
	adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &e1000_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = &e1000_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->reset_task,
		  (void (*)(void *))e1000_reset_task, netdev);

	/* we're going to reset, so assume we have no link for now */

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	e1000_check_options(adapter);
	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (adapter->hw.mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(&adapter->hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_ich8lan:
		e1000_read_eeprom(&adapter->hw,
			EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
	case e1000_82571:
	case e1000_80003es2lan:
		if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){
			e1000_read_eeprom(&adapter->hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(&adapter->hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->wol |= E1000_WUFC_MAG;
	/* print bus type/speed/width info */
	{
	struct e1000_hw *hw = &adapter->hw;
	DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
		((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
		 (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
		((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
		 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
		 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
		 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
		 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
		((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
		 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
		 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
		 "32-bit"));
	}

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	/* If the controller is 82573 and f/w is AMT, do not set
	 * DRV_LOAD until the interface is up.  For all other cases,
	 * let the f/w know that the h/w is now under the control
	 * of the driver. */
	if (adapter->hw.mac_type != e1000_82573 ||
	    !e1000_check_mng_mode(&adapter->hw))
		e1000_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	if ((err = register_netdev(netdev)))
		goto err_register;

	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
err_flashmap:
err_sw_init:
err_eeprom:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	return err;
}
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit
e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t manc;
#ifdef CONFIG_E1000_NAPI
	int i;
#endif

	flush_scheduled_work();

	if (adapter->hw.mac_type >= e1000_82540 &&
	    adapter->hw.mac_type != e1000_ich8lan &&
	    adapter->hw.media_type == e1000_media_type_copper) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		if (manc & E1000_MANC_SMBUS_EN) {
			manc |= E1000_MANC_ARP_EN;
			E1000_WRITE_REG(&adapter->hw, MANC, manc);
		}
	}

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	e1000_release_hw_control(adapter);

	unregister_netdev(netdev);
#ifdef CONFIG_E1000_NAPI
	for (i = 0; i < adapter->num_rx_queues; i++)
		dev_put(&adapter->polling_netdev[i]);
#endif

	if (!e1000_check_phy_reset_block(&adapter->hw))
		e1000_phy_hw_reset(&adapter->hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
#ifdef CONFIG_E1000_NAPI
	kfree(adapter->polling_netdev);
#endif

	iounmap(adapter->hw.hw_addr);
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int __devinit
e1000_sw_init(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
#ifdef CONFIG_E1000_NAPI
	int i;
#endif

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
	hw->max_frame_size = netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */

	if (e1000_set_mac_type(hw)) {
		DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);

	hw->wait_autoneg_complete = FALSE;
	hw->tbi_compatibility_en = TRUE;
	hw->adaptive_ifs = TRUE;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = FALSE;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

#ifdef CONFIG_E1000_NAPI
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->polling_netdev[i].priv = adapter;
		adapter->polling_netdev[i].poll = &e1000_clean;
		adapter->polling_netdev[i].weight = 64;
		dev_hold(&adapter->polling_netdev[i]);
		set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
	}
	spin_lock_init(&adapter->tx_queue_lock);
#endif

	atomic_set(&adapter->irq_sem, 1);
	spin_lock_init(&adapter->stats_lock);

	return E1000_SUCCESS;
}
/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/

static int __devinit
e1000_alloc_queues(struct e1000_adapter *adapter)
{
	int size;

	size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
	adapter->tx_ring = kmalloc(size, GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;
	memset(adapter->tx_ring, 0, size);

	size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
	adapter->rx_ring = kmalloc(size, GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}
	memset(adapter->rx_ring, 0, size);

#ifdef CONFIG_E1000_NAPI
	size = sizeof(struct net_device) * adapter->num_rx_queues;
	adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
	if (!adapter->polling_netdev) {
		kfree(adapter->tx_ring);
		kfree(adapter->rx_ring);
		return -ENOMEM;
	}
	memset(adapter->polling_netdev, 0, size);
#endif

	return E1000_SUCCESS;
}
/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int
e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_DRIVER_TESTING, &adapter->flags))
		return -EBUSY;

	/* allocate transmit descriptors */

	if ((err = e1000_setup_all_tx_resources(adapter)))
		goto err_setup_tx;

	/* allocate receive descriptors */

	if ((err = e1000_setup_all_rx_resources(adapter)))
		goto err_setup_rx;

	err = e1000_request_irq(adapter);
	if (err)
		goto err_up;

	e1000_power_up_phy(adapter);

	if ((err = e1000_up(adapter)))
		goto err_up;
	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* If AMT is enabled, let the firmware know that the network
	 * interface is now open */
	if (adapter->hw.mac_type == e1000_82573 &&
	    e1000_check_mng_mode(&adapter->hw))
		e1000_get_hw_control(adapter);

	return E1000_SUCCESS;

err_up:
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}
/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}

	/* If AMT is enabled, let the firmware know that the network
	 * interface is now closed */
	if (adapter->hw.mac_type == e1000_82573 &&
	    e1000_check_mng_mode(&adapter->hw))
		e1000_release_hw_control(adapter);

	return 0;
}
/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/

static boolean_t
e1000_check_64k_bound(struct e1000_adapter *adapter,
		      void *start, unsigned long len)
{
	unsigned long begin = (unsigned long) start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23 */
	if (adapter->hw.mac_type == e1000_82545 ||
	    adapter->hw.mac_type == e1000_82546) {
		return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
	}

	return TRUE;
}
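
/* Note on the check above: (begin ^ (end - 1)) >> 16 is non-zero exactly when
 * the first and last byte of the buffer fall in different 64 KB regions,
 * i.e. when the allocation straddles a 64 KB boundary. */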
/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr:    tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/

static int
e1000_setup_tx_resources(struct e1000_adapter *adapter,
			 struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vmalloc(size);
	if (!txdr->buffer_info) {
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}
	memset(txdr->buffer_info, 0, size);

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	E1000_ROUNDUP(txdr->size, 4096);

	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
			"at %p\n", txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
		txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			pci_free_consistent(pdev, txdr->size, txdr->desc,
					    txdr->dma);
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate aligned memory "
				"for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;
	spin_lock_init(&txdr->tx_lock);

	return 0;
}
/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 * 				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/

int
e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			DPRINTK(PROBE, ERR,
				"Allocation for Tx Queue %u failed\n", i);
			break;
		}
	}

	return err;
}
/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void
e1000_configure_tx(struct e1000_adapter *adapter)
{
	uint64_t tdba;
	struct e1000_hw *hw = &adapter->hw;
	uint32_t tdlen, tctl, tipg, tarc;
	uint32_t ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */

	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		E1000_WRITE_REG(hw, TDLEN, tdlen);
		E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
		E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
		E1000_WRITE_REG(hw, TDT, 0);
		E1000_WRITE_REG(hw, TDH, 0);
		adapter->tx_ring[0].tdh = E1000_TDH;
		adapter->tx_ring[0].tdt = E1000_TDT;
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */

	if (hw->media_type == e1000_media_type_fiber ||
	    hw->media_type == e1000_media_type_internal_serdes)
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	case e1000_80003es2lan:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	E1000_WRITE_REG(hw, TIPG, tipg);

	/* Set the Tx Interrupt Delay register */

	E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */

	tctl = E1000_READ_REG(hw, TCTL);

	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

#ifdef DISABLE_MULR
	/* disable Multiple Reads for debugging */
	tctl &= ~E1000_TCTL_MULR;
#endif

	if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
		tarc = E1000_READ_REG(hw, TARC0);
		tarc |= ((1 << 25) | (1 << 21));
		E1000_WRITE_REG(hw, TARC0, tarc);
		tarc = E1000_READ_REG(hw, TARC1);
		tarc |= (1 << 25);
		if (tctl & E1000_TCTL_MULR)
			tarc &= ~(1 << 28);
		else
			tarc |= (1 << 28);
		E1000_WRITE_REG(hw, TARC1, tarc);
	} else if (hw->mac_type == e1000_80003es2lan) {
		tarc = E1000_READ_REG(hw, TARC0);
		tarc |= 1;
		if (hw->media_type == e1000_media_type_internal_serdes)
			tarc |= (1 << 20);
		E1000_WRITE_REG(hw, TARC0, tarc);
		tarc = E1000_READ_REG(hw, TARC1);
		tarc |= 1;
		E1000_WRITE_REG(hw, TARC1, tarc);
	}

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
		E1000_TXD_CMD_IFCS;

	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path. */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = 1;

	E1000_WRITE_REG(hw, TCTL, tctl);
}
/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr:    rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/

static int
e1000_setup_rx_resources(struct e1000_adapter *adapter,
			 struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vmalloc(size);
	if (!rxdr->buffer_info) {
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->buffer_info, 0, size);

	size = sizeof(struct e1000_ps_page) * rxdr->count;
	rxdr->ps_page = kmalloc(size, GFP_KERNEL);
	if (!rxdr->ps_page) {
		vfree(rxdr->buffer_info);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->ps_page, 0, size);

	size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
	rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
	if (!rxdr->ps_page_dma) {
		vfree(rxdr->buffer_info);
		kfree(rxdr->ps_page);
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->ps_page_dma, 0, size);

	if (adapter->hw.mac_type <= e1000_82547_rev_2)
		desc_len = sizeof(struct e1000_rx_desc);
	else
		desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * desc_len;
	E1000_ROUNDUP(rxdr->size, 4096);

	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);

	if (!rxdr->desc) {
		DPRINTK(PROBE, ERR,
		"Unable to allocate memory for the receive descriptor ring\n");
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		kfree(rxdr->ps_page);
		kfree(rxdr->ps_page_dma);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
			"at %p\n", rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate memory "
				"for the receive descriptor ring\n");
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			pci_free_consistent(pdev, rxdr->size, rxdr->desc,
					    rxdr->dma);
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
			DPRINTK(PROBE, ERR,
				"Unable to allocate aligned memory "
				"for the receive descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;

	return 0;
}
/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 * 				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/

int
e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			DPRINTK(PROBE, ERR,
				"Allocation for Rx Queue %u failed\n", i);
			break;
		}
	}

	return err;
}
/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
			   (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
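/* PAGE_USE_COUNT(S) is simply the number of whole pages needed to hold S
 * bytes: S / PAGE_SIZE, rounded up when S is not page aligned. */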
static void
e1000_setup_rctl(struct e1000_adapter *adapter)
{
	uint32_t rctl, rfctl;
	uint32_t psrctl = 0;
#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
	uint32_t pages = 0;
#endif

	rctl = E1000_READ_REG(&adapter->hw, RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

	if (adapter->hw.tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_256:
		rctl |= E1000_RCTL_SZ_256;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_512:
		rctl |= E1000_RCTL_SZ_512;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_1024:
		rctl |= E1000_RCTL_SZ_1024;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
	/* 82571 and greater support packet-split where the protocol
	 * header is placed in skb->data and the packet data is
	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
	 * In the case of a non-split, skb->data is linearly filled,
	 * followed by the page buffers.  Therefore, skb->data is
	 * sized to hold the largest protocol header.
	 */
	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
	if ((adapter->hw.mac_type > e1000_82547_rev_2) && (pages <= 3) &&
	    PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE))
		adapter->rx_ps_pages = pages;
	else
		adapter->rx_ps_pages = 0;
#endif
	if (adapter->rx_ps_pages) {
		/* Configure extra packet-split registers */
		rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
		rfctl |= E1000_RFCTL_EXTEN;
		/* disable IPv6 packet split support */
		rfctl |= E1000_RFCTL_IPV6_DIS;
		E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);

		rctl |= E1000_RCTL_DTYP_PS;

		psrctl |= adapter->rx_ps_bsize0 >>
			E1000_PSRCTL_BSIZE0_SHIFT;

		switch (adapter->rx_ps_pages) {
		case 3:
			psrctl |= PAGE_SIZE <<
				E1000_PSRCTL_BSIZE3_SHIFT;
		case 2:
			psrctl |= PAGE_SIZE <<
				E1000_PSRCTL_BSIZE2_SHIFT;
		case 1:
			psrctl |= PAGE_SIZE >>
				E1000_PSRCTL_BSIZE1_SHIFT;
			break;
		}

		E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
	}

	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
}
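
/* Note: in the rx_ps_pages switch above the case labels intentionally fall
 * through, so configuring N packet-split pages also programs the buffer
 * sizes for all lower-numbered page buffers in PSRCTL. */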
/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void
e1000_configure_rx(struct e1000_adapter *adapter)
{
	uint64_t rdba;
	struct e1000_hw *hw = &adapter->hw;
	uint32_t rdlen, rctl, rxcsum, ctrl_ext;

	if (adapter->rx_ps_pages) {
		/* this is a 32 byte descriptor */
		rdlen = adapter->rx_ring[0].count *
			sizeof(union e1000_rx_desc_packet_split);
		adapter->clean_rx = e1000_clean_rx_irq_ps;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
	} else {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = E1000_READ_REG(hw, RCTL);
	E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

	if (hw->mac_type >= e1000_82540) {
		E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
		if (adapter->itr > 1)
			E1000_WRITE_REG(hw, ITR,
				1000000000 / (adapter->itr * 256));
	}
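	/* The ITR register holds the minimum inter-interrupt interval in
	 * 256 ns units, so 1000000000 / (itr * 256) converts the requested
	 * interrupts-per-second rate into that interval. */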
	if (hw->mac_type >= e1000_82571) {
		ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
		/* Reset delay timers after every interrupt */
		ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
#ifdef CONFIG_E1000_NAPI
		/* Auto-Mask interrupts upon ICR read. */
		ctrl_ext |= E1000_CTRL_EXT_IAME;
#endif
		E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
		E1000_WRITE_REG(hw, IAM, ~0);
		E1000_WRITE_FLUSH(hw);
	}

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	switch (adapter->num_rx_queues) {
	case 1:
	default:
		rdba = adapter->rx_ring[0].dma;
		E1000_WRITE_REG(hw, RDLEN, rdlen);
		E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
		E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
		E1000_WRITE_REG(hw, RDT, 0);
		E1000_WRITE_REG(hw, RDH, 0);
		adapter->rx_ring[0].rdh = E1000_RDH;
		adapter->rx_ring[0].rdt = E1000_RDT;
		break;
	}

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if (hw->mac_type >= e1000_82543) {
		rxcsum = E1000_READ_REG(hw, RXCSUM);
		if (adapter->rx_csum == TRUE) {
			rxcsum |= E1000_RXCSUM_TUOFL;

			/* Enable 82571 IPv4 payload checksum for UDP fragments
			 * Must be used in conjunction with packet-split. */
			if ((hw->mac_type >= e1000_82571) &&
			    (adapter->rx_ps_pages)) {
				rxcsum |= E1000_RXCSUM_IPPCSE;
			}
		} else {
			rxcsum &= ~E1000_RXCSUM_TUOFL;
			/* don't need to clear IPPCSE as it defaults to 0 */
		}
		E1000_WRITE_REG(hw, RXCSUM, rxcsum);
	}

	/* Enable Receives */
	E1000_WRITE_REG(hw, RCTL, rctl);
}
/**
 * e1000_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/

static void
e1000_free_tx_resources(struct e1000_adapter *adapter,
			struct e1000_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/

void
e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}
static void
e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
				 struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		pci_unmap_page(adapter->pdev,
			       buffer_info->dma,
			       buffer_info->length,
			       PCI_DMA_TODEVICE);
	}
	if (buffer_info->skb)
		dev_kfree_skb_any(buffer_info->skb);
	memset(buffer_info, 0, sizeof(struct e1000_buffer));
}
/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/

static void
e1000_clean_tx_ring(struct e1000_adapter *adapter,
		    struct e1000_tx_ring *tx_ring)
{
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->last_tx_tso = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->tdh);
	writel(0, adapter->hw.hw_addr + tx_ring->tdt);
}
/**
 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/

static void
e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}
/**
 * e1000_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/

static void
e1000_free_rx_resources(struct e1000_adapter *adapter,
			struct e1000_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	kfree(rx_ring->ps_page);
	rx_ring->ps_page = NULL;
	kfree(rx_ring->ps_page_dma);
	rx_ring->ps_page_dma = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/

void
e1000_free_all_rx_resources(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
}
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/

static void
e1000_clean_rx_ring(struct e1000_adapter *adapter,
                    struct e1000_rx_ring *rx_ring)
{
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct e1000_ps_page_dma *ps_page_dma;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->skb) {
			pci_unmap_single(pdev,
			                 buffer_info->dma,
			                 buffer_info->length,
			                 PCI_DMA_FROMDEVICE);

			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		ps_page = &rx_ring->ps_page[i];
		ps_page_dma = &rx_ring->ps_page_dma[i];
		for (j = 0; j < adapter->rx_ps_pages; j++) {
			if (!ps_page->ps_page[j]) break;
			pci_unmap_page(pdev,
			               ps_page_dma->ps_page_dma[j],
			               PAGE_SIZE, PCI_DMA_FROMDEVICE);
			ps_page_dma->ps_page_dma[j] = 0;
			put_page(ps_page->ps_page[j]);
			ps_page->ps_page[j] = NULL;
		}
	}

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);
	size = sizeof(struct e1000_ps_page) * rx_ring->count;
	memset(rx_ring->ps_page, 0, size);
	size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
	memset(rx_ring->ps_page_dma, 0, size);

	/* Zero out the descriptor ring */

	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->rdh);
	writel(0, adapter->hw.hw_addr + rx_ring->rdt);
}
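/* The ps_page[]/ps_page_dma[] arrays cleaned above carry the extra pages
 * used by packet-split receive.  put_page() drops the reference taken by
 * alloc_page() in e1000_alloc_rx_buffers_ps(), which frees each page once
 * no other user holds it. */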
/**
 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/

static void
e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}
/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
 * and memory write and invalidate disabled for certain operations
 */
static void
e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	uint32_t rctl;

	e1000_pci_clear_mwi(&adapter->hw);

	rctl = E1000_READ_REG(&adapter->hw, RCTL);
	rctl |= E1000_RCTL_RST;
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
	E1000_WRITE_FLUSH(&adapter->hw);
	mdelay(5);

	if (netif_running(netdev))
		e1000_clean_all_rx_rings(adapter);
}
static void
e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	uint32_t rctl;

	rctl = E1000_READ_REG(&adapter->hw, RCTL);
	rctl &= ~E1000_RCTL_RST;
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
	E1000_WRITE_FLUSH(&adapter->hw);
	mdelay(5);

	if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
		e1000_pci_set_mwi(&adapter->hw);

	if (netif_running(netdev)) {
		/* No need to loop, because 82542 supports only 1 queue */
		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
		e1000_configure_rx(adapter);
		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
	}
}
/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int
e1000_set_mac(struct net_device *netdev, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (adapter->hw.mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);

	e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);

	/* With 82571 controllers, LAA may be overwritten (with the default)
	 * due to controller reset from the other port. */
	if (adapter->hw.mac_type == e1000_82571) {
		/* activate the work around */
		adapter->hw.laa_is_present = 1;

		/* Hold a copy of the LAA in RAR[14].  This is done so that
		 * between the time RAR[0] gets clobbered and the time it
		 * gets fixed (in e1000_watchdog), the actual LAA is in one
		 * of the RARs and no incoming packets directed to this port
		 * are dropped.  Eventually the LAA will be in RAR[0] and
		 * RAR[14]. */
		e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
		              E1000_RAR_ENTRIES - 1);
	}

	if (adapter->hw.mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	return 0;
}
/**
 * e1000_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void
e1000_set_multi(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr;
	uint32_t rctl;
	uint32_t hash_value;
	int i, rar_entries = E1000_RAR_ENTRIES;
	int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
				E1000_NUM_MTA_REGISTERS_ICH8LAN :
				E1000_NUM_MTA_REGISTERS;

	if (adapter->hw.mac_type == e1000_ich8lan)
		rar_entries = E1000_RAR_ENTRIES_ICH8LAN;

	/* reserve RAR[14] for LAA over-write work-around */
	if (adapter->hw.mac_type == e1000_82571)
		rar_entries--;

	/* Check for Promiscuous and All Multicast modes */

	rctl = E1000_READ_REG(hw, RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
	} else if (netdev->flags & IFF_ALLMULTI) {
		rctl |= E1000_RCTL_MPE;
		rctl &= ~E1000_RCTL_UPE;
	} else {
		rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
	}

	E1000_WRITE_REG(hw, RCTL, rctl);

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	/* load the first 14 multicast addresses into the exact filters 1-14
	 * RAR 0 is used for the station MAC address
	 * if there are not 14 addresses, go ahead and clear the filters
	 * -- with 82571 controllers only 0-13 entries are filled here
	 */
	mc_ptr = netdev->mc_list;

	for (i = 1; i < rar_entries; i++) {
		if (mc_ptr) {
			e1000_rar_set(hw, mc_ptr->dmi_addr, i);
			mc_ptr = mc_ptr->next;
		} else {
			E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
			E1000_WRITE_FLUSH(hw);
			E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
			E1000_WRITE_FLUSH(hw);
		}
	}

	/* clear the old settings from the multicast hash table */

	for (i = 0; i < mta_reg_count; i++) {
		E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
		E1000_WRITE_FLUSH(hw);
	}

	/* load any remaining addresses into the hash table */

	for (; mc_ptr; mc_ptr = mc_ptr->next) {
		hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
		e1000_mta_set(hw, hash_value);
	}

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);
}
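/* Addresses that do not fit in the exact-match RAR slots above spill into
 * the multicast table array: e1000_hash_mc_addr() reduces the address to a
 * hash value and e1000_mta_set() (defined in e1000_hw.c) sets the matching
 * MTA bit.  For most MACs the table is E1000_NUM_MTA_REGISTERS (128) 32-bit
 * registers, i.e. 4096 hash buckets; ICH8LAN has fewer, which is why
 * mta_reg_count is chosen per mac_type above. */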
/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */

static void
e1000_update_phy_info(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
}
/**
 * e1000_82547_tx_fifo_stall - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/

static void
e1000_82547_tx_fifo_stall(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
	struct net_device *netdev = adapter->netdev;
	uint32_t tctl;

	if (atomic_read(&adapter->tx_fifo_stall)) {
		if ((E1000_READ_REG(&adapter->hw, TDT) ==
		    E1000_READ_REG(&adapter->hw, TDH)) &&
		   (E1000_READ_REG(&adapter->hw, TDFT) ==
		    E1000_READ_REG(&adapter->hw, TDFH)) &&
		   (E1000_READ_REG(&adapter->hw, TDFTS) ==
		    E1000_READ_REG(&adapter->hw, TDFHS))) {
			tctl = E1000_READ_REG(&adapter->hw, TCTL);
			E1000_WRITE_REG(&adapter->hw, TCTL,
					tctl & ~E1000_TCTL_EN);
			E1000_WRITE_REG(&adapter->hw, TDFT,
					adapter->tx_head_addr);
			E1000_WRITE_REG(&adapter->hw, TDFH,
					adapter->tx_head_addr);
			E1000_WRITE_REG(&adapter->hw, TDFTS,
					adapter->tx_head_addr);
			E1000_WRITE_REG(&adapter->hw, TDFHS,
					adapter->tx_head_addr);
			E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
			E1000_WRITE_FLUSH(&adapter->hw);

			adapter->tx_fifo_head = 0;
			atomic_set(&adapter->tx_fifo_stall, 0);
			netif_wake_queue(netdev);
		} else {
			mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
		}
	}
}
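/* The timer above re-arms every jiffy until the transmit path has drained
 * (TDT == TDH and the on-chip FIFO head/tail register pairs match).  Only
 * then does it briefly clear TCTL.EN, rewind the FIFO pointers to
 * tx_head_addr and wake the queue; the stall condition itself is set in
 * e1000_82547_fifo_workaround() further below. */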
2326 * e1000_watchdog - Timer Call-back
2327 * @data: pointer to adapter cast into an unsigned long
2330 e1000_watchdog(unsigned long data
)
2332 struct e1000_adapter
*adapter
= (struct e1000_adapter
*) data
;
2333 struct net_device
*netdev
= adapter
->netdev
;
2334 struct e1000_tx_ring
*txdr
= adapter
->tx_ring
;
2335 uint32_t link
, tctl
;
2338 ret_val
= e1000_check_for_link(&adapter
->hw
);
2339 if ((ret_val
== E1000_ERR_PHY
) &&
2340 (adapter
->hw
.phy_type
== e1000_phy_igp_3
) &&
2341 (E1000_READ_REG(&adapter
->hw
, CTRL
) & E1000_PHY_CTRL_GBE_DISABLE
)) {
2342 /* See e1000_kumeran_lock_loss_workaround() */
2344 "Gigabit has been disabled, downgrading speed\n");
2346 if (adapter
->hw
.mac_type
== e1000_82573
) {
2347 e1000_enable_tx_pkt_filtering(&adapter
->hw
);
2348 if (adapter
->mng_vlan_id
!= adapter
->hw
.mng_cookie
.vlan_id
)
2349 e1000_update_mng_vlan(adapter
);
2352 if ((adapter
->hw
.media_type
== e1000_media_type_internal_serdes
) &&
2353 !(E1000_READ_REG(&adapter
->hw
, TXCW
) & E1000_TXCW_ANE
))
2354 link
= !adapter
->hw
.serdes_link_down
;
2356 link
= E1000_READ_REG(&adapter
->hw
, STATUS
) & E1000_STATUS_LU
;
2359 if (!netif_carrier_ok(netdev
)) {
2360 boolean_t txb2b
= 1;
2361 e1000_get_speed_and_duplex(&adapter
->hw
,
2362 &adapter
->link_speed
,
2363 &adapter
->link_duplex
);
2365 DPRINTK(LINK
, INFO
, "NIC Link is Up %d Mbps %s\n",
2366 adapter
->link_speed
,
2367 adapter
->link_duplex
== FULL_DUPLEX
?
2368 "Full Duplex" : "Half Duplex");
2370 /* tweak tx_queue_len according to speed/duplex
2371 * and adjust the timeout factor */
2372 netdev
->tx_queue_len
= adapter
->tx_queue_len
;
2373 adapter
->tx_timeout_factor
= 1;
2374 switch (adapter
->link_speed
) {
2377 netdev
->tx_queue_len
= 10;
2378 adapter
->tx_timeout_factor
= 8;
2382 netdev
->tx_queue_len
= 100;
2383 /* maybe add some timeout factor ? */
2387 if ((adapter
->hw
.mac_type
== e1000_82571
||
2388 adapter
->hw
.mac_type
== e1000_82572
) &&
2390 #define SPEED_MODE_BIT (1 << 21)
2392 tarc0
= E1000_READ_REG(&adapter
->hw
, TARC0
);
2393 tarc0
&= ~SPEED_MODE_BIT
;
2394 E1000_WRITE_REG(&adapter
->hw
, TARC0
, tarc0
);
2398 /* disable TSO for pcie and 10/100 speeds, to avoid
2399 * some hardware issues */
2400 if (!adapter
->tso_force
&&
2401 adapter
->hw
.bus_type
== e1000_bus_type_pci_express
){
2402 switch (adapter
->link_speed
) {
2406 "10/100 speed: disabling TSO\n");
2407 netdev
->features
&= ~NETIF_F_TSO
;
2410 netdev
->features
|= NETIF_F_TSO
;
2419 /* enable transmits in the hardware, need to do this
2420 * after setting TARC0 */
2421 tctl
= E1000_READ_REG(&adapter
->hw
, TCTL
);
2422 tctl
|= E1000_TCTL_EN
;
2423 E1000_WRITE_REG(&adapter
->hw
, TCTL
, tctl
);
2425 netif_carrier_on(netdev
);
2426 netif_wake_queue(netdev
);
2427 mod_timer(&adapter
->phy_info_timer
, jiffies
+ 2 * HZ
);
2428 adapter
->smartspeed
= 0;
2431 if (netif_carrier_ok(netdev
)) {
2432 adapter
->link_speed
= 0;
2433 adapter
->link_duplex
= 0;
2434 DPRINTK(LINK
, INFO
, "NIC Link is Down\n");
2435 netif_carrier_off(netdev
);
2436 netif_stop_queue(netdev
);
2437 mod_timer(&adapter
->phy_info_timer
, jiffies
+ 2 * HZ
);
2439 /* 80003ES2LAN workaround--
2440 * For packet buffer work-around on link down event;
2441 * disable receives in the ISR and
2442 * reset device here in the watchdog
2444 if (adapter
->hw
.mac_type
== e1000_80003es2lan
) {
2446 schedule_work(&adapter
->reset_task
);
2450 e1000_smartspeed(adapter
);
2453 e1000_update_stats(adapter
);
2455 adapter
->hw
.tx_packet_delta
= adapter
->stats
.tpt
- adapter
->tpt_old
;
2456 adapter
->tpt_old
= adapter
->stats
.tpt
;
2457 adapter
->hw
.collision_delta
= adapter
->stats
.colc
- adapter
->colc_old
;
2458 adapter
->colc_old
= adapter
->stats
.colc
;
2460 adapter
->gorcl
= adapter
->stats
.gorcl
- adapter
->gorcl_old
;
2461 adapter
->gorcl_old
= adapter
->stats
.gorcl
;
2462 adapter
->gotcl
= adapter
->stats
.gotcl
- adapter
->gotcl_old
;
2463 adapter
->gotcl_old
= adapter
->stats
.gotcl
;
2465 e1000_update_adaptive(&adapter
->hw
);
2467 if (!netif_carrier_ok(netdev
)) {
2468 if (E1000_DESC_UNUSED(txdr
) + 1 < txdr
->count
) {
2469 /* We've lost link, so the controller stops DMA,
2470 * but we've got queued Tx work that's never going
2471 * to get done, so reset controller to flush Tx.
2472 * (Do the reset outside of interrupt context). */
2473 adapter
->tx_timeout_count
++;
2474 schedule_work(&adapter
->reset_task
);
2478 /* Dynamic mode for Interrupt Throttle Rate (ITR) */
2479 if (adapter
->hw
.mac_type
>= e1000_82540
&& adapter
->itr
== 1) {
2480 /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
2481 * asymmetrical Tx or Rx gets ITR=8000; everyone
2482 * else is between 2000-8000. */
2483 uint32_t goc
= (adapter
->gotcl
+ adapter
->gorcl
) / 10000;
2484 uint32_t dif
= (adapter
->gotcl
> adapter
->gorcl
?
2485 adapter
->gotcl
- adapter
->gorcl
:
2486 adapter
->gorcl
- adapter
->gotcl
) / 10000;
2487 uint32_t itr
= goc
> 0 ? (dif
* 6000 / goc
+ 2000) : 8000;
2488 E1000_WRITE_REG(&adapter
->hw
, ITR
, 1000000000 / (itr
* 256));
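/* Worked example for the dynamic ITR value computed above: with perfectly
 * symmetric Tx/Rx byte counts dif is 0, so itr = 2000 interrupts/s; with
 * completely one-sided traffic dif == goc and itr = 8000.  The ITR register
 * holds the minimum inter-interrupt interval in 256 ns units, hence the
 * 1000000000 / (itr * 256) conversion from interrupts per second. */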
2491 /* Cause software interrupt to ensure rx ring is cleaned */
2492 E1000_WRITE_REG(&adapter
->hw
, ICS
, E1000_ICS_RXDMT0
);
2494 /* Force detection of hung controller every watchdog period */
2495 adapter
->detect_tx_hung
= TRUE
;
2497 /* With 82571 controllers, LAA may be overwritten due to controller
2498 * reset from the other port. Set the appropriate LAA in RAR[0] */
2499 if (adapter
->hw
.mac_type
== e1000_82571
&& adapter
->hw
.laa_is_present
)
2500 e1000_rar_set(&adapter
->hw
, adapter
->hw
.mac_addr
, 0);
2502 /* Reset the timer */
2503 mod_timer(&adapter
->watchdog_timer
, jiffies
+ 2 * HZ
);
#define E1000_TX_FLAGS_CSUM		0x00000001
#define E1000_TX_FLAGS_VLAN		0x00000002
#define E1000_TX_FLAGS_TSO		0x00000004
#define E1000_TX_FLAGS_IPV4		0x00000008
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT	16
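/* The low-order flag bits above are simple booleans; a VLAN tag, when
 * present, rides in the upper 16 bits of tx_flags, i.e.
 * tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT),
 * and is recovered with E1000_TX_FLAGS_VLAN_MASK in e1000_tx_queue(). */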
2514 e1000_tso(struct e1000_adapter
*adapter
, struct e1000_tx_ring
*tx_ring
,
2515 struct sk_buff
*skb
)
2518 struct e1000_context_desc
*context_desc
;
2519 struct e1000_buffer
*buffer_info
;
2521 uint32_t cmd_length
= 0;
2522 uint16_t ipcse
= 0, tucse
, mss
;
2523 uint8_t ipcss
, ipcso
, tucss
, tucso
, hdr_len
;
2526 if (skb_is_gso(skb
)) {
2527 if (skb_header_cloned(skb
)) {
2528 err
= pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
);
2533 hdr_len
= ((skb
->h
.raw
- skb
->data
) + (skb
->h
.th
->doff
<< 2));
2534 mss
= skb_shinfo(skb
)->gso_size
;
2535 if (skb
->protocol
== htons(ETH_P_IP
)) {
2536 skb
->nh
.iph
->tot_len
= 0;
2537 skb
->nh
.iph
->check
= 0;
2539 ~csum_tcpudp_magic(skb
->nh
.iph
->saddr
,
2544 cmd_length
= E1000_TXD_CMD_IP
;
2545 ipcse
= skb
->h
.raw
- skb
->data
- 1;
2546 #ifdef NETIF_F_TSO_IPV6
2547 } else if (skb
->protocol
== htons(ETH_P_IPV6
)) {
2548 skb
->nh
.ipv6h
->payload_len
= 0;
2550 ~csum_ipv6_magic(&skb
->nh
.ipv6h
->saddr
,
2551 &skb
->nh
.ipv6h
->daddr
,
2558 ipcss
= skb
->nh
.raw
- skb
->data
;
2559 ipcso
= (void *)&(skb
->nh
.iph
->check
) - (void *)skb
->data
;
2560 tucss
= skb
->h
.raw
- skb
->data
;
2561 tucso
= (void *)&(skb
->h
.th
->check
) - (void *)skb
->data
;
2564 cmd_length
|= (E1000_TXD_CMD_DEXT
| E1000_TXD_CMD_TSE
|
2565 E1000_TXD_CMD_TCP
| (skb
->len
- (hdr_len
)));
2567 i
= tx_ring
->next_to_use
;
2568 context_desc
= E1000_CONTEXT_DESC(*tx_ring
, i
);
2569 buffer_info
= &tx_ring
->buffer_info
[i
];
2571 context_desc
->lower_setup
.ip_fields
.ipcss
= ipcss
;
2572 context_desc
->lower_setup
.ip_fields
.ipcso
= ipcso
;
2573 context_desc
->lower_setup
.ip_fields
.ipcse
= cpu_to_le16(ipcse
);
2574 context_desc
->upper_setup
.tcp_fields
.tucss
= tucss
;
2575 context_desc
->upper_setup
.tcp_fields
.tucso
= tucso
;
2576 context_desc
->upper_setup
.tcp_fields
.tucse
= cpu_to_le16(tucse
);
2577 context_desc
->tcp_seg_setup
.fields
.mss
= cpu_to_le16(mss
);
2578 context_desc
->tcp_seg_setup
.fields
.hdr_len
= hdr_len
;
2579 context_desc
->cmd_and_length
= cpu_to_le32(cmd_length
);
2581 buffer_info
->time_stamp
= jiffies
;
2583 if (++i
== tx_ring
->count
) i
= 0;
2584 tx_ring
->next_to_use
= i
;
2594 e1000_tx_csum(struct e1000_adapter
*adapter
, struct e1000_tx_ring
*tx_ring
,
2595 struct sk_buff
*skb
)
2597 struct e1000_context_desc
*context_desc
;
2598 struct e1000_buffer
*buffer_info
;
2602 if (likely(skb
->ip_summed
== CHECKSUM_HW
)) {
2603 css
= skb
->h
.raw
- skb
->data
;
2605 i
= tx_ring
->next_to_use
;
2606 buffer_info
= &tx_ring
->buffer_info
[i
];
2607 context_desc
= E1000_CONTEXT_DESC(*tx_ring
, i
);
2609 context_desc
->upper_setup
.tcp_fields
.tucss
= css
;
2610 context_desc
->upper_setup
.tcp_fields
.tucso
= css
+ skb
->csum
;
2611 context_desc
->upper_setup
.tcp_fields
.tucse
= 0;
2612 context_desc
->tcp_seg_setup
.data
= 0;
2613 context_desc
->cmd_and_length
= cpu_to_le32(E1000_TXD_CMD_DEXT
);
2615 buffer_info
->time_stamp
= jiffies
;
2617 if (unlikely(++i
== tx_ring
->count
)) i
= 0;
2618 tx_ring
->next_to_use
= i
;
#define E1000_MAX_TXD_PWR	12
#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
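/* A single Tx data descriptor carries at most 2^E1000_MAX_TXD_PWR = 4096
 * bytes; e1000_tx_map() splits larger buffers into multiple descriptors and
 * e1000_xmit_frame() uses TXD_USE_COUNT() to budget ring space per 4 KB. */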
2630 e1000_tx_map(struct e1000_adapter
*adapter
, struct e1000_tx_ring
*tx_ring
,
2631 struct sk_buff
*skb
, unsigned int first
, unsigned int max_per_txd
,
2632 unsigned int nr_frags
, unsigned int mss
)
2634 struct e1000_buffer
*buffer_info
;
2635 unsigned int len
= skb
->len
;
2636 unsigned int offset
= 0, size
, count
= 0, i
;
2638 len
-= skb
->data_len
;
2640 i
= tx_ring
->next_to_use
;
2643 buffer_info
= &tx_ring
->buffer_info
[i
];
2644 size
= min(len
, max_per_txd
);
2646 /* Workaround for Controller erratum --
2647 * descriptor for non-tso packet in a linear SKB that follows a
2648 * tso gets written back prematurely before the data is fully
2649 * DMA'd to the controller */
2650 if (!skb
->data_len
&& tx_ring
->last_tx_tso
&&
2652 tx_ring
->last_tx_tso
= 0;
2656 /* Workaround for premature desc write-backs
2657 * in TSO mode. Append 4-byte sentinel desc */
2658 if (unlikely(mss
&& !nr_frags
&& size
== len
&& size
> 8))
2661 /* work-around for errata 10 and it applies
2662 * to all controllers in PCI-X mode
2663 * The fix is to make sure that the first descriptor of a
2664 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2666 if (unlikely((adapter
->hw
.bus_type
== e1000_bus_type_pcix
) &&
2667 (size
> 2015) && count
== 0))
2670 /* Workaround for potential 82544 hang in PCI-X. Avoid
2671 * terminating buffers within evenly-aligned dwords. */
2672 if (unlikely(adapter
->pcix_82544
&&
2673 !((unsigned long)(skb
->data
+ offset
+ size
- 1) & 4) &&
2677 buffer_info
->length
= size
;
2679 pci_map_single(adapter
->pdev
,
2683 buffer_info
->time_stamp
= jiffies
;
2688 if (unlikely(++i
== tx_ring
->count
)) i
= 0;
2691 for (f
= 0; f
< nr_frags
; f
++) {
2692 struct skb_frag_struct
*frag
;
2694 frag
= &skb_shinfo(skb
)->frags
[f
];
2696 offset
= frag
->page_offset
;
2699 buffer_info
= &tx_ring
->buffer_info
[i
];
2700 size
= min(len
, max_per_txd
);
2702 /* Workaround for premature desc write-backs
2703 * in TSO mode. Append 4-byte sentinel desc */
2704 if (unlikely(mss
&& f
== (nr_frags
-1) && size
== len
&& size
> 8))
2707 /* Workaround for potential 82544 hang in PCI-X.
2708 * Avoid terminating buffers within evenly-aligned
2710 if (unlikely(adapter
->pcix_82544
&&
2711 !((unsigned long)(frag
->page
+offset
+size
-1) & 4) &&
2715 buffer_info
->length
= size
;
2717 pci_map_page(adapter
->pdev
,
2722 buffer_info
->time_stamp
= jiffies
;
2727 if (unlikely(++i
== tx_ring
->count
)) i
= 0;
2731 i
= (i
== 0) ? tx_ring
->count
- 1 : i
- 1;
2732 tx_ring
->buffer_info
[i
].skb
= skb
;
2733 tx_ring
->buffer_info
[first
].next_to_watch
= i
;
2739 e1000_tx_queue(struct e1000_adapter
*adapter
, struct e1000_tx_ring
*tx_ring
,
2740 int tx_flags
, int count
)
2742 struct e1000_tx_desc
*tx_desc
= NULL
;
2743 struct e1000_buffer
*buffer_info
;
2744 uint32_t txd_upper
= 0, txd_lower
= E1000_TXD_CMD_IFCS
;
2747 if (likely(tx_flags
& E1000_TX_FLAGS_TSO
)) {
2748 txd_lower
|= E1000_TXD_CMD_DEXT
| E1000_TXD_DTYP_D
|
2750 txd_upper
|= E1000_TXD_POPTS_TXSM
<< 8;
2752 if (likely(tx_flags
& E1000_TX_FLAGS_IPV4
))
2753 txd_upper
|= E1000_TXD_POPTS_IXSM
<< 8;
2756 if (likely(tx_flags
& E1000_TX_FLAGS_CSUM
)) {
2757 txd_lower
|= E1000_TXD_CMD_DEXT
| E1000_TXD_DTYP_D
;
2758 txd_upper
|= E1000_TXD_POPTS_TXSM
<< 8;
2761 if (unlikely(tx_flags
& E1000_TX_FLAGS_VLAN
)) {
2762 txd_lower
|= E1000_TXD_CMD_VLE
;
2763 txd_upper
|= (tx_flags
& E1000_TX_FLAGS_VLAN_MASK
);
2766 i
= tx_ring
->next_to_use
;
2769 buffer_info
= &tx_ring
->buffer_info
[i
];
2770 tx_desc
= E1000_TX_DESC(*tx_ring
, i
);
2771 tx_desc
->buffer_addr
= cpu_to_le64(buffer_info
->dma
);
2772 tx_desc
->lower
.data
=
2773 cpu_to_le32(txd_lower
| buffer_info
->length
);
2774 tx_desc
->upper
.data
= cpu_to_le32(txd_upper
);
2775 if (unlikely(++i
== tx_ring
->count
)) i
= 0;
2778 tx_desc
->lower
.data
|= cpu_to_le32(adapter
->txd_cmd
);
2780 /* Force memory writes to complete before letting h/w
2781 * know there are new descriptors to fetch. (Only
2782 * applicable for weak-ordered memory model archs,
2783 * such as IA-64). */
2786 tx_ring
->next_to_use
= i
;
2787 writel(i
, adapter
->hw
.hw_addr
+ tx_ring
->tdt
);
/**
 * 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary by notifying the stack to resend
 * the packet at a later time.  This gives the Tx FIFO an opportunity to
 * flush all packets.  When that occurs, we reset the Tx FIFO pointers
 * to the beginning of the Tx FIFO.
 **/

#define E1000_FIFO_HDR			0x10
#define E1000_82547_PAD_LEN		0x3E0
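/* In e1000_82547_fifo_workaround() below, the frame length plus the 16-byte
 * FIFO header is rounded up to a multiple of E1000_FIFO_HDR; a stall is
 * flagged when that padded length reaches E1000_82547_PAD_LEN (0x3E0 = 992)
 * plus the space left before the FIFO wraps (tx_fifo_size - tx_fifo_head). */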
2803 e1000_82547_fifo_workaround(struct e1000_adapter
*adapter
, struct sk_buff
*skb
)
2805 uint32_t fifo_space
= adapter
->tx_fifo_size
- adapter
->tx_fifo_head
;
2806 uint32_t skb_fifo_len
= skb
->len
+ E1000_FIFO_HDR
;
2808 E1000_ROUNDUP(skb_fifo_len
, E1000_FIFO_HDR
);
2810 if (adapter
->link_duplex
!= HALF_DUPLEX
)
2811 goto no_fifo_stall_required
;
2813 if (atomic_read(&adapter
->tx_fifo_stall
))
2816 if (skb_fifo_len
>= (E1000_82547_PAD_LEN
+ fifo_space
)) {
2817 atomic_set(&adapter
->tx_fifo_stall
, 1);
2821 no_fifo_stall_required
:
2822 adapter
->tx_fifo_head
+= skb_fifo_len
;
2823 if (adapter
->tx_fifo_head
>= adapter
->tx_fifo_size
)
2824 adapter
->tx_fifo_head
-= adapter
->tx_fifo_size
;
#define MINIMUM_DHCP_PACKET_SIZE 282
2830 e1000_transfer_dhcp_info(struct e1000_adapter
*adapter
, struct sk_buff
*skb
)
2832 struct e1000_hw
*hw
= &adapter
->hw
;
2833 uint16_t length
, offset
;
2834 if (vlan_tx_tag_present(skb
)) {
2835 if (!((vlan_tx_tag_get(skb
) == adapter
->hw
.mng_cookie
.vlan_id
) &&
2836 ( adapter
->hw
.mng_cookie
.status
&
2837 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT
)) )
2840 if (skb
->len
> MINIMUM_DHCP_PACKET_SIZE
) {
2841 struct ethhdr
*eth
= (struct ethhdr
*) skb
->data
;
2842 if ((htons(ETH_P_IP
) == eth
->h_proto
)) {
2843 const struct iphdr
*ip
=
2844 (struct iphdr
*)((uint8_t *)skb
->data
+14);
2845 if (IPPROTO_UDP
== ip
->protocol
) {
2846 struct udphdr
*udp
=
2847 (struct udphdr
*)((uint8_t *)ip
+
2849 if (ntohs(udp
->dest
) == 67) {
2850 offset
= (uint8_t *)udp
+ 8 - skb
->data
;
2851 length
= skb
->len
- offset
;
2853 return e1000_mng_write_dhcp_info(hw
,
#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
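/* TXD_USE_COUNT() deliberately over-estimates: TXD_USE_COUNT(6000, 12)
 * = (6000 >> 12) + 1 = 2 descriptors for a 6000-byte buffer, and an exact
 * 4096-byte buffer also counts as 2 even though one descriptor would do.
 * The slack only pads the ring-space check, not the actual mapping. */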
2865 e1000_xmit_frame(struct sk_buff
*skb
, struct net_device
*netdev
)
2867 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
2868 struct e1000_tx_ring
*tx_ring
;
2869 unsigned int first
, max_per_txd
= E1000_MAX_DATA_PER_TXD
;
2870 unsigned int max_txd_pwr
= E1000_MAX_TXD_PWR
;
2871 unsigned int tx_flags
= 0;
2872 unsigned int len
= skb
->len
;
2873 unsigned long flags
;
2874 unsigned int nr_frags
= 0;
2875 unsigned int mss
= 0;
2879 len
-= skb
->data_len
;
2881 tx_ring
= adapter
->tx_ring
;
2883 if (unlikely(skb
->len
<= 0)) {
2884 dev_kfree_skb_any(skb
);
2885 return NETDEV_TX_OK
;
2889 mss
= skb_shinfo(skb
)->gso_size
;
2890 /* The controller does a simple calculation to
2891 * make sure there is enough room in the FIFO before
2892 * initiating the DMA for each buffer. The calc is:
2893 * 4 = ceil(buffer len/mss). To make sure we don't
2894 * overrun the FIFO, adjust the max buffer len if mss
2898 max_per_txd
= min(mss
<< 2, max_per_txd
);
2899 max_txd_pwr
= fls(max_per_txd
) - 1;
2901 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
2902 * points to just header, pull a few bytes of payload from
2903 * frags into skb->data */
2904 hdr_len
= ((skb
->h
.raw
- skb
->data
) + (skb
->h
.th
->doff
<< 2));
2905 if (skb
->data_len
&& (hdr_len
== (skb
->len
- skb
->data_len
))) {
2906 switch (adapter
->hw
.mac_type
) {
2907 unsigned int pull_size
;
2912 pull_size
= min((unsigned int)4, skb
->data_len
);
2913 if (!__pskb_pull_tail(skb
, pull_size
)) {
2915 "__pskb_pull_tail failed.\n");
2916 dev_kfree_skb_any(skb
);
2917 return NETDEV_TX_OK
;
2919 len
= skb
->len
- skb
->data_len
;
2928 /* reserve a descriptor for the offload context */
2929 if ((mss
) || (skb
->ip_summed
== CHECKSUM_HW
))
2933 if (skb
->ip_summed
== CHECKSUM_HW
)
2938 /* Controller Erratum workaround */
2939 if (!skb
->data_len
&& tx_ring
->last_tx_tso
&& !skb_is_gso(skb
))
2943 count
+= TXD_USE_COUNT(len
, max_txd_pwr
);
2945 if (adapter
->pcix_82544
)
2948 /* work-around for errata 10 and it applies to all controllers
2949 * in PCI-X mode, so add one more descriptor to the count
2951 if (unlikely((adapter
->hw
.bus_type
== e1000_bus_type_pcix
) &&
2955 nr_frags
= skb_shinfo(skb
)->nr_frags
;
2956 for (f
= 0; f
< nr_frags
; f
++)
2957 count
+= TXD_USE_COUNT(skb_shinfo(skb
)->frags
[f
].size
,
2959 if (adapter
->pcix_82544
)
2963 if (adapter
->hw
.tx_pkt_filtering
&&
2964 (adapter
->hw
.mac_type
== e1000_82573
))
2965 e1000_transfer_dhcp_info(adapter
, skb
);
2967 local_irq_save(flags
);
2968 if (!spin_trylock(&tx_ring
->tx_lock
)) {
2969 /* Collision - tell upper layer to requeue */
2970 local_irq_restore(flags
);
2971 return NETDEV_TX_LOCKED
;
2974 /* need: count + 2 desc gap to keep tail from touching
2975 * head, otherwise try next time */
2976 if (unlikely(E1000_DESC_UNUSED(tx_ring
) < count
+ 2)) {
2977 netif_stop_queue(netdev
);
2978 spin_unlock_irqrestore(&tx_ring
->tx_lock
, flags
);
2979 return NETDEV_TX_BUSY
;
2982 if (unlikely(adapter
->hw
.mac_type
== e1000_82547
)) {
2983 if (unlikely(e1000_82547_fifo_workaround(adapter
, skb
))) {
2984 netif_stop_queue(netdev
);
2985 mod_timer(&adapter
->tx_fifo_stall_timer
, jiffies
);
2986 spin_unlock_irqrestore(&tx_ring
->tx_lock
, flags
);
2987 return NETDEV_TX_BUSY
;
2991 if (unlikely(adapter
->vlgrp
&& vlan_tx_tag_present(skb
))) {
2992 tx_flags
|= E1000_TX_FLAGS_VLAN
;
2993 tx_flags
|= (vlan_tx_tag_get(skb
) << E1000_TX_FLAGS_VLAN_SHIFT
);
2996 first
= tx_ring
->next_to_use
;
2998 tso
= e1000_tso(adapter
, tx_ring
, skb
);
3000 dev_kfree_skb_any(skb
);
3001 spin_unlock_irqrestore(&tx_ring
->tx_lock
, flags
);
3002 return NETDEV_TX_OK
;
3006 tx_ring
->last_tx_tso
= 1;
3007 tx_flags
|= E1000_TX_FLAGS_TSO
;
3008 } else if (likely(e1000_tx_csum(adapter
, tx_ring
, skb
)))
3009 tx_flags
|= E1000_TX_FLAGS_CSUM
;
3011 /* Old method was to assume IPv4 packet by default if TSO was enabled.
3012 * 82571 hardware supports TSO capabilities for IPv6 as well...
3013 * no longer assume, we must. */
3014 if (likely(skb
->protocol
== htons(ETH_P_IP
)))
3015 tx_flags
|= E1000_TX_FLAGS_IPV4
;
3017 e1000_tx_queue(adapter
, tx_ring
, tx_flags
,
3018 e1000_tx_map(adapter
, tx_ring
, skb
, first
,
3019 max_per_txd
, nr_frags
, mss
));
3021 netdev
->trans_start
= jiffies
;
3023 /* Make sure there is space in the ring for the next send. */
3024 if (unlikely(E1000_DESC_UNUSED(tx_ring
) < MAX_SKB_FRAGS
+ 2))
3025 netif_stop_queue(netdev
);
3027 spin_unlock_irqrestore(&tx_ring
->tx_lock
, flags
);
3028 return NETDEV_TX_OK
;
/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/

static void
e1000_tx_timeout(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

static void
e1000_reset_task(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_reinit_locked(adapter);
}
/**
 * e1000_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/

static struct net_device_stats *
e1000_get_stats(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}
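/* net_stats is refreshed by e1000_update_stats(), which the watchdog timer
 * runs roughly every two seconds, so the values returned here may lag the
 * hardware counters slightly between updates. */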
3072 * e1000_change_mtu - Change the Maximum Transfer Unit
3073 * @netdev: network interface device structure
3074 * @new_mtu: new value for maximum frame size
3076 * Returns 0 on success, negative on failure
3080 e1000_change_mtu(struct net_device
*netdev
, int new_mtu
)
3082 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
3083 int max_frame
= new_mtu
+ ENET_HEADER_SIZE
+ ETHERNET_FCS_SIZE
;
3084 uint16_t eeprom_data
= 0;
3086 if ((max_frame
< MINIMUM_ETHERNET_FRAME_SIZE
) ||
3087 (max_frame
> MAX_JUMBO_FRAME_SIZE
)) {
3088 DPRINTK(PROBE
, ERR
, "Invalid MTU setting\n");
3092 /* Adapter-specific max frame size limits. */
3093 switch (adapter
->hw
.mac_type
) {
3094 case e1000_undefined
... e1000_82542_rev2_1
:
3096 if (max_frame
> MAXIMUM_ETHERNET_FRAME_SIZE
) {
3097 DPRINTK(PROBE
, ERR
, "Jumbo Frames not supported.\n");
3102 /* only enable jumbo frames if ASPM is disabled completely
3103 * this means both bits must be zero in 0x1A bits 3:2 */
3104 e1000_read_eeprom(&adapter
->hw
, EEPROM_INIT_3GIO_3
, 1,
3106 if (eeprom_data
& EEPROM_WORD1A_ASPM_MASK
) {
3107 if (max_frame
> MAXIMUM_ETHERNET_FRAME_SIZE
) {
3109 "Jumbo Frames not supported.\n");
3114 /* fall through to get support */
3117 case e1000_80003es2lan
:
3118 #define MAX_STD_JUMBO_FRAME_SIZE 9234
3119 if (max_frame
> MAX_STD_JUMBO_FRAME_SIZE
) {
3120 DPRINTK(PROBE
, ERR
, "MTU > 9216 not supported.\n");
3125 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3129 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3130 * means we reserve 2 more, this pushes us to allocate from the next
3132 * i.e. RXBUFFER_2048 --> size-4096 slab */
3134 if (max_frame
<= E1000_RXBUFFER_256
)
3135 adapter
->rx_buffer_len
= E1000_RXBUFFER_256
;
3136 else if (max_frame
<= E1000_RXBUFFER_512
)
3137 adapter
->rx_buffer_len
= E1000_RXBUFFER_512
;
3138 else if (max_frame
<= E1000_RXBUFFER_1024
)
3139 adapter
->rx_buffer_len
= E1000_RXBUFFER_1024
;
3140 else if (max_frame
<= E1000_RXBUFFER_2048
)
3141 adapter
->rx_buffer_len
= E1000_RXBUFFER_2048
;
3142 else if (max_frame
<= E1000_RXBUFFER_4096
)
3143 adapter
->rx_buffer_len
= E1000_RXBUFFER_4096
;
3144 else if (max_frame
<= E1000_RXBUFFER_8192
)
3145 adapter
->rx_buffer_len
= E1000_RXBUFFER_8192
;
3146 else if (max_frame
<= E1000_RXBUFFER_16384
)
3147 adapter
->rx_buffer_len
= E1000_RXBUFFER_16384
;
3149 /* adjust allocation if LPE protects us, and we aren't using SBP */
3150 if (!adapter
->hw
.tbi_compatibility_on
&&
3151 ((max_frame
== MAXIMUM_ETHERNET_FRAME_SIZE
) ||
3152 (max_frame
== MAXIMUM_ETHERNET_VLAN_SIZE
)))
3153 adapter
->rx_buffer_len
= MAXIMUM_ETHERNET_VLAN_SIZE
;
3155 netdev
->mtu
= new_mtu
;
3157 if (netif_running(netdev
))
3158 e1000_reinit_locked(adapter
);
3160 adapter
->hw
.max_frame_size
= max_frame
;
3166 * e1000_update_stats - Update the board statistics counters
3167 * @adapter: board private structure
3171 e1000_update_stats(struct e1000_adapter
*adapter
)
3173 struct e1000_hw
*hw
= &adapter
->hw
;
3174 struct pci_dev
*pdev
= adapter
->pdev
;
3175 unsigned long flags
;
3178 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3181 * Prevent stats update while adapter is being reset, or if the pci
3182 * connection is down.
3184 if (adapter
->link_speed
== 0)
3186 if (pdev
->error_state
&& pdev
->error_state
!= pci_channel_io_normal
)
3189 spin_lock_irqsave(&adapter
->stats_lock
, flags
);
3191 /* these counters are modified from e1000_adjust_tbi_stats,
3192 * called from the interrupt context, so they must only
3193 * be written while holding adapter->stats_lock
3196 adapter
->stats
.crcerrs
+= E1000_READ_REG(hw
, CRCERRS
);
3197 adapter
->stats
.gprc
+= E1000_READ_REG(hw
, GPRC
);
3198 adapter
->stats
.gorcl
+= E1000_READ_REG(hw
, GORCL
);
3199 adapter
->stats
.gorch
+= E1000_READ_REG(hw
, GORCH
);
3200 adapter
->stats
.bprc
+= E1000_READ_REG(hw
, BPRC
);
3201 adapter
->stats
.mprc
+= E1000_READ_REG(hw
, MPRC
);
3202 adapter
->stats
.roc
+= E1000_READ_REG(hw
, ROC
);
3204 if (adapter
->hw
.mac_type
!= e1000_ich8lan
) {
3205 adapter
->stats
.prc64
+= E1000_READ_REG(hw
, PRC64
);
3206 adapter
->stats
.prc127
+= E1000_READ_REG(hw
, PRC127
);
3207 adapter
->stats
.prc255
+= E1000_READ_REG(hw
, PRC255
);
3208 adapter
->stats
.prc511
+= E1000_READ_REG(hw
, PRC511
);
3209 adapter
->stats
.prc1023
+= E1000_READ_REG(hw
, PRC1023
);
3210 adapter
->stats
.prc1522
+= E1000_READ_REG(hw
, PRC1522
);
3213 adapter
->stats
.symerrs
+= E1000_READ_REG(hw
, SYMERRS
);
3214 adapter
->stats
.mpc
+= E1000_READ_REG(hw
, MPC
);
3215 adapter
->stats
.scc
+= E1000_READ_REG(hw
, SCC
);
3216 adapter
->stats
.ecol
+= E1000_READ_REG(hw
, ECOL
);
3217 adapter
->stats
.mcc
+= E1000_READ_REG(hw
, MCC
);
3218 adapter
->stats
.latecol
+= E1000_READ_REG(hw
, LATECOL
);
3219 adapter
->stats
.dc
+= E1000_READ_REG(hw
, DC
);
3220 adapter
->stats
.sec
+= E1000_READ_REG(hw
, SEC
);
3221 adapter
->stats
.rlec
+= E1000_READ_REG(hw
, RLEC
);
3222 adapter
->stats
.xonrxc
+= E1000_READ_REG(hw
, XONRXC
);
3223 adapter
->stats
.xontxc
+= E1000_READ_REG(hw
, XONTXC
);
3224 adapter
->stats
.xoffrxc
+= E1000_READ_REG(hw
, XOFFRXC
);
3225 adapter
->stats
.xofftxc
+= E1000_READ_REG(hw
, XOFFTXC
);
3226 adapter
->stats
.fcruc
+= E1000_READ_REG(hw
, FCRUC
);
3227 adapter
->stats
.gptc
+= E1000_READ_REG(hw
, GPTC
);
3228 adapter
->stats
.gotcl
+= E1000_READ_REG(hw
, GOTCL
);
3229 adapter
->stats
.gotch
+= E1000_READ_REG(hw
, GOTCH
);
3230 adapter
->stats
.rnbc
+= E1000_READ_REG(hw
, RNBC
);
3231 adapter
->stats
.ruc
+= E1000_READ_REG(hw
, RUC
);
3232 adapter
->stats
.rfc
+= E1000_READ_REG(hw
, RFC
);
3233 adapter
->stats
.rjc
+= E1000_READ_REG(hw
, RJC
);
3234 adapter
->stats
.torl
+= E1000_READ_REG(hw
, TORL
);
3235 adapter
->stats
.torh
+= E1000_READ_REG(hw
, TORH
);
3236 adapter
->stats
.totl
+= E1000_READ_REG(hw
, TOTL
);
3237 adapter
->stats
.toth
+= E1000_READ_REG(hw
, TOTH
);
3238 adapter
->stats
.tpr
+= E1000_READ_REG(hw
, TPR
);
3240 if (adapter
->hw
.mac_type
!= e1000_ich8lan
) {
3241 adapter
->stats
.ptc64
+= E1000_READ_REG(hw
, PTC64
);
3242 adapter
->stats
.ptc127
+= E1000_READ_REG(hw
, PTC127
);
3243 adapter
->stats
.ptc255
+= E1000_READ_REG(hw
, PTC255
);
3244 adapter
->stats
.ptc511
+= E1000_READ_REG(hw
, PTC511
);
3245 adapter
->stats
.ptc1023
+= E1000_READ_REG(hw
, PTC1023
);
3246 adapter
->stats
.ptc1522
+= E1000_READ_REG(hw
, PTC1522
);
3249 adapter
->stats
.mptc
+= E1000_READ_REG(hw
, MPTC
);
3250 adapter
->stats
.bptc
+= E1000_READ_REG(hw
, BPTC
);
3252 /* used for adaptive IFS */
3254 hw
->tx_packet_delta
= E1000_READ_REG(hw
, TPT
);
3255 adapter
->stats
.tpt
+= hw
->tx_packet_delta
;
3256 hw
->collision_delta
= E1000_READ_REG(hw
, COLC
);
3257 adapter
->stats
.colc
+= hw
->collision_delta
;
3259 if (hw
->mac_type
>= e1000_82543
) {
3260 adapter
->stats
.algnerrc
+= E1000_READ_REG(hw
, ALGNERRC
);
3261 adapter
->stats
.rxerrc
+= E1000_READ_REG(hw
, RXERRC
);
3262 adapter
->stats
.tncrs
+= E1000_READ_REG(hw
, TNCRS
);
3263 adapter
->stats
.cexterr
+= E1000_READ_REG(hw
, CEXTERR
);
3264 adapter
->stats
.tsctc
+= E1000_READ_REG(hw
, TSCTC
);
3265 adapter
->stats
.tsctfc
+= E1000_READ_REG(hw
, TSCTFC
);
3267 if (hw
->mac_type
> e1000_82547_rev_2
) {
3268 adapter
->stats
.iac
+= E1000_READ_REG(hw
, IAC
);
3269 adapter
->stats
.icrxoc
+= E1000_READ_REG(hw
, ICRXOC
);
3271 if (adapter
->hw
.mac_type
!= e1000_ich8lan
) {
3272 adapter
->stats
.icrxptc
+= E1000_READ_REG(hw
, ICRXPTC
);
3273 adapter
->stats
.icrxatc
+= E1000_READ_REG(hw
, ICRXATC
);
3274 adapter
->stats
.ictxptc
+= E1000_READ_REG(hw
, ICTXPTC
);
3275 adapter
->stats
.ictxatc
+= E1000_READ_REG(hw
, ICTXATC
);
3276 adapter
->stats
.ictxqec
+= E1000_READ_REG(hw
, ICTXQEC
);
3277 adapter
->stats
.ictxqmtc
+= E1000_READ_REG(hw
, ICTXQMTC
);
3278 adapter
->stats
.icrxdmtc
+= E1000_READ_REG(hw
, ICRXDMTC
);
3282 /* Fill out the OS statistics structure */
3284 adapter
->net_stats
.rx_packets
= adapter
->stats
.gprc
;
3285 adapter
->net_stats
.tx_packets
= adapter
->stats
.gptc
;
3286 adapter
->net_stats
.rx_bytes
= adapter
->stats
.gorcl
;
3287 adapter
->net_stats
.tx_bytes
= adapter
->stats
.gotcl
;
3288 adapter
->net_stats
.multicast
= adapter
->stats
.mprc
;
3289 adapter
->net_stats
.collisions
= adapter
->stats
.colc
;
3293 /* RLEC on some newer hardware can be incorrect so build
3294 * our own version based on RUC and ROC */
3295 adapter
->net_stats
.rx_errors
= adapter
->stats
.rxerrc
+
3296 adapter
->stats
.crcerrs
+ adapter
->stats
.algnerrc
+
3297 adapter
->stats
.ruc
+ adapter
->stats
.roc
+
3298 adapter
->stats
.cexterr
;
3299 adapter
->net_stats
.rx_length_errors
= adapter
->stats
.ruc
+
3301 adapter
->net_stats
.rx_crc_errors
= adapter
->stats
.crcerrs
;
3302 adapter
->net_stats
.rx_frame_errors
= adapter
->stats
.algnerrc
;
3303 adapter
->net_stats
.rx_missed_errors
= adapter
->stats
.mpc
;
3307 adapter
->net_stats
.tx_errors
= adapter
->stats
.ecol
+
3308 adapter
->stats
.latecol
;
3309 adapter
->net_stats
.tx_aborted_errors
= adapter
->stats
.ecol
;
3310 adapter
->net_stats
.tx_window_errors
= adapter
->stats
.latecol
;
3311 adapter
->net_stats
.tx_carrier_errors
= adapter
->stats
.tncrs
;
3313 /* Tx Dropped needs to be maintained elsewhere */
3317 if (hw
->media_type
== e1000_media_type_copper
) {
3318 if ((adapter
->link_speed
== SPEED_1000
) &&
3319 (!e1000_read_phy_reg(hw
, PHY_1000T_STATUS
, &phy_tmp
))) {
3320 phy_tmp
&= PHY_IDLE_ERROR_COUNT_MASK
;
3321 adapter
->phy_stats
.idle_errors
+= phy_tmp
;
3324 if ((hw
->mac_type
<= e1000_82546
) &&
3325 (hw
->phy_type
== e1000_phy_m88
) &&
3326 !e1000_read_phy_reg(hw
, M88E1000_RX_ERR_CNTR
, &phy_tmp
))
3327 adapter
->phy_stats
.receive_errors
+= phy_tmp
;
3330 spin_unlock_irqrestore(&adapter
->stats_lock
, flags
);
3334 * e1000_intr - Interrupt Handler
3335 * @irq: interrupt number
3336 * @data: pointer to a network interface device structure
3337 * @pt_regs: CPU registers structure
3341 e1000_intr(int irq
, void *data
, struct pt_regs
*regs
)
3343 struct net_device
*netdev
= data
;
3344 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
3345 struct e1000_hw
*hw
= &adapter
->hw
;
3346 uint32_t rctl
, icr
= E1000_READ_REG(hw
, ICR
);
3347 #ifndef CONFIG_E1000_NAPI
3350 /* Interrupt Auto-Mask...upon reading ICR,
3351 * interrupts are masked. No need for the
3352 * IMC write, but it does mean we should
3353 * account for it ASAP. */
3354 if (likely(hw
->mac_type
>= e1000_82571
))
3355 atomic_inc(&adapter
->irq_sem
);
3358 if (unlikely(!icr
)) {
3359 #ifdef CONFIG_E1000_NAPI
3360 if (hw
->mac_type
>= e1000_82571
)
3361 e1000_irq_enable(adapter
);
3363 return IRQ_NONE
; /* Not our interrupt */
3366 if (unlikely(icr
& (E1000_ICR_RXSEQ
| E1000_ICR_LSC
))) {
3367 hw
->get_link_status
= 1;
3368 /* 80003ES2LAN workaround--
3369 * For packet buffer work-around on link down event;
3370 * disable receives here in the ISR and
3371 * reset adapter in watchdog
3373 if (netif_carrier_ok(netdev
) &&
3374 (adapter
->hw
.mac_type
== e1000_80003es2lan
)) {
3375 /* disable receives */
3376 rctl
= E1000_READ_REG(hw
, RCTL
);
3377 E1000_WRITE_REG(hw
, RCTL
, rctl
& ~E1000_RCTL_EN
);
3379 mod_timer(&adapter
->watchdog_timer
, jiffies
);
3382 #ifdef CONFIG_E1000_NAPI
3383 if (unlikely(hw
->mac_type
< e1000_82571
)) {
3384 atomic_inc(&adapter
->irq_sem
);
3385 E1000_WRITE_REG(hw
, IMC
, ~0);
3386 E1000_WRITE_FLUSH(hw
);
3388 if (likely(netif_rx_schedule_prep(netdev
)))
3389 __netif_rx_schedule(netdev
);
3391 e1000_irq_enable(adapter
);
3393 /* Writing IMC and IMS is needed for 82547.
3394 * Due to Hub Link bus being occupied, an interrupt
3395 * de-assertion message is not able to be sent.
3396 * When an interrupt assertion message is generated later,
3397 * two messages are re-ordered and sent out.
3398 * That causes APIC to think 82547 is in de-assertion
3399 * state, while 82547 is in assertion state, resulting
3400 * in dead lock. Writing IMC forces 82547 into
3401 * de-assertion state.
3403 if (hw
->mac_type
== e1000_82547
|| hw
->mac_type
== e1000_82547_rev_2
) {
3404 atomic_inc(&adapter
->irq_sem
);
3405 E1000_WRITE_REG(hw
, IMC
, ~0);
3408 for (i
= 0; i
< E1000_MAX_INTR
; i
++)
3409 if (unlikely(!adapter
->clean_rx(adapter
, adapter
->rx_ring
) &
3410 !e1000_clean_tx_irq(adapter
, adapter
->tx_ring
)))
3413 if (hw
->mac_type
== e1000_82547
|| hw
->mac_type
== e1000_82547_rev_2
)
3414 e1000_irq_enable(adapter
);
3421 #ifdef CONFIG_E1000_NAPI
3423 * e1000_clean - NAPI Rx polling callback
3424 * @adapter: board private structure
3428 e1000_clean(struct net_device
*poll_dev
, int *budget
)
3430 struct e1000_adapter
*adapter
;
3431 int work_to_do
= min(*budget
, poll_dev
->quota
);
3432 int tx_cleaned
= 0, work_done
= 0;
3434 /* Must NOT use netdev_priv macro here. */
3435 adapter
= poll_dev
->priv
;
3437 /* Keep link state information with original netdev */
3438 if (!netif_carrier_ok(poll_dev
))
3441 /* e1000_clean is called per-cpu. This lock protects
3442 * tx_ring[0] from being cleaned by multiple cpus
3443 * simultaneously. A failure obtaining the lock means
3444 * tx_ring[0] is currently being cleaned anyway. */
3445 if (spin_trylock(&adapter
->tx_queue_lock
)) {
3446 tx_cleaned
= e1000_clean_tx_irq(adapter
,
3447 &adapter
->tx_ring
[0]);
3448 spin_unlock(&adapter
->tx_queue_lock
);
3451 adapter
->clean_rx(adapter
, &adapter
->rx_ring
[0],
3452 &work_done
, work_to_do
);
3454 *budget
-= work_done
;
3455 poll_dev
->quota
-= work_done
;
3457 /* If no Tx and not enough Rx work done, exit the polling mode */
3458 if ((!tx_cleaned
&& (work_done
== 0)) ||
3459 !netif_running(poll_dev
)) {
3461 netif_rx_complete(poll_dev
);
3462 e1000_irq_enable(adapter
);
3471 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3472 * @adapter: board private structure
3476 e1000_clean_tx_irq(struct e1000_adapter
*adapter
,
3477 struct e1000_tx_ring
*tx_ring
)
3479 struct net_device
*netdev
= adapter
->netdev
;
3480 struct e1000_tx_desc
*tx_desc
, *eop_desc
;
3481 struct e1000_buffer
*buffer_info
;
3482 unsigned int i
, eop
;
3483 #ifdef CONFIG_E1000_NAPI
3484 unsigned int count
= 0;
3486 boolean_t cleaned
= FALSE
;
3488 i
= tx_ring
->next_to_clean
;
3489 eop
= tx_ring
->buffer_info
[i
].next_to_watch
;
3490 eop_desc
= E1000_TX_DESC(*tx_ring
, eop
);
3492 while (eop_desc
->upper
.data
& cpu_to_le32(E1000_TXD_STAT_DD
)) {
3493 for (cleaned
= FALSE
; !cleaned
; ) {
3494 tx_desc
= E1000_TX_DESC(*tx_ring
, i
);
3495 buffer_info
= &tx_ring
->buffer_info
[i
];
3496 cleaned
= (i
== eop
);
3498 e1000_unmap_and_free_tx_resource(adapter
, buffer_info
);
3499 memset(tx_desc
, 0, sizeof(struct e1000_tx_desc
));
3501 if (unlikely(++i
== tx_ring
->count
)) i
= 0;
3505 eop
= tx_ring
->buffer_info
[i
].next_to_watch
;
3506 eop_desc
= E1000_TX_DESC(*tx_ring
, eop
);
3507 #ifdef CONFIG_E1000_NAPI
3508 #define E1000_TX_WEIGHT 64
3509 /* weight of a sort for tx, to avoid endless transmit cleanup */
3510 if (count
++ == E1000_TX_WEIGHT
) break;
3514 tx_ring
->next_to_clean
= i
;
3516 #define TX_WAKE_THRESHOLD 32
3517 if (unlikely(cleaned
&& netif_queue_stopped(netdev
) &&
3518 netif_carrier_ok(netdev
))) {
3519 spin_lock(&tx_ring
->tx_lock
);
3520 if (netif_queue_stopped(netdev
) &&
3521 (E1000_DESC_UNUSED(tx_ring
) >= TX_WAKE_THRESHOLD
))
3522 netif_wake_queue(netdev
);
3523 spin_unlock(&tx_ring
->tx_lock
);
3526 if (adapter
->detect_tx_hung
) {
3527 /* Detect a transmit hang in hardware, this serializes the
3528 * check with the clearing of time_stamp and movement of i */
3529 adapter
->detect_tx_hung
= FALSE
;
3530 if (tx_ring
->buffer_info
[eop
].dma
&&
3531 time_after(jiffies
, tx_ring
->buffer_info
[eop
].time_stamp
+
3532 (adapter
->tx_timeout_factor
* HZ
))
3533 && !(E1000_READ_REG(&adapter
->hw
, STATUS
) &
3534 E1000_STATUS_TXOFF
)) {
3536 /* detected Tx unit hang */
3537 DPRINTK(DRV
, ERR
, "Detected Tx Unit Hang\n"
3541 " next_to_use <%x>\n"
3542 " next_to_clean <%x>\n"
3543 "buffer_info[next_to_clean]\n"
3544 " time_stamp <%lx>\n"
3545 " next_to_watch <%x>\n"
3547 " next_to_watch.status <%x>\n",
3548 (unsigned long)((tx_ring
- adapter
->tx_ring
) /
3549 sizeof(struct e1000_tx_ring
)),
3550 readl(adapter
->hw
.hw_addr
+ tx_ring
->tdh
),
3551 readl(adapter
->hw
.hw_addr
+ tx_ring
->tdt
),
3552 tx_ring
->next_to_use
,
3553 tx_ring
->next_to_clean
,
3554 tx_ring
->buffer_info
[eop
].time_stamp
,
3557 eop_desc
->upper
.fields
.status
);
3558 netif_stop_queue(netdev
);
/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter:     board private structure
 * @status_err:  receive descriptor status and error fields
 * @csum:        receive descriptor csum field
 * @sk_buff:     socket buffer with received data
 **/

static void
e1000_rx_checksum(struct e1000_adapter *adapter,
		  uint32_t status_err, uint32_t csum,
		  struct sk_buff *skb)
{
	uint16_t status = (uint16_t)status_err;
	uint8_t errors = (uint8_t)(status_err >> 24);
	skb->ip_summed = CHECKSUM_NONE;

	/* 82543 or newer only */
	if (unlikely(adapter->hw.mac_type < e1000_82543)) return;
	/* Ignore Checksum bit is set */
	if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
	/* TCP/UDP checksum error bit is set */
	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}
	/* TCP/UDP Checksum has not been calculated */
	if (adapter->hw.mac_type <= e1000_82547_rev_2) {
		if (!(status & E1000_RXD_STAT_TCPCS))
			return;
	} else {
		if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
			return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (likely(status & E1000_RXD_STAT_TCPCS)) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (adapter->hw.mac_type > e1000_82547_rev_2) {
		/* IP fragment with UDP payload */
		/* Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		csum = ntohl(csum ^ 0xFFFF);
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_HW;
	}
	adapter->hw_csum_good++;
}
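/* Summary of the cases above: completed TCP checksums are reported as
 * CHECKSUM_UNNECESSARY, while for the IP-fragment/UDP case the hardware
 * stores the complement of the payload checksum, so the driver undoes it
 * (csum ^ 0xFFFF) and passes a partial CHECKSUM_HW result for the stack to
 * finish verifying. */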
3616 * e1000_clean_rx_irq - Send received data up the network stack; legacy
3617 * @adapter: board private structure
3621 #ifdef CONFIG_E1000_NAPI
3622 e1000_clean_rx_irq(struct e1000_adapter
*adapter
,
3623 struct e1000_rx_ring
*rx_ring
,
3624 int *work_done
, int work_to_do
)
3626 e1000_clean_rx_irq(struct e1000_adapter
*adapter
,
3627 struct e1000_rx_ring
*rx_ring
)
3630 struct net_device
*netdev
= adapter
->netdev
;
3631 struct pci_dev
*pdev
= adapter
->pdev
;
3632 struct e1000_rx_desc
*rx_desc
, *next_rxd
;
3633 struct e1000_buffer
*buffer_info
, *next_buffer
;
3634 unsigned long flags
;
3638 int cleaned_count
= 0;
3639 boolean_t cleaned
= FALSE
;
3641 i
= rx_ring
->next_to_clean
;
3642 rx_desc
= E1000_RX_DESC(*rx_ring
, i
);
3643 buffer_info
= &rx_ring
->buffer_info
[i
];
3645 while (rx_desc
->status
& E1000_RXD_STAT_DD
) {
3646 struct sk_buff
*skb
;
3648 #ifdef CONFIG_E1000_NAPI
3649 if (*work_done
>= work_to_do
)
3653 status
= rx_desc
->status
;
3654 skb
= buffer_info
->skb
;
3655 buffer_info
->skb
= NULL
;
3657 prefetch(skb
->data
- NET_IP_ALIGN
);
3659 if (++i
== rx_ring
->count
) i
= 0;
3660 next_rxd
= E1000_RX_DESC(*rx_ring
, i
);
3663 next_buffer
= &rx_ring
->buffer_info
[i
];
3667 pci_unmap_single(pdev
,
3669 buffer_info
->length
,
3670 PCI_DMA_FROMDEVICE
);
3672 length
= le16_to_cpu(rx_desc
->length
);
3674 /* adjust length to remove Ethernet CRC */
3677 if (unlikely(!(status
& E1000_RXD_STAT_EOP
))) {
3678 /* All receives must fit into a single buffer */
3679 E1000_DBG("%s: Receive packet consumed multiple"
3680 " buffers\n", netdev
->name
);
3682 buffer_info
-> skb
= skb
;
3686 if (unlikely(rx_desc
->errors
& E1000_RXD_ERR_FRAME_ERR_MASK
)) {
3687 last_byte
= *(skb
->data
+ length
- 1);
3688 if (TBI_ACCEPT(&adapter
->hw
, status
,
3689 rx_desc
->errors
, length
, last_byte
)) {
3690 spin_lock_irqsave(&adapter
->stats_lock
, flags
);
3691 e1000_tbi_adjust_stats(&adapter
->hw
,
3694 spin_unlock_irqrestore(&adapter
->stats_lock
,
3699 buffer_info
->skb
= skb
;
3704 /* code added for copybreak, this should improve
3705 * performance for small packets with large amounts
3706 * of reassembly being done in the stack */
3707 #define E1000_CB_LENGTH 256
3708 if (length
< E1000_CB_LENGTH
) {
3709 struct sk_buff
*new_skb
=
3710 netdev_alloc_skb(netdev
, length
+ NET_IP_ALIGN
);
3712 skb_reserve(new_skb
, NET_IP_ALIGN
);
3713 new_skb
->dev
= netdev
;
3714 memcpy(new_skb
->data
- NET_IP_ALIGN
,
3715 skb
->data
- NET_IP_ALIGN
,
3716 length
+ NET_IP_ALIGN
);
3717 /* save the skb in buffer_info as good */
3718 buffer_info
->skb
= skb
;
3720 skb_put(skb
, length
);
3723 skb_put(skb
, length
);
3725 /* end copybreak code */
3727 /* Receive Checksum Offload */
3728 e1000_rx_checksum(adapter
,
3729 (uint32_t)(status
) |
3730 ((uint32_t)(rx_desc
->errors
) << 24),
3731 le16_to_cpu(rx_desc
->csum
), skb
);
3733 skb
->protocol
= eth_type_trans(skb
, netdev
);
3734 #ifdef CONFIG_E1000_NAPI
3735 if (unlikely(adapter
->vlgrp
&&
3736 (status
& E1000_RXD_STAT_VP
))) {
3737 vlan_hwaccel_receive_skb(skb
, adapter
->vlgrp
,
3738 le16_to_cpu(rx_desc
->special
) &
3739 E1000_RXD_SPC_VLAN_MASK
);
3741 netif_receive_skb(skb
);
3743 #else /* CONFIG_E1000_NAPI */
3744 if (unlikely(adapter
->vlgrp
&&
3745 (status
& E1000_RXD_STAT_VP
))) {
3746 vlan_hwaccel_rx(skb
, adapter
->vlgrp
,
3747 le16_to_cpu(rx_desc
->special
) &
3748 E1000_RXD_SPC_VLAN_MASK
);
3752 #endif /* CONFIG_E1000_NAPI */
3753 netdev
->last_rx
= jiffies
;
3756 rx_desc
->status
= 0;
3758 /* return some buffers to hardware, one at a time is too slow */
3759 if (unlikely(cleaned_count
>= E1000_RX_BUFFER_WRITE
)) {
3760 adapter
->alloc_rx_buf(adapter
, rx_ring
, cleaned_count
);
3764 /* use prefetched values */
3766 buffer_info
= next_buffer
;
3768 rx_ring
->next_to_clean
= i
;
3770 cleaned_count
= E1000_DESC_UNUSED(rx_ring
);
3772 adapter
->alloc_rx_buf(adapter
, rx_ring
, cleaned_count
);
3778 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
3779 * @adapter: board private structure
3783 #ifdef CONFIG_E1000_NAPI
3784 e1000_clean_rx_irq_ps(struct e1000_adapter
*adapter
,
3785 struct e1000_rx_ring
*rx_ring
,
3786 int *work_done
, int work_to_do
)
3788 e1000_clean_rx_irq_ps(struct e1000_adapter
*adapter
,
3789 struct e1000_rx_ring
*rx_ring
)
3792 union e1000_rx_desc_packet_split
*rx_desc
, *next_rxd
;
3793 struct net_device
*netdev
= adapter
->netdev
;
3794 struct pci_dev
*pdev
= adapter
->pdev
;
3795 struct e1000_buffer
*buffer_info
, *next_buffer
;
3796 struct e1000_ps_page
*ps_page
;
3797 struct e1000_ps_page_dma
*ps_page_dma
;
3798 struct sk_buff
*skb
;
3800 uint32_t length
, staterr
;
3801 int cleaned_count
= 0;
3802 boolean_t cleaned
= FALSE
;
3804 i
= rx_ring
->next_to_clean
;
3805 rx_desc
= E1000_RX_DESC_PS(*rx_ring
, i
);
3806 staterr
= le32_to_cpu(rx_desc
->wb
.middle
.status_error
);
3807 buffer_info
= &rx_ring
->buffer_info
[i
];
3809 while (staterr
& E1000_RXD_STAT_DD
) {
3810 ps_page
= &rx_ring
->ps_page
[i
];
3811 ps_page_dma
= &rx_ring
->ps_page_dma
[i
];
3812 #ifdef CONFIG_E1000_NAPI
3813 if (unlikely(*work_done
>= work_to_do
))
3817 skb
= buffer_info
->skb
;
3819 /* in the packet split case this is header only */
3820 prefetch(skb
->data
- NET_IP_ALIGN
);
3822 if (++i
== rx_ring
->count
) i
= 0;
3823 next_rxd
= E1000_RX_DESC_PS(*rx_ring
, i
);
3826 next_buffer
= &rx_ring
->buffer_info
[i
];
3830 pci_unmap_single(pdev
, buffer_info
->dma
,
3831 buffer_info
->length
,
3832 PCI_DMA_FROMDEVICE
);
3834 if (unlikely(!(staterr
& E1000_RXD_STAT_EOP
))) {
3835 E1000_DBG("%s: Packet Split buffers didn't pick up"
3836 " the full packet\n", netdev
->name
);
3837 dev_kfree_skb_irq(skb
);
3841 if (unlikely(staterr
& E1000_RXDEXT_ERR_FRAME_ERR_MASK
)) {
3842 dev_kfree_skb_irq(skb
);
3846 length
= le16_to_cpu(rx_desc
->wb
.middle
.length0
);
3848 if (unlikely(!length
)) {
3849 E1000_DBG("%s: Last part of the packet spanning"
3850 " multiple descriptors\n", netdev
->name
);
3851 dev_kfree_skb_irq(skb
);
3856 skb_put(skb
, length
);
3859 /* this looks ugly, but it seems compiler issues make it
3860 more efficient than reusing j */
3861 int l1
= le16_to_cpu(rx_desc
->wb
.upper
.length
[0]);
3863 /* page alloc/put takes too long and effects small packet
3864 * throughput, so unsplit small packets and save the alloc/put*/
3865 if (l1
&& ((length
+ l1
) <= adapter
->rx_ps_bsize0
)) {
3867 /* there is no documentation about how to call
3868 * kmap_atomic, so we can't hold the mapping
3870 pci_dma_sync_single_for_cpu(pdev
,
3871 ps_page_dma
->ps_page_dma
[0],
3873 PCI_DMA_FROMDEVICE
);
3874 vaddr
= kmap_atomic(ps_page
->ps_page
[0],
3875 KM_SKB_DATA_SOFTIRQ
);
3876 memcpy(skb
->tail
, vaddr
, l1
);
3877 kunmap_atomic(vaddr
, KM_SKB_DATA_SOFTIRQ
);
3878 pci_dma_sync_single_for_device(pdev
,
3879 ps_page_dma
->ps_page_dma
[0],
3880 PAGE_SIZE
, PCI_DMA_FROMDEVICE
);
3881 /* remove the CRC */
3888 for (j
= 0; j
< adapter
->rx_ps_pages
; j
++) {
3889 if (!(length
= le16_to_cpu(rx_desc
->wb
.upper
.length
[j
])))
3891 pci_unmap_page(pdev
, ps_page_dma
->ps_page_dma
[j
],
3892 PAGE_SIZE
, PCI_DMA_FROMDEVICE
);
3893 ps_page_dma
->ps_page_dma
[j
] = 0;
3894 skb_fill_page_desc(skb
, j
, ps_page
->ps_page
[j
], 0,
3896 ps_page
->ps_page
[j
] = NULL
;
3898 skb
->data_len
+= length
;
3899 skb
->truesize
+= length
;
3902 /* strip the ethernet crc, problem is we're using pages now so
3903 * this whole operation can get a little cpu intensive */
3904 pskb_trim(skb
, skb
->len
- 4);
3907 e1000_rx_checksum(adapter
, staterr
,
3908 le16_to_cpu(rx_desc
->wb
.lower
.hi_dword
.csum_ip
.csum
), skb
);
3909 skb
->protocol
= eth_type_trans(skb
, netdev
);
3911 if (likely(rx_desc
->wb
.upper
.header_status
&
3912 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP
)))
3913 adapter
->rx_hdr_split
++;
3914 #ifdef CONFIG_E1000_NAPI
3915 if (unlikely(adapter
->vlgrp
&& (staterr
& E1000_RXD_STAT_VP
))) {
3916 vlan_hwaccel_receive_skb(skb
, adapter
->vlgrp
,
3917 le16_to_cpu(rx_desc
->wb
.middle
.vlan
) &
3918 E1000_RXD_SPC_VLAN_MASK
);
3920 netif_receive_skb(skb
);
3922 #else /* CONFIG_E1000_NAPI */
3923 if (unlikely(adapter
->vlgrp
&& (staterr
& E1000_RXD_STAT_VP
))) {
3924 vlan_hwaccel_rx(skb
, adapter
->vlgrp
,
3925 le16_to_cpu(rx_desc
->wb
.middle
.vlan
) &
3926 E1000_RXD_SPC_VLAN_MASK
);
3930 #endif /* CONFIG_E1000_NAPI */
3931 netdev
->last_rx
= jiffies
;
3934 rx_desc
->wb
.middle
.status_error
&= cpu_to_le32(~0xFF);
3935 buffer_info
->skb
= NULL
;
3937 /* return some buffers to hardware, one at a time is too slow */
3938 if (unlikely(cleaned_count
>= E1000_RX_BUFFER_WRITE
)) {
3939 adapter
->alloc_rx_buf(adapter
, rx_ring
, cleaned_count
);
3943 /* use prefetched values */
3945 buffer_info
= next_buffer
;
3947 staterr
= le32_to_cpu(rx_desc
->wb
.middle
.status_error
);
3949 rx_ring
->next_to_clean
= i
;
3951 cleaned_count
= E1000_DESC_UNUSED(rx_ring
);
3953 adapter
->alloc_rx_buf(adapter
, rx_ring
, cleaned_count
);

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 **/

static void
e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                       struct e1000_rx_ring *rx_ring,
                       int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		if (!(skb = buffer_info->skb))
			skb = netdev_alloc_skb(netdev, bufsz);

		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
			struct sk_buff *oldskb = skb;
			DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
			        "at %p\n", bufsz, skb->data);
			/* Try again, without freeing the previous */
			skb = netdev_alloc_skb(netdev, bufsz);
			/* Failed allocation, critical failure */
			if (!skb) {
				dev_kfree_skb(oldskb);
				break;
			}

			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
				/* give up */
				dev_kfree_skb(skb);
				dev_kfree_skb(oldskb);
				break; /* while !buffer_info->skb */
			}
			/* Use new allocation */
			dev_kfree_skb(oldskb);
		}
		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
		buffer_info->dma = pci_map_single(pdev,
		                                  skb->data,
		                                  adapter->rx_buffer_len,
		                                  PCI_DMA_FROMDEVICE);

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter,
		                           (void *)(unsigned long)buffer_info->dma,
		                           adapter->rx_buffer_len)) {
			DPRINTK(RX_ERR, ERR,
			        "dma align check failed: %u bytes at %p\n",
			        adapter->rx_buffer_len,
			        (void *)(unsigned long)buffer_info->dma);
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;

			pci_unmap_single(pdev, buffer_info->dma,
			                 adapter->rx_buffer_len,
			                 PCI_DMA_FROMDEVICE);

			break; /* while !buffer_info->skb */
		}
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
	}
}
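
/* The "errata 23" checks above guard against receive buffers that straddle a
 * 64 KB boundary, which some early 8254x controllers reportedly cannot handle.
 * Both the kernel virtual address of the skb data and the bus address returned
 * by pci_map_single() are validated, since the DMA (bus) address is what the
 * hardware actually uses. */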

/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/

static void
e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                          struct e1000_rx_ring *rx_ring,
                          int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct e1000_ps_page_dma *ps_page_dma;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];
	ps_page = &rx_ring->ps_page[i];
	ps_page_dma = &rx_ring->ps_page_dma[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			if (j < adapter->rx_ps_pages) {
				if (likely(!ps_page->ps_page[j])) {
					ps_page->ps_page[j] =
						alloc_page(GFP_ATOMIC);
					if (unlikely(!ps_page->ps_page[j])) {
						adapter->alloc_rx_buff_failed++;
						goto no_buffers;
					}
					ps_page_dma->ps_page_dma[j] =
						pci_map_page(pdev,
						             ps_page->ps_page[j],
						             0, PAGE_SIZE,
						             PCI_DMA_FROMDEVICE);
				}
				/* Refresh the desc even if buffer_addrs didn't
				 * change because each write-back erases
				 * this info. */
				rx_desc->read.buffer_addr[j+1] =
					cpu_to_le64(ps_page_dma->ps_page_dma[j]);
			} else
				rx_desc->read.buffer_addr[j+1] = ~0;
		}

		skb = netdev_alloc_skb(netdev,
		                       adapter->rx_ps_bsize0 + NET_IP_ALIGN);

		if (unlikely(!skb)) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_ps_bsize0;
		buffer_info->dma = pci_map_single(pdev, skb->data,
		                                  adapter->rx_ps_bsize0,
		                                  PCI_DMA_FROMDEVICE);

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count)) i = 0;
		buffer_info = &rx_ring->buffer_info[i];
		ps_page = &rx_ring->ps_page[i];
		ps_page_dma = &rx_ring->ps_page_dma[i];
	}

no_buffers:
	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0)) i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		/* Hardware increments by 16 bytes, but packet split
		 * descriptors are 32 bytes...so we increment tail
		 * twice as much.
		 */
		writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
	}
}
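
/* In packet-split mode the controller DMAs the protocol headers into the
 * small rx_ps_bsize0 buffer mapped above and the remaining payload into up to
 * PS_PAGE_BUFFERS full pages per descriptor, which is what lets the receive
 * path build skbs from page fragments and count header splits via
 * rx_hdr_split. */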

/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: address of board private structure
 **/

static void
e1000_smartspeed(struct e1000_adapter *adapter)
{
	uint16_t phy_status;
	uint16_t phy_ctrl;

	if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
	    !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
			                    phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(&adapter->hw) &&
			    !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
			                        &phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				             MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
				                    phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(&adapter->hw) &&
		    !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
			             MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
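
/* Background: "SmartSpeed" is the IGP PHY's ability to downshift below
 * 1000 Mb/s when a gigabit link cannot be established, for example over
 * marginal or two-pair cabling. The logic above watches for back-to-back
 * master/slave configuration faults, clears the manual master/slave setting
 * and restarts autonegotiation; if there is still no link after
 * E1000_SMARTSPEED_DOWNSHIFT passes it forces the manual setting back on, and
 * it resets its counter after E1000_SMARTSPEED_MAX passes so the sequence can
 * repeat. */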

static int
e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
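
/* For illustration only (not part of the driver): userspace reaches
 * e1000_mii_ioctl() below through the generic MII ioctls, roughly the way
 * mii-tool does:
 *
 *	struct ifreq ifr = { 0 };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);   // interface name is just an example
 *	ioctl(fd, SIOCGMIIPHY, &ifr);              // fills mii->phy_id
 *	mii->reg_num = 1;                          // MII BMSR (status) register
 *	ioctl(fd, SIOCGMIIREG, &ifr);              // result returned in mii->val_out
 *
 * SIOCGMIIREG/SIOCSMIIREG require CAP_NET_ADMIN, as checked below. */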

static int
e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	uint16_t mii_reg;
	uint16_t spddplx;
	unsigned long flags;

	if (adapter->hw.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy_addr;
		break;
	case SIOCGMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
		                       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
		                        mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		if (adapter->hw.media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					adapter->hw.autoneg = 1;
					adapter->hw.autoneg_advertised = 0x2F;
				} else {
					if (mii_reg & 0x40)
						spddplx = SPEED_1000;
					else if (mii_reg & 0x2000)
						spddplx = SPEED_100;
					else
						spddplx = SPEED_10;
					spddplx += (mii_reg & 0x100)
						   ? DUPLEX_FULL :
						   DUPLEX_HALF;
					retval = e1000_set_spd_dplx(adapter,
					                            spddplx);
					if (retval) {
						spin_unlock_irqrestore(
							&adapter->stats_lock,
							flags);
						return retval;
					}
				}
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				if (e1000_phy_reset(&adapter->hw)) {
					spin_unlock_irqrestore(
						&adapter->stats_lock, flags);
					return -EIO;
				}
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}

int32_t
e1000_pci_set_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	int ret_val = pci_set_mwi(adapter->pdev);

	if (ret_val)
		DPRINTK(PROBE, ERR, "Error in setting MWI\n");

	return ret_val;
}

void
e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}

void
e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
{
	struct e1000_adapter *adapter = hw->back;

	pci_read_config_word(adapter->pdev, reg, value);
}

void
e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
{
	struct e1000_adapter *adapter = hw->back;

	pci_write_config_word(adapter->pdev, reg, *value);
}

uint32_t
e1000_io_read(struct e1000_hw *hw, unsigned long port)
{
	return inl(port);
}

void
e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
{
	outl(value, port);
}

static void
e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t ctrl, rctl;

	e1000_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = E1000_READ_REG(&adapter->hw, CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

		if (adapter->hw.mac_type != e1000_ich8lan) {
			/* enable VLAN receive filtering */
			rctl = E1000_READ_REG(&adapter->hw, RCTL);
			rctl |= E1000_RCTL_VFE;
			rctl &= ~E1000_RCTL_CFIEN;
			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
			e1000_update_mng_vlan(adapter);
		}
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = E1000_READ_REG(&adapter->hw, CTRL);
		ctrl &= ~E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

		if (adapter->hw.mac_type != e1000_ich8lan) {
			/* disable VLAN filtering */
			rctl = E1000_READ_REG(&adapter->hw, RCTL);
			rctl &= ~E1000_RCTL_VFE;
			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
			if (adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
				e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
			}
		}
	}

	e1000_irq_enable(adapter);
}

static void
e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t vfta, index;

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return;
	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(&adapter->hw, index, vfta);
}
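
/* The VLAN Filter Table Array (VFTA) is a 4096-bit bitmap spread across 128
 * 32-bit registers, one bit per possible VLAN ID: bits 11:5 of the VID select
 * the register (index) and bits 4:0 select the bit within it, which is what
 * the shift and mask arithmetic above and below implement. */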

static void
e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t vfta, index;

	e1000_irq_disable(adapter);

	if (adapter->vlgrp)
		adapter->vlgrp->vlan_devices[vid] = NULL;

	e1000_irq_enable(adapter);

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		e1000_release_hw_control(adapter);
		return;
	}

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(&adapter->hw, index, vfta);
}

static void
e1000_restore_vlan(struct e1000_adapter *adapter)
{
	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		uint16_t vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!adapter->vlgrp->vlan_devices[vid])
				continue;
			e1000_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

int
e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
{
	adapter->hw.autoneg = 0;

	/* Fiber NICs only allow 1000 Mbps Full duplex */
	if ((adapter->hw.media_type == e1000_media_type_fiber) &&
	    spddplx != (SPEED_1000 + DUPLEX_FULL)) {
		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		adapter->hw.forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		adapter->hw.forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		adapter->hw.forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		adapter->hw.forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		adapter->hw.autoneg = 1;
		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}

#ifdef CONFIG_PM
/* Save/restore 16 or 64 dwords of PCI config space depending on which
 * bus we're on (PCI(X) vs. PCI-E)
 */
#define PCIE_CONFIG_SPACE_LEN 256
#define PCI_CONFIG_SPACE_LEN 64
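
/* For the pre-PCIe parts (PCI/PCI-X) only the 64-byte standard configuration
 * header is saved, i.e. 16 dwords; the PCI Express parts (82571 and later)
 * get the full 256-byte conventional config space, i.e. 64 dwords. The 4 KB
 * PCIe extended configuration space is not saved here. Both lengths above are
 * in bytes, hence the size / 4 loops below. */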
static int
e1000_pci_save_state(struct e1000_adapter *adapter)
{
	struct pci_dev *dev = adapter->pdev;
	int size;
	int i;

	if (adapter->hw.mac_type >= e1000_82571)
		size = PCIE_CONFIG_SPACE_LEN;
	else
		size = PCI_CONFIG_SPACE_LEN;

	WARN_ON(adapter->config_space != NULL);

	adapter->config_space = kmalloc(size, GFP_KERNEL);
	if (!adapter->config_space) {
		DPRINTK(PROBE, ERR, "unable to allocate %d bytes\n", size);
		return -ENOMEM;
	}
	for (i = 0; i < (size / 4); i++)
		pci_read_config_dword(dev, i * 4, &adapter->config_space[i]);
	return 0;
}

static void
e1000_pci_restore_state(struct e1000_adapter *adapter)
{
	struct pci_dev *dev = adapter->pdev;
	int size;
	int i;

	if (adapter->config_space == NULL)
		return;

	if (adapter->hw.mac_type >= e1000_82571)
		size = PCIE_CONFIG_SPACE_LEN;
	else
		size = PCI_CONFIG_SPACE_LEN;
	for (i = 0; i < (size / 4); i++)
		pci_write_config_dword(dev, i * 4, adapter->config_space[i]);
	kfree(adapter->config_space);
	adapter->config_space = NULL;
}
#endif /* CONFIG_PM */

static int
e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t ctrl, ctrl_ext, rctl, manc, status;
	uint32_t wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

#ifdef CONFIG_PM
	/* Implement our own version of pci_save_state(pdev) because pci-
	 * express adapters have 256-byte config spaces. */
	retval = e1000_pci_save_state(adapter);
	if (retval)
		return retval;
#endif

	status = E1000_READ_REG(&adapter->hw, STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_multi(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (adapter->wol & E1000_WUFC_MC) {
			rctl = E1000_READ_REG(&adapter->hw, RCTL);
			rctl |= E1000_RCTL_MPE;
			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
		}

		if (adapter->hw.mac_type >= e1000_82540) {
			ctrl = E1000_READ_REG(&adapter->hw, CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
			        E1000_CTRL_EN_PHY_PWR_MGMT;
			E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
		}
		if (adapter->hw.media_type == e1000_media_type_fiber ||
		    adapter->hw.media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
		}

		/* Allow time for pending master requests to run */
		e1000_disable_pciex_master(&adapter->hw);

		E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		E1000_WRITE_REG(&adapter->hw, WUC, 0);
		E1000_WRITE_REG(&adapter->hw, WUFC, 0);
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}

	/* FIXME: this code is incorrect for PCI Express */
	if (adapter->hw.mac_type >= e1000_82540 &&
	    adapter->hw.mac_type != e1000_ich8lan &&
	    adapter->hw.media_type == e1000_media_type_copper) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		if (manc & E1000_MANC_SMBUS_EN) {
			manc |= E1000_MANC_ARP_EN;
			E1000_WRITE_REG(&adapter->hw, MANC, manc);
			pci_enable_wake(pdev, PCI_D3hot, 1);
			pci_enable_wake(pdev, PCI_D3cold, 1);
		}
	}

	if (adapter->hw.phy_type == e1000_phy_igp_3)
		e1000_phy_powerdown_workaround(&adapter->hw);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	e1000_release_hw_control(adapter);

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
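
/* WUC/WUFC are the Wake Up Control and Wake Up Filter Control registers:
 * E1000_WUC_PME_EN arms PME assertion and the wufc value selects which events
 * (magic packet, link change, multicast, and so on) may wake the system. The
 * link-change wake bit is dropped above when the link is already up,
 * presumably to avoid an immediate wake as the link renegotiates while the
 * device powers down. */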

static int
e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	uint32_t manc, ret_val;

	pci_set_power_state(pdev, PCI_D0);
	e1000_pci_restore_state(adapter);
	ret_val = pci_enable_device(pdev);
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	E1000_WRITE_REG(&adapter->hw, WUS, ~0);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	/* FIXME: this code is incorrect for PCI Express */
	if (adapter->hw.mac_type >= e1000_82540 &&
	    adapter->hw.mac_type != e1000_ich8lan &&
	    adapter->hw.media_type == e1000_media_type_copper) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		manc &= ~(E1000_MANC_ARP_EN);
		E1000_WRITE_REG(&adapter->hw, MANC, manc);
	}

	/* If the controller is 82573 and f/w is AMT, do not set
	 * DRV_LOAD until the interface is up.  For all other cases,
	 * let the f/w know that the h/w is now under the control
	 * of the driver. */
	if (adapter->hw.mac_type != e1000_82573 ||
	    !e1000_check_mng_mode(&adapter->hw))
		e1000_get_hw_control(adapter);

	return 0;
}

static void e1000_shutdown(struct pci_dev *pdev)
{
	e1000_suspend(pdev, PMSG_SUSPEND);
}
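
/* Note that e1000_shutdown() reuses the suspend path, so the wake-on-LAN
 * filters are programmed for a plain shutdown/poweroff as well, not only for
 * a suspend-to-RAM transition. */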

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void
e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev, NULL);
	e1000_clean_tx_irq(adapter, adapter->tx_ring);
#ifndef CONFIG_E1000_NAPI
	adapter->clean_rx(adapter, adapter->rx_ring);
#endif
	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t
e1000_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev->priv;

	netif_device_detach(netdev);

	if (netif_running(netdev))
		e1000_down(adapter);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t
e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev->priv;

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, 3, 0);
	pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */

	/* Perform card reset only on one instance of the card */
	if (PCI_FUNC(pdev->devfn) != 0)
		return PCI_ERS_RESULT_RECOVERED;

	e1000_reset(adapter);
	E1000_WRITE_REG(&adapter->hw, WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}
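
/* The PCI_FUNC() check above means only function 0 performs the full reset:
 * on dual-port adapters both ports are functions of the same physical device,
 * so the shared hardware only needs (and should only get) one reset. */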

/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev->priv;
	uint32_t manc, swsm;

	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			printk("e1000: can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	if (adapter->hw.mac_type >= e1000_82540 &&
	    adapter->hw.media_type == e1000_media_type_copper) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		manc &= ~(E1000_MANC_ARP_EN);
		E1000_WRITE_REG(&adapter->hw, MANC, manc);
	}

	switch (adapter->hw.mac_type) {
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, SWSM);
		E1000_WRITE_REG(&adapter->hw, SWSM,
		                swsm | E1000_SWSM_DRV_LOAD);
		break;
	default:
		break;
	}

	if (netif_running(netdev))
		mod_timer(&adapter->watchdog_timer, jiffies);
}