/*******************************************************************************

  Intel PRO/100 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/*
 * e100.c: Intel(R) PRO/100 ethernet driver
 *
 * (Re)written 2003 by scott.feldman@intel.com.  Based loosely on
 * original e100 driver, but better described as a munging of
 * e100, e1000, eepro100, tg3, 8139cp, and other drivers.
 *
 * References:
 *     Intel 8255x 10/100 Mbps Ethernet Controller Family,
 *     Open Source Software Developers Manual,
 *     http://sourceforge.net/projects/e1000
 *
 *
 *                       Theory of Operation
 *
 * I.   General
 *
 * The driver supports the Intel(R) 10/100 Mbps PCI Fast Ethernet
 * controller family, which includes the 82557, 82558, 82559, 82550,
 * 82551, and 82562 devices.  82558 and greater controllers
 * integrate the Intel 82555 PHY.  The controllers are used in
 * server and client network interface cards, as well as in
 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
 * configurations.  8255x supports a 32-bit linear addressing
 * mode and operates at a 33 MHz PCI clock rate.
 *
 * II.  Driver Operation
 *
 * Memory-mapped mode is used exclusively to access the device's
 * shared-memory structure, the Control/Status Registers (CSR).  All
 * setup, configuration, and control of the device, including queuing
 * of Tx, Rx, and configuration commands, is through the CSR.
 * cmd_lock serializes accesses to the CSR command register.  cb_lock
 * protects the shared Command Block List (CBL).
 *
 * 8255x is highly MII-compliant and all accesses to the PHY go
 * through the Management Data Interface (MDI).  Consequently, the
 * driver leverages the mii.c library shared with other MII-compliant
 * devices.
 *
 * Big- and Little-Endian byte order as well as 32- and 64-bit
 * archs are supported.  Weak-ordered memory and non-cache-coherent
 * archs are supported.
 *
 * III. Transmit
 *
 * A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
 * together in a fixed-size ring (CBL) thus forming the flexible mode
 * memory structure.  A TCB marked with the suspend-bit indicates
 * the end of the ring.  The last TCB processed suspends the
 * controller, and the controller can be restarted by issuing a CU
 * resume command to continue from the suspend point, or a CU start
 * command to start at a given position in the ring.
 *
 * Non-Tx commands (config, multicast setup, etc) are linked
 * into the CBL ring along with Tx commands.  The common structure
 * used for both Tx and non-Tx commands is the Command Block (CB).
 *
 * cb_to_use is the next CB to use for queuing a command; cb_to_clean
 * is the next CB to check for completion; cb_to_send is the first
 * CB to start on in case of a previous failure to resume.  CB clean
 * up happens in interrupt context in response to a CU interrupt.
 * cbs_avail keeps track of the number of free CB resources available.
 *
 * Hardware padding of short packets to minimum packet size is
 * enabled.  82557 pads with 7Eh, while the later controllers pad
 * with 00h.
 *
 * IV.  Receive
 *
 * The Receive Frame Area (RFA) comprises a ring of Receive Frame
 * Descriptors (RFD) + data buffer, thus forming the simplified mode
 * memory structure.  Rx skbs are allocated to contain both the RFD
 * and the data buffer, but the RFD is pulled off before the skb is
 * indicated.  The data buffer is aligned such that encapsulated
 * protocol headers are u32-aligned.  Since the RFD is part of the
 * mapped shared memory, and completion status is contained within
 * the RFD, the RFD must be dma_sync'ed to maintain a consistent
 * view from software and hardware.
 *
 * In order to keep updates to the RFD link field from colliding with
 * hardware writes to mark packets complete, we use the feature that
 * hardware will not write to a size 0 descriptor and mark the previous
 * packet as end-of-list (EL).  After updating the link, we remove EL
 * and only then restore the size such that hardware may use the
 * previous-to-end RFD.
 *
 * Under typical operation, the receive unit (RU) is started once,
 * and the controller happily fills RFDs as frames arrive.  If
 * replacement RFDs cannot be allocated, or the RU goes non-active,
 * the RU must be restarted.  Frame arrival generates an interrupt,
 * and Rx indication and re-allocation happen in the same context,
 * therefore no locking is required.  A software-generated interrupt
 * is generated from the watchdog to recover from a failed allocation
 * scenario where all Rx resources have been indicated and none
 * replaced.
 *
 * V.   Miscellaneous
 *
 * VLAN offloading of tagging, stripping and filtering is not
 * supported, but the driver will accommodate the extra 4-byte VLAN tag
 * for processing by upper layers.  Tx/Rx Checksum offloading is not
 * supported.  Tx Scatter/Gather is not supported.  Jumbo Frames are
 * not supported (hardware limitation).
 *
 * MagicPacket(tm) WoL support is enabled/disabled via ethtool.
 *
 * Thanks to JC (jchapman@katalix.com) for helping with
 * testing/troubleshooting the development driver.
 *
 * TODO:
 * o several entry points race with dev->close
 * o check for tx-no-resources/stop Q races with tx clean/wake Q
 *
 * FIXES:
 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
 * - Stratus87247: protect MDI control register manipulations
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/firmware.h>
#include <asm/unaligned.h>


#define DRV_NAME		"e100"
#define DRV_EXT			"-NAPI"
#define DRV_VERSION		"3.5.23-k6" DRV_EXT
#define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
#define PFX			DRV_NAME ": "

#define E100_WATCHDOG_PERIOD	(2 * HZ)
#define E100_NAPI_WEIGHT	16

#define FIRMWARE_D101M		"e100/d101m_ucode.bin"
#define FIRMWARE_D101S		"e100/d101s_ucode.bin"
#define FIRMWARE_D102E		"e100/d102e_ucode.bin"

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
MODULE_FIRMWARE(FIRMWARE_D102E);

static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");

#define DPRINTK(nlevel, klevel, fmt, args...) \
        (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
        printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
                __func__ , ## args))
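
/* Usage sketch (illustrative): from within e100_self_test(), with
 * NETIF_MSG_HW set in nic->msg_enable,
 *     DPRINTK(HW, ERR, "Self-test failed\n");
 * prints at KERN_ERR something like
 *     "e100: eth0: e100_self_test: Self-test failed"
 * where "eth0" stands in for whatever nic->netdev->name happens to be. */
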
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
        PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
        PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
static struct pci_device_id e100_id_table[] = {
        INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
        INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
        INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
        INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
        INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
        INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
        INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
        INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
        INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
        INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
        INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
        INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
        INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
        INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
        INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
        INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
        INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
        INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
        INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
        INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
        INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
        INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
        INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
        INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
        INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
        INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
        INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
        INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
        INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
        INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
        INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
        INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
        INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
        INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
        INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
        INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
        INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
        INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
        INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
        INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
        INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
        { 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);

enum mac {
        mac_82557_D100_A  = 0,
        mac_82557_D100_B  = 1,
        mac_82557_D100_C  = 2,
        mac_82558_D101_A4 = 4,
        mac_82558_D101_B0 = 5,
        mac_82559_D101M   = 8,
        mac_82559_D101S   = 9,
        mac_82550_D102    = 12,
        mac_82550_D102_C  = 13,
        mac_82551_E       = 14,
        mac_82551_F       = 15,
        mac_82551_10      = 16,
        mac_unknown       = 0xFF,
};
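
/* Note: except for the ICH override, these MAC values come directly from
 * the PCI revision ID of the part (see e100_get_defaults() below). */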

enum phy {
        phy_100a     = 0x000003E0,
        phy_100c     = 0x035002A8,
        phy_82555_tx = 0x015002A8,
        phy_nsc_tx   = 0x5C002000,
        phy_82562_et = 0x033002A8,
        phy_82562_em = 0x032002A8,
        phy_82562_ek = 0x031002A8,
        phy_82562_eh = 0x017002A8,
        phy_unknown  = 0xFFFFFFFF,
};

/* CSR (Control/Status Registers) */
struct csr {
        struct {
                u8 status;
                u8 stat_ack;
                u8 cmd_lo;
                u8 cmd_hi;
                u32 gen_ptr;
        } scb;
        u32 port;
        u16 flash_ctrl;
        u8 eeprom_ctrl_lo;
        u8 eeprom_ctrl_hi;
        u32 mdi_ctrl;
        u32 rx_dma_count;
};

enum scb_status {
        rus_no_res = 0x08,
        rus_ready  = 0x10,
        rus_mask   = 0x3C,
};

enum ru_state {
        RU_SUSPENDED = 0,
        RU_RUNNING = 1,
        RU_UNINITIALIZED = -1,
};

enum scb_stat_ack {
        stat_ack_not_ours    = 0x00,
        stat_ack_sw_gen      = 0x04,
        stat_ack_rnr         = 0x10,
        stat_ack_cu_idle     = 0x20,
        stat_ack_frame_rx    = 0x40,
        stat_ack_cu_cmd_done = 0x80,
        stat_ack_not_present = 0xFF,
        stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
        stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

enum scb_cmd_hi {
        irq_mask_none = 0x00,
        irq_mask_all  = 0x01,
        irq_sw_gen    = 0x02,
};

enum scb_cmd_lo {
        cuc_nop        = 0x00,
        ruc_start      = 0x01,
        ruc_load_base  = 0x06,
        cuc_start      = 0x10,
        cuc_resume     = 0x20,
        cuc_dump_addr  = 0x40,
        cuc_dump_stats = 0x50,
        cuc_load_base  = 0x60,
        cuc_dump_reset = 0x70,
};
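
/* An observation from the encodings above: RU commands occupy the low
 * nibble of the SCB command byte while CU commands occupy the high nibble. */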

enum cuc_dump {
        cuc_dump_complete       = 0x0000A005,
        cuc_dump_reset_complete = 0x0000A007,
};

enum port {
        software_reset  = 0x0000,
        selftest        = 0x0001,
        selective_reset = 0x0002,
};

enum eeprom_ctrl_lo {
        eesk = 0x01,
        eecs = 0x02,
        eedi = 0x04,
        eedo = 0x08,
};

enum mdi_ctrl {
        mdi_write = 0x04000000,
        mdi_read  = 0x08000000,
        mdi_ready = 0x10000000,
};

enum eeprom_op {
        op_write = 0x05,
        op_read  = 0x06,
        op_ewds  = 0x10,
        op_ewen  = 0x13,
};

enum eeprom_offsets {
        eeprom_cnfg_mdix  = 0x03,
        eeprom_id         = 0x0A,
        eeprom_config_asf = 0x0D,
        eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
        eeprom_mdix_enabled = 0x0080,
};

enum eeprom_id {
        eeprom_id_wol = 0x0020,
};

enum eeprom_config_asf {
        eeprom_asf = 0x8000,
        eeprom_gcl = 0x4000,
};

enum cb_status {
        cb_complete = 0x8000,
        cb_ok       = 0x2000,
};

enum cb_command {
        cb_nop    = 0x0000,
        cb_iaaddr = 0x0001,
        cb_config = 0x0002,
        cb_multi  = 0x0003,
        cb_tx     = 0x0004,
        cb_ucode  = 0x0005,
        cb_dump   = 0x0006,
        cb_tx_sf  = 0x0008,
        cb_cid    = 0x1f00,
        cb_i      = 0x2000,
        cb_s      = 0x4000,
        cb_el     = 0x8000,
};

struct rfd {
        __le16 status;
        __le16 command;
        __le32 link;
        __le32 rbd;
        __le16 actual_size;
        __le16 size;
};
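
/* struct rfd mirrors the hardware Receive Frame Descriptor that sits at the
 * head of each Rx skb's data buffer (see "IV. Receive" above); the fields
 * are little-endian because the device reads them directly from memory. */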

struct rx {
        struct rx *next, *prev;
        struct sk_buff *skb;
        dma_addr_t dma_addr;
};

#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
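
/* The X() macro swaps bitfield declaration order on big-endian-bitfield
 * builds so that each field lands on the bits of the byte the 8255x expects
 * regardless of host convention.  For example, in byte 0 below,
 * X(byte_count:6, pad0:2) places byte_count in the 6 low-order bits and
 * pad0 in the 2 high-order bits on either kind of host. */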
struct config {
/*0*/   u8 X(byte_count:6, pad0:2);
/*1*/   u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/   u8 adaptive_ifs;
/*3*/   u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
           term_write_cache_line:1), pad3:4);
/*4*/   u8 X(rx_dma_max_count:7, pad4:1);
/*5*/   u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/   u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
           tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
           rx_discard_overruns:1), rx_save_bad_frames:1);
/*7*/   u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
           pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
           tx_dynamic_tbd:1);
/*8*/   u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/   u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
           link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/  u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
           loopback:2);
/*11*/  u8 X(linear_priority:3, pad11:5);
/*12*/  u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/  u8 ip_addr_lo;
/*14*/  u8 ip_addr_hi;
/*15*/  u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
           wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
           pad15_2:1), crs_or_cdt:1);
/*16*/  u8 fc_delay_lo;
/*17*/  u8 fc_delay_hi;
/*18*/  u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
           rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/  u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
           fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
           full_duplex_force:1), full_duplex_pin:1);
/*20*/  u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/  u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/  u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
        u8 pad_d102[9];
};

#define E100_MAX_MULTICAST_ADDRS	64
struct multi {
        __le16 count;
        u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};

/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
struct cb {
        __le16 status;
        __le16 command;
        __le32 link;
        union {
                u8 iaaddr[ETH_ALEN];
                __le32 ucode[UCODE_SIZE];
                struct config config;
                struct multi multi;
                struct {
                        u32 tbd_array;
                        u16 tcb_byte_count;
                        u8 threshold;
                        u8 tbd_count;
                        struct {
                                __le32 buf_addr;
                                __le16 size;
                                u16 eol;
                        } tbd;
                } tcb;
                __le32 dump_buffer_addr;
        } u;
        struct cb *next, *prev;
        dma_addr_t dma_addr;
        struct sk_buff *skb;
};
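
/* Layout note: the leading status/command/link words and the u union are
 * what the hardware sees in shared memory; the trailing next/prev/dma_addr/
 * skb fields are driver-private bookkeeping the device never interprets,
 * since each CB's link field points at offset 0 of the next struct cb. */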

enum loopback {
        lb_none = 0, lb_mac = 1, lb_phy = 3,
};

struct stats {
        __le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
                tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
                tx_multiple_collisions, tx_total_collisions;
        __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
                rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
                rx_short_frame_errors;
        __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
        __le16 xmt_tco_frames, rcv_tco_frames;
        __le32 complete;
};

struct mem {
        struct {
                u32 signature;
                u32 result;
        } selftest;
        struct stats stats;
        u8 dump_buf[596];
};

struct param_range {
        u32 min;
        u32 max;
        u32 count;
};

struct params {
        struct param_range rfds;
        struct param_range cbs;
};

struct nic {
        /* Begin: frequently used values: keep adjacent for cache effect */
        u32 msg_enable                          ____cacheline_aligned;
        struct net_device *netdev;
        struct pci_dev *pdev;

        struct rx *rxs                          ____cacheline_aligned;
        struct rx *rx_to_use;
        struct rx *rx_to_clean;
        struct rfd blank_rfd;
        enum ru_state ru_running;

        spinlock_t cb_lock                      ____cacheline_aligned;
        spinlock_t cmd_lock;
        struct csr __iomem *csr;
        enum scb_cmd_lo cuc_cmd;
        unsigned int cbs_avail;
        struct napi_struct napi;
        struct cb *cbs;
        struct cb *cb_to_use;
        struct cb *cb_to_send;
        struct cb *cb_to_clean;
        __le16 tx_command;
        /* End: frequently used values: keep adjacent for cache effect */

        enum {
                ich                = (1 << 0),
                promiscuous        = (1 << 1),
                multicast_all      = (1 << 2),
                wol_magic          = (1 << 3),
                ich_10h_workaround = (1 << 4),
        } flags                                 ____cacheline_aligned;

        enum mac mac;
        enum phy phy;
        struct params params;
        struct timer_list watchdog;
        struct timer_list blink_timer;
        struct mii_if_info mii;
        struct work_struct tx_timeout_task;
        enum loopback loopback;

        struct mem *mem;
        dma_addr_t dma_addr;

        dma_addr_t cbs_dma_addr;
        u8 adaptive_ifs;
        u8 tx_threshold;
        u32 tx_frames;
        u32 tx_collisions;
        u32 tx_deferred;
        u32 tx_single_collisions;
        u32 tx_multiple_collisions;
        u32 tx_fc_pause;
        u32 tx_tco_frames;

        u32 rx_fc_pause;
        u32 rx_fc_unsupported;
        u32 rx_tco_frames;
        u32 rx_over_length_errors;

        u16 leds;
        u16 eeprom_wc;
        __le16 eeprom[256];
        spinlock_t mdio_lock;
};

static inline void e100_write_flush(struct nic *nic)
{
        /* Flush previous PCI writes through intermediate bridges
         * by doing a benign read */
        (void)ioread8(&nic->csr->scb.status);
}

static void e100_enable_irq(struct nic *nic)
{
        unsigned long flags;

        spin_lock_irqsave(&nic->cmd_lock, flags);
        iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
        e100_write_flush(nic);
        spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_disable_irq(struct nic *nic)
{
        unsigned long flags;

        spin_lock_irqsave(&nic->cmd_lock, flags);
        iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
        e100_write_flush(nic);
        spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_hw_reset(struct nic *nic)
{
        /* Put CU and RU into idle with a selective reset to get
         * device off of PCI bus */
        iowrite32(selective_reset, &nic->csr->port);
        e100_write_flush(nic); udelay(20);

        /* Now fully reset device */
        iowrite32(software_reset, &nic->csr->port);
        e100_write_flush(nic); udelay(20);

        /* Mask off our interrupt line - it's unmasked after reset */
        e100_disable_irq(nic);
}

static int e100_self_test(struct nic *nic)
{
        u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

        /* Passing the self-test is a pretty good indication
         * that the device can DMA to/from host memory */

        nic->mem->selftest.signature = 0;
        nic->mem->selftest.result = 0xFFFFFFFF;

        iowrite32(selftest | dma_addr, &nic->csr->port);
        e100_write_flush(nic);
        /* Wait 10 msec for self-test to complete */
        msleep(10);

        /* Interrupts are enabled after self-test */
        e100_disable_irq(nic);

        /* Check results of self-test */
        if (nic->mem->selftest.result != 0) {
                DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
                        nic->mem->selftest.result);
                return -ETIMEDOUT;
        }
        if (nic->mem->selftest.signature == 0) {
                DPRINTK(HW, ERR, "Self-test failed: timed out\n");
                return -ETIMEDOUT;
        }

        return 0;
}

static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
{
        u32 cmd_addr_data[3];
        u8 ctrl;
        int i, j;

        /* Three cmds: write/erase enable, write data, write/erase disable */
        cmd_addr_data[0] = op_ewen << (addr_len - 2);
        cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
                le16_to_cpu(data);
        cmd_addr_data[2] = op_ewds << (addr_len - 2);

        /* Bit-bang cmds to write word to eeprom */
        for (j = 0; j < 3; j++) {

                /* Chip select */
                iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
                e100_write_flush(nic); udelay(4);

                for (i = 31; i >= 0; i--) {
                        ctrl = (cmd_addr_data[j] & (1 << i)) ?
                                eecs | eedi : eecs;
                        iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
                        e100_write_flush(nic); udelay(4);

                        iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
                        e100_write_flush(nic); udelay(4);
                }
                /* Wait 10 msec for cmd to complete */
                msleep(10);

                /* Chip deselect */
                iowrite8(0, &nic->csr->eeprom_ctrl_lo);
                e100_write_flush(nic); udelay(4);
        }
};

/* General technique stolen from the eepro100 driver - very clever */
static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
{
        u32 cmd_addr_data;
        u16 data = 0;
        u8 ctrl;
        int i;

        cmd_addr_data = ((op_read << *addr_len) | addr) << 16;

        /* Chip select */
        iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
        e100_write_flush(nic); udelay(4);

        /* Bit-bang to read word from eeprom */
        for (i = 31; i >= 0; i--) {
                ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
                iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
                e100_write_flush(nic); udelay(4);

                iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
                e100_write_flush(nic); udelay(4);

                /* Eeprom drives a dummy zero to EEDO after receiving
                 * complete address.  Use this to adjust addr_len. */
                ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
                if (!(ctrl & eedo) && i > 16) {
                        *addr_len -= (i - 16);
                        i = 17;
                }

                data = (data << 1) | (ctrl & eedo ? 1 : 0);
        }

        /* Chip deselect */
        iowrite8(0, &nic->csr->eeprom_ctrl_lo);
        e100_write_flush(nic); udelay(4);

        return cpu_to_le16(data);
};
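
/* Framing sketch (assuming a 93Cxx-style serial EEPROM): the opcode and
 * address are packed left-justified into the 32-bit cmd_addr_data word so
 * that bit 31 is clocked out first; the low 16 bits of the window are the
 * clocks during which the part shifts the 16-bit result back on EEDO (for
 * reads) or the driver shifts the data word out on EEDI (for writes), which
 * is why both bit-bang loops always run i = 31..0. */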

/* Load entire EEPROM image into driver cache and validate checksum */
static int e100_eeprom_load(struct nic *nic)
{
        u16 addr, addr_len = 8, checksum = 0;

        /* Try reading with an 8-bit addr len to discover actual addr len */
        e100_eeprom_read(nic, &addr_len, 0);
        nic->eeprom_wc = 1 << addr_len;

        for (addr = 0; addr < nic->eeprom_wc; addr++) {
                nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
                if (addr < nic->eeprom_wc - 1)
                        checksum += le16_to_cpu(nic->eeprom[addr]);
        }

        /* The checksum, stored in the last word, is calculated such that
         * the sum of words should be 0xBABA */
        if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
                DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
                if (!eeprom_bad_csum_allow)
                        return -EAGAIN;
        }

        return 0;
}

/* Save (portion of) driver EEPROM cache to device and update checksum */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
        u16 addr, addr_len = 8, checksum = 0;

        /* Try reading with an 8-bit addr len to discover actual addr len */
        e100_eeprom_read(nic, &addr_len, 0);
        nic->eeprom_wc = 1 << addr_len;

        if (start + count >= nic->eeprom_wc)
                return -EINVAL;

        for (addr = start; addr < start + count; addr++)
                e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

        /* The checksum, stored in the last word, is calculated such that
         * the sum of words should be 0xBABA */
        for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
                checksum += le16_to_cpu(nic->eeprom[addr]);
        nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
        e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
                nic->eeprom[nic->eeprom_wc - 1]);

        return 0;
}

#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST 20       /* delay like the old code */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
        unsigned long flags;
        unsigned int i;
        int err = 0;

        spin_lock_irqsave(&nic->cmd_lock, flags);

        /* Previous command is accepted when SCB clears */
        for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
                if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
                        break;
                cpu_relax();
                if (unlikely(i > E100_WAIT_SCB_FAST))
                        udelay(5);
        }
        if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
                err = -EAGAIN;
                goto err_unlock;
        }

        if (unlikely(cmd != cuc_resume))
                iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
        iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
        spin_unlock_irqrestore(&nic->cmd_lock, flags);

        return err;
}

static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
        void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
        struct cb *cb;
        unsigned long flags;
        int err = 0;

        spin_lock_irqsave(&nic->cb_lock, flags);

        if (unlikely(!nic->cbs_avail)) {
                err = -ENOMEM;
                goto err_unlock;
        }

        cb = nic->cb_to_use;
        nic->cb_to_use = cb->next;
        nic->cbs_avail--;
        cb->skb = skb;

        if (unlikely(!nic->cbs_avail))
                err = -ENOSPC;

        cb_prepare(nic, cb, skb);

        /* Order is important otherwise we'll be in a race with h/w:
         * set S-bit in current first, then clear S-bit in previous. */
        cb->command |= cpu_to_le16(cb_s);
        wmb();
        cb->prev->command &= cpu_to_le16(~cb_s);

        while (nic->cb_to_send != nic->cb_to_use) {
                if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
                        nic->cb_to_send->dma_addr))) {
                        /* Ok, here's where things get sticky.  It's
                         * possible that we can't schedule the command
                         * because the controller is too busy, so
                         * let's just queue the command and try again
                         * when another command is scheduled. */
                        if (err == -ENOSPC) {
                                /* request a reset */
                                schedule_work(&nic->tx_timeout_task);
                        }
                        break;
                } else {
                        nic->cuc_cmd = cuc_resume;
                        nic->cb_to_send = nic->cb_to_send->next;
                }
        }

err_unlock:
        spin_unlock_irqrestore(&nic->cb_lock, flags);

        return err;
}

static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
        u32 data_out = 0;
        unsigned int i;
        unsigned long flags;

        /*
         * Stratus87247: we shouldn't be writing the MDI control
         * register until the Ready bit shows True.  Also, since
         * manipulation of the MDI control registers is a multi-step
         * procedure it should be done under lock.
         */
        spin_lock_irqsave(&nic->mdio_lock, flags);
        for (i = 100; i; --i) {
                if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
                        break;
                udelay(20);
        }
        if (unlikely(!i)) {
                printk("e100.mdio_ctrl(%s) won't go Ready\n",
                        nic->netdev->name);
                spin_unlock_irqrestore(&nic->mdio_lock, flags);
                return 0;               /* No way to indicate timeout error */
        }
        iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

        for (i = 0; i < 100; i++) {
                udelay(20);
                if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
                        break;
        }
        spin_unlock_irqrestore(&nic->mdio_lock, flags);
        DPRINTK(HW, DEBUG,
                "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
                dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
        return (u16)data_out;
}

static int mdio_read(struct net_device *netdev, int addr, int reg)
{
        return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
}

static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
{
        mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
}

static void e100_get_defaults(struct nic *nic)
{
        struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
        struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

        /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
        nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
        if (nic->mac == mac_unknown)
                nic->mac = mac_82557_D100_A;

        nic->params.rfds = rfds;
        nic->params.cbs = cbs;

        /* Quadwords to DMA into FIFO before starting frame transmit */
        nic->tx_threshold = 0xE0;

        /* no interrupt for every tx completion, delay = 256us if not 557 */
        nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
                ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

        /* Template for a freshly allocated RFD */
        nic->blank_rfd.command = 0;
        nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
        nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);

        /* MII setup */
        nic->mii.phy_id_mask = 0x1F;
        nic->mii.reg_num_mask = 0x1F;
        nic->mii.dev = nic->netdev;
        nic->mii.mdio_read = mdio_read;
        nic->mii.mdio_write = mdio_write;
}

static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
        struct config *config = &cb->u.config;
        u8 *c = (u8 *)config;

        cb->command = cpu_to_le16(cb_config);

        memset(config, 0, sizeof(struct config));

        config->byte_count = 0x16;              /* bytes in this struct */
        config->rx_fifo_limit = 0x8;            /* bytes in FIFO before DMA */
        config->direct_rx_dma = 0x1;            /* reserved */
        config->standard_tcb = 0x1;             /* 1=standard, 0=extended */
        config->standard_stat_counter = 0x1;    /* 1=standard, 0=extended */
        config->rx_discard_short_frames = 0x1;  /* 1=discard, 0=pass */
        config->tx_underrun_retry = 0x3;        /* # of underrun retries */
        config->mii_mode = 0x1;                 /* 1=MII mode, 0=503 mode */
        config->pad10 = 0x6;
        config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */
        config->preamble_length = 0x2;          /* 0=1, 1=3, 2=7, 3=15 bytes */
        config->ifs = 0x6;                      /* x16 = inter frame spacing */
        config->ip_addr_hi = 0xF2;              /* ARP IP filter - not used */
        config->pad15_1 = 0x1;
        config->pad15_2 = 0x1;
        config->crs_or_cdt = 0x0;               /* 0=CRS only, 1=CRS or CDT */
        config->fc_delay_hi = 0x40;             /* time delay for fc frame */
        config->tx_padding = 0x1;               /* 1=pad short frames */
        config->fc_priority_threshold = 0x7;    /* 7=priority fc disabled */
        config->pad18 = 0x1;
        config->full_duplex_pin = 0x1;          /* 1=examine FDX# pin */
        config->pad20_1 = 0x1F;
        config->fc_priority_location = 0x1;     /* 1=byte#31, 0=byte#19 */
        config->pad21_1 = 0x5;

        config->adaptive_ifs = nic->adaptive_ifs;
        config->loopback = nic->loopback;

        if (nic->mii.force_media && nic->mii.full_duplex)
                config->full_duplex_force = 0x1;        /* 1=force, 0=auto */

        if (nic->flags & promiscuous || nic->loopback) {
                config->rx_save_bad_frames = 0x1;       /* 1=save, 0=discard */
                config->rx_discard_short_frames = 0x0;  /* 1=discard, 0=save */
                config->promiscuous_mode = 0x1;         /* 1=on, 0=off */
        }

        if (nic->flags & multicast_all)
                config->multicast_all = 0x1;            /* 1=accept, 0=no */

        /* disable WoL when up */
        if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
                config->magic_packet_disable = 0x1;     /* 1=off, 0=on */

        if (nic->mac >= mac_82558_D101_A4) {
                config->fc_disable = 0x1;       /* 1=Tx fc off, 0=Tx fc on */
                config->mwi_enable = 0x1;       /* 1=enable, 0=disable */
                config->standard_tcb = 0x0;     /* 1=standard, 0=extended */
                config->rx_long_ok = 0x1;       /* 1=VLANs ok, 0=standard */
                if (nic->mac >= mac_82559_D101M) {
                        config->tno_intr = 0x1;         /* TCO stats enable */
                        /* Enable TCO in extended config */
                        if (nic->mac >= mac_82551_10) {
                                config->byte_count = 0x20;  /* extended bytes */
                                config->rx_d102_mode = 0x1; /* GMRC for TCO */
                        }
                } else {
                        config->standard_stat_counter = 0x0;
                }
        }

        DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
                c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
        DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
                c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
        DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
                c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}

/*************************************************************************
*  CPUSaver parameters
*
*  All CPUSaver parameters are 16-bit literals that are part of a
*  "move immediate value" instruction.  By changing the value of
*  the literal in the instruction before the code is loaded, the
*  driver can change the algorithm.
*
*  INTDELAY - This loads the dead-man timer with its initial value.
*    When this timer expires the interrupt is asserted, and the
*    timer is reset each time a new packet is received.  (see
*    BUNDLEMAX below to set the limit on number of chained packets)
*    The current default is 0x600 or 1536.  Experiments show that
*    the value should probably stay within the 0x200 - 0x1000 range.
*
*  BUNDLEMAX -
*    This sets the maximum number of frames that will be bundled.  In
*    some situations, such as the TCP windowing algorithm, it may be
*    better to limit the growth of the bundle size than let it go as
*    high as it can, because that could cause too much added latency.
*    The default is six, because this is the number of packets in the
*    default TCP window size.  A value of 1 would make CPUSaver indicate
*    an interrupt for every frame received.  If you do not want to put
*    a limit on the bundle size, set this value to xFFFF.
*
*  BUNDLESMALL -
*    This contains a bit-mask describing the minimum size frame that
*    will be bundled.  The default masks the lower 7 bits, which means
*    that any frame less than 128 bytes in length will not be bundled,
*    but will instead immediately generate an interrupt.  This does
*    not affect the current bundle in any way.  Any frame that is 128
*    bytes or larger will be bundled normally.  This feature is meant
*    to provide immediate indication of ACK frames in a TCP environment.
*    Customers were seeing poor performance when a machine with CPUSaver
*    enabled was sending but not receiving.  The delay introduced when
*    the ACKs were received was enough to reduce total throughput, because
*    the sender would sit idle until the ACK was finally seen.
*
*    The current default is 0xFF80, which masks out the lower 7 bits.
*    This means that any frame which is x7F (127) bytes or smaller
*    will cause an immediate interrupt.  Because this value must be a
*    bit mask, there are only a few valid values that can be used.  To
*    turn this feature off, the driver can write the value xFFFF to the
*    lower word of this instruction (in the same way that the other
*    parameters are used).  Likewise, a value of 0xF800 (2047) would
*    cause an interrupt to be generated for every frame, because all
*    standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536 /* 0x600 */
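
/* These three literals are patched into the microcode image at offsets the
 * firmware blob itself supplies; see e100_setup_ucode() below, which clears
 * the low 16 bits of each target word and ORs in the new value. */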

/* Initialize firmware */
static const struct firmware *e100_request_firmware(struct nic *nic)
{
        const char *fw_name;
        const struct firmware *fw;
        u8 timer, bundle, min_size;
        int err;

        /* do not load u-code for ICH devices */
        if (nic->flags & ich)
                return NULL;

        /* Search for ucode match against h/w revision */
        if (nic->mac == mac_82559_D101M)
                fw_name = FIRMWARE_D101M;
        else if (nic->mac == mac_82559_D101S)
                fw_name = FIRMWARE_D101S;
        else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
                fw_name = FIRMWARE_D102E;
        else /* No ucode on other devices */
                return NULL;

        err = request_firmware(&fw, fw_name, &nic->pdev->dev);
        if (err) {
                DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n",
                        fw_name, err);
                return ERR_PTR(err);
        }
        /* Firmware should be precisely UCODE_SIZE (words) plus three bytes
           indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
        if (fw->size != UCODE_SIZE * 4 + 3) {
                DPRINTK(PROBE, ERR, "Firmware \"%s\" has wrong size %zu\n",
                        fw_name, fw->size);
                release_firmware(fw);
                return ERR_PTR(-EINVAL);
        }

        /* Read timer, bundle and min_size from end of firmware blob */
        timer = fw->data[UCODE_SIZE * 4];
        bundle = fw->data[UCODE_SIZE * 4 + 1];
        min_size = fw->data[UCODE_SIZE * 4 + 2];

        if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
            min_size >= UCODE_SIZE) {
                DPRINTK(PROBE, ERR,
                        "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
                        fw_name, timer, bundle, min_size);
                release_firmware(fw);
                return ERR_PTR(-EINVAL);
        }
        /* OK, firmware is validated and ready to use... */
        return fw;
}

static void e100_setup_ucode(struct nic *nic, struct cb *cb,
        struct sk_buff *skb)
{
        const struct firmware *fw = (void *)skb;
        u8 timer, bundle, min_size;

        /* It's not a real skb; we just abused the fact that e100_exec_cb
           will pass it through to here... */
        cb->skb = NULL;

        /* firmware is stored as little endian already */
        memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

        /* Read timer, bundle and min_size from end of firmware blob */
        timer = fw->data[UCODE_SIZE * 4];
        bundle = fw->data[UCODE_SIZE * 4 + 1];
        min_size = fw->data[UCODE_SIZE * 4 + 2];

        /* Insert user-tunable settings in cb->u.ucode */
        cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
        cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
        cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
        cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
        cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
        cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

        cb->command = cpu_to_le16(cb_ucode | cb_el);
}

static inline int e100_load_ucode_wait(struct nic *nic)
{
        const struct firmware *fw;
        int err = 0, counter = 50;
        struct cb *cb = nic->cb_to_clean;

        fw = e100_request_firmware(nic);
        /* If it's NULL, then no ucode is required */
        if (!fw || IS_ERR(fw))
                return PTR_ERR(fw);

        if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
                DPRINTK(PROBE, ERR, "ucode cmd failed with error %d\n", err);

        /* must restart cuc */
        nic->cuc_cmd = cuc_start;

        /* wait for completion */
        e100_write_flush(nic);
        udelay(10);

        /* wait for possibly (ouch) 500ms */
        while (!(cb->status & cpu_to_le16(cb_complete))) {
                msleep(10);
                if (!--counter) break;
        }

        /* ack any interrupts, something could have been set */
        iowrite8(~0, &nic->csr->scb.stat_ack);

        /* if the command failed, or is not OK, notify and return */
        if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
                DPRINTK(PROBE, ERR, "ucode load failed\n");
                err = -EPERM;
        }

        return err;
}

static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
        struct sk_buff *skb)
{
        cb->command = cpu_to_le16(cb_iaaddr);
        memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
}

static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
        cb->command = cpu_to_le16(cb_dump);
        cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
                offsetof(struct mem, dump_buf));
}

#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
static int e100_phy_init(struct nic *nic)
{
        struct net_device *netdev = nic->netdev;
        u32 addr;
        u16 bmcr, stat, id_lo, id_hi, cong;

        /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
        for (addr = 0; addr < 32; addr++) {
                nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
                bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
                stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
                stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
                if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
                        break;
        }
        DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
        if (addr == 32)
                return -EAGAIN;

        /* Select the phy and isolate the rest */
        for (addr = 0; addr < 32; addr++) {
                if (addr != nic->mii.phy_id) {
                        mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
                } else {
                        bmcr = mdio_read(netdev, addr, MII_BMCR);
                        mdio_write(netdev, addr, MII_BMCR,
                                bmcr & ~BMCR_ISOLATE);
                }
        }

        /* Get phy ID */
        id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
        id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
        nic->phy = (u32)id_hi << 16 | (u32)id_lo;
        DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);

        /* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
        if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
                /* Disable congestion control */
                cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
                cong |= NSC_CONG_TXREADY;
                cong &= ~NSC_CONG_ENABLE;
                mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
        }

        if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
            (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
            !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
                /* enable/disable MDI/MDI-X auto-switching. */
                mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
                        nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
        }

        return 0;
}

static int e100_hw_init(struct nic *nic)
{
        int err;

        e100_hw_reset(nic);

        DPRINTK(HW, ERR, "e100_hw_init\n");
        if (!in_interrupt() && (err = e100_self_test(nic)))
                return err;

        if ((err = e100_phy_init(nic)))
                return err;
        if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
                return err;
        if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
                return err;
        if ((err = e100_load_ucode_wait(nic)))
                return err;
        if ((err = e100_exec_cb(nic, NULL, e100_configure)))
                return err;
        if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
                return err;
        if ((err = e100_exec_cmd(nic, cuc_dump_addr,
                nic->dma_addr + offsetof(struct mem, stats))))
                return err;
        if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
                return err;

        e100_disable_irq(nic);

        return 0;
}

static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
        struct net_device *netdev = nic->netdev;
        struct dev_mc_list *list = netdev->mc_list;
        u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);

        cb->command = cpu_to_le16(cb_multi);
        cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
        for (i = 0; list && i < count; i++, list = list->next)
                memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
                        ETH_ALEN);
}

static void e100_set_multicast_list(struct net_device *netdev)
{
        struct nic *nic = netdev_priv(netdev);

        DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
                netdev->mc_count, netdev->flags);

        if (netdev->flags & IFF_PROMISC)
                nic->flags |= promiscuous;
        else
                nic->flags &= ~promiscuous;

        if (netdev->flags & IFF_ALLMULTI ||
                netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
                nic->flags |= multicast_all;
        else
                nic->flags &= ~multicast_all;

        e100_exec_cb(nic, NULL, e100_configure);
        e100_exec_cb(nic, NULL, e100_multi);
}

static void e100_update_stats(struct nic *nic)
{
        struct net_device *dev = nic->netdev;
        struct net_device_stats *ns = &dev->stats;
        struct stats *s = &nic->mem->stats;
        __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
                (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
                &s->complete;

        /* Device's stats reporting may take several microseconds to
         * complete, so we're always waiting for results of the
         * previous command. */

        if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
                *complete = 0;
                nic->tx_frames = le32_to_cpu(s->tx_good_frames);
                nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
                ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
                ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
                ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
                ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
                ns->collisions += nic->tx_collisions;
                ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
                        le32_to_cpu(s->tx_lost_crs);
                ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
                        nic->rx_over_length_errors;
                ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
                ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
                ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
                ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
                ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
                ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
                        le32_to_cpu(s->rx_alignment_errors) +
                        le32_to_cpu(s->rx_short_frame_errors) +
                        le32_to_cpu(s->rx_cdt_errors);
                nic->tx_deferred += le32_to_cpu(s->tx_deferred);
                nic->tx_single_collisions +=
                        le32_to_cpu(s->tx_single_collisions);
                nic->tx_multiple_collisions +=
                        le32_to_cpu(s->tx_multiple_collisions);
                if (nic->mac >= mac_82558_D101_A4) {
                        nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
                        nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
                        nic->rx_fc_unsupported +=
                                le32_to_cpu(s->fc_rcv_unsupported);
                        if (nic->mac >= mac_82559_D101M) {
                                nic->tx_tco_frames +=
                                        le16_to_cpu(s->xmt_tco_frames);
                                nic->rx_tco_frames +=
                                        le16_to_cpu(s->rcv_tco_frames);
                        }
                }
        }

        if (e100_exec_cmd(nic, cuc_dump_reset, 0))
                DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}

static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
{
        /* Adjust inter-frame-spacing (IFS) between two transmits if
         * we're getting collisions on a half-duplex connection. */

        if (duplex == DUPLEX_HALF) {
                u32 prev = nic->adaptive_ifs;
                u32 min_frames = (speed == SPEED_100) ? 1000 : 100;

                if ((nic->tx_frames / 32 < nic->tx_collisions) &&
                    (nic->tx_frames > min_frames)) {
                        if (nic->adaptive_ifs < 60)
                                nic->adaptive_ifs += 5;
                } else if (nic->tx_frames < min_frames) {
                        if (nic->adaptive_ifs >= 5)
                                nic->adaptive_ifs -= 5;
                }
                if (nic->adaptive_ifs != prev)
                        e100_exec_cb(nic, NULL, e100_configure);
        }
}

static void e100_watchdog(unsigned long data)
{
        struct nic *nic = (struct nic *)data;
        struct ethtool_cmd cmd;

        DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);

        /* mii library handles link maintenance tasks */

        mii_ethtool_gset(&nic->mii, &cmd);

        if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
                printk(KERN_INFO "e100: %s NIC Link is Up %s Mbps %s Duplex\n",
                        nic->netdev->name,
                        cmd.speed == SPEED_100 ? "100" : "10",
                        cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
        } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
                printk(KERN_INFO "e100: %s NIC Link is Down\n",
                        nic->netdev->name);
        }

        mii_check_link(&nic->mii);

        /* Software generated interrupt to recover from (rare) Rx
         * allocation failure.
         * Unfortunately have to use a spinlock to not re-enable interrupts
         * accidentally, due to hardware that shares a register between the
         * interrupt mask bit and the SW Interrupt generation bit */
        spin_lock_irq(&nic->cmd_lock);
        iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,
                &nic->csr->scb.cmd_hi);
        e100_write_flush(nic);
        spin_unlock_irq(&nic->cmd_lock);

        e100_update_stats(nic);
        e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);

        if (nic->mac <= mac_82557_D100_C)
                /* Issue a multicast command to workaround a 557 lock up */
                e100_set_multicast_list(nic->netdev);

        if (nic->flags & ich && cmd.speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
                /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
                nic->flags |= ich_10h_workaround;
        else
                nic->flags &= ~ich_10h_workaround;

        mod_timer(&nic->watchdog,
                  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}

static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
        struct sk_buff *skb)
{
        cb->command = nic->tx_command;
        /* interrupt every 16 packets regardless of delay */
        if ((nic->cbs_avail & ~15) == nic->cbs_avail)
                cb->command |= cpu_to_le16(cb_i);
        cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
        cb->u.tcb.tcb_byte_count = 0;
        cb->u.tcb.threshold = nic->tx_threshold;
        cb->u.tcb.tbd_count = 1;
        cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
                skb->data, skb->len, PCI_DMA_TODEVICE));
        /* check for mapping failure? */
        cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}

static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct nic *nic = netdev_priv(netdev);
        int err;

        if (nic->flags & ich_10h_workaround) {
                /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
                   Issue a NOP command followed by a 1us delay before
                   issuing the Tx command. */
                if (e100_exec_cmd(nic, cuc_nop, 0))
                        DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
                udelay(1);
        }

        err = e100_exec_cb(nic, skb, e100_xmit_prepare);

        switch (err) {
        case -ENOSPC:
                /* We queued the skb, but now we're out of space. */
                DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
                netif_stop_queue(netdev);
                break;
        case -ENOMEM:
                /* This is a hard error - log it. */
                DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
                netif_stop_queue(netdev);
                return 1;
        }

        netdev->trans_start = jiffies;
        return 0;
}

static int e100_tx_clean(struct nic *nic)
{
        struct net_device *dev = nic->netdev;
        struct cb *cb;
        int tx_cleaned = 0;

        spin_lock(&nic->cb_lock);

        /* Clean CBs marked complete */
        for (cb = nic->cb_to_clean;
            cb->status & cpu_to_le16(cb_complete);
            cb = nic->cb_to_clean = cb->next) {
                DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
                        (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
                        cb->status);

                if (likely(cb->skb != NULL)) {
                        dev->stats.tx_packets++;
                        dev->stats.tx_bytes += cb->skb->len;

                        pci_unmap_single(nic->pdev,
                                le32_to_cpu(cb->u.tcb.tbd.buf_addr),
                                le16_to_cpu(cb->u.tcb.tbd.size),
                                PCI_DMA_TODEVICE);
                        dev_kfree_skb_any(cb->skb);
                        cb->skb = NULL;
                        tx_cleaned = 1;
                }
                cb->status = 0;
                nic->cbs_avail++;
        }

        spin_unlock(&nic->cb_lock);

        /* Recover from running out of Tx resources in xmit_frame */
        if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
                netif_wake_queue(nic->netdev);

        return tx_cleaned;
}
1606
1607static void e100_clean_cbs(struct nic *nic)
1608{
f26251eb
BA
1609 if (nic->cbs) {
1610 while (nic->cbs_avail != nic->params.cbs.count) {
1da177e4 1611 struct cb *cb = nic->cb_to_clean;
f26251eb 1612 if (cb->skb) {
1da177e4
LT
1613 pci_unmap_single(nic->pdev,
1614 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1615 le16_to_cpu(cb->u.tcb.tbd.size),
1616 PCI_DMA_TODEVICE);
1617 dev_kfree_skb(cb->skb);
1618 }
1619 nic->cb_to_clean = nic->cb_to_clean->next;
1620 nic->cbs_avail++;
1621 }
1622 pci_free_consistent(nic->pdev,
1623 sizeof(struct cb) * nic->params.cbs.count,
1624 nic->cbs, nic->cbs_dma_addr);
1625 nic->cbs = NULL;
1626 nic->cbs_avail = 0;
1627 }
1628 nic->cuc_cmd = cuc_start;
1629 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
1630 nic->cbs;
1631}
1632
1633static int e100_alloc_cbs(struct nic *nic)
1634{
1635 struct cb *cb;
1636 unsigned int i, count = nic->params.cbs.count;
1637
1638 nic->cuc_cmd = cuc_start;
1639 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1640 nic->cbs_avail = 0;
1641
1642 nic->cbs = pci_alloc_consistent(nic->pdev,
1643 sizeof(struct cb) * count, &nic->cbs_dma_addr);
f26251eb 1644 if (!nic->cbs)
1da177e4
LT
1645 return -ENOMEM;
1646
f26251eb 1647 for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
1da177e4
LT
1648 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
1649 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
1650
1651 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
1652 cb->link = cpu_to_le32(nic->cbs_dma_addr +
1653 ((i+1) % count) * sizeof(struct cb));
1654 cb->skb = NULL;
1655 }
1656
1657 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
1658 nic->cbs_avail = count;
1659
1660 return 0;
1661}
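
/* The loop above builds the CBL as a ring that is linked twice over:
 * cb->next/cb->prev are CPU-side virtual pointers used by the driver,
 * while cb->link holds the little-endian bus address of the next CB so
 * the controller can follow the same ring via DMA. Index arithmetic
 * sketch for a ring of `count` entries:
 *
 *	next(i) = (i + 1) % count;
 *	prev(i) = (i + count - 1) % count;
 */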

static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
{
	if (!nic->rxs) return;
	if (RU_SUSPENDED != nic->ru_running) return;

	/* handle init time starts */
	if (!rx) rx = nic->rxs;

	/* (Re)start RU if suspended or idle and RFA is non-NULL */
	if (rx->skb) {
		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
		nic->ru_running = RU_RUNNING;
	}
}

#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if (!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
		return -ENOMEM;

	/* Align, init, and map the RFD. */
	skb_reserve(rx->skb, NET_IP_ALIGN);
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one.  We are safe to touch the previous RFD because
	 * it is protected by the before last buffer's el bit being set */
	if (rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	return 0;
}
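
/* Layout note: each Rx skb is allocated RFD_BUF_LEN + NET_IP_ALIGN
 * bytes and begins with a simplified-mode RFD (copied from
 * nic->blank_rfd), immediately followed by the receive data area:
 *
 *	skb->data: [struct rfd][up to VLAN_ETH_FRAME_LEN bytes of frame]
 *
 * The device both reads this buffer (command, link, size) and writes it
 * (status, actual count, frame data), which is why the mapping is
 * PCI_DMA_BIDIRECTIONAL.
 */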

static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct net_device *dev = nic->netdev;
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;

	if (unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	rfd_status = le16_to_cpu(rfd->status);

	DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);

	/* If data isn't ready, nothing to indicate */
	if (unlikely(!(rfd_status & cb_complete))) {
		/* If the next buffer has the el bit, but we think the receiver
		 * is still running, check to see if it really stopped while
		 * we had interrupts off.
		 * This allows for a fast restart without re-enabling
		 * interrupts */
		if ((le16_to_cpu(rfd->command) & cb_el) &&
		    (RU_RUNNING == nic->ru_running))
			if (ioread8(&nic->csr->scb.status) & rus_no_res)
				nic->ru_running = RU_SUSPENDED;
		return -ENODATA;
	}

	/* Get actual data size */
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* If this buffer has the el bit, but we think the receiver
	 * is still running, check to see if it really stopped while
	 * we had interrupts off.
	 * This allows for a fast restart without re-enabling interrupts.
	 * This can happen when the RU sees the size change but also sees
	 * the el bit set. */
	if ((le16_to_cpu(rfd->command) & cb_el) &&
	    (RU_RUNNING == nic->ru_running)) {
		if (ioread8(&nic->csr->scb.status) & rus_no_res)
			nic->ru_running = RU_SUSPENDED;
	}

	/* Pull off the RFD and put the actual data (minus eth hdr) */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	if (unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		dev_kfree_skb_any(skb);
	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += actual_size;
		netif_receive_skb(skb);
		if (work_done)
			(*work_done)++;
	}

	rx->skb = NULL;

	return 0;
}
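
/* Note on the 0x3FFF mask above: in the RFD, the actual-count word
 * packs the received byte count into its low 14 bits and uses the top
 * two bits as completion flags (per the 8255x software developer's
 * manual), so the mask recovers just the length. The subsequent clamp
 * to RFD_BUF_LEN - sizeof(struct rfd) guards against a corrupt count
 * overrunning the skb.
 */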

static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}

	/* On EAGAIN, hit quota so have more work to do, restart once
	 * cleanup is complete.
	 * Else, are we already rnr? then pay attention!!! this ensures that
	 * the state machine progression never allows a start with a
	 * partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	/* Alloc new skbs to refill list */
	for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
		if (unlikely(e100_rx_alloc_skb(nic, rx)))
			break; /* Better luck next time (see watchdog) */
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer
		 * without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this
		 * buffer.
		 * When the hardware hits the before last buffer with el-bit
		 * and size of 0, it will RNR interrupt, the RUS will go into
		 * the No Resources state.  It will not complete nor write to
		 * this buffer. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);

		/* Now that we have a new stopping point, we can clear the old
		 * stopping point.  We must sync twice to get the proper
		 * ordering on the hardware side of things. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
	}

	if (restart_required) {
		/* ack the rnr? */
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if (work_done)
			(*work_done)++;
	}
}

static void e100_rx_clean_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;

	nic->ru_running = RU_UNINITIALIZED;

	if (nic->rxs) {
		for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
			if (rx->skb) {
				pci_unmap_single(nic->pdev, rx->dma_addr,
					RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
				dev_kfree_skb(rx->skb);
			}
		}
		kfree(nic->rxs);
		nic->rxs = NULL;
	}

	nic->rx_to_use = nic->rx_to_clean = NULL;
}

static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}
	/* Set the el-bit on the buffer that is before the last buffer.
	 * This lets us update the next pointer on the last buffer without
	 * worrying about hardware touching it.
	 * We set the size to 0 to prevent hardware from touching this buffer.
	 * When the hardware hits the before last buffer with el-bit and size
	 * of 0, it will RNR interrupt, the RU will go into the No Resources
	 * state.  It will not complete nor write to this buffer. */
	rx = nic->rxs->prev->prev;
	before_last = (struct rfd *)rx->skb->data;
	before_last->command |= cpu_to_le16(cb_el);
	before_last->size = 0;
	pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}

static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);

	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	    stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if (stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	if (likely(netif_rx_schedule_prep(&nic->napi))) {
		e100_disable_irq(nic);
		__netif_rx_schedule(&nic->napi);
	}

	return IRQ_HANDLED;
}
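
/* Interrupt flow note: the SCB stat/ack register is write-one-to-clear,
 * so writing back the value just read acks exactly the events that were
 * observed. With the sources acked and the IRQ masked, the handler only
 * schedules NAPI; all actual Rx/Tx processing happens later in
 * e100_poll().
 */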

static int e100_poll(struct napi_struct *napi, int budget)
{
	struct nic *nic = container_of(napi, struct nic, napi);
	unsigned int work_done = 0;

	e100_rx_clean(nic, &work_done, budget);
	e100_tx_clean(nic);

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		netif_rx_complete(napi);
		e100_enable_irq(nic);
	}

	return work_done;
}
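
/* This follows the usual NAPI contract: return how much Rx work was
 * done, and only when the budget was not exhausted leave polling mode
 * (netif_rx_complete) and unmask the device interrupt. If work_done
 * equals budget, the core keeps this NAPI instance scheduled and calls
 * e100_poll() again without any further interrupts.
 */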

#ifdef CONFIG_NET_POLL_CONTROLLER
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
#endif

static int e100_set_mac_address(struct net_device *netdev, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	e100_exec_cb(nic, NULL, e100_setup_iaaddr);

	return 0;
}

static int e100_change_mtu(struct net_device *netdev, int new_mtu)
{
	if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
		return -EINVAL;
	netdev->mtu = new_mtu;
	return 0;
}
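
/* The MTU is clamped to [ETH_ZLEN, ETH_DATA_LEN] (60..1500 bytes)
 * because every Rx buffer is a fixed RFD_BUF_LEN allocation sized for a
 * standard VLAN-tagged Ethernet frame; there is no jumbo-frame support,
 * so only netdev->mtu needs updating here.
 */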

static int e100_asf(struct nic *nic)
{
	/* ASF can be enabled from eeprom */
	return ((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
}
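
/* ASF (Alert Standard Format) detection, read from the EEPROM: the
 * device must fall in the 0x1050-0x1057 PCI device-ID range, have the
 * ASF enable bit set with the GCL bit clear, and report an SMBus
 * address other than 0xFE. When ASF is active, e100_suspend() keeps
 * wake events armed so the management firmware stays reachable.
 */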

static int e100_up(struct nic *nic)
{
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	mod_timer(&nic->watchdog, jiffies);
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	netif_wake_queue(nic->netdev);
	napi_enable(&nic->napi);
	/* enable ints _after_ enabling poll, preventing a race between
	 * disable ints+schedule */
	e100_enable_irq(nic);
	return 0;

err_no_irq:
	del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}

static void e100_down(struct nic *nic)
{
	/* wait here for poll to complete */
	napi_disable(&nic->napi);
	netif_stop_queue(nic->netdev);
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}

static void e100_tx_timeout(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* Reset outside of interrupt context, to avoid request_irq
	 * in interrupt context */
	schedule_work(&nic->tx_timeout_task);
}

static void e100_tx_timeout_task(struct work_struct *work)
{
	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
	struct net_device *netdev = nic->netdev;

	DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
		ioread8(&nic->csr->scb.status));
	e100_down(netdev_priv(netdev));
	e100_up(netdev_priv(netdev));
}

static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}

#define MII_LED_CONTROL 0x1B
static void e100_blink_led(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	enum led_state {
		led_on = 0x01,
		led_off = 0x04,
		led_on_559 = 0x05,
		led_on_557 = 0x07,
	};

	nic->leds = (nic->leds & led_on) ? led_off :
		(nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
	mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
	mod_timer(&nic->blink_timer, jiffies + HZ / 4);
}

static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_ethtool_gset(&nic->mii, cmd);
}

static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
	err = mii_ethtool_sset(&nic->mii, cmd);
	e100_exec_cb(nic, NULL, e100_configure);

	return err;
}

static void e100_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *info)
{
	struct nic *nic = netdev_priv(netdev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->fw_version, "N/A");
	strcpy(info->bus_info, pci_name(nic->pdev));
}

#define E100_PHY_REGS 0x1C
static int e100_get_regs_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
}

static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for (i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}
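
/* Register dump layout produced above, as consumed by ethtool -d:
 *
 *	buff[0]                      SCB cmd_hi / cmd_lo / status, packed
 *	buff[1 .. 1+E100_PHY_REGS]   MDIO PHY registers 0x1C down to 0x00
 *	buff[2+E100_PHY_REGS ...]    controller dump buffer (e100_dump)
 *
 * The msleep(10) gives the dump command time to complete before the
 * shared dump_buf is copied out.
 */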

static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);
	wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
}

static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);

	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
	    !device_can_wakeup(&nic->pdev->dev))
		return -EOPNOTSUPP;

	if (wol->wolopts)
		nic->flags |= wol_magic;
	else
		nic->flags &= ~wol_magic;

	device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);

	e100_exec_cb(nic, NULL, e100_configure);

	return 0;
}

static u32 e100_get_msglevel(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->msg_enable;
}

static void e100_set_msglevel(struct net_device *netdev, u32 value)
{
	struct nic *nic = netdev_priv(netdev);
	nic->msg_enable = value;
}

static int e100_nway_reset(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_nway_restart(&nic->mii);
}

static u32 e100_get_link(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_link_ok(&nic->mii);
}

static int e100_get_eeprom_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->eeprom_wc << 1;
}

#define E100_EEPROM_MAGIC 0x1234
static int e100_get_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	eeprom->magic = E100_EEPROM_MAGIC;
	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);

	return 0;
}

static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if (eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}
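
/* The EEPROM is word (16-bit) addressed, which explains the shifts in
 * the two functions above: eeprom_wc is a word count, so the byte
 * length reported to ethtool is eeprom_wc << 1, and the byte offset and
 * length from userspace are converted back with >> 1. The extra "+ 1"
 * word on save covers a write whose byte range straddles a word
 * boundary.
 */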

static void e100_get_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	ring->rx_max_pending = rfds->max;
	ring->tx_max_pending = cbs->max;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = rfds->count;
	ring->tx_pending = cbs->count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int e100_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (netif_running(netdev))
		e100_down(nic);
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
		rfds->count, cbs->count);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}

static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test (on/offline)",
	"Eeprom test (on/offline)",
	"Self test (offline)",
	"Mac loopback (offline)",
	"Phy loopback (offline)",
};
#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test)

static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if (test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
	}
	for (i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	msleep_interruptible(4 * 1000);
}

static int e100_phys_id(struct net_device *netdev, u32 data)
{
	struct nic *nic = netdev_priv(netdev);

	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
	mod_timer(&nic->blink_timer, jiffies);
	msleep_interruptible(data * 1000);
	del_timer_sync(&nic->blink_timer);
	mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);

	return 0;
}

static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
};
#define E100_NET_STATS_LEN 21
#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats)

static int e100_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return E100_TEST_LEN;
	case ETH_SS_STATS:
		return E100_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
}

static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
		break;
	case ETH_SS_STATS:
		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
		break;
	}
}

static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.phys_id		= e100_phys_id,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_sset_count		= e100_get_sset_count,
};

static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct nic *nic = netdev_priv(netdev);

	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
}

static int e100_alloc(struct nic *nic)
{
	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
		&nic->dma_addr);
	return nic->mem ? 0 : -ENOMEM;
}

static void e100_free(struct nic *nic)
{
	if (nic->mem) {
		pci_free_consistent(nic->pdev, sizeof(struct mem),
			nic->mem, nic->dma_addr);
		nic->mem = NULL;
	}
}

static int e100_open(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err = 0;

	netif_carrier_off(netdev);
	if ((err = e100_up(nic)))
		DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
	return err;
}

static int e100_close(struct net_device *netdev)
{
	e100_down(netdev_priv(netdev));
	return 0;
}

static const struct net_device_ops e100_netdev_ops = {
	.ndo_open		= e100_open,
	.ndo_stop		= e100_close,
	.ndo_start_xmit		= e100_xmit_frame,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= e100_set_multicast_list,
	.ndo_set_mac_address	= e100_set_mac_address,
	.ndo_change_mtu		= e100_change_mtu,
	.ndo_do_ioctl		= e100_do_ioctl,
	.ndo_tx_timeout		= e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e100_netpoll,
#endif
};

static int __devinit e100_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct nic *nic;
	int err;

	if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
		if (((1 << debug) - 1) & NETIF_MSG_PROBE)
			printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
		return -ENOMEM;
	}

	netdev->netdev_ops = &e100_netdev_ops;
	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	nic = netdev_priv(netdev);
	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->msg_enable = (1 << debug) - 1;
	pci_set_drvdata(pdev, netdev);

	if ((err = pci_enable_device(pdev))) {
		DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
		goto err_out_free_dev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
			"base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if ((err = pci_request_regions(pdev, DRV_NAME))) {
		DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable_pdev;
	}

	if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
		DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	if (use_io)
		DPRINTK(PROBE, INFO, "using i/o access mode\n");

	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
	if (!nic->csr) {
		DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	if (ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;

	e100_get_defaults(nic);

	/* locks must be initialized before calling hw_reset */
	spin_lock_init(&nic->cb_lock);
	spin_lock_init(&nic->cmd_lock);
	spin_lock_init(&nic->mdio_lock);

	/* Reset the device before pci_set_master() in case device is in some
	 * funky state and has an interrupt pending - hint: we don't have the
	 * interrupt handler registered yet. */
	e100_hw_reset(nic);

	pci_set_master(pdev);

	init_timer(&nic->watchdog);
	nic->watchdog.function = e100_watchdog;
	nic->watchdog.data = (unsigned long)nic;
	init_timer(&nic->blink_timer);
	nic->blink_timer.function = e100_blink_led;
	nic->blink_timer.data = (unsigned long)nic;

	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);

	if ((err = e100_alloc(nic))) {
		DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
		goto err_out_iounmap;
	}

	if ((err = e100_eeprom_load(nic)))
		goto err_out_free;

	e100_phy_init(nic);

	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
	if (!is_valid_ether_addr(netdev->perm_addr)) {
		if (!eeprom_bad_csum_allow) {
			DPRINTK(PROBE, ERR, "Invalid MAC address from "
				"EEPROM, aborting.\n");
			err = -EAGAIN;
			goto err_out_free;
		} else {
			DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
				"you MUST configure one.\n");
		}
	}

	/* Wol magic packet can be enabled from eeprom */
	if ((nic->mac >= mac_82558_D101_A4) &&
	    (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
		nic->flags |= wol_magic;
		device_set_wakeup_enable(&pdev->dev, true);
	}

	/* ack any pending wake events, disable PME */
	pci_pme_active(pdev, false);

	strcpy(netdev->name, "eth%d");
	if ((err = register_netdev(netdev))) {
		DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
		goto err_out_free;
	}

	DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
		(unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
		pdev->irq, netdev->dev_addr);

	return 0;

err_out_free:
	e100_free(nic);
err_out_iounmap:
	pci_iounmap(pdev, nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}

static void __devexit e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct nic *nic = netdev_priv(netdev);
		unregister_netdev(netdev);
		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		if (pci_enable_wake(pdev, PCI_D3cold, true))
			pci_enable_wake(pdev, PCI_D3hot, true);
	} else {
		pci_enable_wake(pdev, PCI_D3hot, false);
	}

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
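
/* Wake arming on suspend: if either Wake-on-LAN magic packet support or
 * ASF management is active, wake events are enabled for D3cold, falling
 * back to D3hot when the platform cannot wake from D3cold (that is the
 * point of the nested pci_enable_wake() calls: the first returns
 * non-zero on failure). Otherwise wake is explicitly disabled.
 */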

#ifdef CONFIG_PM
static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
#endif /* CONFIG_PM */

static void e100_shutdown(struct pci_dev *pdev)
{
	e100_suspend(pdev, PMSG_SUSPEND);
}

/* ------------------ PCI Error Recovery infrastructure -------------- */
/**
 * e100_io_error_detected - called when PCI error is detected.
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* Similar to calling e100_down(), but avoids adapter I/O. */
	e100_close(netdev);

	/* Detach; put netif into a state similar to hotplug unplug. */
	napi_enable(&nic->napi);
	netif_device_detach(netdev);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e100_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch.
 */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one device per card can do a reset */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e100_io_resume - resume normal operations
 * @pdev: Pointer to PCI device
 *
 * Resume normal operations after an error recovery
 * sequence has been completed.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev)) {
		e100_open(netdev);
		mod_timer(&nic->watchdog, jiffies);
	}
}

static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};

static struct pci_driver e100_driver = {
	.name =		DRV_NAME,
	.id_table =	e100_id_table,
	.probe =	e100_probe,
	.remove =	__devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend =	e100_suspend,
	.resume =	e100_resume,
#endif
	.shutdown =	e100_shutdown,
	.err_handler =	&e100_err_handler,
};

static int __init e100_init_module(void)
{
	if (((1 << debug) - 1) & NETIF_MSG_DRV) {
		printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
		printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
	}
	return pci_register_driver(&e100_driver);
}

static void __exit e100_cleanup_module(void)
{
	pci_unregister_driver(&e100_driver);
}

module_init(e100_init_module);
module_exit(e100_cleanup_module);