/*******************************************************************************

  Intel PRO/100 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/*
 *	e100.c: Intel(R) PRO/100 ethernet driver
 *
 *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
 *	original e100 driver, but better described as a munging of
 *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
 *
 *	References:
 *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
 *		Open Source Software Developers Manual,
 *		http://sourceforge.net/projects/e1000
 *
 *
 *	                      Theory of Operation
 *
 *	I.   General
 *
 *	The driver supports the Intel(R) 10/100 Mbps PCI Fast Ethernet
 *	controller family, which includes the 82557, 82558, 82559, 82550,
 *	82551, and 82562 devices.  82558 and greater controllers
 *	integrate the Intel 82555 PHY.  The controllers are used in
 *	server and client network interface cards, as well as in
 *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
 *	configurations.  8255x supports a 32-bit linear addressing
 *	mode and operates at a 33 MHz PCI clock rate.
 *
 *	II.  Driver Operation
 *
 *	Memory-mapped mode is used exclusively to access the device's
 *	shared-memory structure, the Control/Status Registers (CSR).  All
 *	setup, configuration, and control of the device, including queuing
 *	of Tx, Rx, and configuration commands, is through the CSR.
 *	cmd_lock serializes accesses to the CSR command register.
 *	cb_lock protects the shared Command Block List (CBL).
 *
 *	8255x is highly MII-compliant and all access to the PHY goes
 *	through the Management Data Interface (MDI).  Consequently, the
 *	driver leverages the mii.c library shared with other MII-compliant
 *	devices.
 *
 *	Big- and Little-Endian byte order as well as 32- and 64-bit
 *	archs are supported.  Weak-ordered memory and non-cache-coherent
 *	archs are supported.
 *
 *	III. Transmit
 *
 *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
 *	together in a fixed-size ring (CBL) thus forming the flexible mode
 *	memory structure.  A TCB marked with the suspend-bit indicates
 *	the end of the ring.  The last TCB processed suspends the
 *	controller, and the controller can be restarted by issuing a CU
 *	resume command to continue from the suspend point, or a CU start
 *	command to start at a given position in the ring.
 *
 *	Non-Tx commands (config, multicast setup, etc) are linked
 *	into the CBL ring along with Tx commands.  The common structure
 *	used for both Tx and non-Tx commands is the Command Block (CB).
 *
 *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
 *	is the next CB to check for completion; cb_to_send is the first
 *	CB to start on in case of a previous failure to resume.  CB clean
 *	up happens in interrupt context in response to a CU interrupt.
 *	cbs_avail keeps track of the number of free CB resources available.
 *
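 *	As a rough sketch only (shorthand, not driver code; the real
 *	logic lives in e100_exec_cb below), queuing one command is:
 *
 *		cb = nic->cb_to_use;			-- grab next free CB
 *		nic->cb_to_use = cb->next;
 *		nic->cbs_avail--;
 *		cb->command |= cpu_to_le16(cb_s);	-- suspend at new tail
 *		wmb();
 *		cb->prev->command &= cpu_to_le16(~cb_s);-- release old tail
 *		e100_exec_cmd(nic, nic->cuc_cmd, nic->cb_to_send->dma_addr);
 *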
 *	Hardware padding of short packets to minimum packet size is
 *	enabled.  82557 pads with 7Eh, while the later controllers pad
 *	with 00h.
 *
 *	IV.  Receive
 *
 *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
 *	Descriptors (RFD) + data buffer, thus forming the simplified mode
 *	memory structure.  Rx skbs are allocated to contain both the RFD
 *	and the data buffer, but the RFD is pulled off before the skb is
 *	indicated.  The data buffer is aligned such that encapsulated
 *	protocol headers are u32-aligned.  Since the RFD is part of the
 *	mapped shared memory, and completion status is contained within
 *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
 *	view from software and hardware.
 *
 *	In order to keep updates to the RFD link field from colliding with
 *	hardware writes to mark packets complete, we use the feature that
 *	hardware will not write to a size 0 descriptor and mark the previous
 *	packet as end-of-list (EL).  After updating the link, we remove EL
 *	and only then restore the size such that hardware may use the
 *	previous-to-end RFD.
 *
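 *	In shorthand, the update order described above is (a sketch only;
 *	the real code lives in the Rx allocation path):
 *
 *		1. write the new RFD's bus address into the link field of
 *		   the RFD that currently carries EL,
 *		2. clear that RFD's EL bit,
 *		3. only then restore its size so hardware may use it.
 *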
 *	Under typical operation, the receive unit (RU) is started once,
 *	and the controller happily fills RFDs as frames arrive.  If
 *	replacement RFDs cannot be allocated, or the RU goes non-active,
 *	the RU must be restarted.  Frame arrival generates an interrupt,
 *	and Rx indication and re-allocation happen in the same context,
 *	therefore no locking is required.  A software-generated interrupt
 *	is generated from the watchdog to recover from a failed allocation
 *	scenario where all Rx resources have been indicated and none
 *	replaced.
 *
 *	V.   Miscellaneous
 *
 *	VLAN offloading of tagging, stripping and filtering is not
 *	supported, but the driver will accommodate the extra 4-byte VLAN tag
 *	for processing by upper layers.  Tx/Rx Checksum offloading is not
 *	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames are
 *	not supported (hardware limitation).
 *
 *	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
 *
 *	Thanks to JC (jchapman@katalix.com) for helping with
 *	testing/troubleshooting the development driver.
 *
 *	TODO:
 *	o several entry points race with dev->close
 *	o check for tx-no-resources/stop Q races with tx clean/wake Q
 *
 *	FIXES:
 *	2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
 *	- Stratus87247: protect MDI control register manipulations
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <asm/unaligned.h>


#define DRV_NAME		"e100"
#define DRV_EXT			"-NAPI"
#define DRV_VERSION		"3.5.23-k4"DRV_EXT
#define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
#define PFX			DRV_NAME ": "

#define E100_WATCHDOG_PERIOD	(2 * HZ)
#define E100_NAPI_WEIGHT	16

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
#define DPRINTK(nlevel, klevel, fmt, args...) \
	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
		__FUNCTION__ , ## args))

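/* Typical DPRINTK() use, taken from calls later in this file:
 *	DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
 * prints only when NETIF_MSG_HW is set in nic->msg_enable; note that the
 * macro assumes a variable named "nic" is in scope at the call site. */
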
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
static struct pci_device_id e100_id_table[] = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);

enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};

enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_unknown  = 0xFFFFFFFF,
};

/* CSR (Control/Status Registers) */
struct csr {
	struct {
		u8 status;
		u8 stat_ack;
		u8 cmd_lo;
		u8 cmd_hi;
		u32 gen_ptr;
	} scb;
	u32 port;
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;
	u32 rx_dma_count;
};

enum scb_status {
	rus_no_res = 0x08,
	rus_ready  = 0x10,
	rus_mask   = 0x3C,
};

enum ru_state {
	RU_SUSPENDED     = 0,
	RU_RUNNING       = 1,
	RU_UNINITIALIZED = -1,
};

enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,
	stat_ack_rnr         = 0x10,
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
	irq_sw_gen    = 0x02,
};

enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};

enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};

enum eeprom_ctrl_lo {
	eesk = 0x01,
	eecs = 0x02,
	eedi = 0x04,
	eedo = 0x08,
};

enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,
};

enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,
	op_ewen  = 0x13,
};

enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};

enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};

enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,
	cb_config = 0x0002,
	cb_multi  = 0x0003,
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,
	cb_cid    = 0x1f00,
	cb_i      = 0x2000,
	cb_s      = 0x4000,
	cb_el     = 0x8000,
};

struct rfd {
	u16 status;
	u16 command;
	u32 link;
	u32 rbd;
	u16 actual_size;
	u16 size;
};

struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};

#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
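/* For example, X(byte_count:6, pad0:2) expands to "byte_count:6, pad0:2"
 * on little-endian and to "pad0:2, byte_count:6" on big-endian builds, so
 * each bitfield keeps the same physical position within the byte. */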
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_discard_overruns:1), rx_save_bad_frames:1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};

#define E100_MAX_MULTICAST_ADDRS	64
struct multi {
	u16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};

/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
struct cb {
	u16 status;
	u16 command;
	u32 link;
	union {
		u8 iaaddr[ETH_ALEN];
		u32 ucode[UCODE_SIZE];
		struct config config;
		struct multi multi;
		struct {
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				u32 buf_addr;
				u16 size;
				u16 eol;
			} tbd;
		} tcb;
		u32 dump_buffer_addr;
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};

enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

struct stats {
	u32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	u32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	u32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	u16 xmt_tco_frames, rcv_tco_frames;
	u32 complete;
};

struct mem {
	struct {
		u32 signature;
		u32 result;
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

struct params {
	struct param_range rfds;
	struct param_range cbs;
};

struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;

	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;
	enum ru_state ru_running;

	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;
	struct csr __iomem *csr;
	enum scb_cmd_lo cuc_cmd;
	unsigned int cbs_avail;
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;
	struct cb *cb_to_send;
	struct cb *cb_to_clean;
	u16 tx_command;
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct timer_list blink_timer;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;
	dma_addr_t dma_addr;

	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_over_length_errors;

	u16 leds;
	u16 eeprom_wc;
	u16 eeprom[256];
	spinlock_t mdio_lock;
};

static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}

static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}

static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if(nic->mem->selftest.result != 0) {
		DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
			nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if(nic->mem->selftest.signature == 0) {
		DPRINTK(HW, ERR, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, u16 data)
{
	u32 cmd_addr_data[3];
	u8 ctrl;
	int i, j;

	/* Three cmds: write/erase enable, write data, write/erase disable */
	cmd_addr_data[0] = op_ewen << (addr_len - 2);
	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
		cpu_to_le16(data);
	cmd_addr_data[2] = op_ewds << (addr_len - 2);

	/* Bit-bang cmds to write word to eeprom */
	for(j = 0; j < 3; j++) {

		/* Chip select */
		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		for(i = 31; i >= 0; i--) {
			ctrl = (cmd_addr_data[j] & (1 << i)) ?
				eecs | eedi : eecs;
			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);

			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);
		}
		/* Wait 10 msec for cmd to complete */
		msleep(10);

		/* Chip deselect */
		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);
	}
}

/* General technique stolen from the eepro100 driver - very clever */
static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
{
	u32 cmd_addr_data;
	u16 data = 0;
	u8 ctrl;
	int i;

	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;

	/* Chip select */
	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	/* Bit-bang to read word from eeprom */
	for(i = 31; i >= 0; i--) {
		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		/* Eeprom drives a dummy zero to EEDO after receiving
		 * complete address.  Use this to adjust addr_len. */
		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
		if(!(ctrl & eedo) && i > 16) {
			*addr_len -= (i - 16);
			i = 17;
		}

		data = (data << 1) | (ctrl & eedo ? 1 : 0);
	}

	/* Chip deselect */
	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	return le16_to_cpu(data);
}

/* Load entire EEPROM image into driver cache and validate checksum */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for(addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		if(addr < nic->eeprom_wc - 1)
			checksum += cpu_to_le16(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	checksum = le16_to_cpu(0xBABA - checksum);
	if(checksum != nic->eeprom[nic->eeprom_wc - 1]) {
		DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}

/* Save (portion of) driver EEPROM cache to device and update checksum */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if(start + count >= nic->eeprom_wc)
		return -EINVAL;

	for(addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for(addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += cpu_to_le16(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = le16_to_cpu(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}

#define E100_WAIT_SCB_TIMEOUT	20000	/* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST	20	/* delay like the old code */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for(i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if(likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		if(unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	if(unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}

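/* Typical e100_exec_cmd() use, as in e100_hw_init() below: commands that
 * take no pointer argument simply pass 0 for dma_addr, e.g.
 *	err = e100_exec_cmd(nic, cuc_load_base, 0);
 */
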
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if(unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	if(unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while(nic->cb_to_send != nic->cb_to_use) {
		if(unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky. It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if(err == -ENOSPC) {
				/* request a reset */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}

static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;

	/*
	 * Stratus87247: we shouldn't be writing the MDI control
	 * register until the Ready bit shows True.  Also, since
	 * manipulation of the MDI control registers is a multi-step
	 * procedure it should be done under lock.
	 */
	spin_lock_irqsave(&nic->mdio_lock, flags);
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		printk("e100.mdio_ctrl(%s) won't go Ready\n",
			nic->netdev->name);
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0;		/* No way to indicate timeout error */
	}
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	spin_unlock_irqrestore(&nic->mdio_lock, flags);
	DPRINTK(HW, DEBUG,
		"%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
	return (u16)data_out;
}

static int mdio_read(struct net_device *netdev, int addr, int reg)
{
	return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
}

static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
{
	mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
}

static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if(nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557 */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = 0;
	nic->blank_rfd.rbd = 0xFFFFFFFF;
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}

static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	config->mii_mode = 0x1;			/* 1=MII mode, 0=503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if(nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if(nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if(nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up */
	if(netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if(nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}

/********************************************************/
/*  Micro code for 8086:1229 Rev 8                      */
/********************************************************/

/*  Parameter values for the D101M B-step  */
#define D101M_CPUSAVER_TIMER_DWORD		78
#define D101M_CPUSAVER_BUNDLE_DWORD		65
#define D101M_CPUSAVER_MIN_SIZE_DWORD		126

#define D101M_B_RCVBUNDLE_UCODE \
{\
0x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
0x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
0x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
0x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
0x00380438, 0x00000000, 0x00140000, 0x00380555, \
0x00308000, 0x00100662, 0x00100561, 0x000E0408, \
0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
0x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
0x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
0x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
0x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
0x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
0x00041000, 0x00010004, 0x00130826, 0x000C0006, \
0x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
0x00101210, 0x00380C34, 0x00000000, 0x00000000, \
0x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
0x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
0x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
0x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
0x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
0x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
0x00130826, 0x000C0001, 0x00220559, 0x00101313, \
0x00380559, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00130831, 0x0010090B, 0x00124813, \
0x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
0x003806A8, 0x00000000, 0x00000000, 0x00000000, \
}

/********************************************************/
/*  Micro code for 8086:1229 Rev 9                      */
/********************************************************/

/*  Parameter values for the D101S  */
#define D101S_CPUSAVER_TIMER_DWORD		78
#define D101S_CPUSAVER_BUNDLE_DWORD		67
#define D101S_CPUSAVER_MIN_SIZE_DWORD		128

#define D101S_RCVBUNDLE_UCODE \
{\
0x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
0x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
0x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
0x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
0x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
0x00308000, 0x00100610, 0x00100561, 0x000E0408, \
0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
0x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
0x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
0x003A047E, 0x00044010, 0x00380819, 0x00000000, \
0x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
0x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
0x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
0x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
0x00101313, 0x00380700, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
0x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
0x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
0x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
0x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
0x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
0x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
0x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
0x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
0x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00130831, \
0x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
0x00041000, 0x00010004, 0x00380700 \
}

/********************************************************/
/*  Micro code for the 8086:1229 Rev F/10               */
/********************************************************/

/*  Parameter values for the D102 E-step  */
#define D102_E_CPUSAVER_TIMER_DWORD		42
#define D102_E_CPUSAVER_BUNDLE_DWORD		54
#define D102_E_CPUSAVER_MIN_SIZE_DWORD		46

#define D102_E_RCVBUNDLE_UCODE \
{\
0x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
0x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
0x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
0x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
0x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
0x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
0x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
0x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
0x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
}

static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
/* *INDENT-OFF* */
	static struct {
		u32 ucode[UCODE_SIZE + 1];
		u8 mac;
		u8 timer_dword;
		u8 bundle_dword;
		u8 min_size_dword;
	} ucode_opts[] = {
		{ D101M_B_RCVBUNDLE_UCODE,
		  mac_82559_D101M,
		  D101M_CPUSAVER_TIMER_DWORD,
		  D101M_CPUSAVER_BUNDLE_DWORD,
		  D101M_CPUSAVER_MIN_SIZE_DWORD },
		{ D101S_RCVBUNDLE_UCODE,
		  mac_82559_D101S,
		  D101S_CPUSAVER_TIMER_DWORD,
		  D101S_CPUSAVER_BUNDLE_DWORD,
		  D101S_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_F,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_10,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ {0}, 0, 0, 0, 0}
	}, *opts;
/* *INDENT-ON* */

/*************************************************************************
*  CPUSaver parameters
*
*  All CPUSaver parameters are 16-bit literals that are part of a
*  "move immediate value" instruction.  By changing the value of
*  the literal in the instruction before the code is loaded, the
*  driver can change the algorithm.
*
*  INTDELAY - This loads the dead-man timer with its initial value.
*    When this timer expires the interrupt is asserted, and the
*    timer is reset each time a new packet is received.  (see
*    BUNDLEMAX below to set the limit on number of chained packets)
*    The current default is 0x600 or 1536.  Experiments show that
*    the value should probably stay within the 0x200 - 0x1000 range.
*
*  BUNDLEMAX -
*    This sets the maximum number of frames that will be bundled.  In
*    some situations, such as the TCP windowing algorithm, it may be
*    better to limit the growth of the bundle size than let it go as
*    high as it can, because that could cause too much added latency.
*    The default is six, because this is the number of packets in the
*    default TCP window size.  A value of 1 would make CPUSaver indicate
*    an interrupt for every frame received.  If you do not want to put
*    a limit on the bundle size, set this value to xFFFF.
*
*  BUNDLESMALL -
*    This contains a bit-mask describing the minimum size frame that
*    will be bundled.  The default masks the lower 7 bits, which means
*    that any frame less than 128 bytes in length will not be bundled,
*    but will instead immediately generate an interrupt.  This does
*    not affect the current bundle in any way.  Any frame that is 128
*    bytes or larger will be bundled normally.  This feature is meant
*    to provide immediate indication of ACK frames in a TCP environment.
*    Customers were seeing poor performance when a machine with CPUSaver
*    enabled was sending but not receiving.  The delay introduced when
*    the ACKs were received was enough to reduce total throughput, because
*    the sender would sit idle until the ACK was finally seen.
*
*    The current default is 0xFF80, which masks out the lower 7 bits.
*    This means that any frame which is x7F (127) bytes or smaller
*    will cause an immediate interrupt.  Because this value must be a
*    bit mask, there are only a few valid values that can be used.  To
*    turn this feature off, the driver can write the value xFFFF to the
*    lower word of this instruction (in the same way that the other
*    parameters are used).  Likewise, a value of 0xF800 (2047) would
*    cause an interrupt to be generated for every frame, because all
*    standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536 /* 0x600 */

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		goto noloaducode;

	/* Search for ucode match against h/w revision */
	for (opts = ucode_opts; opts->mac; opts++) {
		int i;
		u32 *ucode = opts->ucode;
		if (nic->mac != opts->mac)
			continue;

		/* Insert user-tunable settings */
		ucode[opts->timer_dword] &= 0xFFFF0000;
		ucode[opts->timer_dword] |= INTDELAY;
		ucode[opts->bundle_dword] &= 0xFFFF0000;
		ucode[opts->bundle_dword] |= BUNDLEMAX;
		ucode[opts->min_size_dword] &= 0xFFFF0000;
		ucode[opts->min_size_dword] |= (BUNDLESMALL) ? 0xFFFF : 0xFF80;

		for (i = 0; i < UCODE_SIZE; i++)
			cb->u.ucode[i] = cpu_to_le32(ucode[i]);
		cb->command = cpu_to_le16(cb_ucode | cb_el);
		return;
	}

noloaducode:
	cb->command = cpu_to_le16(cb_nop | cb_el);
}

static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
		DPRINTK(PROBE, ERR, "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		DPRINTK(PROBE, ERR, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}

static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_iaaddr);
	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
}

static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_dump);
	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
		offsetof(struct mem, dump_buf));
}

#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for(addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		if(!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
	if(addr == 32)
		return -EAGAIN;

	/* Select the phy and isolate the rest */
	for(addr = 0; addr < 32; addr++) {
		if(addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
			nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}

static int e100_hw_init(struct nic *nic)
{
	int err;

	e100_hw_reset(nic);

	DPRINTK(HW, ERR, "e100_hw_init\n");
	if(!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if((err = e100_phy_init(nic)))
		return err;
	if((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	if((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}

static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct net_device *netdev = nic->netdev;
	struct dev_mc_list *list = netdev->mc_list;
	u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);

	cb->command = cpu_to_le16(cb_multi);
	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
	for(i = 0; list && i < count; i++, list = list->next)
		memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
			ETH_ALEN);
}

static void e100_set_multicast_list(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
		netdev->mc_count, netdev->flags);

	if(netdev->flags & IFF_PROMISC)
		nic->flags |= promiscuous;
	else
		nic->flags &= ~promiscuous;

	if(netdev->flags & IFF_ALLMULTI ||
		netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
		nic->flags |= multicast_all;
	else
		nic->flags &= ~multicast_all;

	e100_exec_cb(nic, NULL, e100_configure);
	e100_exec_cb(nic, NULL, e100_multi);
}

static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	u32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (u32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if(*complete == le32_to_cpu(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		if(nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if(nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}

	if(e100_exec_cmd(nic, cuc_dump_reset, 0))
		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}

static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
{
	/* Adjust inter-frame-spacing (IFS) between two transmits if
	 * we're getting collisions on a half-duplex connection. */

	if(duplex == DUPLEX_HALF) {
		u32 prev = nic->adaptive_ifs;
		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;

		if((nic->tx_frames / 32 < nic->tx_collisions) &&
		   (nic->tx_frames > min_frames)) {
			if(nic->adaptive_ifs < 60)
				nic->adaptive_ifs += 5;
		} else if (nic->tx_frames < min_frames) {
			if(nic->adaptive_ifs >= 5)
				nic->adaptive_ifs -= 5;
		}
		if(nic->adaptive_ifs != prev)
			e100_exec_cb(nic, NULL, e100_configure);
	}
}

1572static void e100_watchdog(unsigned long data)
1573{
1574 struct nic *nic = (struct nic *)data;
1575 struct ethtool_cmd cmd;
1576
1577 DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);
1578
1579 /* mii library handles link maintenance tasks */
1580
1581 mii_ethtool_gset(&nic->mii, &cmd);
1582
1583 if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
1584 DPRINTK(LINK, INFO, "link up, %sMbps, %s-duplex\n",
1585 cmd.speed == SPEED_100 ? "100" : "10",
1586 cmd.duplex == DUPLEX_FULL ? "full" : "half");
1587 } else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
1588 DPRINTK(LINK, INFO, "link down\n");
1589 }
1590
1591 mii_check_link(&nic->mii);
1592
1593 /* Software generated interrupt to recover from (rare) Rx
05479938
JB
1594 * allocation failure.
1595 * Unfortunately have to use a spinlock to not re-enable interrupts
1596 * accidentally, due to hardware that shares a register between the
1597 * interrupt mask bit and the SW Interrupt generation bit */
1598 spin_lock_irq(&nic->cmd_lock);
1599 iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen, &nic->csr->scb.cmd_hi);
1600 e100_write_flush(nic);
1601 spin_unlock_irq(&nic->cmd_lock);
1602
1603 e100_update_stats(nic);
1604 e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);
1605
1606 if(nic->mac <= mac_82557_D100_C)
1607 /* Issue a multicast command to workaround a 557 lock up */
1608 e100_set_multicast_list(nic->netdev);
1609
1610 if(nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
1611 /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
1612 nic->flags |= ich_10h_workaround;
1613 else
1614 nic->flags &= ~ich_10h_workaround;
1615
1616 mod_timer(&nic->watchdog,
1617 round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
1618}
1619
1620static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
1621 struct sk_buff *skb)
1622{
1623 cb->command = nic->tx_command;
1624 /* interrupt every 16 packets regardless of delay */
1625 if((nic->cbs_avail & ~15) == nic->cbs_avail)
1626 cb->command |= cpu_to_le16(cb_i);
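/* (cbs_avail & ~15) == cbs_avail holds exactly when cbs_avail is a
 * multiple of 16, so the cb_i bit requests a Tx-complete interrupt
 * on roughly every 16th queued frame */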
1627 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
1628 cb->u.tcb.tcb_byte_count = 0;
1629 cb->u.tcb.threshold = nic->tx_threshold;
1630 cb->u.tcb.tbd_count = 1;
1631 cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
1632 skb->data, skb->len, PCI_DMA_TODEVICE));
1633 /* TODO: pci_map_single() above is not checked for mapping failure */
1634 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
1635}
1636
1637static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1638{
1639 struct nic *nic = netdev_priv(netdev);
1640 int err;
1641
1642 if(nic->flags & ich_10h_workaround) {
1643 /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
1644 Issue a NOP command followed by a 1us delay before
1645 issuing the Tx command. */
1646 if(e100_exec_cmd(nic, cuc_nop, 0))
1647 DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
1648 udelay(1);
1649 }
1650
1651 err = e100_exec_cb(nic, skb, e100_xmit_prepare);
1652
1653 switch(err) {
1654 case -ENOSPC:
1655 /* We queued the skb, but now we're out of space. */
1656 DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
1657 netif_stop_queue(netdev);
1658 break;
1659 case -ENOMEM:
1660 /* This is a hard error - log it. */
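/* under the old hard_start_xmit contract a nonzero return asks the
 * stack to keep the skb and retry the transmit later */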
1661 DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
1662 netif_stop_queue(netdev);
1663 return 1;
1664 }
1665
1666 netdev->trans_start = jiffies;
1667 return 0;
1668}
1669
1670static int e100_tx_clean(struct nic *nic)
1671{
1672 struct net_device *dev = nic->netdev;
1673 struct cb *cb;
1674 int tx_cleaned = 0;
1675
1676 spin_lock(&nic->cb_lock);
1677
1678 /* Clean CBs marked complete */
1679 for(cb = nic->cb_to_clean;
1680 cb->status & cpu_to_le16(cb_complete);
1681 cb = nic->cb_to_clean = cb->next) {
1682 DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
1683 (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
1684 cb->status);
1685
1686 if(likely(cb->skb != NULL)) {
1687 dev->stats.tx_packets++;
1688 dev->stats.tx_bytes += cb->skb->len;
1689
1690 pci_unmap_single(nic->pdev,
1691 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1692 le16_to_cpu(cb->u.tcb.tbd.size),
1693 PCI_DMA_TODEVICE);
1694 dev_kfree_skb_any(cb->skb);
1695 cb->skb = NULL;
1696 tx_cleaned = 1;
1697 }
1698 cb->status = 0;
1699 nic->cbs_avail++;
1700 }
1701
1702 spin_unlock(&nic->cb_lock);
1703
1704 /* Recover from running out of Tx resources in xmit_frame */
1705 if(unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
1706 netif_wake_queue(nic->netdev);
1707
1708 return tx_cleaned;
1709}
1710
1711static void e100_clean_cbs(struct nic *nic)
1712{
1713 if(nic->cbs) {
1714 while(nic->cbs_avail != nic->params.cbs.count) {
1715 struct cb *cb = nic->cb_to_clean;
1716 if(cb->skb) {
1717 pci_unmap_single(nic->pdev,
1718 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1719 le16_to_cpu(cb->u.tcb.tbd.size),
1720 PCI_DMA_TODEVICE);
1721 dev_kfree_skb(cb->skb);
1722 }
1723 nic->cb_to_clean = nic->cb_to_clean->next;
1724 nic->cbs_avail++;
1725 }
1726 pci_free_consistent(nic->pdev,
1727 sizeof(struct cb) * nic->params.cbs.count,
1728 nic->cbs, nic->cbs_dma_addr);
1729 nic->cbs = NULL;
1730 nic->cbs_avail = 0;
1731 }
1732 nic->cuc_cmd = cuc_start;
1733 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
1734 nic->cbs;
1735}
1736
1737static int e100_alloc_cbs(struct nic *nic)
1738{
1739 struct cb *cb;
1740 unsigned int i, count = nic->params.cbs.count;
1741
1742 nic->cuc_cmd = cuc_start;
1743 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1744 nic->cbs_avail = 0;
1745
1746 nic->cbs = pci_alloc_consistent(nic->pdev,
1747 sizeof(struct cb) * count, &nic->cbs_dma_addr);
1748 if(!nic->cbs)
1749 return -ENOMEM;
1750
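/* Build the CBL as a ring: next/prev give the software view, and
 * cb->link holds the little-endian bus address of the next CB so the
 * hardware sees the same circular list */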
1751 for(cb = nic->cbs, i = 0; i < count; cb++, i++) {
1752 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
1753 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
1754
1755 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
1756 cb->link = cpu_to_le32(nic->cbs_dma_addr +
1757 ((i+1) % count) * sizeof(struct cb));
1758 cb->skb = NULL;
1759 }
1760
1761 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
1762 nic->cbs_avail = count;
1763
1764 return 0;
1765}
1766
1767static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1768{
1769 if(!nic->rxs) return;
1770 if(RU_SUSPENDED != nic->ru_running) return;
1771
1772 /* handle init time starts */
1773 if(!rx) rx = nic->rxs;
1774
1775 /* (Re)start RU if suspended or idle and RFA is non-NULL */
1776 if(rx->skb) {
1777 e100_exec_cmd(nic, ruc_start, rx->dma_addr);
1778 nic->ru_running = RU_RUNNING;
1779 }
1780}
1781
1782#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
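/* each Rx buffer holds an RFD header immediately followed by room for
 * a maximum-size (VLAN-tagged) frame; the controller writes to both */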
1783static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1784{
1785 if(!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
1786 return -ENOMEM;
1787
1788 /* Align, init, and map the RFD. */
1789 skb_reserve(rx->skb, NET_IP_ALIGN);
1790 skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
1791 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
1792 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1793
1794 if(pci_dma_mapping_error(rx->dma_addr)) {
1795 dev_kfree_skb_any(rx->skb);
1796 rx->skb = NULL;
1797 rx->dma_addr = 0;
1798 return -ENOMEM;
1799 }
1800
1801 /* Link the RFD to the end of the RFA by linking the previous RFD to
1802 * this one.  We are safe to touch the previous RFD because the el bit
1803 * set on the before-last buffer keeps the hardware away from it */
1804 if(rx->prev->skb) {
1805 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
1806 put_unaligned(cpu_to_le32(rx->dma_addr),
1807 (u32 *)&prev_rfd->link);
1808 }
1809
1810 return 0;
1811}
1812
1813static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1814 unsigned int *work_done, unsigned int work_to_do)
1815{
1816 struct net_device *dev = nic->netdev;
1817 struct sk_buff *skb = rx->skb;
1818 struct rfd *rfd = (struct rfd *)skb->data;
1819 u16 rfd_status, actual_size;
1820
1821 if(unlikely(work_done && *work_done >= work_to_do))
1822 return -EAGAIN;
1823
1824 /* Need to sync before taking a peek at cb_complete bit */
1825 pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
1826 sizeof(struct rfd), PCI_DMA_FROMDEVICE);
1827 rfd_status = le16_to_cpu(rfd->status);
1828
1829 DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);
1830
1831 /* If data isn't ready, nothing to indicate */
1832 if (unlikely(!(rfd_status & cb_complete))) {
1833 /* If the next buffer has the el bit, but we think the receiver
1834 * is still running, check to see if it really stopped while
1835 * we had interrupts off.
1836 * This allows for a fast restart without re-enabling
1837 * interrupts */
1838 if ((le16_to_cpu(rfd->command) & cb_el) &&
1839 (RU_RUNNING == nic->ru_running))
1840
1841 if (readb(&nic->csr->scb.status) & rus_no_res)
1842 nic->ru_running = RU_SUSPENDED;
1843 return -ENODATA;
1844 }
1845
1846 /* Get actual data size */
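/* the top two bits of the actual-size field are the EOF and F flags,
 * hence the 0x3FFF mask */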
1847 actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
1848 if(unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
1849 actual_size = RFD_BUF_LEN - sizeof(struct rfd);
1850
1851 /* Get data */
1852 pci_unmap_single(nic->pdev, rx->dma_addr,
1853 RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
1854
1855 /* If this buffer has the el bit, but we think the receiver
1856 * is still running, check to see if it really stopped while
1857 * we had interrupts off.
1858 * This allows for a fast restart without re-enabling interrupts.
1859 * This can happen when the RU sees the size change but also sees
1860 * the el bit set. */
1861 if ((le16_to_cpu(rfd->command) & cb_el) &&
1862 (RU_RUNNING == nic->ru_running)) {
1863
1864 if (readb(&nic->csr->scb.status) & rus_no_res)
1865 nic->ru_running = RU_SUSPENDED;
1866 }
1867
1868 /* Pull off the RFD and put the actual data (minus eth hdr) */
1869 skb_reserve(skb, sizeof(struct rfd));
1870 skb_put(skb, actual_size);
1871 skb->protocol = eth_type_trans(skb, nic->netdev);
1872
1873 if(unlikely(!(rfd_status & cb_ok))) {
1874 /* Don't indicate if hardware indicates errors */
1875 dev_kfree_skb_any(skb);
1876 } else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
1877 /* Don't indicate oversized frames */
1878 nic->rx_over_length_errors++;
1879 dev_kfree_skb_any(skb);
1880 } else {
1881 dev->stats.rx_packets++;
1882 dev->stats.rx_bytes += actual_size;
1883 nic->netdev->last_rx = jiffies;
1884 netif_receive_skb(skb);
1885 if(work_done)
1886 (*work_done)++;
1887 }
1888
1889 rx->skb = NULL;
1890
1891 return 0;
1892}
1893
858119e1 1894static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
1da177e4
LT
1895 unsigned int work_to_do)
1896{
1897 struct rx *rx;
1898 int restart_required = 0, err = 0;
1899 struct rx *old_before_last_rx, *new_before_last_rx;
1900 struct rfd *old_before_last_rfd, *new_before_last_rfd;
1901
1902 /* Indicate newly arrived packets */
1903 for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
1904 err = e100_rx_indicate(nic, rx, work_done, work_to_do);
1905 /* Hit quota or no more to clean */
1906 if (-EAGAIN == err || -ENODATA == err)
1907 break;
1908 }
1909
7734f6e6
DA
1910
1911 /* On EAGAIN we hit the quota, so there is more work to do; restart
1912 * once cleanup is complete.
1913 * Otherwise, if the RU is already out of resources, restart it here:
1914 * this ensures the state machine never starts the receiver with a
1915 * partially cleaned list, avoiding a race between hardware and
1916 * rx_to_clean when in NAPI mode */
1917 if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
1918 restart_required = 1;
1919
1920 old_before_last_rx = nic->rx_to_use->prev->prev;
1921 old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;
1922
1923 /* Alloc new skbs to refill list */
1924 for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
1925 if(unlikely(e100_rx_alloc_skb(nic, rx)))
1926 break; /* Better luck next time (see watchdog) */
1927 }
1928
1929 new_before_last_rx = nic->rx_to_use->prev->prev;
1930 if (new_before_last_rx != old_before_last_rx) {
1931 /* Set the el-bit on the buffer that is before the last buffer.
1932 * This lets us update the next pointer on the last buffer
1933 * without worrying about hardware touching it.
1934 * We set the size to 0 to prevent hardware from touching this
1935 * buffer.
1936 * When the hardware hits the before last buffer with el-bit
1937 * and size of 0, it will raise an RNR interrupt and the RU will
1938 * go into the No Resources state.  It will not complete nor write to
1939 * this buffer. */
1940 new_before_last_rfd =
1941 (struct rfd *)new_before_last_rx->skb->data;
1942 new_before_last_rfd->size = 0;
1943 new_before_last_rfd->command |= cpu_to_le16(cb_el);
1944 pci_dma_sync_single_for_device(nic->pdev,
1945 new_before_last_rx->dma_addr, sizeof(struct rfd),
1946 PCI_DMA_TODEVICE);
1947
1948 /* Now that we have a new stopping point, we can clear the old
1949 * stopping point. We must sync twice to get the proper
1950 * ordering on the hardware side of things. */
1951 old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
1952 pci_dma_sync_single_for_device(nic->pdev,
1953 old_before_last_rx->dma_addr, sizeof(struct rfd),
1954 PCI_DMA_TODEVICE);
1955 old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
1956 pci_dma_sync_single_for_device(nic->pdev,
1957 old_before_last_rx->dma_addr, sizeof(struct rfd),
1958 PCI_DMA_TODEVICE);
1959 }
1960
1961 if(restart_required) {
1962 /* ack the RNR interrupt that stopped the receive unit */
1963 writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
1964 e100_start_receiver(nic, nic->rx_to_clean);
1965 if(work_done)
1966 (*work_done)++;
1967 }
1968}
1969
1970static void e100_rx_clean_list(struct nic *nic)
1971{
1972 struct rx *rx;
1973 unsigned int i, count = nic->params.rfds.count;
1974
1975 nic->ru_running = RU_UNINITIALIZED;
1976
1977 if(nic->rxs) {
1978 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
1979 if(rx->skb) {
1980 pci_unmap_single(nic->pdev, rx->dma_addr,
1981 RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
1982 dev_kfree_skb(rx->skb);
1983 }
1984 }
1985 kfree(nic->rxs);
1986 nic->rxs = NULL;
1987 }
1988
1989 nic->rx_to_use = nic->rx_to_clean = NULL;
1990}
1991
1992static int e100_rx_alloc_list(struct nic *nic)
1993{
1994 struct rx *rx;
1995 unsigned int i, count = nic->params.rfds.count;
1996 struct rfd *before_last;
1997
1998 nic->rx_to_use = nic->rx_to_clean = NULL;
1999 nic->ru_running = RU_UNINITIALIZED;
2000
2001 if(!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
2002 return -ENOMEM;
2003
2004 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
2005 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
2006 rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
2007 if(e100_rx_alloc_skb(nic, rx)) {
2008 e100_rx_clean_list(nic);
2009 return -ENOMEM;
2010 }
2011 }
2012 /* Set the el-bit on the buffer that is before the last buffer.
2013 * This lets us update the next pointer on the last buffer without
2014 * worrying about hardware touching it.
2015 * We set the size to 0 to prevent hardware from touching this buffer.
2016 * When the hardware hits the before last buffer with el-bit and size
2017 * of 0, it raises an RNR interrupt and the RU goes into the No
2018 * Resources state.  It will not complete nor write to this buffer. */
2019 rx = nic->rxs->prev->prev;
2020 before_last = (struct rfd *)rx->skb->data;
2021 before_last->command |= cpu_to_le16(cb_el);
2022 before_last->size = 0;
2023 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
2024 sizeof(struct rfd), PCI_DMA_TODEVICE);
2025
2026 nic->rx_to_use = nic->rx_to_clean = nic->rxs;
2027 nic->ru_running = RU_SUSPENDED;
2028
2029 return 0;
2030}
2031
2032static irqreturn_t e100_intr(int irq, void *dev_id)
2033{
2034 struct net_device *netdev = dev_id;
2035 struct nic *nic = netdev_priv(netdev);
2036 u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
2037
2038 DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);
2039
2040 if(stat_ack == stat_ack_not_ours || /* Not our interrupt */
2041 stat_ack == stat_ack_not_present) /* Hardware is ejected */
2042 return IRQ_NONE;
2043
2044 /* Ack interrupt(s) */
2045 iowrite8(stat_ack, &nic->csr->scb.stat_ack);
2046
2047 /* We hit Receive No Resource (RNR); restart RU after cleaning */
2048 if(stat_ack & stat_ack_rnr)
2049 nic->ru_running = RU_SUSPENDED;
2050
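/* mask the device interrupt while the NAPI poll runs; e100_poll
 * re-enables it once the budget is no longer being exhausted */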
2051 if(likely(netif_rx_schedule_prep(netdev, &nic->napi))) {
2052 e100_disable_irq(nic);
2053 __netif_rx_schedule(netdev, &nic->napi);
2054 }
2055
2056 return IRQ_HANDLED;
2057}
2058
2059static int e100_poll(struct napi_struct *napi, int budget)
2060{
2061 struct nic *nic = container_of(napi, struct nic, napi);
2062 struct net_device *netdev = nic->netdev;
2063 unsigned int work_done = 0;
2064
2065 e100_rx_clean(nic, &work_done, budget);
2066 e100_tx_clean(nic);
2067
2068 /* If budget not fully consumed, exit the polling mode */
2069 if (work_done < budget) {
2070 netif_rx_complete(netdev, napi);
2071 e100_enable_irq(nic);
2072 }
2073
2074 return work_done;
2075}
2076
2077#ifdef CONFIG_NET_POLL_CONTROLLER
2078static void e100_netpoll(struct net_device *netdev)
2079{
2080 struct nic *nic = netdev_priv(netdev);
2081
2082 e100_disable_irq(nic);
2083 e100_intr(nic->pdev->irq, netdev);
2084 e100_tx_clean(nic);
2085 e100_enable_irq(nic);
2086}
2087#endif
2088
2089static int e100_set_mac_address(struct net_device *netdev, void *p)
2090{
2091 struct nic *nic = netdev_priv(netdev);
2092 struct sockaddr *addr = p;
2093
2094 if (!is_valid_ether_addr(addr->sa_data))
2095 return -EADDRNOTAVAIL;
2096
2097 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2098 e100_exec_cb(nic, NULL, e100_setup_iaaddr);
2099
2100 return 0;
2101}
2102
2103static int e100_change_mtu(struct net_device *netdev, int new_mtu)
2104{
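/* Rx buffers are fixed at standard (VLAN) frame size, so the MTU is
 * confined to [ETH_ZLEN, ETH_DATA_LEN] */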
2105 if(new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
2106 return -EINVAL;
2107 netdev->mtu = new_mtu;
2108 return 0;
2109}
2110
2111static int e100_asf(struct nic *nic)
2112{
2113 /* ASF can be enabled from eeprom */
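/* device IDs 0x1050-0x1057 are the ICH-based 82562 parts; an SMBus
 * address byte of 0xFE is treated as "unconfigured" (an assumption
 * read off this check rather than taken from the datasheet) */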
2114 return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
2115 (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
2116 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
2117 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
2118}
2119
2120static int e100_up(struct nic *nic)
2121{
2122 int err;
2123
2124 if((err = e100_rx_alloc_list(nic)))
2125 return err;
2126 if((err = e100_alloc_cbs(nic)))
2127 goto err_rx_clean_list;
2128 if((err = e100_hw_init(nic)))
2129 goto err_clean_cbs;
2130 e100_set_multicast_list(nic->netdev);
2131 e100_start_receiver(nic, NULL);
2132 mod_timer(&nic->watchdog, jiffies);
2133 if((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
2134 nic->netdev->name, nic->netdev)))
2135 goto err_no_irq;
2136 netif_wake_queue(nic->netdev);
2137 napi_enable(&nic->napi);
2138 /* enable ints _after_ enabling poll, preventing a race with the
2139 * ISR's disable-ints-then-schedule path */
2140 e100_enable_irq(nic);
2141 return 0;
2142
2143err_no_irq:
2144 del_timer_sync(&nic->watchdog);
2145err_clean_cbs:
2146 e100_clean_cbs(nic);
2147err_rx_clean_list:
2148 e100_rx_clean_list(nic);
2149 return err;
2150}
2151
2152static void e100_down(struct nic *nic)
2153{
2154 /* wait here for poll to complete */
2155 napi_disable(&nic->napi);
2156 netif_stop_queue(nic->netdev);
2157 e100_hw_reset(nic);
2158 free_irq(nic->pdev->irq, nic->netdev);
2159 del_timer_sync(&nic->watchdog);
2160 netif_carrier_off(nic->netdev);
2161 e100_clean_cbs(nic);
2162 e100_rx_clean_list(nic);
2163}
2164
2165static void e100_tx_timeout(struct net_device *netdev)
2166{
2167 struct nic *nic = netdev_priv(netdev);
2168
2169 /* Reset outside of interrupt context, to avoid request_irq
2170 * in interrupt context */
2171 schedule_work(&nic->tx_timeout_task);
2172}
2173
2174static void e100_tx_timeout_task(struct work_struct *work)
2175{
2176 struct nic *nic = container_of(work, struct nic, tx_timeout_task);
2177 struct net_device *netdev = nic->netdev;
2178
2179 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
2180 ioread8(&nic->csr->scb.status));
2181 e100_down(netdev_priv(netdev));
2182 e100_up(netdev_priv(netdev));
2183}
2184
2185static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
2186{
2187 int err;
2188 struct sk_buff *skb;
2189
2190 /* Use driver resources to perform internal MAC or PHY
2191 * loopback test. A single packet is prepared and transmitted
2192 * in loopback mode, and the test passes if the received
2193 * packet compares byte-for-byte to the transmitted packet. */
2194
2195 if((err = e100_rx_alloc_list(nic)))
2196 return err;
2197 if((err = e100_alloc_cbs(nic)))
2198 goto err_clean_rx;
2199
2200 /* ICH PHY loopback is broken so do MAC loopback instead */
2201 if(nic->flags & ich && loopback_mode == lb_phy)
2202 loopback_mode = lb_mac;
2203
2204 nic->loopback = loopback_mode;
2205 if((err = e100_hw_init(nic)))
2206 goto err_loopback_none;
2207
2208 if(loopback_mode == lb_phy)
2209 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
2210 BMCR_LOOPBACK);
2211
2212 e100_start_receiver(nic, NULL);
2213
2214 if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
2215 err = -ENOMEM;
2216 goto err_loopback_none;
2217 }
2218 skb_put(skb, ETH_DATA_LEN);
2219 memset(skb->data, 0xFF, ETH_DATA_LEN);
2220 e100_xmit_frame(skb, nic->netdev);
2221
2222 msleep(10);
2223
2224 pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
2225 RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
2226
2227 if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
2228 skb->data, ETH_DATA_LEN))
2229 err = -EAGAIN;
2230
2231err_loopback_none:
2232 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
2233 nic->loopback = lb_none;
2234 e100_clean_cbs(nic);
2235 e100_hw_reset(nic);
2236err_clean_rx:
2237 e100_rx_clean_list(nic);
2238 return err;
2239}
2240
2241#define MII_LED_CONTROL 0x1B
2242static void e100_blink_led(unsigned long data)
2243{
2244 struct nic *nic = (struct nic *)data;
2245 enum led_state {
2246 led_on = 0x01,
2247 led_off = 0x04,
2248 led_on_559 = 0x05,
2249 led_on_557 = 0x07,
2250 };
2251
2252 nic->leds = (nic->leds & led_on) ? led_off :
2253 (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
2254 mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
2255 mod_timer(&nic->blink_timer, jiffies + HZ / 4);
2256}
2257
2258static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2259{
2260 struct nic *nic = netdev_priv(netdev);
2261 return mii_ethtool_gset(&nic->mii, cmd);
2262}
2263
2264static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2265{
2266 struct nic *nic = netdev_priv(netdev);
2267 int err;
2268
2269 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
2270 err = mii_ethtool_sset(&nic->mii, cmd);
2271 e100_exec_cb(nic, NULL, e100_configure);
2272
2273 return err;
2274}
2275
2276static void e100_get_drvinfo(struct net_device *netdev,
2277 struct ethtool_drvinfo *info)
2278{
2279 struct nic *nic = netdev_priv(netdev);
2280 strcpy(info->driver, DRV_NAME);
2281 strcpy(info->version, DRV_VERSION);
2282 strcpy(info->fw_version, "N/A");
2283 strcpy(info->bus_info, pci_name(nic->pdev));
2284}
2285
2286#define E100_PHY_REGS 0x1C
2287static int e100_get_regs_len(struct net_device *netdev)
2288{
2289 struct nic *nic = netdev_priv(netdev);
2290 return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
2291}
2292
2293static void e100_get_regs(struct net_device *netdev,
2294 struct ethtool_regs *regs, void *p)
2295{
2296 struct nic *nic = netdev_priv(netdev);
2297 u32 *buff = p;
2298 int i;
2299
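/* dump layout: buff[0] packs SCB cmd_hi/cmd_lo/status, then PHY
 * registers 0x1C down to 0, then the controller's dump buffer */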
2300 regs->version = (1 << 24) | nic->pdev->revision;
2301 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
2302 ioread8(&nic->csr->scb.cmd_lo) << 16 |
2303 ioread16(&nic->csr->scb.status);
2304 for(i = E100_PHY_REGS; i >= 0; i--)
2305 buff[1 + E100_PHY_REGS - i] =
2306 mdio_read(netdev, nic->mii.phy_id, i);
2307 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
2308 e100_exec_cb(nic, NULL, e100_dump);
2309 msleep(10);
2310 memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
2311 sizeof(nic->mem->dump_buf));
2312}
2313
2314static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2315{
2316 struct nic *nic = netdev_priv(netdev);
2317 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
2318 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
2319}
2320
2321static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2322{
2323 struct nic *nic = netdev_priv(netdev);
2324
2325 if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
2326 return -EOPNOTSUPP;
2327
2328 if(wol->wolopts)
2329 nic->flags |= wol_magic;
2330 else
2331 nic->flags &= ~wol_magic;
2332
2333 e100_exec_cb(nic, NULL, e100_configure);
2334
2335 return 0;
2336}
2337
2338static u32 e100_get_msglevel(struct net_device *netdev)
2339{
2340 struct nic *nic = netdev_priv(netdev);
2341 return nic->msg_enable;
2342}
2343
2344static void e100_set_msglevel(struct net_device *netdev, u32 value)
2345{
2346 struct nic *nic = netdev_priv(netdev);
2347 nic->msg_enable = value;
2348}
2349
2350static int e100_nway_reset(struct net_device *netdev)
2351{
2352 struct nic *nic = netdev_priv(netdev);
2353 return mii_nway_restart(&nic->mii);
2354}
2355
2356static u32 e100_get_link(struct net_device *netdev)
2357{
2358 struct nic *nic = netdev_priv(netdev);
2359 return mii_link_ok(&nic->mii);
2360}
2361
2362static int e100_get_eeprom_len(struct net_device *netdev)
2363{
2364 struct nic *nic = netdev_priv(netdev);
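/* eeprom_wc counts 16-bit words; ethtool expects a byte count */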
2365 return nic->eeprom_wc << 1;
2366}
2367
2368#define E100_EEPROM_MAGIC 0x1234
2369static int e100_get_eeprom(struct net_device *netdev,
2370 struct ethtool_eeprom *eeprom, u8 *bytes)
2371{
2372 struct nic *nic = netdev_priv(netdev);
2373
2374 eeprom->magic = E100_EEPROM_MAGIC;
2375 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
2376
2377 return 0;
2378}
2379
2380static int e100_set_eeprom(struct net_device *netdev,
2381 struct ethtool_eeprom *eeprom, u8 *bytes)
2382{
2383 struct nic *nic = netdev_priv(netdev);
2384
2385 if(eeprom->magic != E100_EEPROM_MAGIC)
2386 return -EINVAL;
2387
2388 memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
2389
2390 return e100_eeprom_save(nic, eeprom->offset >> 1,
2391 (eeprom->len >> 1) + 1);
2392}
2393
2394static void e100_get_ringparam(struct net_device *netdev,
2395 struct ethtool_ringparam *ring)
2396{
2397 struct nic *nic = netdev_priv(netdev);
2398 struct param_range *rfds = &nic->params.rfds;
2399 struct param_range *cbs = &nic->params.cbs;
2400
2401 ring->rx_max_pending = rfds->max;
2402 ring->tx_max_pending = cbs->max;
2403 ring->rx_mini_max_pending = 0;
2404 ring->rx_jumbo_max_pending = 0;
2405 ring->rx_pending = rfds->count;
2406 ring->tx_pending = cbs->count;
2407 ring->rx_mini_pending = 0;
2408 ring->rx_jumbo_pending = 0;
2409}
2410
2411static int e100_set_ringparam(struct net_device *netdev,
2412 struct ethtool_ringparam *ring)
2413{
2414 struct nic *nic = netdev_priv(netdev);
2415 struct param_range *rfds = &nic->params.rfds;
2416 struct param_range *cbs = &nic->params.cbs;
2417
2418 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2419 return -EINVAL;
2420
2421 if(netif_running(netdev))
2422 e100_down(nic);
2423 rfds->count = max(ring->rx_pending, rfds->min);
2424 rfds->count = min(rfds->count, rfds->max);
2425 cbs->count = max(ring->tx_pending, cbs->min);
2426 cbs->count = min(cbs->count, cbs->max);
2427 DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
2428 rfds->count, cbs->count);
2429 if(netif_running(netdev))
2430 e100_up(nic);
2431
2432 return 0;
2433}
2434
2435static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
2436 "Link test (on/offline)",
2437 "Eeprom test (on/offline)",
2438 "Self test (offline)",
2439 "Mac loopback (offline)",
2440 "Phy loopback (offline)",
2441};
2442#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test)
2443
2444static void e100_diag_test(struct net_device *netdev,
2445 struct ethtool_test *test, u64 *data)
2446{
2447 struct ethtool_cmd cmd;
2448 struct nic *nic = netdev_priv(netdev);
2449 int i, err;
2450
2451 memset(data, 0, E100_TEST_LEN * sizeof(u64));
2452 data[0] = !mii_link_ok(&nic->mii);
2453 data[1] = e100_eeprom_load(nic);
2454 if(test->flags & ETH_TEST_FL_OFFLINE) {
2455
2456 /* save speed, duplex & autoneg settings */
2457 err = mii_ethtool_gset(&nic->mii, &cmd);
2458
2459 if(netif_running(netdev))
2460 e100_down(nic);
2461 data[2] = e100_self_test(nic);
2462 data[3] = e100_loopback_test(nic, lb_mac);
2463 data[4] = e100_loopback_test(nic, lb_phy);
2464
2465 /* restore speed, duplex & autoneg settings */
2466 err = mii_ethtool_sset(&nic->mii, &cmd);
2467
2468 if(netif_running(netdev))
2469 e100_up(nic);
2470 }
2471 for(i = 0; i < E100_TEST_LEN; i++)
2472 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
2473
2474 msleep_interruptible(4 * 1000);
2475}
2476
2477static int e100_phys_id(struct net_device *netdev, u32 data)
2478{
2479 struct nic *nic = netdev_priv(netdev);
2480
2481 if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
2482 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
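/* blink_timer re-arms itself every HZ/4 (see e100_blink_led); let it
 * blink for the requested number of seconds, then stop it */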
2483 mod_timer(&nic->blink_timer, jiffies);
2484 msleep_interruptible(data * 1000);
2485 del_timer_sync(&nic->blink_timer);
2486 mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);
2487
2488 return 0;
2489}
2490
2491static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
2492 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
2493 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
2494 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
2495 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
2496 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
2497 "tx_heartbeat_errors", "tx_window_errors",
2498 /* device-specific stats */
2499 "tx_deferred", "tx_single_collisions", "tx_multi_collisions",
2500 "tx_flow_control_pause", "rx_flow_control_pause",
2501 "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
2502};
2503#define E100_NET_STATS_LEN 21
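/* the first E100_NET_STATS_LEN strings map 1:1 onto the unsigned long
 * counters of struct net_device_stats; the rest are driver-private */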
2504#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats)
2505
2506static int e100_get_sset_count(struct net_device *netdev, int sset)
2507{
2508 switch (sset) {
2509 case ETH_SS_TEST:
2510 return E100_TEST_LEN;
2511 case ETH_SS_STATS:
2512 return E100_STATS_LEN;
2513 default:
2514 return -EOPNOTSUPP;
2515 }
2516}
2517
2518static void e100_get_ethtool_stats(struct net_device *netdev,
2519 struct ethtool_stats *stats, u64 *data)
2520{
2521 struct nic *nic = netdev_priv(netdev);
2522 int i;
2523
2524 for(i = 0; i < E100_NET_STATS_LEN; i++)
2525 data[i] = ((unsigned long *)&netdev->stats)[i];
2526
2527 data[i++] = nic->tx_deferred;
2528 data[i++] = nic->tx_single_collisions;
2529 data[i++] = nic->tx_multiple_collisions;
2530 data[i++] = nic->tx_fc_pause;
2531 data[i++] = nic->rx_fc_pause;
2532 data[i++] = nic->rx_fc_unsupported;
2533 data[i++] = nic->tx_tco_frames;
2534 data[i++] = nic->rx_tco_frames;
2535}
2536
2537static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2538{
2539 switch(stringset) {
2540 case ETH_SS_TEST:
2541 memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
2542 break;
2543 case ETH_SS_STATS:
2544 memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
2545 break;
2546 }
2547}
2548
2549static const struct ethtool_ops e100_ethtool_ops = {
2550 .get_settings = e100_get_settings,
2551 .set_settings = e100_set_settings,
2552 .get_drvinfo = e100_get_drvinfo,
2553 .get_regs_len = e100_get_regs_len,
2554 .get_regs = e100_get_regs,
2555 .get_wol = e100_get_wol,
2556 .set_wol = e100_set_wol,
2557 .get_msglevel = e100_get_msglevel,
2558 .set_msglevel = e100_set_msglevel,
2559 .nway_reset = e100_nway_reset,
2560 .get_link = e100_get_link,
2561 .get_eeprom_len = e100_get_eeprom_len,
2562 .get_eeprom = e100_get_eeprom,
2563 .set_eeprom = e100_set_eeprom,
2564 .get_ringparam = e100_get_ringparam,
2565 .set_ringparam = e100_set_ringparam,
2566 .self_test = e100_diag_test,
2567 .get_strings = e100_get_strings,
2568 .phys_id = e100_phys_id,
2569 .get_ethtool_stats = e100_get_ethtool_stats,
2570 .get_sset_count = e100_get_sset_count,
2571};
2572
2573static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2574{
2575 struct nic *nic = netdev_priv(netdev);
2576
2577 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2578}
2579
2580static int e100_alloc(struct nic *nic)
2581{
2582 nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
2583 &nic->dma_addr);
2584 return nic->mem ? 0 : -ENOMEM;
2585}
2586
2587static void e100_free(struct nic *nic)
2588{
2589 if(nic->mem) {
2590 pci_free_consistent(nic->pdev, sizeof(struct mem),
2591 nic->mem, nic->dma_addr);
2592 nic->mem = NULL;
2593 }
2594}
2595
2596static int e100_open(struct net_device *netdev)
2597{
2598 struct nic *nic = netdev_priv(netdev);
2599 int err = 0;
2600
2601 netif_carrier_off(netdev);
2602 if((err = e100_up(nic)))
2603 DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
2604 return err;
2605}
2606
2607static int e100_close(struct net_device *netdev)
2608{
2609 e100_down(netdev_priv(netdev));
2610 return 0;
2611}
2612
2613static int __devinit e100_probe(struct pci_dev *pdev,
2614 const struct pci_device_id *ent)
2615{
2616 struct net_device *netdev;
2617 struct nic *nic;
2618 int err;
2619 DECLARE_MAC_BUF(mac);
2620
2621 if(!(netdev = alloc_etherdev(sizeof(struct nic)))) {
2622 if(((1 << debug) - 1) & NETIF_MSG_PROBE)
2623 printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
2624 return -ENOMEM;
2625 }
2626
2627 netdev->open = e100_open;
2628 netdev->stop = e100_close;
2629 netdev->hard_start_xmit = e100_xmit_frame;
2630 netdev->set_multicast_list = e100_set_multicast_list;
2631 netdev->set_mac_address = e100_set_mac_address;
2632 netdev->change_mtu = e100_change_mtu;
2633 netdev->do_ioctl = e100_do_ioctl;
2634 SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
2635 netdev->tx_timeout = e100_tx_timeout;
2636 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
2637#ifdef CONFIG_NET_POLL_CONTROLLER
2638 netdev->poll_controller = e100_netpoll;
2639#endif
2640 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2641
2642 nic = netdev_priv(netdev);
2643 netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
2644 nic->netdev = netdev;
2645 nic->pdev = pdev;
2646 nic->msg_enable = (1 << debug) - 1;
2647 pci_set_drvdata(pdev, netdev);
2648
2649 if((err = pci_enable_device(pdev))) {
2650 DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
2651 goto err_out_free_dev;
2652 }
2653
2654 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2655 DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
2656 "base address, aborting.\n");
2657 err = -ENODEV;
2658 goto err_out_disable_pdev;
2659 }
2660
2661 if((err = pci_request_regions(pdev, DRV_NAME))) {
2662 DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
2663 goto err_out_disable_pdev;
2664 }
2665
2666 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
2667 DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
2668 goto err_out_free_res;
2669 }
2670
2671 SET_NETDEV_DEV(netdev, &pdev->dev);
2672
2673 if (use_io)
2674 DPRINTK(PROBE, INFO, "using i/o access mode\n");
2675
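/* BAR 0 is the memory-mapped CSR and BAR 1 its I/O-mapped alias; the
 * use_io module parameter selects the latter */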
2676 nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
2677 if(!nic->csr) {
2678 DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
2679 err = -ENOMEM;
2680 goto err_out_free_res;
2681 }
2682
2683 if(ent->driver_data)
2684 nic->flags |= ich;
2685 else
2686 nic->flags &= ~ich;
2687
2688 e100_get_defaults(nic);
2689
2690 /* locks must be initialized before calling hw_reset */
2691 spin_lock_init(&nic->cb_lock);
2692 spin_lock_init(&nic->cmd_lock);
2693 spin_lock_init(&nic->mdio_lock);
2694
2695 /* Reset the device before pci_set_master() in case device is in some
2696 * funky state and has an interrupt pending - hint: we don't have the
2697 * interrupt handler registered yet. */
2698 e100_hw_reset(nic);
2699
2700 pci_set_master(pdev);
2701
2702 init_timer(&nic->watchdog);
2703 nic->watchdog.function = e100_watchdog;
2704 nic->watchdog.data = (unsigned long)nic;
2705 init_timer(&nic->blink_timer);
2706 nic->blink_timer.function = e100_blink_led;
2707 nic->blink_timer.data = (unsigned long)nic;
2708
2709 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
2710
2711 if((err = e100_alloc(nic))) {
2712 DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
2713 goto err_out_iounmap;
2714 }
2715
2716 if((err = e100_eeprom_load(nic)))
2717 goto err_out_free;
2718
2719 e100_phy_init(nic);
2720
2721 memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
2722 memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
2723 if (!is_valid_ether_addr(netdev->perm_addr)) {
2724 if (!eeprom_bad_csum_allow) {
2725 DPRINTK(PROBE, ERR, "Invalid MAC address from "
2726 "EEPROM, aborting.\n");
2727 err = -EAGAIN;
2728 goto err_out_free;
2729 } else {
2730 DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
2731 "you MUST configure one.\n");
2732 }
2733 }
2734
2735 /* Wol magic packet can be enabled from eeprom */
2736 if((nic->mac >= mac_82558_D101_A4) &&
2737 (nic->eeprom[eeprom_id] & eeprom_id_wol))
2738 nic->flags |= wol_magic;
2739
2740 /* ack any pending wake events, disable PME */
2741 err = pci_enable_wake(pdev, 0, 0);
2742 if (err)
2743 DPRINTK(PROBE, ERR, "Error clearing wake event\n");
2744
2745 strcpy(netdev->name, "eth%d");
2746 if((err = register_netdev(netdev))) {
2747 DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
2748 goto err_out_free;
2749 }
2750
2751 DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %s\n",
2752 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
2753 pdev->irq, print_mac(mac, netdev->dev_addr));
2754
2755 return 0;
2756
2757err_out_free:
2758 e100_free(nic);
2759err_out_iounmap:
2760 pci_iounmap(pdev, nic->csr);
2761err_out_free_res:
2762 pci_release_regions(pdev);
2763err_out_disable_pdev:
2764 pci_disable_device(pdev);
2765err_out_free_dev:
2766 pci_set_drvdata(pdev, NULL);
2767 free_netdev(netdev);
2768 return err;
2769}
2770
2771static void __devexit e100_remove(struct pci_dev *pdev)
2772{
2773 struct net_device *netdev = pci_get_drvdata(pdev);
2774
2775 if(netdev) {
2776 struct nic *nic = netdev_priv(netdev);
2777 unregister_netdev(netdev);
2778 e100_free(nic);
2779 iounmap(nic->csr);
2780 free_netdev(netdev);
2781 pci_release_regions(pdev);
2782 pci_disable_device(pdev);
2783 pci_set_drvdata(pdev, NULL);
2784 }
2785}
2786
2787#ifdef CONFIG_PM
2788static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
2789{
2790 struct net_device *netdev = pci_get_drvdata(pdev);
2791 struct nic *nic = netdev_priv(netdev);
2792
2793 if (netif_running(netdev))
2794 napi_disable(&nic->napi);
2795 del_timer_sync(&nic->watchdog);
2796 netif_carrier_off(nic->netdev);
2797 netif_device_detach(netdev);
2798
2799 pci_save_state(pdev);
2800
2801 if ((nic->flags & wol_magic) | e100_asf(nic)) {
2802 pci_enable_wake(pdev, PCI_D3hot, 1);
2803 pci_enable_wake(pdev, PCI_D3cold, 1);
2804 } else {
2805 pci_enable_wake(pdev, PCI_D3hot, 0);
2806 pci_enable_wake(pdev, PCI_D3cold, 0);
2807 }
2808
2809 free_irq(pdev->irq, netdev);
2810
2811 pci_disable_device(pdev);
2812 pci_set_power_state(pdev, PCI_D3hot);
2813
2814 return 0;
2815}
2816
2817static int e100_resume(struct pci_dev *pdev)
2818{
2819 struct net_device *netdev = pci_get_drvdata(pdev);
2820 struct nic *nic = netdev_priv(netdev);
2821
2822 pci_set_power_state(pdev, PCI_D0);
2823 pci_restore_state(pdev);
2824 /* ack any pending wake events, disable PME */
2825 pci_enable_wake(pdev, 0, 0);
2826
2827 netif_device_attach(netdev);
975b366a 2828 if (netif_running(netdev))
1da177e4
LT
2829 e100_up(nic);
2830
2831 return 0;
2832}
2833#endif /* CONFIG_PM */
2834
2835static void e100_shutdown(struct pci_dev *pdev)
2836{
2837 struct net_device *netdev = pci_get_drvdata(pdev);
2838 struct nic *nic = netdev_priv(netdev);
2839
2840 if (netif_running(netdev))
2841 napi_disable(&nic->napi);
2842 del_timer_sync(&nic->watchdog);
2843 netif_carrier_off(nic->netdev);
2844
2845 if ((nic->flags & wol_magic) | e100_asf(nic)) {
2846 pci_enable_wake(pdev, PCI_D3hot, 1);
2847 pci_enable_wake(pdev, PCI_D3cold, 1);
2848 } else {
2849 pci_enable_wake(pdev, PCI_D3hot, 0);
2850 pci_enable_wake(pdev, PCI_D3cold, 0);
2851 }
2852
2853 free_irq(pdev->irq, netdev);
2854
2855 pci_disable_device(pdev);
2856 pci_set_power_state(pdev, PCI_D3hot);
2857}
2858
2859/* ------------------ PCI Error Recovery infrastructure -------------- */
2860/**
2861 * e100_io_error_detected - called when PCI error is detected.
2862 * @pdev: Pointer to PCI device
2864 * @state: The current PCI connection state
2864 */
2865static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
2866{
2867 struct net_device *netdev = pci_get_drvdata(pdev);
2868 struct nic *nic = netdev_priv(netdev);
2869
2870 /* Similar to calling e100_down(), but avoids adapter I/O. */
2871 netdev->stop(netdev);
2872
2873 /* Detach; put netif into state similar to hotplug unplug. */
2874 /* NAPI stays disabled (by ->stop above) until e100_up re-enables it */
2875 netif_device_detach(netdev);
2876 pci_disable_device(pdev);
2877
2878 /* Request a slot reset. */
2879 return PCI_ERS_RESULT_NEED_RESET;
2880}
2881
2882/**
2883 * e100_io_slot_reset - called after the pci bus has been reset.
2884 * @pdev: Pointer to PCI device
2885 *
2886 * Restart the card from scratch.
2887 */
2888static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
2889{
2890 struct net_device *netdev = pci_get_drvdata(pdev);
2891 struct nic *nic = netdev_priv(netdev);
2892
2893 if (pci_enable_device(pdev)) {
2894 printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
2895 return PCI_ERS_RESULT_DISCONNECT;
2896 }
2897 pci_set_master(pdev);
2898
2899 /* Only one device per card can do a reset */
2900 if (0 != PCI_FUNC(pdev->devfn))
2901 return PCI_ERS_RESULT_RECOVERED;
2902 e100_hw_reset(nic);
2903 e100_phy_init(nic);
2904
2905 return PCI_ERS_RESULT_RECOVERED;
2906}
2907
2908/**
2909 * e100_io_resume - resume normal operations
2910 * @pdev: Pointer to PCI device
2911 *
2912 * Resume normal operations after an error recovery
2913 * sequence has been completed.
2914 */
2915static void e100_io_resume(struct pci_dev *pdev)
2916{
2917 struct net_device *netdev = pci_get_drvdata(pdev);
2918 struct nic *nic = netdev_priv(netdev);
2919
2920 /* ack any pending wake events, disable PME */
2921 pci_enable_wake(pdev, 0, 0);
2922
2923 netif_device_attach(netdev);
2924 if (netif_running(netdev)) {
2925 e100_open(netdev);
2926 mod_timer(&nic->watchdog, jiffies);
2927 }
2928}
2929
2930static struct pci_error_handlers e100_err_handler = {
2931 .error_detected = e100_io_error_detected,
2932 .slot_reset = e100_io_slot_reset,
2933 .resume = e100_io_resume,
2934};
2935
2936static struct pci_driver e100_driver = {
2937 .name = DRV_NAME,
2938 .id_table = e100_id_table,
2939 .probe = e100_probe,
2940 .remove = __devexit_p(e100_remove),
2941#ifdef CONFIG_PM
2942 /* Power Management hooks */
2943 .suspend = e100_suspend,
2944 .resume = e100_resume,
2945#endif
2946 .shutdown = e100_shutdown,
2947 .err_handler = &e100_err_handler,
2948};
2949
2950static int __init e100_init_module(void)
2951{
2952 if(((1 << debug) - 1) & NETIF_MSG_DRV) {
2953 printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
2954 printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
2955 }
2956 return pci_register_driver(&e100_driver);
2957}
2958
2959static void __exit e100_cleanup_module(void)
2960{
2961 pci_unregister_driver(&e100_driver);
2962}
2963
2964module_init(e100_init_module);
2965module_exit(e100_cleanup_module);