1/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2/*
3 Written 1999-2000 by Donald Becker.
4
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
11
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
15 Annapolis MD 21403
16
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
03a8c661 19 [link no longer provides useful info -jgarzik]
20 Archives of the mailing list are still available at
21 http://www.beowulf.org/pipermail/netdrivers/
1da177e4 22
23*/
24
25#define DRV_NAME "sundance"
26#define DRV_VERSION "1.2"
27#define DRV_RELDATE "11-Sep-2006"
28
29
30/* The user-configurable values.
31 These may be modified when a driver module is loaded.*/
32static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
33/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
34 Typical is a 64 element hash table based on the Ethernet CRC. */
f71e1309 35static const int multicast_filter_limit = 32;
36
37/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
38 Setting to > 1518 effectively disables this feature.
39 This chip can receive into offset buffers, so the Alpha does not
40 need a copy-align. */
41static int rx_copybreak;
42static int flowctrl=1;
43
44/* media[] specifies the media type the NIC operates at.
45 autosense Autosensing active media.
46 10mbps_hd 10Mbps half duplex.
47 10mbps_fd 10Mbps full duplex.
48 100mbps_hd 100Mbps half duplex.
49 100mbps_fd 100Mbps full duplex.
50 0 Autosensing active media.
51 1 10Mbps half duplex.
52 2 10Mbps full duplex.
53 3 100Mbps half duplex.
54 4 100Mbps full duplex.
55*/
56#define MAX_UNITS 8
57static char *media[MAX_UNITS];
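/* Example: one media value may be given per card, in probe order, e.g.
 *   modprobe sundance media=100mbps_fd,autosense flowctrl=1 debug=3
 * which forces the first card to 100 Mbps full duplex and leaves the
 * second card autosensing. */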
58
59
60/* Operational parameters that are set at compile time. */
61
62/* Keep the ring sizes a power of two for compile efficiency.
63 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
64 Making the Tx ring too large decreases the effectiveness of channel
65 bonding and packet priority, and more than 128 requires modifying the
66 Tx error recovery.
67 Large receive rings merely waste memory. */
68#define TX_RING_SIZE 32
69#define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
70#define RX_RING_SIZE 64
71#define RX_BUDGET 32
72#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
73#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
74
75/* Operational parameters that usually are not changed. */
76/* Time in jiffies before concluding the transmitter is hung. */
77#define TX_TIMEOUT (4*HZ)
78#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
79
80/* Include files, designed to support most kernel versions 2.0.0 and later. */
81#include <linux/module.h>
82#include <linux/kernel.h>
83#include <linux/string.h>
84#include <linux/timer.h>
85#include <linux/errno.h>
86#include <linux/ioport.h>
87#include <linux/slab.h>
88#include <linux/interrupt.h>
89#include <linux/pci.h>
90#include <linux/netdevice.h>
91#include <linux/etherdevice.h>
92#include <linux/skbuff.h>
93#include <linux/init.h>
94#include <linux/bitops.h>
95#include <asm/uaccess.h>
96#include <asm/processor.h> /* Processor type for cache alignment. */
97#include <asm/io.h>
98#include <linux/delay.h>
99#include <linux/spinlock.h>
100#ifndef _COMPAT_WITH_OLD_KERNEL
101#include <linux/crc32.h>
102#include <linux/ethtool.h>
103#include <linux/mii.h>
104#else
105#include "crc32.h"
106#include "ethtool.h"
107#include "mii.h"
108#include "compat.h"
109#endif
110
111/* These identify the driver base version and may not be removed. */
3418e469 112static char version[] =
113KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
114KERN_INFO " http://www.scyld.com/network/sundance.html\n";
115
116MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
117MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
118MODULE_LICENSE("GPL");
119
120module_param(debug, int, 0);
121module_param(rx_copybreak, int, 0);
122module_param_array(media, charp, NULL, 0);
123module_param(flowctrl, int, 0);
124MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
125MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
126MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
127
128/*
129 Theory of Operation
130
131I. Board Compatibility
132
133This driver is designed for the Sundance Technologies "Alta" ST201 chip.
134
135II. Board-specific settings
136
137III. Driver operation
138
139IIIa. Ring buffers
140
141This driver uses two statically allocated fixed-size descriptor lists
142formed into rings by a branch from the final descriptor to the beginning of
143the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
144Some chips explicitly use only 2^N sized rings, while others use a
145'next descriptor' pointer that the driver forms into rings.
146
147IIIb/c. Transmit/Receive Structure
148
149This driver uses a zero-copy receive and transmit scheme.
150The driver allocates full frame size skbuffs for the Rx ring buffers at
151open() time and passes the skb->data field to the chip as receive data
152buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
153a fresh skbuff is allocated and the frame is copied to the new skbuff.
154When the incoming frame is larger, the skbuff is passed directly up the
155protocol stack. Buffers consumed this way are replaced by newly allocated
156skbuffs in a later phase of receives.
157
158The RX_COPYBREAK value is chosen to trade-off the memory wasted by
159using a full-sized skbuff for small frames vs. the copying costs of larger
160frames. New boards are typically used in generously configured machines
161and the underfilled buffers have negligible impact compared to the benefit of
162a single allocation size, so the default value of zero results in never
163copying packets. When copying is done, the cost is usually mitigated by using
164a combined copy/checksum routine. Copying also preloads the cache, which is
165most useful with small frames.
166
167A subtle aspect of the operation is that the IP header at offset 14 in an
168ethernet frame isn't longword aligned for further processing.
169Unaligned buffers are permitted by the Sundance hardware, so
170frames are received into the skbuff at an offset of "+2", 16-byte aligning
171the IP header.
172
173IIId. Synchronization
174
175The driver runs as two independent, single-threaded flows of control. One
176is the send-packet routine, which enforces single-threaded use by the
177dev->tbusy flag. The other thread is the interrupt handler, which is single
178threaded by the hardware and interrupt handling software.
179
180The send packet thread has partial control over the Tx ring and 'dev->tbusy'
181flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
182 queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
183the 'lp->tx_full' flag.
184
185The interrupt handler has exclusive control over the Rx ring and records stats
186from the Tx ring. After reaping the stats, it marks the Tx queue entry as
187empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
188clears both the tx_full and tbusy flags.
189
190IV. Notes
191
192IVb. References
193
194The Sundance ST201 datasheet, preliminary version.
195The Kendin KS8723 datasheet, preliminary version.
196The ICplus IP100 datasheet, preliminary version.
197http://www.scyld.com/expert/100mbps.html
198http://www.scyld.com/expert/NWay.html
199
200IVc. Errata
201
202*/
203
204/* Work-around for Kendin chip bugs. */
205#ifndef CONFIG_SUNDANCE_MMIO
206#define USE_IO_OPS 1
207#endif
208
209static const struct pci_device_id sundance_pci_tbl[] = {
210 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
211 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
212 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
213 { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
214 { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
215 { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
216 { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
217 { }
218};
219MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
220
221enum {
222 netdev_io_size = 128
223};
224
225struct pci_id_info {
226 const char *name;
227};
46009c8b 228static const struct pci_id_info pci_id_tbl[] __devinitdata = {
229 {"D-Link DFE-550TX FAST Ethernet Adapter"},
230 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
231 {"D-Link DFE-580TX 4 port Server Adapter"},
232 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
233 {"D-Link DL10050-based FAST Ethernet Adapter"},
234 {"Sundance Technology Alta"},
1668b19f 235 {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
46009c8b 236 { } /* terminate list. */
237};
238
239/* This driver was written to use PCI memory space; however, x86-oriented
240 hardware often uses I/O space accesses. */
241
242/* Offsets to the device registers.
243 Unlike software-only systems, device drivers interact with complex hardware.
244 It's not useful to define symbolic names for every register bit in the
245 device. The name can only partially document the semantics and make
246 the driver longer and more difficult to read.
247 In general, only the important configuration values or bits changed
248 multiple times should be defined symbolically.
249*/
250enum alta_offsets {
251 DMACtrl = 0x00,
252 TxListPtr = 0x04,
253 TxDMABurstThresh = 0x08,
254 TxDMAUrgentThresh = 0x09,
255 TxDMAPollPeriod = 0x0a,
256 RxDMAStatus = 0x0c,
257 RxListPtr = 0x10,
258 DebugCtrl0 = 0x1a,
259 DebugCtrl1 = 0x1c,
260 RxDMABurstThresh = 0x14,
261 RxDMAUrgentThresh = 0x15,
262 RxDMAPollPeriod = 0x16,
263 LEDCtrl = 0x1a,
264 ASICCtrl = 0x30,
265 EEData = 0x34,
266 EECtrl = 0x36,
267 FlashAddr = 0x40,
268 FlashData = 0x44,
269 TxStatus = 0x46,
270 TxFrameId = 0x47,
271 DownCounter = 0x18,
272 IntrClear = 0x4a,
273 IntrEnable = 0x4c,
274 IntrStatus = 0x4e,
275 MACCtrl0 = 0x50,
276 MACCtrl1 = 0x52,
277 StationAddr = 0x54,
278 MaxFrameSize = 0x5A,
279 RxMode = 0x5c,
280 MIICtrl = 0x5e,
281 MulticastFilter0 = 0x60,
282 MulticastFilter1 = 0x64,
283 RxOctetsLow = 0x68,
284 RxOctetsHigh = 0x6a,
285 TxOctetsLow = 0x6c,
286 TxOctetsHigh = 0x6e,
287 TxFramesOK = 0x70,
288 RxFramesOK = 0x72,
289 StatsCarrierError = 0x74,
290 StatsLateColl = 0x75,
291 StatsMultiColl = 0x76,
292 StatsOneColl = 0x77,
293 StatsTxDefer = 0x78,
294 RxMissed = 0x79,
295 StatsTxXSDefer = 0x7a,
296 StatsTxAbort = 0x7b,
297 StatsBcastTx = 0x7c,
298 StatsBcastRx = 0x7d,
299 StatsMcastTx = 0x7e,
300 StatsMcastRx = 0x7f,
301 /* Aliased and bogus values! */
302 RxStatus = 0x0c,
303};
304enum ASICCtrl_HiWord_bit {
305 GlobalReset = 0x0001,
306 RxReset = 0x0002,
307 TxReset = 0x0004,
308 DMAReset = 0x0008,
309 FIFOReset = 0x0010,
310 NetworkReset = 0x0020,
311 HostReset = 0x0040,
312 ResetBusy = 0x0400,
313};
314
315/* Bits in the interrupt status/mask registers. */
316enum intr_status_bits {
317 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
318 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
319 IntrDrvRqst=0x0040,
320 StatsMax=0x0080, LinkChange=0x0100,
321 IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
322};
323
324/* Bits in the RxMode register. */
325enum rx_mode_bits {
326 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
327 AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
328};
329/* Bits in MACCtrl. */
330enum mac_ctrl0_bits {
331 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
332 EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
333};
334enum mac_ctrl1_bits {
335 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
336 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
337 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
338};
339
340/* The Rx and Tx buffer descriptors. */
341/* Note that using only 32 bit fields simplifies conversion to big-endian
342 architectures. */
343struct netdev_desc {
344 u32 next_desc;
345 u32 status;
346 struct desc_frag { u32 addr, length; } frag[1];
347};
348
349/* Bits in netdev_desc.status */
350enum desc_status_bits {
351 DescOwn=0x8000,
352 DescEndPacket=0x4000,
353 DescEndRing=0x2000,
354 LastFrag=0x80000000,
355 DescIntrOnTx=0x8000,
356 DescIntrOnDMADone=0x80000000,
357 DisableAlign = 0x00000001,
358};
359
360#define PRIV_ALIGN 15 /* Required alignment mask */
361/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
362 within the structure. */
363#define MII_CNT 4
364struct netdev_private {
365 /* Descriptor rings first for alignment. */
366 struct netdev_desc *rx_ring;
367 struct netdev_desc *tx_ring;
368 struct sk_buff* rx_skbuff[RX_RING_SIZE];
369 struct sk_buff* tx_skbuff[TX_RING_SIZE];
370 dma_addr_t tx_ring_dma;
371 dma_addr_t rx_ring_dma;
372 struct net_device_stats stats;
373 struct timer_list timer; /* Media monitoring timer. */
374 /* Frequently used values: keep some adjacent for cache effect. */
375 spinlock_t lock;
376 spinlock_t rx_lock; /* Group with Tx control cache line. */
377 int msg_enable;
378 int chip_id;
379 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
380 unsigned int rx_buf_sz; /* Based on MTU+slack. */
381 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
382 unsigned int cur_tx, dirty_tx;
383 /* These values keep track of the transceiver/media in use. */
384 unsigned int flowctrl:1;
385 unsigned int default_port:4; /* Last dev->if_port value. */
386 unsigned int an_enable:1;
387 unsigned int speed;
388 struct tasklet_struct rx_tasklet;
389 struct tasklet_struct tx_tasklet;
390 int budget;
391 int cur_task;
392 /* Multicast and receive mode. */
393 spinlock_t mcastlock; /* SMP lock multicast updates. */
394 u16 mcast_filter[4];
395 /* MII transceiver section. */
396 struct mii_if_info mii_if;
397 int mii_preamble_required;
398 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
399 struct pci_dev *pci_dev;
400 void __iomem *base;
401 unsigned char pci_rev_id;
402};
403
404/* The station address location in the EEPROM. */
405#define EEPROM_SA_OFFSET 0x10
406#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
407 IntrDrvRqst | IntrTxDone | StatsMax | \
408 LinkChange)
409
410static int change_mtu(struct net_device *dev, int new_mtu);
411static int eeprom_read(void __iomem *ioaddr, int location);
412static int mdio_read(struct net_device *dev, int phy_id, int location);
413static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
414static int netdev_open(struct net_device *dev);
415static void check_duplex(struct net_device *dev);
416static void netdev_timer(unsigned long data);
417static void tx_timeout(struct net_device *dev);
418static void init_ring(struct net_device *dev);
419static int start_tx(struct sk_buff *skb, struct net_device *dev);
420static int reset_tx (struct net_device *dev);
7d12e780 421static irqreturn_t intr_handler(int irq, void *dev_instance);
422static void rx_poll(unsigned long data);
423static void tx_poll(unsigned long data);
424static void refill_rx (struct net_device *dev);
425static void netdev_error(struct net_device *dev, int intr_status);
427static void set_rx_mode(struct net_device *dev);
428static int __set_mac_addr(struct net_device *dev);
429static struct net_device_stats *get_stats(struct net_device *dev);
430static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
431static int netdev_close(struct net_device *dev);
7282d491 432static const struct ethtool_ops ethtool_ops;
1da177e4 433
434static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
435{
436 struct netdev_private *np = netdev_priv(dev);
437 void __iomem *ioaddr = np->base + ASICCtrl;
438 int countdown;
439
440 /* ST201 documentation states ASICCtrl is a 32bit register */
441 iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
442 /* ST201 documentation states reset can take up to 1 ms */
443 countdown = 10 + 1;
444 while (ioread32 (ioaddr) & (ResetBusy << 16)) {
445 if (--countdown == 0) {
446 printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
447 break;
448 }
449 udelay(100);
450 }
451}
452
453static int __devinit sundance_probe1 (struct pci_dev *pdev,
454 const struct pci_device_id *ent)
455{
456 struct net_device *dev;
457 struct netdev_private *np;
458 static int card_idx;
459 int chip_idx = ent->driver_data;
460 int irq;
461 int i;
462 void __iomem *ioaddr;
463 u16 mii_ctl;
464 void *ring_space;
465 dma_addr_t ring_dma;
466#ifdef USE_IO_OPS
467 int bar = 0;
468#else
469 int bar = 1;
470#endif
67ec2f80 471 int phy, phy_idx = 0;
472
473
474/* when built into the kernel, we only print version if device is found */
475#ifndef MODULE
476 static int printed_version;
477 if (!printed_version++)
478 printk(version);
479#endif
480
481 if (pci_enable_device(pdev))
482 return -EIO;
483 pci_set_master(pdev);
484
485 irq = pdev->irq;
486
487 dev = alloc_etherdev(sizeof(*np));
488 if (!dev)
489 return -ENOMEM;
490 SET_MODULE_OWNER(dev);
491 SET_NETDEV_DEV(dev, &pdev->dev);
492
493 if (pci_request_regions(pdev, DRV_NAME))
494 goto err_out_netdev;
495
496 ioaddr = pci_iomap(pdev, bar, netdev_io_size);
497 if (!ioaddr)
498 goto err_out_res;
499
500 for (i = 0; i < 3; i++)
501 ((u16 *)dev->dev_addr)[i] =
502 le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
30d60a82 503 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
504
505 dev->base_addr = (unsigned long)ioaddr;
506 dev->irq = irq;
507
508 np = netdev_priv(dev);
509 np->base = ioaddr;
510 np->pci_dev = pdev;
511 np->chip_id = chip_idx;
512 np->msg_enable = (1 << debug) - 1;
513 spin_lock_init(&np->lock);
514 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
515 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
516
517 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
518 if (!ring_space)
519 goto err_out_cleardev;
520 np->tx_ring = (struct netdev_desc *)ring_space;
521 np->tx_ring_dma = ring_dma;
522
523 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
524 if (!ring_space)
525 goto err_out_unmap_tx;
526 np->rx_ring = (struct netdev_desc *)ring_space;
527 np->rx_ring_dma = ring_dma;
528
529 np->mii_if.dev = dev;
530 np->mii_if.mdio_read = mdio_read;
531 np->mii_if.mdio_write = mdio_write;
532 np->mii_if.phy_id_mask = 0x1f;
533 np->mii_if.reg_num_mask = 0x1f;
534
535 /* The chip-specific entries in the device structure. */
536 dev->open = &netdev_open;
537 dev->hard_start_xmit = &start_tx;
538 dev->stop = &netdev_close;
539 dev->get_stats = &get_stats;
540 dev->set_multicast_list = &set_rx_mode;
541 dev->do_ioctl = &netdev_ioctl;
542 SET_ETHTOOL_OPS(dev, &ethtool_ops);
543 dev->tx_timeout = &tx_timeout;
544 dev->watchdog_timeo = TX_TIMEOUT;
545 dev->change_mtu = &change_mtu;
546 pci_set_drvdata(pdev, dev);
547
548 pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);
549
550 i = register_netdev(dev);
551 if (i)
552 goto err_out_unmap_rx;
553
554 printk(KERN_INFO "%s: %s at %p, ",
555 dev->name, pci_id_tbl[chip_idx].name, ioaddr);
556 for (i = 0; i < 5; i++)
557 printk("%2.2x:", dev->dev_addr[i]);
558 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
559
560 np->phys[0] = 1; /* Default setting */
561 np->mii_preamble_required++;
562 /*
563 * It seems some PHYs don't deal well with address 0 being accessed
564 * first, so leave address zero to the end of the loop (32 & 31).
565 */
b06c093e 566 for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) {
b06c093e 567 int phyx = phy & 0x1f;
0d615ec2 568 int mii_status = mdio_read(dev, phyx, MII_BMSR);
67ec2f80 569 if (mii_status != 0xffff && mii_status != 0x0000) {
570 np->phys[phy_idx++] = phyx;
571 np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
572 if ((mii_status & 0x0040) == 0)
573 np->mii_preamble_required++;
574 printk(KERN_INFO "%s: MII PHY found at address %d, status "
575 "0x%4.4x advertising %4.4x.\n",
b06c093e 576 dev->name, phyx, mii_status, np->mii_if.advertising);
1da177e4 577 }
578 }
579 np->mii_preamble_required--;
1da177e4 580
581 if (phy_idx == 0) {
582 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
583 dev->name, ioread32(ioaddr + ASICCtrl));
584 goto err_out_unregister;
585 }
586
587 np->mii_if.phy_id = np->phys[0];
588
589 /* Parse override configuration */
590 np->an_enable = 1;
591 if (card_idx < MAX_UNITS) {
592 if (media[card_idx] != NULL) {
593 np->an_enable = 0;
594 if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
595 strcmp (media[card_idx], "4") == 0) {
596 np->speed = 100;
597 np->mii_if.full_duplex = 1;
598 } else if (strcmp (media[card_idx], "100mbps_hd") == 0
599 || strcmp (media[card_idx], "3") == 0) {
600 np->speed = 100;
601 np->mii_if.full_duplex = 0;
602 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
603 strcmp (media[card_idx], "2") == 0) {
604 np->speed = 10;
605 np->mii_if.full_duplex = 1;
606 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
607 strcmp (media[card_idx], "1") == 0) {
608 np->speed = 10;
609 np->mii_if.full_duplex = 0;
610 } else {
611 np->an_enable = 1;
612 }
613 }
614 if (flowctrl == 1)
615 np->flowctrl = 1;
616 }
617
618 /* Fibre PHY? */
619 if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
620 /* Default 100Mbps Full */
621 if (np->an_enable) {
622 np->speed = 100;
623 np->mii_if.full_duplex = 1;
624 np->an_enable = 0;
625 }
626 }
627 /* Reset PHY */
628 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
629 mdelay (300);
630 /* If flow control enabled, we need to advertise it.*/
631 if (np->flowctrl)
632 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
633 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
634 /* Force media type */
635 if (!np->an_enable) {
636 mii_ctl = 0;
637 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
638 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
639 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
640 printk (KERN_INFO "Override speed=%d, %s duplex\n",
641 np->speed, np->mii_if.full_duplex ? "Full" : "Half");
642
643 }
644
645 /* Perhaps move the reset here? */
646 /* Reset the chip to erase previous misconfiguration. */
647 if (netif_msg_hw(np))
648 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
e714d99c 649 sundance_reset(dev, 0x00ff << 16);
650 if (netif_msg_hw(np))
651 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
652
653 card_idx++;
654 return 0;
655
656err_out_unregister:
657 unregister_netdev(dev);
658err_out_unmap_rx:
659 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
660err_out_unmap_tx:
661 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
662err_out_cleardev:
663 pci_set_drvdata(pdev, NULL);
664 pci_iounmap(pdev, ioaddr);
665err_out_res:
666 pci_release_regions(pdev);
667err_out_netdev:
668 free_netdev (dev);
669 return -ENODEV;
670}
671
672static int change_mtu(struct net_device *dev, int new_mtu)
673{
674 if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
675 return -EINVAL;
676 if (netif_running(dev))
677 return -EBUSY;
678 dev->mtu = new_mtu;
679 return 0;
680}
681
682#define eeprom_delay(ee_addr) ioread32(ee_addr)
683/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
684static int __devinit eeprom_read(void __iomem *ioaddr, int location)
685{
686 int boguscnt = 10000; /* Typical 1900 ticks. */
687 iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
688 do {
689 eeprom_delay(ioaddr + EECtrl);
690 if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
691 return ioread16(ioaddr + EEData);
692 }
693 } while (--boguscnt > 0);
694 return 0;
695}
696
697/* MII transceiver control section.
698 Read and write the MII registers using software-generated serial
699 MDIO protocol. See the MII specifications or DP83840A data sheet
700 for details.
701
702 The maximum data clock rate is 2.5 MHz. The minimum timing is usually
703 met by back-to-back 33 MHz PCI cycles. */
704#define mdio_delay() ioread8(mdio_addr)
705
706enum mii_reg_bits {
707 MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
708};
709#define MDIO_EnbIn (0)
710#define MDIO_WRITE0 (MDIO_EnbOutput)
711#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
712
713/* Generate the preamble required for initial synchronization and
714 a few older transceivers. */
715static void mdio_sync(void __iomem *mdio_addr)
716{
717 int bits = 32;
718
719 /* Establish sync by sending at least 32 logic ones. */
720 while (--bits >= 0) {
721 iowrite8(MDIO_WRITE1, mdio_addr);
722 mdio_delay();
723 iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
724 mdio_delay();
725 }
726}
727
728static int mdio_read(struct net_device *dev, int phy_id, int location)
729{
730 struct netdev_private *np = netdev_priv(dev);
731 void __iomem *mdio_addr = np->base + MIICtrl;
732 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
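	/* The 16 bits shifted out below are two extra preamble '1's, the
	 * start delimiter (01), the read opcode (10), the 5-bit PHY address
	 * and the 5-bit register address; the turnaround and 16 data bits
	 * are then clocked in by the second loop. */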
733 int i, retval = 0;
734
735 if (np->mii_preamble_required)
736 mdio_sync(mdio_addr);
737
738 /* Shift the read command bits out. */
739 for (i = 15; i >= 0; i--) {
740 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
741
742 iowrite8(dataval, mdio_addr);
743 mdio_delay();
744 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
745 mdio_delay();
746 }
747 /* Read the two transition, 16 data, and wire-idle bits. */
748 for (i = 19; i > 0; i--) {
749 iowrite8(MDIO_EnbIn, mdio_addr);
750 mdio_delay();
751 retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
752 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
753 mdio_delay();
754 }
755 return (retval>>1) & 0xffff;
756}
757
758static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
759{
760 struct netdev_private *np = netdev_priv(dev);
761 void __iomem *mdio_addr = np->base + MIICtrl;
762 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
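	/* The 32 bits shifted out encode the MDIO write frame: start (01),
	 * write opcode (01), 5-bit PHY address, 5-bit register address,
	 * turnaround (10), then the 16-bit value. */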
763 int i;
764
765 if (np->mii_preamble_required)
766 mdio_sync(mdio_addr);
767
768 /* Shift the command bits out. */
769 for (i = 31; i >= 0; i--) {
770 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
771
772 iowrite8(dataval, mdio_addr);
773 mdio_delay();
774 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
775 mdio_delay();
776 }
777 /* Clear out extra bits. */
778 for (i = 2; i > 0; i--) {
779 iowrite8(MDIO_EnbIn, mdio_addr);
780 mdio_delay();
781 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
782 mdio_delay();
783 }
784 return;
785}
786
787static int netdev_open(struct net_device *dev)
788{
789 struct netdev_private *np = netdev_priv(dev);
790 void __iomem *ioaddr = np->base;
acd70c2b 791 unsigned long flags;
792 int i;
793
794 /* Do we need to reset the chip??? */
795
1fb9df5d 796 i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
797 if (i)
798 return i;
799
800 if (netif_msg_ifup(np))
801 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
802 dev->name, dev->irq);
803 init_ring(dev);
804
805 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
806 /* The Tx list pointer is written as packets are queued. */
807
808 /* Initialize other registers. */
809 __set_mac_addr(dev);
810#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
811 iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
812#else
813 iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
814#endif
815 if (dev->mtu > 2047)
816 iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
817
818 /* Configure the PCI bus bursts and FIFO thresholds. */
819
820 if (dev->if_port == 0)
821 dev->if_port = np->default_port;
822
823 spin_lock_init(&np->mcastlock);
824
825 set_rx_mode(dev);
826 iowrite16(0, ioaddr + IntrEnable);
827 iowrite16(0, ioaddr + DownCounter);
828 /* Set the chip to poll every N*320nsec. */
829 iowrite8(100, ioaddr + RxDMAPollPeriod);
830 iowrite8(127, ioaddr + TxDMAPollPeriod);
831 /* Fix DFE-580TX packet drop issue */
832 if (np->pci_rev_id >= 0x14)
833 iowrite8(0x01, ioaddr + DebugCtrl1);
834 netif_start_queue(dev);
835
836 spin_lock_irqsave(&np->lock, flags);
837 reset_tx(dev);
838 spin_unlock_irqrestore(&np->lock, flags);
839
840 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
841
842 if (netif_msg_ifup(np))
843 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
844 "MAC Control %x, %4.4x %4.4x.\n",
845 dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
846 ioread32(ioaddr + MACCtrl0),
847 ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
848
849 /* Set the timer to check for link beat. */
850 init_timer(&np->timer);
851 np->timer.expires = jiffies + 3*HZ;
852 np->timer.data = (unsigned long)dev;
853 np->timer.function = &netdev_timer; /* timer handler */
854 add_timer(&np->timer);
855
856 /* Enable interrupts by setting the interrupt mask. */
857 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
858
859 return 0;
860}
861
862static void check_duplex(struct net_device *dev)
863{
864 struct netdev_private *np = netdev_priv(dev);
865 void __iomem *ioaddr = np->base;
866 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
867 int negotiated = mii_lpa & np->mii_if.advertising;
868 int duplex;
869
870 /* Force media */
871 if (!np->an_enable || mii_lpa == 0xffff) {
872 if (np->mii_if.full_duplex)
873 iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
874 ioaddr + MACCtrl0);
875 return;
876 }
877
878 /* Autonegotiation */
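	/* Full duplex if both ends advertise 100BASE-TX full duplex (0x0100),
	 * or if 10BASE-T full duplex (0x0040) is the only 10/100 ability
	 * they have in common. */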
879 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
880 if (np->mii_if.full_duplex != duplex) {
881 np->mii_if.full_duplex = duplex;
882 if (netif_msg_link(np))
883 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
884 "negotiated capability %4.4x.\n", dev->name,
885 duplex ? "full" : "half", np->phys[0], negotiated);
886 iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
887 }
888}
889
890static void netdev_timer(unsigned long data)
891{
892 struct net_device *dev = (struct net_device *)data;
893 struct netdev_private *np = netdev_priv(dev);
894 void __iomem *ioaddr = np->base;
895 int next_tick = 10*HZ;
896
897 if (netif_msg_timer(np)) {
898 printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
899 "Tx %x Rx %x.\n",
900 dev->name, ioread16(ioaddr + IntrEnable),
901 ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
902 }
903 check_duplex(dev);
904 np->timer.expires = jiffies + next_tick;
905 add_timer(&np->timer);
906}
907
908static void tx_timeout(struct net_device *dev)
909{
910 struct netdev_private *np = netdev_priv(dev);
911 void __iomem *ioaddr = np->base;
912 unsigned long flag;
6aa20a22 913
914 netif_stop_queue(dev);
915 tasklet_disable(&np->tx_tasklet);
916 iowrite16(0, ioaddr + IntrEnable);
917 printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
918 "TxFrameId %2.2x,"
919 " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
920 ioread8(ioaddr + TxFrameId));
921
922 {
923 int i;
924 for (i=0; i<TX_RING_SIZE; i++) {
925 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
926 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
927 le32_to_cpu(np->tx_ring[i].next_desc),
928 le32_to_cpu(np->tx_ring[i].status),
929 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
6aa20a22 930 le32_to_cpu(np->tx_ring[i].frag[0].addr),
931 le32_to_cpu(np->tx_ring[i].frag[0].length));
932 }
933 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
934 ioread32(np->base + TxListPtr),
1da177e4 935 netif_queue_stopped(dev));
6aa20a22 936 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
937 np->cur_tx, np->cur_tx % TX_RING_SIZE,
938 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
939 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
940 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
941 }
942 spin_lock_irqsave(&np->lock, flag);
943
944 /* Stop and restart the chip's Tx processes. */
945 reset_tx(dev);
946 spin_unlock_irqrestore(&np->lock, flag);
947
948 dev->if_port = 0;
949
950 dev->trans_start = jiffies;
951 np->stats.tx_errors++;
952 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
953 netif_wake_queue(dev);
954 }
955 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
956 tasklet_enable(&np->tx_tasklet);
957}
958
959
960/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
961static void init_ring(struct net_device *dev)
962{
963 struct netdev_private *np = netdev_priv(dev);
964 int i;
965
966 np->cur_rx = np->cur_tx = 0;
967 np->dirty_rx = np->dirty_tx = 0;
968 np->cur_task = 0;
969
970 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
971
972 /* Initialize all Rx descriptors. */
973 for (i = 0; i < RX_RING_SIZE; i++) {
974 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
975 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
976 np->rx_ring[i].status = 0;
977 np->rx_ring[i].frag[0].length = 0;
978 np->rx_skbuff[i] = NULL;
979 }
980
981 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
982 for (i = 0; i < RX_RING_SIZE; i++) {
983 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
984 np->rx_skbuff[i] = skb;
985 if (skb == NULL)
986 break;
987 skb->dev = dev; /* Mark as being used by this device. */
988 skb_reserve(skb, 2); /* 16 byte align the IP header. */
989 np->rx_ring[i].frag[0].addr = cpu_to_le32(
689be439 990 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
991 PCI_DMA_FROMDEVICE));
992 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
993 }
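	/* Set dirty_rx so that (cur_rx - dirty_rx) modulo RX_RING_SIZE equals
	 * the number of slots still lacking a buffer (zero if every allocation
	 * above succeeded); refill_rx() uses this to top the ring up. */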
994 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
995
996 for (i = 0; i < TX_RING_SIZE; i++) {
997 np->tx_skbuff[i] = NULL;
998 np->tx_ring[i].status = 0;
999 }
1000 return;
1001}
1002
1003static void tx_poll (unsigned long data)
1004{
1005 struct net_device *dev = (struct net_device *)data;
1006 struct netdev_private *np = netdev_priv(dev);
1007 unsigned head = np->cur_task % TX_RING_SIZE;
6aa20a22 1008 struct netdev_desc *txdesc =
1da177e4 1009 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
6aa20a22 1010
1011 /* Chain the next pointer */
1012 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1013 int entry = np->cur_task % TX_RING_SIZE;
1014 txdesc = &np->tx_ring[entry];
1015 if (np->last_tx) {
1016 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1017 entry*sizeof(struct netdev_desc));
1018 }
1019 np->last_tx = txdesc;
1020 }
1021 /* Indicate the latest descriptor of tx ring */
1022 txdesc->status |= cpu_to_le32(DescIntrOnTx);
1023
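	/* If the Tx DMA engine is idle (the list pointer reads back zero),
	 * point it at the first descriptor chained above; otherwise it will
	 * follow the next_desc links on its own. */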
1024 if (ioread32 (np->base + TxListPtr) == 0)
1025 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1026 np->base + TxListPtr);
1027 return;
1028}
1029
1030static int
1031start_tx (struct sk_buff *skb, struct net_device *dev)
1032{
1033 struct netdev_private *np = netdev_priv(dev);
1034 struct netdev_desc *txdesc;
1035 unsigned entry;
1036
1037 /* Calculate the next Tx descriptor entry. */
1038 entry = np->cur_tx % TX_RING_SIZE;
1039 np->tx_skbuff[entry] = skb;
1040 txdesc = &np->tx_ring[entry];
1041
1042 txdesc->next_desc = 0;
1043 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1044 txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
1045 skb->len,
1046 PCI_DMA_TODEVICE));
1047 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1048
1049 /* Increment cur_tx before tasklet_schedule() */
1050 np->cur_tx++;
1051 mb();
1052 /* Schedule a tx_poll() task */
1053 tasklet_schedule(&np->tx_tasklet);
1054
1055 /* On some architectures: explicitly flush cache lines here. */
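	/* Stop the queue once TX_QUEUE_LEN - 1 descriptors are outstanding;
	 * the interrupt handler wakes it again after reaping completions. */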
1056 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
1057 && !netif_queue_stopped(dev)) {
1058 /* do nothing */
1059 } else {
1060 netif_stop_queue (dev);
1061 }
1062 dev->trans_start = jiffies;
1063 if (netif_msg_tx_queued(np)) {
1064 printk (KERN_DEBUG
1065 "%s: Transmit frame #%d queued in slot %d.\n",
1066 dev->name, np->cur_tx, entry);
1067 }
1068 return 0;
1069}
1070
1071/* Reset hardware tx and free all of tx buffers */
1072static int
1073reset_tx (struct net_device *dev)
1074{
1075 struct netdev_private *np = netdev_priv(dev);
1076 void __iomem *ioaddr = np->base;
1077 struct sk_buff *skb;
1078 int i;
1079 int irq = in_interrupt();
6aa20a22 1080
1081 /* Reset tx logic, TxListPtr will be cleaned */
1082 iowrite16 (TxDisable, ioaddr + MACCtrl1);
1083 sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1084
1085 /* free all tx skbuff */
1086 for (i = 0; i < TX_RING_SIZE; i++) {
1087 np->tx_ring[i].next_desc = 0;
1088
1089 skb = np->tx_skbuff[i];
1090 if (skb) {
6aa20a22 1091 pci_unmap_single(np->pci_dev,
1092 np->tx_ring[i].frag[0].addr, skb->len,
1093 PCI_DMA_TODEVICE);
1094 if (irq)
1095 dev_kfree_skb_irq (skb);
1096 else
1097 dev_kfree_skb (skb);
1098 np->tx_skbuff[i] = NULL;
1099 np->stats.tx_dropped++;
1100 }
1101 }
1102 np->cur_tx = np->dirty_tx = 0;
1103 np->cur_task = 0;
1104
1105 np->last_tx = 0;
1106 iowrite8(127, ioaddr + TxDMAPollPeriod);
1107
1108 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1109 return 0;
1110}
1111
6aa20a22 1112/* The interrupt handler cleans up after the Tx thread,
1da177e4 1113 and schedules the Rx work via a tasklet */
7d12e780 1114static irqreturn_t intr_handler(int irq, void *dev_instance)
1115{
1116 struct net_device *dev = (struct net_device *)dev_instance;
1117 struct netdev_private *np = netdev_priv(dev);
1118 void __iomem *ioaddr = np->base;
1119 int hw_frame_id;
1120 int tx_cnt;
1121 int tx_status;
1122 int handled = 0;
e242040d 1123 int i;
1124
1125
1126 do {
1127 int intr_status = ioread16(ioaddr + IntrStatus);
1128 iowrite16(intr_status, ioaddr + IntrStatus);
1129
1130 if (netif_msg_intr(np))
1131 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1132 dev->name, intr_status);
1133
1134 if (!(intr_status & DEFAULT_INTR))
1135 break;
1136
1137 handled = 1;
1138
1139 if (intr_status & (IntrRxDMADone)) {
1140 iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1141 ioaddr + IntrEnable);
1142 if (np->budget < 0)
1143 np->budget = RX_BUDGET;
1144 tasklet_schedule(&np->rx_tasklet);
1145 }
1146 if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1147 tx_status = ioread16 (ioaddr + TxStatus);
1148 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1149 if (netif_msg_tx_done(np))
1150 printk
1151 ("%s: Transmit status is %2.2x.\n",
1152 dev->name, tx_status);
1153 if (tx_status & 0x1e) {
1154 if (netif_msg_tx_err(np))
1155 printk("%s: Transmit error status %4.4x.\n",
1156 dev->name, tx_status);
1157 np->stats.tx_errors++;
1158 if (tx_status & 0x10)
1159 np->stats.tx_fifo_errors++;
1160 if (tx_status & 0x08)
1161 np->stats.collisions++;
1162 if (tx_status & 0x04)
1163 np->stats.tx_fifo_errors++;
1164 if (tx_status & 0x02)
1165 np->stats.tx_window_errors++;
e242040d 1166
1167 /*
1168 ** This reset has been verified on
1169 ** DFE-580TX boards ! phdm@macqel.be.
1170 */
1171 if (tx_status & 0x10) { /* TxUnderrun */
1172 /* Restart Tx FIFO and transmitter */
1173 sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
b71b95ef 1174 /* No need to reset the Tx pointer here */
1da177e4 1175 }
1176 /* Restart the Tx. Need to make sure tx enabled */
1177 i = 10;
1178 do {
1179 iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1180 if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1181 break;
1182 mdelay(1);
1183 } while (--i);
1184 }
1185 /* Yup, this is a documentation bug. It cost me *hours*. */
1186 iowrite16 (0, ioaddr + TxStatus);
1187 if (tx_cnt < 0) {
1188 iowrite32(5000, ioaddr + DownCounter);
1189 break;
1190 }
1191 tx_status = ioread16 (ioaddr + TxStatus);
1192 }
1193 hw_frame_id = (tx_status >> 8) & 0xff;
1194 } else {
1195 hw_frame_id = ioread8(ioaddr + TxFrameId);
1196 }
1197
1198 if (np->pci_rev_id >= 0x14) {
1199 spin_lock(&np->lock);
1200 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1201 int entry = np->dirty_tx % TX_RING_SIZE;
1202 struct sk_buff *skb;
1203 int sw_frame_id;
1204 sw_frame_id = (le32_to_cpu(
1205 np->tx_ring[entry].status) >> 2) & 0xff;
1206 if (sw_frame_id == hw_frame_id &&
1207 !(le32_to_cpu(np->tx_ring[entry].status)
1208 & 0x00010000))
1209 break;
6aa20a22 1210 if (sw_frame_id == (hw_frame_id + 1) %
1211 TX_RING_SIZE)
1212 break;
1213 skb = np->tx_skbuff[entry];
1214 /* Free the original skb. */
1215 pci_unmap_single(np->pci_dev,
1216 np->tx_ring[entry].frag[0].addr,
1217 skb->len, PCI_DMA_TODEVICE);
1218 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1219 np->tx_skbuff[entry] = NULL;
1220 np->tx_ring[entry].frag[0].addr = 0;
1221 np->tx_ring[entry].frag[0].length = 0;
1222 }
1223 spin_unlock(&np->lock);
1224 } else {
1225 spin_lock(&np->lock);
1226 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1227 int entry = np->dirty_tx % TX_RING_SIZE;
1228 struct sk_buff *skb;
6aa20a22 1229 if (!(le32_to_cpu(np->tx_ring[entry].status)
1230 & 0x00010000))
1231 break;
1232 skb = np->tx_skbuff[entry];
1233 /* Free the original skb. */
1234 pci_unmap_single(np->pci_dev,
1235 np->tx_ring[entry].frag[0].addr,
1236 skb->len, PCI_DMA_TODEVICE);
1237 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1238 np->tx_skbuff[entry] = NULL;
1239 np->tx_ring[entry].frag[0].addr = 0;
1240 np->tx_ring[entry].frag[0].length = 0;
1241 }
1242 spin_unlock(&np->lock);
1243 }
6aa20a22 1244
1245 if (netif_queue_stopped(dev) &&
1246 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1247 /* The ring is no longer full, clear busy flag. */
1248 netif_wake_queue (dev);
1249 }
1250 /* Abnormal error summary/uncommon events handlers. */
1251 if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1252 netdev_error(dev, intr_status);
1253 } while (0);
1254 if (netif_msg_intr(np))
1255 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1256 dev->name, ioread16(ioaddr + IntrStatus));
1257 return IRQ_RETVAL(handled);
1258}
1259
1260static void rx_poll(unsigned long data)
1261{
1262 struct net_device *dev = (struct net_device *)data;
1263 struct netdev_private *np = netdev_priv(dev);
1264 int entry = np->cur_rx % RX_RING_SIZE;
1265 int boguscnt = np->budget;
1266 void __iomem *ioaddr = np->base;
1267 int received = 0;
1268
1269 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1270 while (1) {
1271 struct netdev_desc *desc = &(np->rx_ring[entry]);
1272 u32 frame_status = le32_to_cpu(desc->status);
1273 int pkt_len;
1274
1275 if (--boguscnt < 0) {
1276 goto not_done;
1277 }
1278 if (!(frame_status & DescOwn))
1279 break;
1280 pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1281 if (netif_msg_rx_status(np))
1282 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1283 frame_status);
1284 if (frame_status & 0x001f4000) {
1285 /* There was an error. */
1286 if (netif_msg_rx_err(np))
1287 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1288 frame_status);
1289 np->stats.rx_errors++;
1290 if (frame_status & 0x00100000) np->stats.rx_length_errors++;
1291 if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
1292 if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
1293 if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
1294 if (frame_status & 0x00100000) {
1295 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1296 " status %8.8x.\n",
1297 dev->name, frame_status);
1298 }
1299 } else {
1300 struct sk_buff *skb;
1301#ifndef final_version
1302 if (netif_msg_rx_status(np))
1303 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1304 ", bogus_cnt %d.\n",
1305 pkt_len, boguscnt);
1306#endif
1307 /* Check if the packet is long enough to accept without copying
1308 to a minimally-sized skbuff. */
1309 if (pkt_len < rx_copybreak
1310 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1311 skb->dev = dev;
1312 skb_reserve(skb, 2); /* 16 byte align the IP header */
1313 pci_dma_sync_single_for_cpu(np->pci_dev,
1314 desc->frag[0].addr,
1315 np->rx_buf_sz,
1316 PCI_DMA_FROMDEVICE);
1317
689be439 1318 eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
1319 pci_dma_sync_single_for_device(np->pci_dev,
1320 desc->frag[0].addr,
1321 np->rx_buf_sz,
1322 PCI_DMA_FROMDEVICE);
1323 skb_put(skb, pkt_len);
1324 } else {
1325 pci_unmap_single(np->pci_dev,
1326 desc->frag[0].addr,
1327 np->rx_buf_sz,
1328 PCI_DMA_FROMDEVICE);
1329 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1330 np->rx_skbuff[entry] = NULL;
1331 }
1332 skb->protocol = eth_type_trans(skb, dev);
1333 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1334 netif_rx(skb);
1335 dev->last_rx = jiffies;
1336 }
1337 entry = (entry + 1) % RX_RING_SIZE;
1338 received++;
1339 }
1340 np->cur_rx = entry;
1341 refill_rx (dev);
1342 np->budget -= received;
1343 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1344 return;
1345
1346not_done:
1347 np->cur_rx = entry;
1348 refill_rx (dev);
1349 if (!received)
1350 received = 1;
1351 np->budget -= received;
1352 if (np->budget <= 0)
1353 np->budget = RX_BUDGET;
1354 tasklet_schedule(&np->rx_tasklet);
1355 return;
1356}
1357
1358static void refill_rx (struct net_device *dev)
1359{
1360 struct netdev_private *np = netdev_priv(dev);
1361 int entry;
1362 int cnt = 0;
1363
1364 /* Refill the Rx ring buffers. */
1365 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1366 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1367 struct sk_buff *skb;
1368 entry = np->dirty_rx % RX_RING_SIZE;
1369 if (np->rx_skbuff[entry] == NULL) {
1370 skb = dev_alloc_skb(np->rx_buf_sz);
1371 np->rx_skbuff[entry] = skb;
1372 if (skb == NULL)
1373 break; /* Better luck next round. */
1374 skb->dev = dev; /* Mark as being used by this device. */
1375 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1376 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
689be439 1377 pci_map_single(np->pci_dev, skb->data,
1378 np->rx_buf_sz, PCI_DMA_FROMDEVICE));
1379 }
1380 /* Perhaps we need not reset this field. */
1381 np->rx_ring[entry].frag[0].length =
1382 cpu_to_le32(np->rx_buf_sz | LastFrag);
1383 np->rx_ring[entry].status = 0;
1384 cnt++;
1385 }
1386 return;
1387}
1388static void netdev_error(struct net_device *dev, int intr_status)
1389{
1390 struct netdev_private *np = netdev_priv(dev);
1391 void __iomem *ioaddr = np->base;
1392 u16 mii_ctl, mii_advertise, mii_lpa;
1393 int speed;
1394
1395 if (intr_status & LinkChange) {
1396 if (np->an_enable) {
1397 mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
1398 mii_lpa= mdio_read (dev, np->phys[0], MII_LPA);
1399 mii_advertise &= mii_lpa;
1400 printk (KERN_INFO "%s: Link changed: ", dev->name);
1401 if (mii_advertise & ADVERTISE_100FULL) {
1402 np->speed = 100;
1403 printk ("100Mbps, full duplex\n");
1404 } else if (mii_advertise & ADVERTISE_100HALF) {
1405 np->speed = 100;
1406 printk ("100Mbps, half duplex\n");
1407 } else if (mii_advertise & ADVERTISE_10FULL) {
1408 np->speed = 10;
1409 printk ("10Mbps, full duplex\n");
1410 } else if (mii_advertise & ADVERTISE_10HALF) {
1411 np->speed = 10;
1412 printk ("10Mbps, half duplex\n");
1413 } else
1414 printk ("\n");
1415
1416 } else {
1417 mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
1418 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1419 np->speed = speed;
1420 printk (KERN_INFO "%s: Link changed: %dMbps ,",
1421 dev->name, speed);
1422 printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
1423 "full" : "half");
1424 }
1425 check_duplex (dev);
1426 if (np->flowctrl && np->mii_if.full_duplex) {
1427 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1428 ioaddr + MulticastFilter1+2);
1429 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1430 ioaddr + MACCtrl0);
1431 }
1432 }
1433 if (intr_status & StatsMax) {
1434 get_stats(dev);
1435 }
1436 if (intr_status & IntrPCIErr) {
1437 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1438 dev->name, intr_status);
1439 /* We must do a global reset of DMA to continue. */
1440 }
1441}
1442
1443static struct net_device_stats *get_stats(struct net_device *dev)
1444{
1445 struct netdev_private *np = netdev_priv(dev);
1446 void __iomem *ioaddr = np->base;
1447 int i;
1448
1449 /* We should lock this segment of code for SMP eventually, although
1450 the vulnerability window is very small and statistics are
1451 non-critical. */
1452 /* The chip only needs to report frames it silently dropped. */
1453 np->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1454 np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1455 np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1456 np->stats.collisions += ioread8(ioaddr + StatsLateColl);
1457 np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
1458 np->stats.collisions += ioread8(ioaddr + StatsOneColl);
1459 np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1460 ioread8(ioaddr + StatsTxDefer);
1461 for (i = StatsTxDefer; i <= StatsMcastRx; i++)
1462 ioread8(ioaddr + i);
1463 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1464 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1465 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1466 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1467
1468 return &np->stats;
1469}
1470
1471static void set_rx_mode(struct net_device *dev)
1472{
1473 struct netdev_private *np = netdev_priv(dev);
1474 void __iomem *ioaddr = np->base;
1475 u16 mc_filter[4]; /* Multicast hash filter */
1476 u32 rx_mode;
1477 int i;
1478
1479 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1480 memset(mc_filter, 0xff, sizeof(mc_filter));
1481 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1482 } else if ((dev->mc_count > multicast_filter_limit)
1483 || (dev->flags & IFF_ALLMULTI)) {
1484 /* Too many to match, or accept all multicasts. */
1485 memset(mc_filter, 0xff, sizeof(mc_filter));
1486 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1487 } else if (dev->mc_count) {
1488 struct dev_mc_list *mclist;
1489 int bit;
1490 int index;
1491 int crc;
1492 memset (mc_filter, 0, sizeof (mc_filter));
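	/* Hash each address into the 64-bit filter: the six most significant
	 * CRC bits, taken MSB-first, form an index 0..63, and the matching
	 * bit is set in the four 16-bit filter words. */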
1493 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1494 i++, mclist = mclist->next) {
1495 crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
1496 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1497 if (crc & 0x80000000) index |= 1 << bit;
1498 mc_filter[index/16] |= (1 << (index % 16));
1499 }
1500 rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1501 } else {
1502 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1503 return;
1504 }
1505 if (np->mii_if.full_duplex && np->flowctrl)
1506 mc_filter[3] |= 0x0200;
1507
1508 for (i = 0; i < 4; i++)
1509 iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1510 iowrite8(rx_mode, ioaddr + RxMode);
1511}
1512
1513static int __set_mac_addr(struct net_device *dev)
1514{
1515 struct netdev_private *np = netdev_priv(dev);
1516 u16 addr16;
1517
1518 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1519 iowrite16(addr16, np->base + StationAddr);
1520 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1521 iowrite16(addr16, np->base + StationAddr+2);
1522 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1523 iowrite16(addr16, np->base + StationAddr+4);
1524 return 0;
1525}
1526
1527static int check_if_running(struct net_device *dev)
1528{
1529 if (!netif_running(dev))
1530 return -EINVAL;
1531 return 0;
1532}
1533
1534static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1535{
1536 struct netdev_private *np = netdev_priv(dev);
1537 strcpy(info->driver, DRV_NAME);
1538 strcpy(info->version, DRV_VERSION);
1539 strcpy(info->bus_info, pci_name(np->pci_dev));
1540}
1541
1542static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1543{
1544 struct netdev_private *np = netdev_priv(dev);
1545 spin_lock_irq(&np->lock);
1546 mii_ethtool_gset(&np->mii_if, ecmd);
1547 spin_unlock_irq(&np->lock);
1548 return 0;
1549}
1550
1551static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1552{
1553 struct netdev_private *np = netdev_priv(dev);
1554 int res;
1555 spin_lock_irq(&np->lock);
1556 res = mii_ethtool_sset(&np->mii_if, ecmd);
1557 spin_unlock_irq(&np->lock);
1558 return res;
1559}
1560
1561static int nway_reset(struct net_device *dev)
1562{
1563 struct netdev_private *np = netdev_priv(dev);
1564 return mii_nway_restart(&np->mii_if);
1565}
1566
1567static u32 get_link(struct net_device *dev)
1568{
1569 struct netdev_private *np = netdev_priv(dev);
1570 return mii_link_ok(&np->mii_if);
1571}
1572
1573static u32 get_msglevel(struct net_device *dev)
1574{
1575 struct netdev_private *np = netdev_priv(dev);
1576 return np->msg_enable;
1577}
1578
1579static void set_msglevel(struct net_device *dev, u32 val)
1580{
1581 struct netdev_private *np = netdev_priv(dev);
1582 np->msg_enable = val;
1583}
1584
7282d491 1585static const struct ethtool_ops ethtool_ops = {
1586 .begin = check_if_running,
1587 .get_drvinfo = get_drvinfo,
1588 .get_settings = get_settings,
1589 .set_settings = set_settings,
1590 .nway_reset = nway_reset,
1591 .get_link = get_link,
1592 .get_msglevel = get_msglevel,
1593 .set_msglevel = set_msglevel,
30d60a82 1594 .get_perm_addr = ethtool_op_get_perm_addr,
1595};
1596
1597static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1598{
1599 struct netdev_private *np = netdev_priv(dev);
1600 void __iomem *ioaddr = np->base;
1601 int rc;
1602 int i;
1603
1604 if (!netif_running(dev))
1605 return -EINVAL;
1606
1607 spin_lock_irq(&np->lock);
1608 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1609 spin_unlock_irq(&np->lock);
1610 switch (cmd) {
1611 case SIOCDEVPRIVATE:
1612 for (i=0; i<TX_RING_SIZE; i++) {
1613 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
6aa20a22 1614 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
1615 le32_to_cpu(np->tx_ring[i].next_desc),
1616 le32_to_cpu(np->tx_ring[i].status),
6aa20a22 1617 (le32_to_cpu(np->tx_ring[i].status) >> 2)
1da177e4 1618 & 0xff,
6aa20a22 1619 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1620 le32_to_cpu(np->tx_ring[i].frag[0].length));
1621 }
1622 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
1623 ioread32(np->base + TxListPtr),
1da177e4 1624 netif_queue_stopped(dev));
6aa20a22 1625 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
1626 np->cur_tx, np->cur_tx % TX_RING_SIZE,
1627 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
1628 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
1629 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
1630 printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
1631 return 0;
1632 }
6aa20a22 1633
1634
1635 return rc;
1636}
1637
1638static int netdev_close(struct net_device *dev)
1639{
1640 struct netdev_private *np = netdev_priv(dev);
1641 void __iomem *ioaddr = np->base;
1642 struct sk_buff *skb;
1643 int i;
1644
1645 /* Wait and kill tasklet */
1646 tasklet_kill(&np->rx_tasklet);
1647 tasklet_kill(&np->tx_tasklet);
1648 np->cur_tx = 0;
1649 np->dirty_tx = 0;
1650 np->cur_task = 0;
1651 np->last_tx = 0;
1652
1653 netif_stop_queue(dev);
1654
1655 if (netif_msg_ifdown(np)) {
1656 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1657 "Rx %4.4x Int %2.2x.\n",
1658 dev->name, ioread8(ioaddr + TxStatus),
1659 ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1660 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1661 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1662 }
1663
1664 /* Disable interrupts by clearing the interrupt mask. */
1665 iowrite16(0x0000, ioaddr + IntrEnable);
1666
1667 /* Disable Rx and Tx DMA so resources can be released safely */
1668 iowrite32(0x500, ioaddr + DMACtrl);
1669
1670 /* Stop the chip's Tx and Rx processes. */
1671 iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1672
1673 for (i = 2000; i > 0; i--) {
1674 if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1675 break;
1676 mdelay(1);
1677 }
1678
1679 iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1680 ioaddr +ASICCtrl + 2);
1681
1682 for (i = 2000; i > 0; i--) {
1683 if ((ioread16(ioaddr + ASICCtrl +2) & ResetBusy) == 0)
1684 break;
1685 mdelay(1);
1686 }
1687
1688#ifdef __i386__
1689 if (netif_msg_hw(np)) {
1690 printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
1691 (int)(np->tx_ring_dma));
1692 for (i = 0; i < TX_RING_SIZE; i++)
1693 printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
1694 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1695 np->tx_ring[i].frag[0].length);
1696 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
1697 (int)(np->rx_ring_dma));
1698 for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1699 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1700 i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1701 np->rx_ring[i].frag[0].length);
1702 }
1703 }
1704#endif /* __i386__ debugging only */
1705
1706 free_irq(dev->irq, dev);
1707
1708 del_timer_sync(&np->timer);
1709
1710 /* Free all the skbuffs in the Rx queue. */
1711 for (i = 0; i < RX_RING_SIZE; i++) {
1712 np->rx_ring[i].status = 0;
1713 np->rx_ring[i].frag[0].addr = 0xBADF00D0; /* An invalid address. */
1714 skb = np->rx_skbuff[i];
1715 if (skb) {
1716 pci_unmap_single(np->pci_dev,
1717 np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
1718 PCI_DMA_FROMDEVICE);
1719 dev_kfree_skb(skb);
1720 np->rx_skbuff[i] = NULL;
1721 }
1722 }
1723 for (i = 0; i < TX_RING_SIZE; i++) {
31f817e9 1724 np->tx_ring[i].next_desc = 0;
1725 skb = np->tx_skbuff[i];
1726 if (skb) {
1727 pci_unmap_single(np->pci_dev,
1728 np->tx_ring[i].frag[0].addr, skb->len,
1729 PCI_DMA_TODEVICE);
1730 dev_kfree_skb(skb);
1731 np->tx_skbuff[i] = NULL;
1732 }
1733 }
1734
1735 return 0;
1736}
1737
1738static void __devexit sundance_remove1 (struct pci_dev *pdev)
1739{
1740 struct net_device *dev = pci_get_drvdata(pdev);
1741
1742 if (dev) {
1743 struct netdev_private *np = netdev_priv(dev);
1744
1745 unregister_netdev(dev);
1746 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
1747 np->rx_ring_dma);
1748 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
1749 np->tx_ring_dma);
1750 pci_iounmap(pdev, np->base);
1751 pci_release_regions(pdev);
1752 free_netdev(dev);
1753 pci_set_drvdata(pdev, NULL);
1754 }
1755}
1756
1757static struct pci_driver sundance_driver = {
1758 .name = DRV_NAME,
1759 .id_table = sundance_pci_tbl,
1760 .probe = sundance_probe1,
1761 .remove = __devexit_p(sundance_remove1),
1762};
1763
1764static int __init sundance_init(void)
1765{
1766/* when a module, this is printed whether or not devices are found in probe */
1767#ifdef MODULE
1768 printk(version);
1769#endif
29917620 1770 return pci_register_driver(&sundance_driver);
1771}
1772
1773static void __exit sundance_exit(void)
1774{
1775 pci_unregister_driver(&sundance_driver);
1776}
1777
1778module_init(sundance_init);
1779module_exit(sundance_exit);
1780
1781