drivers/net/sundance.c
1/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2/*
3 Written 1999-2000 by Donald Becker.
4
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
11
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
15 Annapolis MD 21403
16
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
 19 [link no longer provides useful info -jgarzik]
20 Archives of the mailing list are still available at
21 http://www.beowulf.org/pipermail/netdrivers/
 22
23*/
24
25#define DRV_NAME "sundance"
26#define DRV_VERSION "1.2"
27#define DRV_RELDATE "11-Sep-2006"
28
29
30/* The user-configurable values.
31 These may be modified when a driver module is loaded.*/
32static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
33/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
34 Typical is a 64 element hash table based on the Ethernet CRC. */
 35static const int multicast_filter_limit = 32;
36
37/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
38 Setting to > 1518 effectively disables this feature.
39 This chip can receive into offset buffers, so the Alpha does not
40 need a copy-align. */
41static int rx_copybreak;
42static int flowctrl=1;
43
44/* media[] specifies the media type the NIC operates at.
45 autosense Autosensing active media.
46 10mbps_hd 10Mbps half duplex.
47 10mbps_fd 10Mbps full duplex.
48 100mbps_hd 100Mbps half duplex.
49 100mbps_fd 100Mbps full duplex.
50 0 Autosensing active media.
51 1 10Mbps half duplex.
52 2 10Mbps full duplex.
53 3 100Mbps half duplex.
54 4 100Mbps full duplex.
55*/
56#define MAX_UNITS 8
57static char *media[MAX_UNITS];
58
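/* Illustrative example (not part of the original source): a hypothetical
   module load forcing 100 Mbps full duplex on the first card and autosensing
   on the second, with flow control off and more verbose logging:
       modprobe sundance media=100mbps_fd,autosense flowctrl=0 debug=3
   Both the media strings and the numeric codes 0-4 listed above are accepted. */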
59
60/* Operational parameters that are set at compile time. */
61
62/* Keep the ring sizes a power of two for compile efficiency.
63 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
64 Making the Tx ring too large decreases the effectiveness of channel
65 bonding and packet priority, and more than 128 requires modifying the
66 Tx error recovery.
67 Large receive rings merely waste memory. */
68#define TX_RING_SIZE 32
69#define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
70#define RX_RING_SIZE 64
71#define RX_BUDGET 32
72#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
73#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
74
75/* Operational parameters that usually are not changed. */
76/* Time in jiffies before concluding the transmitter is hung. */
77#define TX_TIMEOUT (4*HZ)
78#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
79
80/* Include files, designed to support most kernel versions 2.0.0 and later. */
81#include <linux/module.h>
82#include <linux/kernel.h>
83#include <linux/string.h>
84#include <linux/timer.h>
85#include <linux/errno.h>
86#include <linux/ioport.h>
87#include <linux/interrupt.h>
88#include <linux/pci.h>
89#include <linux/netdevice.h>
90#include <linux/etherdevice.h>
91#include <linux/skbuff.h>
92#include <linux/init.h>
93#include <linux/bitops.h>
94#include <asm/uaccess.h>
95#include <asm/processor.h> /* Processor type for cache alignment. */
96#include <asm/io.h>
97#include <linux/delay.h>
98#include <linux/spinlock.h>
 99#include <linux/dma-mapping.h>
100#include <linux/crc32.h>
101#include <linux/ethtool.h>
102#include <linux/mii.h>
103
104/* These identify the driver base version and may not be removed. */
105static const char version[] __devinitconst =
106 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
107 " Written by Donald Becker\n";
108
109MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
110MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
111MODULE_LICENSE("GPL");
112
113module_param(debug, int, 0);
114module_param(rx_copybreak, int, 0);
115module_param_array(media, charp, NULL, 0);
116module_param(flowctrl, int, 0);
117MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
118MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
119MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
120
121/*
122 Theory of Operation
123
124I. Board Compatibility
125
126This driver is designed for the Sundance Technologies "Alta" ST201 chip.
127
128II. Board-specific settings
129
130III. Driver operation
131
132IIIa. Ring buffers
133
134This driver uses two statically allocated fixed-size descriptor lists
135formed into rings by a branch from the final descriptor to the beginning of
136the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
137Some chips explicitly use only 2^N sized rings, while others use a
138'next descriptor' pointer that the driver forms into rings.
139
140IIIb/c. Transmit/Receive Structure
141
142This driver uses a zero-copy receive and transmit scheme.
143The driver allocates full frame size skbuffs for the Rx ring buffers at
144open() time and passes the skb->data field to the chip as receive data
145buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
146a fresh skbuff is allocated and the frame is copied to the new skbuff.
147When the incoming frame is larger, the skbuff is passed directly up the
148protocol stack. Buffers consumed this way are replaced by newly allocated
149skbuffs in a later phase of receives.
150
151The RX_COPYBREAK value is chosen to trade-off the memory wasted by
152using a full-sized skbuff for small frames vs. the copying costs of larger
153frames. New boards are typically used in generously configured machines
154and the underfilled buffers have negligible impact compared to the benefit of
155a single allocation size, so the default value of zero results in never
156copying packets. When copying is done, the cost is usually mitigated by using
157a combined copy/checksum routine. Copying also preloads the cache, which is
158most useful with small frames.
159
160A subtle aspect of the operation is that the IP header at offset 14 in an
161ethernet frame isn't longword aligned for further processing.
162Unaligned buffers are permitted by the Sundance hardware, so
163frames are received into the skbuff at an offset of "+2", 16-byte aligning
164the IP header.
165
166IIId. Synchronization
167
168The driver runs as two independent, single-threaded flows of control. One
169is the send-packet routine, which enforces single-threaded use by the
170dev->tbusy flag. The other thread is the interrupt handler, which is single
171threaded by the hardware and interrupt handling software.
172
173The send packet thread has partial control over the Tx ring and 'dev->tbusy'
174flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
 175 queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
176the 'lp->tx_full' flag.
177
178The interrupt handler has exclusive control over the Rx ring and records stats
179from the Tx ring. After reaping the stats, it marks the Tx queue entry as
180empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
181clears both the tx_full and tbusy flags.
182
183IV. Notes
184
185IVb. References
186
187The Sundance ST201 datasheet, preliminary version.
188The Kendin KS8723 datasheet, preliminary version.
189The ICplus IP100 datasheet, preliminary version.
190http://www.scyld.com/expert/100mbps.html
191http://www.scyld.com/expert/NWay.html
192
193IVc. Errata
194
195*/
196
197/* Work-around for Kendin chip bugs. */
198#ifndef CONFIG_SUNDANCE_MMIO
199#define USE_IO_OPS 1
200#endif
201
 202static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
203 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
204 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
205 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
206 { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
207 { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
208 { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
209 { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
210 { }
211};
212MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
213
214enum {
215 netdev_io_size = 128
216};
217
218struct pci_id_info {
219 const char *name;
220};
 221static const struct pci_id_info pci_id_tbl[] __devinitdata = {
222 {"D-Link DFE-550TX FAST Ethernet Adapter"},
223 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
224 {"D-Link DFE-580TX 4 port Server Adapter"},
225 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
226 {"D-Link DL10050-based FAST Ethernet Adapter"},
227 {"Sundance Technology Alta"},
 228 {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
 229 { } /* terminate list. */
230};
231
232/* This driver was written to use PCI memory space, however x86-oriented
233 hardware often uses I/O space accesses. */
234
235/* Offsets to the device registers.
236 Unlike software-only systems, device drivers interact with complex hardware.
237 It's not useful to define symbolic names for every register bit in the
238 device. The name can only partially document the semantics and make
239 the driver longer and more difficult to read.
240 In general, only the important configuration values or bits changed
241 multiple times should be defined symbolically.
242*/
243enum alta_offsets {
244 DMACtrl = 0x00,
245 TxListPtr = 0x04,
246 TxDMABurstThresh = 0x08,
247 TxDMAUrgentThresh = 0x09,
248 TxDMAPollPeriod = 0x0a,
249 RxDMAStatus = 0x0c,
250 RxListPtr = 0x10,
251 DebugCtrl0 = 0x1a,
252 DebugCtrl1 = 0x1c,
253 RxDMABurstThresh = 0x14,
254 RxDMAUrgentThresh = 0x15,
255 RxDMAPollPeriod = 0x16,
256 LEDCtrl = 0x1a,
257 ASICCtrl = 0x30,
258 EEData = 0x34,
259 EECtrl = 0x36,
260 FlashAddr = 0x40,
261 FlashData = 0x44,
262 TxStatus = 0x46,
263 TxFrameId = 0x47,
264 DownCounter = 0x18,
265 IntrClear = 0x4a,
266 IntrEnable = 0x4c,
267 IntrStatus = 0x4e,
268 MACCtrl0 = 0x50,
269 MACCtrl1 = 0x52,
270 StationAddr = 0x54,
271 MaxFrameSize = 0x5A,
272 RxMode = 0x5c,
273 MIICtrl = 0x5e,
274 MulticastFilter0 = 0x60,
275 MulticastFilter1 = 0x64,
276 RxOctetsLow = 0x68,
277 RxOctetsHigh = 0x6a,
278 TxOctetsLow = 0x6c,
279 TxOctetsHigh = 0x6e,
280 TxFramesOK = 0x70,
281 RxFramesOK = 0x72,
282 StatsCarrierError = 0x74,
283 StatsLateColl = 0x75,
284 StatsMultiColl = 0x76,
285 StatsOneColl = 0x77,
286 StatsTxDefer = 0x78,
287 RxMissed = 0x79,
288 StatsTxXSDefer = 0x7a,
289 StatsTxAbort = 0x7b,
290 StatsBcastTx = 0x7c,
291 StatsBcastRx = 0x7d,
292 StatsMcastTx = 0x7e,
293 StatsMcastRx = 0x7f,
294 /* Aliased and bogus values! */
295 RxStatus = 0x0c,
296};
297enum ASICCtrl_HiWord_bit {
298 GlobalReset = 0x0001,
299 RxReset = 0x0002,
300 TxReset = 0x0004,
301 DMAReset = 0x0008,
302 FIFOReset = 0x0010,
303 NetworkReset = 0x0020,
304 HostReset = 0x0040,
305 ResetBusy = 0x0400,
306};
307
308/* Bits in the interrupt status/mask registers. */
309enum intr_status_bits {
310 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
311 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
312 IntrDrvRqst=0x0040,
313 StatsMax=0x0080, LinkChange=0x0100,
314 IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
315};
316
317/* Bits in the RxMode register. */
318enum rx_mode_bits {
319 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
320 AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
321};
322/* Bits in MACCtrl. */
323enum mac_ctrl0_bits {
324 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
325 EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
326};
327enum mac_ctrl1_bits {
328 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
329 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
330 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
331};
332
333/* The Rx and Tx buffer descriptors. */
334/* Note that using only 32 bit fields simplifies conversion to big-endian
335 architectures. */
336struct netdev_desc {
337 __le32 next_desc;
338 __le32 status;
339 struct desc_frag { __le32 addr, length; } frag[1];
340};
341
342/* Bits in netdev_desc.status */
343enum desc_status_bits {
344 DescOwn=0x8000,
345 DescEndPacket=0x4000,
346 DescEndRing=0x2000,
347 LastFrag=0x80000000,
348 DescIntrOnTx=0x8000,
349 DescIntrOnDMADone=0x80000000,
350 DisableAlign = 0x00000001,
351};
352
353#define PRIV_ALIGN 15 /* Required alignment mask */
354/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
355 within the structure. */
356#define MII_CNT 4
357struct netdev_private {
358 /* Descriptor rings first for alignment. */
359 struct netdev_desc *rx_ring;
360 struct netdev_desc *tx_ring;
361 struct sk_buff* rx_skbuff[RX_RING_SIZE];
362 struct sk_buff* tx_skbuff[TX_RING_SIZE];
363 dma_addr_t tx_ring_dma;
364 dma_addr_t rx_ring_dma;
365 struct timer_list timer; /* Media monitoring timer. */
366 /* Frequently used values: keep some adjacent for cache effect. */
367 spinlock_t lock;
368 int msg_enable;
369 int chip_id;
370 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
371 unsigned int rx_buf_sz; /* Based on MTU+slack. */
372 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
373 unsigned int cur_tx, dirty_tx;
 374 /* These values keep track of the transceiver/media in use. */
375 unsigned int flowctrl:1;
376 unsigned int default_port:4; /* Last dev->if_port value. */
377 unsigned int an_enable:1;
378 unsigned int speed;
379 struct tasklet_struct rx_tasklet;
380 struct tasklet_struct tx_tasklet;
381 int budget;
382 int cur_task;
383 /* Multicast and receive mode. */
384 spinlock_t mcastlock; /* SMP lock multicast updates. */
385 u16 mcast_filter[4];
386 /* MII transceiver section. */
387 struct mii_if_info mii_if;
388 int mii_preamble_required;
389 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
390 struct pci_dev *pci_dev;
391 void __iomem *base;
 392 spinlock_t statlock;
393};
394
395/* The station address location in the EEPROM. */
396#define EEPROM_SA_OFFSET 0x10
397#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
398 IntrDrvRqst | IntrTxDone | StatsMax | \
399 LinkChange)
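/* DEFAULT_INTR enables Rx DMA completion, PCI error, driver request, Tx
   completion, statistics overflow and link-change interrupts. Plain
   IntrRxDone is left out of the mask: receive processing is driven from
   the rx tasklet, which is scheduled on IntrRxDMADone. */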
400
401static int change_mtu(struct net_device *dev, int new_mtu);
402static int eeprom_read(void __iomem *ioaddr, int location);
403static int mdio_read(struct net_device *dev, int phy_id, int location);
404static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 405static int mdio_wait_link(struct net_device *dev, int wait);
406static int netdev_open(struct net_device *dev);
407static void check_duplex(struct net_device *dev);
408static void netdev_timer(unsigned long data);
409static void tx_timeout(struct net_device *dev);
410static void init_ring(struct net_device *dev);
 411static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
 412static int reset_tx (struct net_device *dev);
 413static irqreturn_t intr_handler(int irq, void *dev_instance);
414static void rx_poll(unsigned long data);
415static void tx_poll(unsigned long data);
416static void refill_rx (struct net_device *dev);
 417static void netdev_error(struct net_device *dev, int intr_status);
419static void set_rx_mode(struct net_device *dev);
420static int __set_mac_addr(struct net_device *dev);
421static struct net_device_stats *get_stats(struct net_device *dev);
422static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
423static int netdev_close(struct net_device *dev);
 424static const struct ethtool_ops ethtool_ops;
 425
426static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
427{
428 struct netdev_private *np = netdev_priv(dev);
429 void __iomem *ioaddr = np->base + ASICCtrl;
430 int countdown;
431
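	/* The reset request bits live in the high half of the 32-bit ASICCtrl
	   register (see ASICCtrl_HiWord_bit), which is why callers pass their
	   mask already shifted left by 16. */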
432 /* ST201 documentation states ASICCtrl is a 32bit register */
433 iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
434 /* ST201 documentation states reset can take up to 1 ms */
435 countdown = 10 + 1;
436 while (ioread32 (ioaddr) & (ResetBusy << 16)) {
437 if (--countdown == 0) {
438 printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
439 break;
440 }
441 udelay(100);
442 }
443}
444
445static const struct net_device_ops netdev_ops = {
446 .ndo_open = netdev_open,
447 .ndo_stop = netdev_close,
448 .ndo_start_xmit = start_tx,
449 .ndo_get_stats = get_stats,
450 .ndo_set_multicast_list = set_rx_mode,
451 .ndo_do_ioctl = netdev_ioctl,
452 .ndo_tx_timeout = tx_timeout,
453 .ndo_change_mtu = change_mtu,
454 .ndo_set_mac_address = eth_mac_addr,
455 .ndo_validate_addr = eth_validate_addr,
456};
457
458static int __devinit sundance_probe1 (struct pci_dev *pdev,
459 const struct pci_device_id *ent)
460{
461 struct net_device *dev;
462 struct netdev_private *np;
463 static int card_idx;
464 int chip_idx = ent->driver_data;
465 int irq;
466 int i;
467 void __iomem *ioaddr;
468 u16 mii_ctl;
469 void *ring_space;
470 dma_addr_t ring_dma;
471#ifdef USE_IO_OPS
472 int bar = 0;
473#else
474 int bar = 1;
475#endif
 476 int phy, phy_end, phy_idx = 0;
477
478/* when built into the kernel, we only print version if device is found */
479#ifndef MODULE
480 static int printed_version;
481 if (!printed_version++)
482 printk(version);
483#endif
484
485 if (pci_enable_device(pdev))
486 return -EIO;
487 pci_set_master(pdev);
488
489 irq = pdev->irq;
490
491 dev = alloc_etherdev(sizeof(*np));
492 if (!dev)
493 return -ENOMEM;
494 SET_NETDEV_DEV(dev, &pdev->dev);
495
496 if (pci_request_regions(pdev, DRV_NAME))
497 goto err_out_netdev;
498
499 ioaddr = pci_iomap(pdev, bar, netdev_io_size);
500 if (!ioaddr)
501 goto err_out_res;
502
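	/* The station (MAC) address is stored in the serial EEPROM as three
	   little-endian 16-bit words starting at EEPROM_SA_OFFSET. */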
503 for (i = 0; i < 3; i++)
504 ((__le16 *)dev->dev_addr)[i] =
505 cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
 506 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
507
508 dev->base_addr = (unsigned long)ioaddr;
509 dev->irq = irq;
510
511 np = netdev_priv(dev);
512 np->base = ioaddr;
513 np->pci_dev = pdev;
514 np->chip_id = chip_idx;
515 np->msg_enable = (1 << debug) - 1;
516 spin_lock_init(&np->lock);
 517 spin_lock_init(&np->statlock);
518 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
519 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
520
521 ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
522 &ring_dma, GFP_KERNEL);
523 if (!ring_space)
524 goto err_out_cleardev;
525 np->tx_ring = (struct netdev_desc *)ring_space;
526 np->tx_ring_dma = ring_dma;
527
528 ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
529 &ring_dma, GFP_KERNEL);
530 if (!ring_space)
531 goto err_out_unmap_tx;
532 np->rx_ring = (struct netdev_desc *)ring_space;
533 np->rx_ring_dma = ring_dma;
534
535 np->mii_if.dev = dev;
536 np->mii_if.mdio_read = mdio_read;
537 np->mii_if.mdio_write = mdio_write;
538 np->mii_if.phy_id_mask = 0x1f;
539 np->mii_if.reg_num_mask = 0x1f;
540
541 /* The chip-specific entries in the device structure. */
 542 dev->netdev_ops = &netdev_ops;
 543 SET_ETHTOOL_OPS(dev, &ethtool_ops);
 544 dev->watchdog_timeo = TX_TIMEOUT;
 545
546 pci_set_drvdata(pdev, dev);
547
548 i = register_netdev(dev);
549 if (i)
550 goto err_out_unmap_rx;
551
 552 printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
 553 dev->name, pci_id_tbl[chip_idx].name, ioaddr,
 554 dev->dev_addr, irq);
 555
556 np->phys[0] = 1; /* Default setting */
557 np->mii_preamble_required++;
 558
559 /*
 560 * It seems some PHYs don't deal well with address 0 being accessed
 561 * first
 562 */
563 if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
564 phy = 0;
565 phy_end = 31;
566 } else {
567 phy = 1;
568 phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */
569 }
570 for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
 571 int phyx = phy & 0x1f;
 572 int mii_status = mdio_read(dev, phyx, MII_BMSR);
 573 if (mii_status != 0xffff && mii_status != 0x0000) {
574 np->phys[phy_idx++] = phyx;
575 np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
576 if ((mii_status & 0x0040) == 0)
577 np->mii_preamble_required++;
578 printk(KERN_INFO "%s: MII PHY found at address %d, status "
579 "0x%4.4x advertising %4.4x.\n",
 580 dev->name, phyx, mii_status, np->mii_if.advertising);
 581 }
582 }
583 np->mii_preamble_required--;
 584
585 if (phy_idx == 0) {
586 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
587 dev->name, ioread32(ioaddr + ASICCtrl));
588 goto err_out_unregister;
589 }
590
67ec2f80
JL
591 np->mii_if.phy_id = np->phys[0];
592
593 /* Parse override configuration */
594 np->an_enable = 1;
595 if (card_idx < MAX_UNITS) {
596 if (media[card_idx] != NULL) {
597 np->an_enable = 0;
598 if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
599 strcmp (media[card_idx], "4") == 0) {
600 np->speed = 100;
601 np->mii_if.full_duplex = 1;
602 } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
603 strcmp (media[card_idx], "3") == 0) {
604 np->speed = 100;
605 np->mii_if.full_duplex = 0;
606 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
607 strcmp (media[card_idx], "2") == 0) {
608 np->speed = 10;
609 np->mii_if.full_duplex = 1;
610 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
611 strcmp (media[card_idx], "1") == 0) {
612 np->speed = 10;
613 np->mii_if.full_duplex = 0;
614 } else {
615 np->an_enable = 1;
616 }
617 }
618 if (flowctrl == 1)
619 np->flowctrl = 1;
620 }
621
622 /* Fibre PHY? */
623 if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
624 /* Default 100Mbps Full */
625 if (np->an_enable) {
626 np->speed = 100;
627 np->mii_if.full_duplex = 1;
628 np->an_enable = 0;
629 }
630 }
631 /* Reset PHY */
632 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
633 mdelay (300);
634 /* If flow control enabled, we need to advertise it.*/
635 if (np->flowctrl)
636 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
637 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
638 /* Force media type */
639 if (!np->an_enable) {
640 mii_ctl = 0;
641 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
642 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
643 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
644 printk (KERN_INFO "Override speed=%d, %s duplex\n",
645 np->speed, np->mii_if.full_duplex ? "Full" : "Half");
646
647 }
648
649 /* Perhaps move the reset here? */
650 /* Reset the chip to erase previous misconfiguration. */
651 if (netif_msg_hw(np))
652 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
 653 sundance_reset(dev, 0x00ff << 16);
654 if (netif_msg_hw(np))
655 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
656
657 card_idx++;
658 return 0;
659
660err_out_unregister:
661 unregister_netdev(dev);
662err_out_unmap_rx:
663 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
664 np->rx_ring, np->rx_ring_dma);
 665err_out_unmap_tx:
666 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
667 np->tx_ring, np->tx_ring_dma);
668err_out_cleardev:
669 pci_set_drvdata(pdev, NULL);
670 pci_iounmap(pdev, ioaddr);
671err_out_res:
672 pci_release_regions(pdev);
673err_out_netdev:
674 free_netdev (dev);
675 return -ENODEV;
676}
677
678static int change_mtu(struct net_device *dev, int new_mtu)
679{
680 if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
681 return -EINVAL;
682 if (netif_running(dev))
683 return -EBUSY;
684 dev->mtu = new_mtu;
685 return 0;
686}
687
688#define eeprom_delay(ee_addr) ioread32(ee_addr)
689/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
690static int __devinit eeprom_read(void __iomem *ioaddr, int location)
691{
692 int boguscnt = 10000; /* Typical 1900 ticks. */
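	/* Issue a read command for the requested word and poll the busy bit
	   (0x8000) in EECtrl; the data word is then latched in EEData. */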
693 iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
694 do {
695 eeprom_delay(ioaddr + EECtrl);
696 if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
697 return ioread16(ioaddr + EEData);
698 }
699 } while (--boguscnt > 0);
700 return 0;
701}
702
703/* MII transceiver control section.
704 Read and write the MII registers using software-generated serial
705 MDIO protocol. See the MII specifications or DP83840A data sheet
706 for details.
707
 708 The maximum data clock rate is 2.5 MHz. The minimum timing is usually
 709 met by back-to-back 33 MHz PCI cycles. */
710#define mdio_delay() ioread8(mdio_addr)
711
712enum mii_reg_bits {
713 MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
714};
715#define MDIO_EnbIn (0)
716#define MDIO_WRITE0 (MDIO_EnbOutput)
717#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
718
719/* Generate the preamble required for initial synchronization and
720 a few older transceivers. */
721static void mdio_sync(void __iomem *mdio_addr)
722{
723 int bits = 32;
724
725 /* Establish sync by sending at least 32 logic ones. */
726 while (--bits >= 0) {
727 iowrite8(MDIO_WRITE1, mdio_addr);
728 mdio_delay();
729 iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
730 mdio_delay();
731 }
732}
733
734static int mdio_read(struct net_device *dev, int phy_id, int location)
735{
736 struct netdev_private *np = netdev_priv(dev);
737 void __iomem *mdio_addr = np->base + MIICtrl;
738 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
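	/* Read frame, shifted out MSB first below: the 0xf6 constant appears to
	   supply two extra preamble 1s plus the start (01) and read (10) codes,
	   followed by the 5-bit PHY address and 5-bit register number. */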
739 int i, retval = 0;
740
741 if (np->mii_preamble_required)
742 mdio_sync(mdio_addr);
743
744 /* Shift the read command bits out. */
745 for (i = 15; i >= 0; i--) {
746 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
747
748 iowrite8(dataval, mdio_addr);
749 mdio_delay();
750 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
751 mdio_delay();
752 }
753 /* Read the two transition, 16 data, and wire-idle bits. */
754 for (i = 19; i > 0; i--) {
755 iowrite8(MDIO_EnbIn, mdio_addr);
756 mdio_delay();
757 retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
758 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
759 mdio_delay();
760 }
761 return (retval>>1) & 0xffff;
762}
763
764static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
765{
766 struct netdev_private *np = netdev_priv(dev);
767 void __iomem *mdio_addr = np->base + MIICtrl;
768 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
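	/* Write frame: start (01), write opcode (01), 5-bit PHY address,
	   5-bit register number, turnaround (10), then the 16 data bits. */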
769 int i;
770
771 if (np->mii_preamble_required)
772 mdio_sync(mdio_addr);
773
774 /* Shift the command bits out. */
775 for (i = 31; i >= 0; i--) {
776 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
777
778 iowrite8(dataval, mdio_addr);
779 mdio_delay();
780 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
781 mdio_delay();
782 }
783 /* Clear out extra bits. */
784 for (i = 2; i > 0; i--) {
785 iowrite8(MDIO_EnbIn, mdio_addr);
786 mdio_delay();
787 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
788 mdio_delay();
789 }
790}
791
792static int mdio_wait_link(struct net_device *dev, int wait)
793{
794 int bmsr;
795 int phy_id;
796 struct netdev_private *np;
797
798 np = netdev_priv(dev);
799 phy_id = np->phys[0];
800
801 do {
802 bmsr = mdio_read(dev, phy_id, MII_BMSR);
803 if (bmsr & 0x0004)
804 return 0;
805 mdelay(1);
806 } while (--wait > 0);
807 return -1;
808}
809
810static int netdev_open(struct net_device *dev)
811{
812 struct netdev_private *np = netdev_priv(dev);
813 void __iomem *ioaddr = np->base;
 814 unsigned long flags;
815 int i;
816
817 /* Do we need to reset the chip??? */
818
 819 i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
820 if (i)
821 return i;
822
823 if (netif_msg_ifup(np))
824 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
825 dev->name, dev->irq);
826 init_ring(dev);
827
828 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
829 /* The Tx list pointer is written as packets are queued. */
830
831 /* Initialize other registers. */
832 __set_mac_addr(dev);
833#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
834 iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
835#else
836 iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
837#endif
838 if (dev->mtu > 2047)
839 iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
840
841 /* Configure the PCI bus bursts and FIFO thresholds. */
842
843 if (dev->if_port == 0)
844 dev->if_port = np->default_port;
845
846 spin_lock_init(&np->mcastlock);
847
848 set_rx_mode(dev);
849 iowrite16(0, ioaddr + IntrEnable);
850 iowrite16(0, ioaddr + DownCounter);
851 /* Set the chip to poll every N*320nsec. */
852 iowrite8(100, ioaddr + RxDMAPollPeriod);
853 iowrite8(127, ioaddr + TxDMAPollPeriod);
854 /* Fix DFE-580TX packet drop issue */
 855 if (np->pci_dev->revision >= 0x14)
856 iowrite8(0x01, ioaddr + DebugCtrl1);
857 netif_start_queue(dev);
858
859 spin_lock_irqsave(&np->lock, flags);
860 reset_tx(dev);
861 spin_unlock_irqrestore(&np->lock, flags);
862
863 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
864
865 if (netif_msg_ifup(np))
866 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
867 "MAC Control %x, %4.4x %4.4x.\n",
868 dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
869 ioread32(ioaddr + MACCtrl0),
870 ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
871
872 /* Set the timer to check for link beat. */
873 init_timer(&np->timer);
874 np->timer.expires = jiffies + 3*HZ;
875 np->timer.data = (unsigned long)dev;
 876 np->timer.function = netdev_timer; /* timer handler */
877 add_timer(&np->timer);
878
879 /* Enable interrupts by setting the interrupt mask. */
880 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
881
882 return 0;
883}
884
885static void check_duplex(struct net_device *dev)
886{
887 struct netdev_private *np = netdev_priv(dev);
888 void __iomem *ioaddr = np->base;
889 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
890 int negotiated = mii_lpa & np->mii_if.advertising;
891 int duplex;
892
893 /* Force media */
894 if (!np->an_enable || mii_lpa == 0xffff) {
895 if (np->mii_if.full_duplex)
896 iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
897 ioaddr + MACCtrl0);
898 return;
899 }
900
901 /* Autonegotiation */
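	/* LPA bit 0x0100 is 100BASE-TX full duplex; 0x0040 with no 100 Mbps
	   bits set (mask 0x01C0) is 10BASE-T full duplex. Either case means
	   the negotiated link is full duplex. */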
902 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
903 if (np->mii_if.full_duplex != duplex) {
904 np->mii_if.full_duplex = duplex;
905 if (netif_msg_link(np))
906 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
907 "negotiated capability %4.4x.\n", dev->name,
908 duplex ? "full" : "half", np->phys[0], negotiated);
 909 iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
910 }
911}
912
913static void netdev_timer(unsigned long data)
914{
915 struct net_device *dev = (struct net_device *)data;
916 struct netdev_private *np = netdev_priv(dev);
917 void __iomem *ioaddr = np->base;
918 int next_tick = 10*HZ;
919
920 if (netif_msg_timer(np)) {
921 printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
922 "Tx %x Rx %x.\n",
923 dev->name, ioread16(ioaddr + IntrEnable),
924 ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
925 }
926 check_duplex(dev);
927 np->timer.expires = jiffies + next_tick;
928 add_timer(&np->timer);
929}
930
931static void tx_timeout(struct net_device *dev)
932{
933 struct netdev_private *np = netdev_priv(dev);
934 void __iomem *ioaddr = np->base;
935 unsigned long flag;
 936
937 netif_stop_queue(dev);
938 tasklet_disable(&np->tx_tasklet);
939 iowrite16(0, ioaddr + IntrEnable);
940 printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
941 "TxFrameId %2.2x,"
942 " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
943 ioread8(ioaddr + TxFrameId));
944
945 {
946 int i;
947 for (i=0; i<TX_RING_SIZE; i++) {
948 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
949 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
950 le32_to_cpu(np->tx_ring[i].next_desc),
951 le32_to_cpu(np->tx_ring[i].status),
952 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
 953 le32_to_cpu(np->tx_ring[i].frag[0].addr),
954 le32_to_cpu(np->tx_ring[i].frag[0].length));
955 }
956 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
957 ioread32(np->base + TxListPtr),
 958 netif_queue_stopped(dev));
 959 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
960 np->cur_tx, np->cur_tx % TX_RING_SIZE,
961 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
962 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
963 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
964 }
965 spin_lock_irqsave(&np->lock, flag);
966
967 /* Stop and restart the chip's Tx processes . */
968 reset_tx(dev);
969 spin_unlock_irqrestore(&np->lock, flag);
970
971 dev->if_port = 0;
972
 973 dev->trans_start = jiffies; /* prevent tx timeout */
 974 dev->stats.tx_errors++;
975 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
976 netif_wake_queue(dev);
977 }
978 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
979 tasklet_enable(&np->tx_tasklet);
980}
981
982
983/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
984static void init_ring(struct net_device *dev)
985{
986 struct netdev_private *np = netdev_priv(dev);
987 int i;
988
989 np->cur_rx = np->cur_tx = 0;
990 np->dirty_rx = np->dirty_tx = 0;
991 np->cur_task = 0;
992
993 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
994
995 /* Initialize all Rx descriptors. */
996 for (i = 0; i < RX_RING_SIZE; i++) {
997 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
998 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
999 np->rx_ring[i].status = 0;
1000 np->rx_ring[i].frag[0].length = 0;
1001 np->rx_skbuff[i] = NULL;
1002 }
1003
1004 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1005 for (i = 0; i < RX_RING_SIZE; i++) {
1006 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1007 np->rx_skbuff[i] = skb;
1008 if (skb == NULL)
1009 break;
1010 skb->dev = dev; /* Mark as being used by this device. */
1011 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1012 np->rx_ring[i].frag[0].addr = cpu_to_le32(
1013 dma_map_single(&np->pci_dev->dev, skb->data,
1014 np->rx_buf_sz, DMA_FROM_DEVICE));
1015 if (dma_mapping_error(&np->pci_dev->dev,
1016 np->rx_ring[i].frag[0].addr)) {
1017 dev_kfree_skb(skb);
1018 np->rx_skbuff[i] = NULL;
1019 break;
1020 }
1021 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1022 }
1023 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
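	/* If an allocation failed above, dirty_rx wraps below zero so that
	   refill_rx() later sees exactly the unfilled slots as pending work. */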
1024
1025 for (i = 0; i < TX_RING_SIZE; i++) {
1026 np->tx_skbuff[i] = NULL;
1027 np->tx_ring[i].status = 0;
1028 }
1029}
1030
1031static void tx_poll (unsigned long data)
1032{
1033 struct net_device *dev = (struct net_device *)data;
1034 struct netdev_private *np = netdev_priv(dev);
1035 unsigned head = np->cur_task % TX_RING_SIZE;
 1036 struct netdev_desc *txdesc =
 1037 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
 1038
1039 /* Chain the next pointer */
1040 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1041 int entry = np->cur_task % TX_RING_SIZE;
1042 txdesc = &np->tx_ring[entry];
1043 if (np->last_tx) {
1044 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1045 entry*sizeof(struct netdev_desc));
1046 }
1047 np->last_tx = txdesc;
1048 }
1049 /* Indicate the latest descriptor of tx ring */
1050 txdesc->status |= cpu_to_le32(DescIntrOnTx);
1051
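	/* Kick the Tx DMA engine only when its list pointer is idle (zero);
	   otherwise it will follow the next_desc chain linked above. */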
1052 if (ioread32 (np->base + TxListPtr) == 0)
1053 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1054 np->base + TxListPtr);
1055}
1056
 1057static netdev_tx_t
1058start_tx (struct sk_buff *skb, struct net_device *dev)
1059{
1060 struct netdev_private *np = netdev_priv(dev);
1061 struct netdev_desc *txdesc;
1062 unsigned entry;
1063
1064 /* Calculate the next Tx descriptor entry. */
1065 entry = np->cur_tx % TX_RING_SIZE;
1066 np->tx_skbuff[entry] = skb;
1067 txdesc = &np->tx_ring[entry];
1068
1069 txdesc->next_desc = 0;
1070 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
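	/* The ring index shifted into bits 2 and up serves as the software
	   frame id that intr_handler() compares with the chip's TxFrameId. */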
1071 txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1072 skb->data, skb->len, DMA_TO_DEVICE));
1073 if (dma_mapping_error(&np->pci_dev->dev,
1074 txdesc->frag[0].addr))
1075 goto drop_frame;
1076 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1077
1078 /* Increment cur_tx before tasklet_schedule() */
1079 np->cur_tx++;
1080 mb();
1081 /* Schedule a tx_poll() task */
1082 tasklet_schedule(&np->tx_tasklet);
1083
1084 /* On some architectures: explicitly flush cache lines here. */
1085 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1086 !netif_queue_stopped(dev)) {
1087 /* do nothing */
1088 } else {
1089 netif_stop_queue (dev);
1090 }
1091 if (netif_msg_tx_queued(np)) {
1092 printk (KERN_DEBUG
1093 "%s: Transmit frame #%d queued in slot %d.\n",
1094 dev->name, np->cur_tx, entry);
1095 }
 1096 return NETDEV_TX_OK;
1097
1098drop_frame:
1099 dev_kfree_skb(skb);
1100 np->tx_skbuff[entry] = NULL;
1101 dev->stats.tx_dropped++;
1102 return NETDEV_TX_OK;
1103}
1104
1105/* Reset hardware tx and free all of tx buffers */
1106static int
1107reset_tx (struct net_device *dev)
1108{
1109 struct netdev_private *np = netdev_priv(dev);
1110 void __iomem *ioaddr = np->base;
1111 struct sk_buff *skb;
1112 int i;
 1113
1114 /* Reset tx logic, TxListPtr will be cleaned */
1115 iowrite16 (TxDisable, ioaddr + MACCtrl1);
1116 sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1117
1118 /* free all tx skbuff */
1119 for (i = 0; i < TX_RING_SIZE; i++) {
1120 np->tx_ring[i].next_desc = 0;
1121
1122 skb = np->tx_skbuff[i];
1123 if (skb) {
 1124 dma_unmap_single(&np->pci_dev->dev,
 1125 le32_to_cpu(np->tx_ring[i].frag[0].addr),
 1126 skb->len, DMA_TO_DEVICE);
 1127 dev_kfree_skb_any(skb);
 1128 np->tx_skbuff[i] = NULL;
 1129 dev->stats.tx_dropped++;
1130 }
1131 }
1132 np->cur_tx = np->dirty_tx = 0;
1133 np->cur_task = 0;
 1134
 1135 np->last_tx = NULL;
1136 iowrite8(127, ioaddr + TxDMAPollPeriod);
1137
1138 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1139 return 0;
1140}
1141
 1142/* The interrupt handler cleans up after the Tx thread
 1143 and schedules the Rx work (rx tasklet). */
 1144static irqreturn_t intr_handler(int irq, void *dev_instance)
1145{
1146 struct net_device *dev = (struct net_device *)dev_instance;
1147 struct netdev_private *np = netdev_priv(dev);
1148 void __iomem *ioaddr = np->base;
1149 int hw_frame_id;
1150 int tx_cnt;
1151 int tx_status;
1152 int handled = 0;
 1153 int i;
1154
1155
1156 do {
1157 int intr_status = ioread16(ioaddr + IntrStatus);
1158 iowrite16(intr_status, ioaddr + IntrStatus);
1159
1160 if (netif_msg_intr(np))
1161 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1162 dev->name, intr_status);
1163
1164 if (!(intr_status & DEFAULT_INTR))
1165 break;
1166
1167 handled = 1;
1168
1169 if (intr_status & (IntrRxDMADone)) {
1170 iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1171 ioaddr + IntrEnable);
1172 if (np->budget < 0)
1173 np->budget = RX_BUDGET;
1174 tasklet_schedule(&np->rx_tasklet);
1175 }
1176 if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1177 tx_status = ioread16 (ioaddr + TxStatus);
1178 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1179 if (netif_msg_tx_done(np))
1180 printk
1181 ("%s: Transmit status is %2.2x.\n",
1182 dev->name, tx_status);
1183 if (tx_status & 0x1e) {
1184 if (netif_msg_tx_err(np))
1185 printk("%s: Transmit error status %4.4x.\n",
1186 dev->name, tx_status);
 1187 dev->stats.tx_errors++;
 1188 if (tx_status & 0x10)
 1189 dev->stats.tx_fifo_errors++;
 1190 if (tx_status & 0x08)
 1191 dev->stats.collisions++;
 1192 if (tx_status & 0x04)
 1193 dev->stats.tx_fifo_errors++;
 1194 if (tx_status & 0x02)
 1195 dev->stats.tx_window_errors++;
 1196
1197 /*
1198 ** This reset has been verified on
1199 ** DFE-580TX boards ! phdm@macqel.be.
1200 */
1201 if (tx_status & 0x10) { /* TxUnderrun */
1202 /* Restart Tx FIFO and transmitter */
1203 sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
 1204 /* No need to reset the Tx pointer here */
 1205 }
1206 /* Restart the Tx. Need to make sure tx enabled */
1207 i = 10;
1208 do {
1209 iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1210 if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1211 break;
1212 mdelay(1);
1213 } while (--i);
1214 }
1215 /* Yup, this is a documentation bug. It cost me *hours*. */
1216 iowrite16 (0, ioaddr + TxStatus);
1217 if (tx_cnt < 0) {
1218 iowrite32(5000, ioaddr + DownCounter);
1219 break;
1220 }
1221 tx_status = ioread16 (ioaddr + TxStatus);
1222 }
1223 hw_frame_id = (tx_status >> 8) & 0xff;
1224 } else {
1225 hw_frame_id = ioread8(ioaddr + TxFrameId);
1226 }
 1227
 1228 if (np->pci_dev->revision >= 0x14) {
1229 spin_lock(&np->lock);
1230 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1231 int entry = np->dirty_tx % TX_RING_SIZE;
1232 struct sk_buff *skb;
1233 int sw_frame_id;
1234 sw_frame_id = (le32_to_cpu(
1235 np->tx_ring[entry].status) >> 2) & 0xff;
1236 if (sw_frame_id == hw_frame_id &&
1237 !(le32_to_cpu(np->tx_ring[entry].status)
1238 & 0x00010000))
1239 break;
 1240 if (sw_frame_id == (hw_frame_id + 1) %
1241 TX_RING_SIZE)
1242 break;
1243 skb = np->tx_skbuff[entry];
1244 /* Free the original skb. */
 1245 dma_unmap_single(&np->pci_dev->dev,
 1246 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
 1247 skb->len, DMA_TO_DEVICE);
1248 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1249 np->tx_skbuff[entry] = NULL;
1250 np->tx_ring[entry].frag[0].addr = 0;
1251 np->tx_ring[entry].frag[0].length = 0;
1252 }
1253 spin_unlock(&np->lock);
1254 } else {
1255 spin_lock(&np->lock);
1256 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1257 int entry = np->dirty_tx % TX_RING_SIZE;
1258 struct sk_buff *skb;
 1259 if (!(le32_to_cpu(np->tx_ring[entry].status)
1260 & 0x00010000))
1261 break;
1262 skb = np->tx_skbuff[entry];
1263 /* Free the original skb. */
 1264 dma_unmap_single(&np->pci_dev->dev,
 1265 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
 1266 skb->len, DMA_TO_DEVICE);
1267 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1268 np->tx_skbuff[entry] = NULL;
1269 np->tx_ring[entry].frag[0].addr = 0;
1270 np->tx_ring[entry].frag[0].length = 0;
1271 }
1272 spin_unlock(&np->lock);
1273 }
 1274
1275 if (netif_queue_stopped(dev) &&
1276 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1277 /* The ring is no longer full, clear busy flag. */
1278 netif_wake_queue (dev);
1279 }
1280 /* Abnormal error summary/uncommon events handlers. */
1281 if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1282 netdev_error(dev, intr_status);
1283 } while (0);
1284 if (netif_msg_intr(np))
1285 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1286 dev->name, ioread16(ioaddr + IntrStatus));
1287 return IRQ_RETVAL(handled);
1288}
1289
1290static void rx_poll(unsigned long data)
1291{
1292 struct net_device *dev = (struct net_device *)data;
1293 struct netdev_private *np = netdev_priv(dev);
1294 int entry = np->cur_rx % RX_RING_SIZE;
1295 int boguscnt = np->budget;
1296 void __iomem *ioaddr = np->base;
1297 int received = 0;
1298
1299 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1300 while (1) {
1301 struct netdev_desc *desc = &(np->rx_ring[entry]);
1302 u32 frame_status = le32_to_cpu(desc->status);
1303 int pkt_len;
1304
1305 if (--boguscnt < 0) {
1306 goto not_done;
1307 }
1308 if (!(frame_status & DescOwn))
1309 break;
1310 pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1311 if (netif_msg_rx_status(np))
1312 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1313 frame_status);
1314 if (frame_status & 0x001f4000) {
1315 /* There was a error. */
1316 if (netif_msg_rx_err(np))
1317 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1318 frame_status);
1319 dev->stats.rx_errors++;
1320 if (frame_status & 0x00100000)
1321 dev->stats.rx_length_errors++;
1322 if (frame_status & 0x00010000)
1323 dev->stats.rx_fifo_errors++;
1324 if (frame_status & 0x00060000)
1325 dev->stats.rx_frame_errors++;
1326 if (frame_status & 0x00080000)
1327 dev->stats.rx_crc_errors++;
1328 if (frame_status & 0x00100000) {
1329 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1330 " status %8.8x.\n",
1331 dev->name, frame_status);
1332 }
1333 } else {
1334 struct sk_buff *skb;
1335#ifndef final_version
1336 if (netif_msg_rx_status(np))
1337 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1338 ", bogus_cnt %d.\n",
1339 pkt_len, boguscnt);
1340#endif
1341 /* Check if the packet is long enough to accept without copying
1342 to a minimally-sized skbuff. */
1343 if (pkt_len < rx_copybreak &&
1344 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
 1345 skb_reserve(skb, 2); /* 16 byte align the IP header */
1346 dma_sync_single_for_cpu(&np->pci_dev->dev,
1347 le32_to_cpu(desc->frag[0].addr),
1348 np->rx_buf_sz, DMA_FROM_DEVICE);
 1349 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1350 dma_sync_single_for_device(&np->pci_dev->dev,
1351 le32_to_cpu(desc->frag[0].addr),
1352 np->rx_buf_sz, DMA_FROM_DEVICE);
1353 skb_put(skb, pkt_len);
1354 } else {
 1355 dma_unmap_single(&np->pci_dev->dev,
 1356 le32_to_cpu(desc->frag[0].addr),
 1357 np->rx_buf_sz, DMA_FROM_DEVICE);
1358 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1359 np->rx_skbuff[entry] = NULL;
1360 }
1361 skb->protocol = eth_type_trans(skb, dev);
1362 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1363 netif_rx(skb);
1364 }
1365 entry = (entry + 1) % RX_RING_SIZE;
1366 received++;
1367 }
1368 np->cur_rx = entry;
1369 refill_rx (dev);
1370 np->budget -= received;
1371 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1372 return;
1373
1374not_done:
1375 np->cur_rx = entry;
1376 refill_rx (dev);
1377 if (!received)
1378 received = 1;
1379 np->budget -= received;
1380 if (np->budget <= 0)
1381 np->budget = RX_BUDGET;
1382 tasklet_schedule(&np->rx_tasklet);
1383}
1384
1385static void refill_rx (struct net_device *dev)
1386{
1387 struct netdev_private *np = netdev_priv(dev);
1388 int entry;
1389 int cnt = 0;
1390
1391 /* Refill the Rx ring buffers. */
1392 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1393 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1394 struct sk_buff *skb;
1395 entry = np->dirty_rx % RX_RING_SIZE;
1396 if (np->rx_skbuff[entry] == NULL) {
1397 skb = dev_alloc_skb(np->rx_buf_sz);
1398 np->rx_skbuff[entry] = skb;
1399 if (skb == NULL)
1400 break; /* Better luck next round. */
1401 skb->dev = dev; /* Mark as being used by this device. */
1402 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1403 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1404 dma_map_single(&np->pci_dev->dev, skb->data,
1405 np->rx_buf_sz, DMA_FROM_DEVICE));
1406 if (dma_mapping_error(&np->pci_dev->dev,
1407 np->rx_ring[entry].frag[0].addr)) {
1408 dev_kfree_skb_irq(skb);
1409 np->rx_skbuff[entry] = NULL;
1410 break;
1411 }
1412 }
1413 /* Perhaps we need not reset this field. */
1414 np->rx_ring[entry].frag[0].length =
1415 cpu_to_le32(np->rx_buf_sz | LastFrag);
1416 np->rx_ring[entry].status = 0;
1417 cnt++;
1418 }
1419}
1420static void netdev_error(struct net_device *dev, int intr_status)
1421{
1422 struct netdev_private *np = netdev_priv(dev);
1423 void __iomem *ioaddr = np->base;
1424 u16 mii_ctl, mii_advertise, mii_lpa;
1425 int speed;
1426
1427 if (intr_status & LinkChange) {
1428 if (mdio_wait_link(dev, 10) == 0) {
1429 printk(KERN_INFO "%s: Link up\n", dev->name);
1430 if (np->an_enable) {
1431 mii_advertise = mdio_read(dev, np->phys[0],
1432 MII_ADVERTISE);
1433 mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1434 mii_advertise &= mii_lpa;
1435 printk(KERN_INFO "%s: Link changed: ",
1436 dev->name);
1437 if (mii_advertise & ADVERTISE_100FULL) {
1438 np->speed = 100;
1439 printk("100Mbps, full duplex\n");
1440 } else if (mii_advertise & ADVERTISE_100HALF) {
1441 np->speed = 100;
1442 printk("100Mbps, half duplex\n");
1443 } else if (mii_advertise & ADVERTISE_10FULL) {
1444 np->speed = 10;
1445 printk("10Mbps, full duplex\n");
1446 } else if (mii_advertise & ADVERTISE_10HALF) {
1447 np->speed = 10;
1448 printk("10Mbps, half duplex\n");
1449 } else
1450 printk("\n");
 1451
1452 } else {
1453 mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1454 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1455 np->speed = speed;
1456 printk(KERN_INFO "%s: Link changed: %dMbps ,",
1457 dev->name, speed);
1458 printk("%s duplex.\n",
1459 (mii_ctl & BMCR_FULLDPLX) ?
1460 "full" : "half");
1461 }
1462 check_duplex(dev);
1463 if (np->flowctrl && np->mii_if.full_duplex) {
1464 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1465 ioaddr + MulticastFilter1+2);
1466 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1467 ioaddr + MACCtrl0);
1468 }
1469 netif_carrier_on(dev);
 1470 } else {
1471 printk(KERN_INFO "%s: Link down\n", dev->name);
1472 netif_carrier_off(dev);
1473 }
1474 }
1475 if (intr_status & StatsMax) {
1476 get_stats(dev);
1477 }
1478 if (intr_status & IntrPCIErr) {
1479 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1480 dev->name, intr_status);
1481 /* We must do a global reset of DMA to continue. */
1482 }
1483}
1484
1485static struct net_device_stats *get_stats(struct net_device *dev)
1486{
1487 struct netdev_private *np = netdev_priv(dev);
1488 void __iomem *ioaddr = np->base;
1489 int i;
 1490 unsigned long flags;
 1491
 1492 spin_lock_irqsave(&np->statlock, flags);
 1493 /* The chip only needs to report frames it silently dropped. */
1494 dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1495 dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1496 dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1497 dev->stats.collisions += ioread8(ioaddr + StatsLateColl);
1498 dev->stats.collisions += ioread8(ioaddr + StatsMultiColl);
1499 dev->stats.collisions += ioread8(ioaddr + StatsOneColl);
1500 dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1501 ioread8(ioaddr + StatsTxDefer);
1502 for (i = StatsTxDefer; i <= StatsMcastRx; i++)
1503 ioread8(ioaddr + i);
1504 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1505 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1506 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1507 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
 1508
1509 spin_unlock_irqrestore(&np->statlock, flags);
1510
 1511 return &dev->stats;
1512}
1513
1514static void set_rx_mode(struct net_device *dev)
1515{
1516 struct netdev_private *np = netdev_priv(dev);
1517 void __iomem *ioaddr = np->base;
1518 u16 mc_filter[4]; /* Multicast hash filter */
1519 u32 rx_mode;
1520 int i;
1521
1522 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1523 memset(mc_filter, 0xff, sizeof(mc_filter));
1524 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
 1525 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
 1526 (dev->flags & IFF_ALLMULTI)) {
1527 /* Too many to match, or accept all multicasts. */
1528 memset(mc_filter, 0xff, sizeof(mc_filter));
1529 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
 1530 } else if (!netdev_mc_empty(dev)) {
 1531 struct netdev_hw_addr *ha;
1532 int bit;
1533 int index;
1534 int crc;
1535 memset (mc_filter, 0, sizeof (mc_filter));
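		/* The top six bits of the little-endian CRC of each address pick
		   one of the 64 hash-filter bits; mc_filter[] is written to the
		   MulticastFilter registers 16 bits at a time below. */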
1536 netdev_for_each_mc_addr(ha, dev) {
1537 crc = ether_crc_le(ETH_ALEN, ha->addr);
1538 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1539 if (crc & 0x80000000) index |= 1 << bit;
1540 mc_filter[index/16] |= (1 << (index % 16));
1541 }
1542 rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1543 } else {
1544 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1545 return;
1546 }
1547 if (np->mii_if.full_duplex && np->flowctrl)
1548 mc_filter[3] |= 0x0200;
1549
1550 for (i = 0; i < 4; i++)
1551 iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1552 iowrite8(rx_mode, ioaddr + RxMode);
1553}
1554
1555static int __set_mac_addr(struct net_device *dev)
1556{
1557 struct netdev_private *np = netdev_priv(dev);
1558 u16 addr16;
1559
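	/* The station address is programmed as three little-endian 16-bit
	   words at StationAddr, StationAddr+2 and StationAddr+4. */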
1560 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1561 iowrite16(addr16, np->base + StationAddr);
1562 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1563 iowrite16(addr16, np->base + StationAddr+2);
1564 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1565 iowrite16(addr16, np->base + StationAddr+4);
1566 return 0;
1567}
1568
1569static int check_if_running(struct net_device *dev)
1570{
1571 if (!netif_running(dev))
1572 return -EINVAL;
1573 return 0;
1574}
1575
1576static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1577{
1578 struct netdev_private *np = netdev_priv(dev);
1579 strcpy(info->driver, DRV_NAME);
1580 strcpy(info->version, DRV_VERSION);
1581 strcpy(info->bus_info, pci_name(np->pci_dev));
1582}
1583
1584static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1585{
1586 struct netdev_private *np = netdev_priv(dev);
1587 spin_lock_irq(&np->lock);
1588 mii_ethtool_gset(&np->mii_if, ecmd);
1589 spin_unlock_irq(&np->lock);
1590 return 0;
1591}
1592
1593static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1594{
1595 struct netdev_private *np = netdev_priv(dev);
1596 int res;
1597 spin_lock_irq(&np->lock);
1598 res = mii_ethtool_sset(&np->mii_if, ecmd);
1599 spin_unlock_irq(&np->lock);
1600 return res;
1601}
1602
1603static int nway_reset(struct net_device *dev)
1604{
1605 struct netdev_private *np = netdev_priv(dev);
1606 return mii_nway_restart(&np->mii_if);
1607}
1608
1609static u32 get_link(struct net_device *dev)
1610{
1611 struct netdev_private *np = netdev_priv(dev);
1612 return mii_link_ok(&np->mii_if);
1613}
1614
1615static u32 get_msglevel(struct net_device *dev)
1616{
1617 struct netdev_private *np = netdev_priv(dev);
1618 return np->msg_enable;
1619}
1620
1621static void set_msglevel(struct net_device *dev, u32 val)
1622{
1623 struct netdev_private *np = netdev_priv(dev);
1624 np->msg_enable = val;
1625}
1626
 1627static const struct ethtool_ops ethtool_ops = {
1628 .begin = check_if_running,
1629 .get_drvinfo = get_drvinfo,
1630 .get_settings = get_settings,
1631 .set_settings = set_settings,
1632 .nway_reset = nway_reset,
1633 .get_link = get_link,
1634 .get_msglevel = get_msglevel,
1635 .set_msglevel = set_msglevel,
1636};
1637
1638static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1639{
1640 struct netdev_private *np = netdev_priv(dev);
 1641 int rc;
1642
1643 if (!netif_running(dev))
1644 return -EINVAL;
1645
1646 spin_lock_irq(&np->lock);
1647 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1648 spin_unlock_irq(&np->lock);
1649
1650 return rc;
1651}
1652
1653static int netdev_close(struct net_device *dev)
1654{
1655 struct netdev_private *np = netdev_priv(dev);
1656 void __iomem *ioaddr = np->base;
1657 struct sk_buff *skb;
1658 int i;
1659
1660 /* Wait and kill tasklet */
1661 tasklet_kill(&np->rx_tasklet);
1662 tasklet_kill(&np->tx_tasklet);
1663 np->cur_tx = 0;
1664 np->dirty_tx = 0;
1665 np->cur_task = 0;
 1666 np->last_tx = NULL;
 1667
1668 netif_stop_queue(dev);
1669
1670 if (netif_msg_ifdown(np)) {
1671 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1672 "Rx %4.4x Int %2.2x.\n",
1673 dev->name, ioread8(ioaddr + TxStatus),
1674 ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1675 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1676 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1677 }
1678
1679 /* Disable interrupts by clearing the interrupt mask. */
1680 iowrite16(0x0000, ioaddr + IntrEnable);
1681
 1682 /* Disable Rx and Tx DMA so resources can be released safely */
1683 iowrite32(0x500, ioaddr + DMACtrl);
1684
1685 /* Stop the chip's Tx and Rx processes. */
1686 iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1687
1688 for (i = 2000; i > 0; i--) {
1689 if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1690 break;
1691 mdelay(1);
1692 }
1693
1694 iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1695 ioaddr +ASICCtrl + 2);
1696
1697 for (i = 2000; i > 0; i--) {
1698 if ((ioread16(ioaddr + ASICCtrl +2) & ResetBusy) == 0)
1699 break;
1700 mdelay(1);
1701 }
1702
1703#ifdef __i386__
1704 if (netif_msg_hw(np)) {
 1705 printk(KERN_DEBUG " Tx ring at %8.8x:\n",
1706 (int)(np->tx_ring_dma));
1707 for (i = 0; i < TX_RING_SIZE; i++)
 1708 printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1709 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1710 np->tx_ring[i].frag[0].length);
 1711 printk(KERN_DEBUG " Rx ring %8.8x:\n",
1712 (int)(np->rx_ring_dma));
1713 for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1714 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1715 i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1716 np->rx_ring[i].frag[0].length);
1717 }
1718 }
1719#endif /* __i386__ debugging only */
1720
1721 free_irq(dev->irq, dev);
1722
1723 del_timer_sync(&np->timer);
1724
1725 /* Free all the skbuffs in the Rx queue. */
1726 for (i = 0; i < RX_RING_SIZE; i++) {
1727 np->rx_ring[i].status = 0;
1728 skb = np->rx_skbuff[i];
1729 if (skb) {
 1730 dma_unmap_single(&np->pci_dev->dev,
 1731 le32_to_cpu(np->rx_ring[i].frag[0].addr),
 1732 np->rx_buf_sz, DMA_FROM_DEVICE);
1733 dev_kfree_skb(skb);
1734 np->rx_skbuff[i] = NULL;
1735 }
 1736 np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1737 }
1738 for (i = 0; i < TX_RING_SIZE; i++) {
 1739 np->tx_ring[i].next_desc = 0;
1740 skb = np->tx_skbuff[i];
1741 if (skb) {
 1742 dma_unmap_single(&np->pci_dev->dev,
 1743 le32_to_cpu(np->tx_ring[i].frag[0].addr),
 1744 skb->len, DMA_TO_DEVICE);
1745 dev_kfree_skb(skb);
1746 np->tx_skbuff[i] = NULL;
1747 }
1748 }
1749
1750 return 0;
1751}
1752
1753static void __devexit sundance_remove1 (struct pci_dev *pdev)
1754{
1755 struct net_device *dev = pci_get_drvdata(pdev);
1756
1757 if (dev) {
1758 struct netdev_private *np = netdev_priv(dev);
1759 unregister_netdev(dev);
1760 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
1761 np->rx_ring, np->rx_ring_dma);
1762 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
1763 np->tx_ring, np->tx_ring_dma);
1764 pci_iounmap(pdev, np->base);
1765 pci_release_regions(pdev);
1766 free_netdev(dev);
1767 pci_set_drvdata(pdev, NULL);
1768 }
1769}
1770
1771#ifdef CONFIG_PM
1772
1773static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
1774{
1775 struct net_device *dev = pci_get_drvdata(pci_dev);
1776
1777 if (!netif_running(dev))
1778 return 0;
1779
1780 netdev_close(dev);
1781 netif_device_detach(dev);
1782
1783 pci_save_state(pci_dev);
1784 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
1785
1786 return 0;
1787}
1788
1789static int sundance_resume(struct pci_dev *pci_dev)
1790{
1791 struct net_device *dev = pci_get_drvdata(pci_dev);
1792 int err = 0;
1793
1794 if (!netif_running(dev))
1795 return 0;
1796
1797 pci_set_power_state(pci_dev, PCI_D0);
1798 pci_restore_state(pci_dev);
1799
1800 err = netdev_open(dev);
1801 if (err) {
1802 printk(KERN_ERR "%s: Can't resume interface!\n",
1803 dev->name);
1804 goto out;
1805 }
1806
1807 netif_device_attach(dev);
1808
1809out:
1810 return err;
1811}
1812
1813#endif /* CONFIG_PM */
1814
1815static struct pci_driver sundance_driver = {
1816 .name = DRV_NAME,
1817 .id_table = sundance_pci_tbl,
1818 .probe = sundance_probe1,
1819 .remove = __devexit_p(sundance_remove1),
1820#ifdef CONFIG_PM
1821 .suspend = sundance_suspend,
1822 .resume = sundance_resume,
1823#endif /* CONFIG_PM */
1824};
1825
1826static int __init sundance_init(void)
1827{
1828/* when a module, this is printed whether or not devices are found in probe */
1829#ifdef MODULE
1830 printk(version);
1831#endif
 1832 return pci_register_driver(&sundance_driver);
1833}
1834
1835static void __exit sundance_exit(void)
1836{
1837 pci_unregister_driver(&sundance_driver);
1838}
1839
1840module_init(sundance_init);
1841module_exit(sundance_exit);
1842
1843