sundance: Handle DMA mapping errors
1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2 /*
3 Written 1999-2000 by Donald Becker.
4
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
11
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
15 Annapolis MD 21403
16
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
19 [link no longer provides useful info -jgarzik]
20 Archives of the mailing list are still available at
21 http://www.beowulf.org/pipermail/netdrivers/
22
23 */
24
25 #define DRV_NAME "sundance"
26 #define DRV_VERSION "1.2"
27 #define DRV_RELDATE "11-Sep-2006"
28
29
30 /* The user-configurable values.
31 These may be modified when a driver module is loaded.*/
32 static int debug = 1; /* 0 quiet, 1 normal messages, ... 7 verbose. */
33 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
34 Typical is a 64 element hash table based on the Ethernet CRC. */
35 static const int multicast_filter_limit = 32;
36
37 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
38 Setting to > 1518 effectively disables this feature.
39 This chip can receive into offset buffers, so the Alpha does not
40 need a copy-align. */
41 static int rx_copybreak;
42 static int flowctrl=1;
43
44 /* media[] specifies the media type the NIC operates at.
45 autosense Autosensing active media.
46 10mbps_hd 10Mbps half duplex.
47 10mbps_fd 10Mbps full duplex.
48 100mbps_hd 100Mbps half duplex.
49 100mbps_fd 100Mbps full duplex.
50 0 Autosensing active media.
51 1 10Mbps half duplex.
52 2 10Mbps full duplex.
53 3 100Mbps half duplex.
54 4 100Mbps full duplex.
55 */
56 #define MAX_UNITS 8
57 static char *media[MAX_UNITS];
58
59
60 /* Operational parameters that are set at compile time. */
61
62 /* Keep the ring sizes a power of two for compile efficiency.
63 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
64 Making the Tx ring too large decreases the effectiveness of channel
65 bonding and packet priority, and more than 128 requires modifying the
66 Tx error recovery.
67 Large receive rings merely waste memory. */
68 #define TX_RING_SIZE 32
69 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
70 #define RX_RING_SIZE 64
71 #define RX_BUDGET 32
72 #define TX_TOTAL_SIZE (TX_RING_SIZE * sizeof(struct netdev_desc))
73 #define RX_TOTAL_SIZE (RX_RING_SIZE * sizeof(struct netdev_desc))
74
75 /* Operational parameters that usually are not changed. */
76 /* Time in jiffies before concluding the transmitter is hung. */
77 #define TX_TIMEOUT (4*HZ)
78 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
79
80 /* Include files, designed to support most kernel versions 2.0.0 and later. */
81 #include <linux/module.h>
82 #include <linux/kernel.h>
83 #include <linux/string.h>
84 #include <linux/timer.h>
85 #include <linux/errno.h>
86 #include <linux/ioport.h>
87 #include <linux/interrupt.h>
88 #include <linux/pci.h>
89 #include <linux/netdevice.h>
90 #include <linux/etherdevice.h>
91 #include <linux/skbuff.h>
92 #include <linux/init.h>
93 #include <linux/bitops.h>
94 #include <asm/uaccess.h>
95 #include <asm/processor.h> /* Processor type for cache alignment. */
96 #include <asm/io.h>
97 #include <linux/delay.h>
98 #include <linux/spinlock.h>
99 #include <linux/dma-mapping.h>
100 #ifndef _COMPAT_WITH_OLD_KERNEL
101 #include <linux/crc32.h>
102 #include <linux/ethtool.h>
103 #include <linux/mii.h>
104 #else
105 #include "crc32.h"
106 #include "ethtool.h"
107 #include "mii.h"
108 #include "compat.h"
109 #endif
110
111 /* These identify the driver base version and may not be removed. */
112 static const char version[] __devinitconst =
113 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
114 " Written by Donald Becker\n";
115
116 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
117 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
118 MODULE_LICENSE("GPL");
119
120 module_param(debug, int, 0);
121 module_param(rx_copybreak, int, 0);
122 module_param_array(media, charp, NULL, 0);
123 module_param(flowctrl, int, 0);
124 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
125 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
126 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
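/* Example invocation (hypothetical, for illustration only):
 *
 *	modprobe sundance media=100mbps_fd,autosense flowctrl=1 debug=2
 *
 * media[] takes one comma-separated entry per card (up to MAX_UNITS);
 * debug and flowctrl are single values applied to every card probed. */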
127
128 /*
129 Theory of Operation
130
131 I. Board Compatibility
132
133 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
134
135 II. Board-specific settings
136
137 III. Driver operation
138
139 IIIa. Ring buffers
140
141 This driver uses two statically allocated fixed-size descriptor lists
142 formed into rings by a branch from the final descriptor to the beginning of
143 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
144 Some chips explicitly use only 2^N sized rings, while others use a
145 'next descriptor' pointer that the driver forms into rings.
146
147 IIIb/c. Transmit/Receive Structure
148
149 This driver uses a zero-copy receive and transmit scheme.
150 The driver allocates full frame size skbuffs for the Rx ring buffers at
151 open() time and passes the skb->data field to the chip as receive data
152 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
153 a fresh skbuff is allocated and the frame is copied to the new skbuff.
154 When the incoming frame is larger, the skbuff is passed directly up the
155 protocol stack. Buffers consumed this way are replaced by newly allocated
156 skbuffs in a later phase of receives.
157
158 The RX_COPYBREAK value is chosen to trade off the memory wasted by
159 using a full-sized skbuff for small frames vs. the copying costs of larger
160 frames. New boards are typically used in generously configured machines
161 and the underfilled buffers have negligible impact compared to the benefit of
162 a single allocation size, so the default value of zero results in never
163 copying packets. When copying is done, the cost is usually mitigated by using
164 a combined copy/checksum routine. Copying also preloads the cache, which is
165 most useful with small frames.
166
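A minimal sketch of that decision, mirroring the logic in rx_poll()
below (the names are the driver's own):

	if (pkt_len < rx_copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL)
		... copy into the small skb; the ring buffer stays mapped ...
	else
		... unmap the ring buffer and pass its skb up the stack ...
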
167 A subtle aspect of the operation is that the IP header at offset 14 in an
168 ethernet frame isn't longword aligned for further processing.
169 Unaligned buffers are permitted by the Sundance hardware, so
170 frames are received into the skbuff at an offset of "+2", 16-byte aligning
171 the IP header.
172
173 IIId. Synchronization
174
175 The driver runs as two independent, single-threaded flows of control. One
176 is the send-packet routine, which enforces single-threaded use by the
177 dev->tbusy flag. The other thread is the interrupt handler, which is single
178 threaded by the hardware and interrupt handling software.
179
180 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
181 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
182 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
183 the 'lp->tx_full' flag.
184
185 The interrupt handler has exclusive control over the Rx ring and records stats
186 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
187 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
188 clears both the tx_full and tbusy flags.
189
190 IV. Notes
191
192 IVb. References
193
194 The Sundance ST201 datasheet, preliminary version.
195 The Kendin KS8723 datasheet, preliminary version.
196 The ICplus IP100 datasheet, preliminary version.
197 http://www.scyld.com/expert/100mbps.html
198 http://www.scyld.com/expert/NWay.html
199
200 IVc. Errata
201
202 */
203
204 /* Work-around for Kendin chip bugs. */
205 #ifndef CONFIG_SUNDANCE_MMIO
206 #define USE_IO_OPS 1
207 #endif
208
209 static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
210 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
211 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
212 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
213 { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
214 { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
215 { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
216 { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
217 { }
218 };
219 MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
220
221 enum {
222 netdev_io_size = 128
223 };
224
225 struct pci_id_info {
226 const char *name;
227 };
228 static const struct pci_id_info pci_id_tbl[] __devinitdata = {
229 {"D-Link DFE-550TX FAST Ethernet Adapter"},
230 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
231 {"D-Link DFE-580TX 4 port Server Adapter"},
232 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
233 {"D-Link DL10050-based FAST Ethernet Adapter"},
234 {"Sundance Technology Alta"},
235 {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
236 { } /* terminate list. */
237 };
238
239 /* This driver was written to use PCI memory space; however, x86-oriented
240 hardware often uses I/O space accesses. */
241
242 /* Offsets to the device registers.
243 Unlike software-only systems, device drivers interact with complex hardware.
244 It's not useful to define symbolic names for every register bit in the
245 device. The name can only partially document the semantics and make
246 the driver longer and more difficult to read.
247 In general, only the important configuration values or bits changed
248 multiple times should be defined symbolically.
249 */
250 enum alta_offsets {
251 DMACtrl = 0x00,
252 TxListPtr = 0x04,
253 TxDMABurstThresh = 0x08,
254 TxDMAUrgentThresh = 0x09,
255 TxDMAPollPeriod = 0x0a,
256 RxDMAStatus = 0x0c,
257 RxListPtr = 0x10,
258 DebugCtrl0 = 0x1a,
259 DebugCtrl1 = 0x1c,
260 RxDMABurstThresh = 0x14,
261 RxDMAUrgentThresh = 0x15,
262 RxDMAPollPeriod = 0x16,
263 LEDCtrl = 0x1a,
264 ASICCtrl = 0x30,
265 EEData = 0x34,
266 EECtrl = 0x36,
267 FlashAddr = 0x40,
268 FlashData = 0x44,
269 TxStatus = 0x46,
270 TxFrameId = 0x47,
271 DownCounter = 0x18,
272 IntrClear = 0x4a,
273 IntrEnable = 0x4c,
274 IntrStatus = 0x4e,
275 MACCtrl0 = 0x50,
276 MACCtrl1 = 0x52,
277 StationAddr = 0x54,
278 MaxFrameSize = 0x5A,
279 RxMode = 0x5c,
280 MIICtrl = 0x5e,
281 MulticastFilter0 = 0x60,
282 MulticastFilter1 = 0x64,
283 RxOctetsLow = 0x68,
284 RxOctetsHigh = 0x6a,
285 TxOctetsLow = 0x6c,
286 TxOctetsHigh = 0x6e,
287 TxFramesOK = 0x70,
288 RxFramesOK = 0x72,
289 StatsCarrierError = 0x74,
290 StatsLateColl = 0x75,
291 StatsMultiColl = 0x76,
292 StatsOneColl = 0x77,
293 StatsTxDefer = 0x78,
294 RxMissed = 0x79,
295 StatsTxXSDefer = 0x7a,
296 StatsTxAbort = 0x7b,
297 StatsBcastTx = 0x7c,
298 StatsBcastRx = 0x7d,
299 StatsMcastTx = 0x7e,
300 StatsMcastRx = 0x7f,
301 /* Aliased and bogus values! */
302 RxStatus = 0x0c,
303 };
304 enum ASICCtrl_HiWord_bit {
305 GlobalReset = 0x0001,
306 RxReset = 0x0002,
307 TxReset = 0x0004,
308 DMAReset = 0x0008,
309 FIFOReset = 0x0010,
310 NetworkReset = 0x0020,
311 HostReset = 0x0040,
312 ResetBusy = 0x0400,
313 };
314
315 /* Bits in the interrupt status/mask registers. */
316 enum intr_status_bits {
317 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
318 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
319 IntrDrvRqst=0x0040,
320 StatsMax=0x0080, LinkChange=0x0100,
321 IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
322 };
323
324 /* Bits in the RxMode register. */
325 enum rx_mode_bits {
326 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
327 AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
328 };
329 /* Bits in MACCtrl. */
330 enum mac_ctrl0_bits {
331 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
332 EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
333 };
334 enum mac_ctrl1_bits {
335 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
336 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
337 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
338 };
339
340 /* The Rx and Tx buffer descriptors. */
341 /* Note that using only 32 bit fields simplifies conversion to big-endian
342 architectures. */
343 struct netdev_desc {
344 __le32 next_desc;
345 __le32 status;
346 struct desc_frag { __le32 addr, length; } frag[1];
347 };
348
349 /* Bits in netdev_desc.status */
350 enum desc_status_bits {
351 DescOwn=0x8000,
352 DescEndPacket=0x4000,
353 DescEndRing=0x2000,
354 LastFrag=0x80000000,
355 DescIntrOnTx=0x8000,
356 DescIntrOnDMADone=0x80000000,
357 DisableAlign = 0x00000001,
358 };
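/* A sketch of how start_tx() below fills a single-fragment descriptor
 * (the driver's own usage restated, not a separate API):
 *
 *	txdesc->next_desc      = 0;		(chained later by tx_poll)
 *	txdesc->status         = cpu_to_le32((entry << 2) | DisableAlign);
 *	txdesc->frag[0].addr   = cpu_to_le32(bus_addr);	(from dma_map_single)
 *	txdesc->frag[0].length = cpu_to_le32(skb->len | LastFrag);
 */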
359
360 #define PRIV_ALIGN 15 /* Required alignment mask */
361 /* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
362 within the structure. */
363 #define MII_CNT 4
364 struct netdev_private {
365 /* Descriptor rings first for alignment. */
366 struct netdev_desc *rx_ring;
367 struct netdev_desc *tx_ring;
368 struct sk_buff* rx_skbuff[RX_RING_SIZE];
369 struct sk_buff* tx_skbuff[TX_RING_SIZE];
370 dma_addr_t tx_ring_dma;
371 dma_addr_t rx_ring_dma;
372 struct timer_list timer; /* Media monitoring timer. */
373 /* Frequently used values: keep some adjacent for cache effect. */
374 spinlock_t lock;
375 spinlock_t rx_lock; /* Group with Tx control cache line. */
376 int msg_enable;
377 int chip_id;
378 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
379 unsigned int rx_buf_sz; /* Based on MTU+slack. */
380 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
381 unsigned int cur_tx, dirty_tx;
382 /* These values keep track of the transceiver/media in use. */
383 unsigned int flowctrl:1;
384 unsigned int default_port:4; /* Last dev->if_port value. */
385 unsigned int an_enable:1;
386 unsigned int speed;
387 struct tasklet_struct rx_tasklet;
388 struct tasklet_struct tx_tasklet;
389 int budget;
390 int cur_task;
391 /* Multicast and receive mode. */
392 spinlock_t mcastlock; /* SMP lock multicast updates. */
393 u16 mcast_filter[4];
394 /* MII transceiver section. */
395 struct mii_if_info mii_if;
396 int mii_preamble_required;
397 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
398 struct pci_dev *pci_dev;
399 void __iomem *base;
400 };
401
402 /* The station address location in the EEPROM. */
403 #define EEPROM_SA_OFFSET 0x10
404 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
405 IntrDrvRqst | IntrTxDone | StatsMax | \
406 LinkChange)
407
408 static int change_mtu(struct net_device *dev, int new_mtu);
409 static int eeprom_read(void __iomem *ioaddr, int location);
410 static int mdio_read(struct net_device *dev, int phy_id, int location);
411 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
412 static int mdio_wait_link(struct net_device *dev, int wait);
413 static int netdev_open(struct net_device *dev);
414 static void check_duplex(struct net_device *dev);
415 static void netdev_timer(unsigned long data);
416 static void tx_timeout(struct net_device *dev);
417 static void init_ring(struct net_device *dev);
418 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
419 static int reset_tx (struct net_device *dev);
420 static irqreturn_t intr_handler(int irq, void *dev_instance);
421 static void rx_poll(unsigned long data);
422 static void tx_poll(unsigned long data);
423 static void refill_rx (struct net_device *dev);
424 static void netdev_error(struct net_device *dev, int intr_status);
426 static void set_rx_mode(struct net_device *dev);
427 static int __set_mac_addr(struct net_device *dev);
428 static struct net_device_stats *get_stats(struct net_device *dev);
429 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
430 static int netdev_close(struct net_device *dev);
431 static const struct ethtool_ops ethtool_ops;
432
433 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
434 {
435 struct netdev_private *np = netdev_priv(dev);
436 void __iomem *ioaddr = np->base + ASICCtrl;
437 int countdown;
438
439 /* ST201 documentation states ASICCtrl is a 32bit register */
440 iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
441 /* ST201 documentation states reset can take up to 1 ms */
442 countdown = 10 + 1;
443 while (ioread32 (ioaddr) & (ResetBusy << 16)) {
444 if (--countdown == 0) {
445 printk(KERN_WARNING "%s: reset not completed!\n", dev->name);
446 break;
447 }
448 udelay(100);
449 }
450 }
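/* Callers pass the ASICCtrl reset bits shifted into the register's high
 * half; reset_tx() below, for example, does
 *
 *	sundance_reset(dev, (NetworkReset | FIFOReset | DMAReset | TxReset) << 16);
 */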
451
452 static const struct net_device_ops netdev_ops = {
453 .ndo_open = netdev_open,
454 .ndo_stop = netdev_close,
455 .ndo_start_xmit = start_tx,
456 .ndo_get_stats = get_stats,
457 .ndo_set_multicast_list = set_rx_mode,
458 .ndo_do_ioctl = netdev_ioctl,
459 .ndo_tx_timeout = tx_timeout,
460 .ndo_change_mtu = change_mtu,
461 .ndo_set_mac_address = eth_mac_addr,
462 .ndo_validate_addr = eth_validate_addr,
463 };
464
465 static int __devinit sundance_probe1 (struct pci_dev *pdev,
466 const struct pci_device_id *ent)
467 {
468 struct net_device *dev;
469 struct netdev_private *np;
470 static int card_idx;
471 int chip_idx = ent->driver_data;
472 int irq;
473 int i;
474 void __iomem *ioaddr;
475 u16 mii_ctl;
476 void *ring_space;
477 dma_addr_t ring_dma;
478 #ifdef USE_IO_OPS
479 int bar = 0;
480 #else
481 int bar = 1;
482 #endif
483 int phy, phy_end, phy_idx = 0;
484
485 /* when built into the kernel, we only print version if device is found */
486 #ifndef MODULE
487 static int printed_version;
488 if (!printed_version++)
489 printk(version);
490 #endif
491
492 if (pci_enable_device(pdev))
493 return -EIO;
494 pci_set_master(pdev);
495
496 irq = pdev->irq;
497
498 dev = alloc_etherdev(sizeof(*np));
499 if (!dev)
500 return -ENOMEM;
501 SET_NETDEV_DEV(dev, &pdev->dev);
502
503 if (pci_request_regions(pdev, DRV_NAME))
504 goto err_out_netdev;
505
506 ioaddr = pci_iomap(pdev, bar, netdev_io_size);
507 if (!ioaddr)
508 goto err_out_res;
509
510 for (i = 0; i < 3; i++)
511 ((__le16 *)dev->dev_addr)[i] =
512 cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
513 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
514
515 dev->base_addr = (unsigned long)ioaddr;
516 dev->irq = irq;
517
518 np = netdev_priv(dev);
519 np->base = ioaddr;
520 np->pci_dev = pdev;
521 np->chip_id = chip_idx;
522 np->msg_enable = (1 << debug) - 1;
523 spin_lock_init(&np->lock);
524 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
525 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
526
527 ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
528 &ring_dma, GFP_KERNEL);
529 if (!ring_space)
530 goto err_out_cleardev;
531 np->tx_ring = (struct netdev_desc *)ring_space;
532 np->tx_ring_dma = ring_dma;
533
534 ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
535 &ring_dma, GFP_KERNEL);
536 if (!ring_space)
537 goto err_out_unmap_tx;
538 np->rx_ring = (struct netdev_desc *)ring_space;
539 np->rx_ring_dma = ring_dma;
540
541 np->mii_if.dev = dev;
542 np->mii_if.mdio_read = mdio_read;
543 np->mii_if.mdio_write = mdio_write;
544 np->mii_if.phy_id_mask = 0x1f;
545 np->mii_if.reg_num_mask = 0x1f;
546
547 /* The chip-specific entries in the device structure. */
548 dev->netdev_ops = &netdev_ops;
549 SET_ETHTOOL_OPS(dev, &ethtool_ops);
550 dev->watchdog_timeo = TX_TIMEOUT;
551
552 pci_set_drvdata(pdev, dev);
553
554 i = register_netdev(dev);
555 if (i)
556 goto err_out_unmap_rx;
557
558 printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
559 dev->name, pci_id_tbl[chip_idx].name, ioaddr,
560 dev->dev_addr, irq);
561
562 np->phys[0] = 1; /* Default setting */
563 np->mii_preamble_required++;
564
565 /*
566 * It seems some PHYs don't deal well with address 0 being accessed
567 * first
568 */
569 if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
570 phy = 0;
571 phy_end = 31;
572 } else {
573 phy = 1;
574 phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */
575 }
576 for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
577 int phyx = phy & 0x1f;
578 int mii_status = mdio_read(dev, phyx, MII_BMSR);
579 if (mii_status != 0xffff && mii_status != 0x0000) {
580 np->phys[phy_idx++] = phyx;
581 np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
582 if ((mii_status & 0x0040) == 0)
583 np->mii_preamble_required++;
584 printk(KERN_INFO "%s: MII PHY found at address %d, status "
585 "0x%4.4x advertising %4.4x.\n",
586 dev->name, phyx, mii_status, np->mii_if.advertising);
587 }
588 }
589 np->mii_preamble_required--;
590
591 if (phy_idx == 0) {
592 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
593 dev->name, ioread32(ioaddr + ASICCtrl));
594 goto err_out_unregister;
595 }
596
597 np->mii_if.phy_id = np->phys[0];
598
599 /* Parse override configuration */
600 np->an_enable = 1;
601 if (card_idx < MAX_UNITS) {
602 if (media[card_idx] != NULL) {
603 np->an_enable = 0;
604 if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
605 strcmp (media[card_idx], "4") == 0) {
606 np->speed = 100;
607 np->mii_if.full_duplex = 1;
608 } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
609 strcmp (media[card_idx], "3") == 0) {
610 np->speed = 100;
611 np->mii_if.full_duplex = 0;
612 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
613 strcmp (media[card_idx], "2") == 0) {
614 np->speed = 10;
615 np->mii_if.full_duplex = 1;
616 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
617 strcmp (media[card_idx], "1") == 0) {
618 np->speed = 10;
619 np->mii_if.full_duplex = 0;
620 } else {
621 np->an_enable = 1;
622 }
623 }
624 if (flowctrl == 1)
625 np->flowctrl = 1;
626 }
627
628 /* Fibre PHY? */
629 if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
630 /* Default 100Mbps Full */
631 if (np->an_enable) {
632 np->speed = 100;
633 np->mii_if.full_duplex = 1;
634 np->an_enable = 0;
635 }
636 }
637 /* Reset PHY */
638 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
639 mdelay (300);
640 /* If flow control is enabled, advertise it (0x0400 = ADVERTISE_PAUSE_CAP). */
641 if (np->flowctrl)
642 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
643 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
644 /* Force media type */
645 if (!np->an_enable) {
646 mii_ctl = 0;
647 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
648 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
649 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
650 printk (KERN_INFO "Override speed=%d, %s duplex\n",
651 np->speed, np->mii_if.full_duplex ? "Full" : "Half");
652
653 }
654
655 /* Perhaps move the reset here? */
656 /* Reset the chip to erase previous misconfiguration. */
657 if (netif_msg_hw(np))
658 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
659 sundance_reset(dev, 0x00ff << 16);
660 if (netif_msg_hw(np))
661 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
662
663 card_idx++;
664 return 0;
665
666 err_out_unregister:
667 unregister_netdev(dev);
668 err_out_unmap_rx:
669 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
670 np->rx_ring, np->rx_ring_dma);
671 err_out_unmap_tx:
672 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
673 np->tx_ring, np->tx_ring_dma);
674 err_out_cleardev:
675 pci_set_drvdata(pdev, NULL);
676 pci_iounmap(pdev, ioaddr);
677 err_out_res:
678 pci_release_regions(pdev);
679 err_out_netdev:
680 free_netdev (dev);
681 return -ENODEV;
682 }
683
684 static int change_mtu(struct net_device *dev, int new_mtu)
685 {
686 if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
687 return -EINVAL;
688 if (netif_running(dev))
689 return -EBUSY;
690 dev->mtu = new_mtu;
691 return 0;
692 }
693
694 #define eeprom_delay(ee_addr) ioread32(ee_addr)
695 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
696 static int __devinit eeprom_read(void __iomem *ioaddr, int location)
697 {
698 int boguscnt = 10000; /* Typical 1900 ticks. */
699 iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
700 do {
701 eeprom_delay(ioaddr + EECtrl);
702 if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
703 return ioread16(ioaddr + EEData);
704 }
705 } while (--boguscnt > 0);
706 return 0;
707 }
708
709 /* MII transceiver control section.
710 Read and write the MII registers using software-generated serial
711 MDIO protocol. See the MII specifications or DP83840A data sheet
712 for details.
713
714 The maximum data clock rate is 2.5 MHz. The minimum timing is usually
715 met by back-to-back 33 MHz PCI cycles. */
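/* Frame layout, as implemented below: mdio_read() shifts a 16-bit command
 * (start/opcode bits, 5-bit PHY address, 5-bit register) out MSB first,
 * turns the bus around, and clocks in 16 data bits; the trailing ">> 1"
 * strips the idle bit. mdio_write() shifts command and data out as one
 * 32-bit word. */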
716 #define mdio_delay() ioread8(mdio_addr)
717
718 enum mii_reg_bits {
719 MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
720 };
721 #define MDIO_EnbIn (0)
722 #define MDIO_WRITE0 (MDIO_EnbOutput)
723 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
724
725 /* Generate the preamble required for initial synchronization and
726 a few older transceivers. */
727 static void mdio_sync(void __iomem *mdio_addr)
728 {
729 int bits = 32;
730
731 /* Establish sync by sending at least 32 logic ones. */
732 while (--bits >= 0) {
733 iowrite8(MDIO_WRITE1, mdio_addr);
734 mdio_delay();
735 iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
736 mdio_delay();
737 }
738 }
739
740 static int mdio_read(struct net_device *dev, int phy_id, int location)
741 {
742 struct netdev_private *np = netdev_priv(dev);
743 void __iomem *mdio_addr = np->base + MIICtrl;
744 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
745 int i, retval = 0;
746
747 if (np->mii_preamble_required)
748 mdio_sync(mdio_addr);
749
750 /* Shift the read command bits out. */
751 for (i = 15; i >= 0; i--) {
752 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
753
754 iowrite8(dataval, mdio_addr);
755 mdio_delay();
756 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
757 mdio_delay();
758 }
759 /* Read the two transition, 16 data, and wire-idle bits. */
760 for (i = 19; i > 0; i--) {
761 iowrite8(MDIO_EnbIn, mdio_addr);
762 mdio_delay();
763 retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
764 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
765 mdio_delay();
766 }
767 return (retval>>1) & 0xffff;
768 }
769
770 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
771 {
772 struct netdev_private *np = netdev_priv(dev);
773 void __iomem *mdio_addr = np->base + MIICtrl;
774 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
775 int i;
776
777 if (np->mii_preamble_required)
778 mdio_sync(mdio_addr);
779
780 /* Shift the command bits out. */
781 for (i = 31; i >= 0; i--) {
782 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
783
784 iowrite8(dataval, mdio_addr);
785 mdio_delay();
786 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
787 mdio_delay();
788 }
789 /* Clear out extra bits. */
790 for (i = 2; i > 0; i--) {
791 iowrite8(MDIO_EnbIn, mdio_addr);
792 mdio_delay();
793 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
794 mdio_delay();
795 }
796 }
797
798 static int mdio_wait_link(struct net_device *dev, int wait)
799 {
800 int bmsr;
801 int phy_id;
802 struct netdev_private *np;
803
804 np = netdev_priv(dev);
805 phy_id = np->phys[0];
806
807 do {
808 bmsr = mdio_read(dev, phy_id, MII_BMSR);
809 if (bmsr & 0x0004)
810 return 0;
811 mdelay(1);
812 } while (--wait > 0);
813 return -1;
814 }
815
816 static int netdev_open(struct net_device *dev)
817 {
818 struct netdev_private *np = netdev_priv(dev);
819 void __iomem *ioaddr = np->base;
820 unsigned long flags;
821 int i;
822
823 /* Do we need to reset the chip??? */
824
825 i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
826 if (i)
827 return i;
828
829 if (netif_msg_ifup(np))
830 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
831 dev->name, dev->irq);
832 init_ring(dev);
833
834 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
835 /* The Tx list pointer is written as packets are queued. */
836
837 /* Initialize other registers. */
838 __set_mac_addr(dev);
839 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
840 iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
841 #else
842 iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
843 #endif
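/* The frame size above is dev->mtu plus the 14-byte Ethernet header, plus
 * 4 more bytes for the 802.1Q tag when VLAN support is built in. */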
844 if (dev->mtu > 2047)
845 iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
846
847 /* Configure the PCI bus bursts and FIFO thresholds. */
848
849 if (dev->if_port == 0)
850 dev->if_port = np->default_port;
851
852 spin_lock_init(&np->mcastlock);
853
854 set_rx_mode(dev);
855 iowrite16(0, ioaddr + IntrEnable);
856 iowrite16(0, ioaddr + DownCounter);
857 /* Set the chip to poll every N*320nsec. */
858 iowrite8(100, ioaddr + RxDMAPollPeriod);
859 iowrite8(127, ioaddr + TxDMAPollPeriod);
860 /* Fix DFE-580TX packet drop issue */
861 if (np->pci_dev->revision >= 0x14)
862 iowrite8(0x01, ioaddr + DebugCtrl1);
863 netif_start_queue(dev);
864
865 spin_lock_irqsave(&np->lock, flags);
866 reset_tx(dev);
867 spin_unlock_irqrestore(&np->lock, flags);
868
869 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
870
871 if (netif_msg_ifup(np))
872 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
873 "MAC Control %x, %4.4x %4.4x.\n",
874 dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
875 ioread32(ioaddr + MACCtrl0),
876 ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
877
878 /* Set the timer to check for link beat. */
879 init_timer(&np->timer);
880 np->timer.expires = jiffies + 3*HZ;
881 np->timer.data = (unsigned long)dev;
882 np->timer.function = netdev_timer; /* timer handler */
883 add_timer(&np->timer);
884
885 /* Enable interrupts by setting the interrupt mask. */
886 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
887
888 return 0;
889 }
890
891 static void check_duplex(struct net_device *dev)
892 {
893 struct netdev_private *np = netdev_priv(dev);
894 void __iomem *ioaddr = np->base;
895 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
896 int negotiated = mii_lpa & np->mii_if.advertising;
897 int duplex;
898
899 /* Force media */
900 if (!np->an_enable || mii_lpa == 0xffff) {
901 if (np->mii_if.full_duplex)
902 iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
903 ioaddr + MACCtrl0);
904 return;
905 }
906
907 /* Autonegotiation */
908 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
909 if (np->mii_if.full_duplex != duplex) {
910 np->mii_if.full_duplex = duplex;
911 if (netif_msg_link(np))
912 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
913 "negotiated capability %4.4x.\n", dev->name,
914 duplex ? "full" : "half", np->phys[0], negotiated);
915 iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
916 }
917 }
918
919 static void netdev_timer(unsigned long data)
920 {
921 struct net_device *dev = (struct net_device *)data;
922 struct netdev_private *np = netdev_priv(dev);
923 void __iomem *ioaddr = np->base;
924 int next_tick = 10*HZ;
925
926 if (netif_msg_timer(np)) {
927 printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
928 "Tx %x Rx %x.\n",
929 dev->name, ioread16(ioaddr + IntrEnable),
930 ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
931 }
932 check_duplex(dev);
933 np->timer.expires = jiffies + next_tick;
934 add_timer(&np->timer);
935 }
936
937 static void tx_timeout(struct net_device *dev)
938 {
939 struct netdev_private *np = netdev_priv(dev);
940 void __iomem *ioaddr = np->base;
941 unsigned long flag;
942
943 netif_stop_queue(dev);
944 tasklet_disable(&np->tx_tasklet);
945 iowrite16(0, ioaddr + IntrEnable);
946 printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
947 "TxFrameId %2.2x,"
948 " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
949 ioread8(ioaddr + TxFrameId));
950
951 {
952 int i;
953 for (i=0; i<TX_RING_SIZE; i++) {
954 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
955 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
956 le32_to_cpu(np->tx_ring[i].next_desc),
957 le32_to_cpu(np->tx_ring[i].status),
958 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
959 le32_to_cpu(np->tx_ring[i].frag[0].addr),
960 le32_to_cpu(np->tx_ring[i].frag[0].length));
961 }
962 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
963 ioread32(np->base + TxListPtr),
964 netif_queue_stopped(dev));
965 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
966 np->cur_tx, np->cur_tx % TX_RING_SIZE,
967 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
968 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
969 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
970 }
971 spin_lock_irqsave(&np->lock, flag);
972
973 /* Stop and restart the chip's Tx processes. */
974 reset_tx(dev);
975 spin_unlock_irqrestore(&np->lock, flag);
976
977 dev->if_port = 0;
978
979 dev->trans_start = jiffies; /* prevent tx timeout */
980 dev->stats.tx_errors++;
981 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
982 netif_wake_queue(dev);
983 }
984 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
985 tasklet_enable(&np->tx_tasklet);
986 }
987
988
989 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
990 static void init_ring(struct net_device *dev)
991 {
992 struct netdev_private *np = netdev_priv(dev);
993 int i;
994
995 np->cur_rx = np->cur_tx = 0;
996 np->dirty_rx = np->dirty_tx = 0;
997 np->cur_task = 0;
998
999 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1000
1001 /* Initialize all Rx descriptors. */
1002 for (i = 0; i < RX_RING_SIZE; i++) {
1003 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1004 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1005 np->rx_ring[i].status = 0;
1006 np->rx_ring[i].frag[0].length = 0;
1007 np->rx_skbuff[i] = NULL;
1008 }
1009
1010 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1011 for (i = 0; i < RX_RING_SIZE; i++) {
1012 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1013 np->rx_skbuff[i] = skb;
1014 if (skb == NULL)
1015 break;
1016 skb->dev = dev; /* Mark as being used by this device. */
1017 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1018 np->rx_ring[i].frag[0].addr = cpu_to_le32(
1019 dma_map_single(&np->pci_dev->dev, skb->data,
1020 np->rx_buf_sz, DMA_FROM_DEVICE));
1021 if (dma_mapping_error(&np->pci_dev->dev,
1022 np->rx_ring[i].frag[0].addr)) {
1023 dev_kfree_skb(skb);
1024 np->rx_skbuff[i] = NULL;
1025 break;
1026 }
1027 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1028 }
1029 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1030
1031 for (i = 0; i < TX_RING_SIZE; i++) {
1032 np->tx_skbuff[i] = NULL;
1033 np->tx_ring[i].status = 0;
1034 }
1035 }
1036
1037 static void tx_poll (unsigned long data)
1038 {
1039 struct net_device *dev = (struct net_device *)data;
1040 struct netdev_private *np = netdev_priv(dev);
1041 unsigned head = np->cur_task % TX_RING_SIZE;
1042 struct netdev_desc *txdesc =
1043 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1044
1045 /* Chain the next pointer */
1046 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1047 int entry = np->cur_task % TX_RING_SIZE;
1048 txdesc = &np->tx_ring[entry];
1049 if (np->last_tx) {
1050 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1051 entry*sizeof(struct netdev_desc));
1052 }
1053 np->last_tx = txdesc;
1054 }
1055 /* Indicate the latest descriptor of tx ring */
1056 txdesc->status |= cpu_to_le32(DescIntrOnTx);
1057
1058 if (ioread32 (np->base + TxListPtr) == 0)
1059 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1060 np->base + TxListPtr);
1061 }
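/* Design note: tx_poll() runs from a tasklet so descriptor chaining and
 * the TxListPtr write happen outside the xmit fast path; the list pointer
 * is rewritten only when the chip reports it as 0, i.e. Tx DMA is idle. */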
1062
1063 static netdev_tx_t
1064 start_tx (struct sk_buff *skb, struct net_device *dev)
1065 {
1066 struct netdev_private *np = netdev_priv(dev);
1067 struct netdev_desc *txdesc;
1068 unsigned entry;
1069
1070 /* Calculate the next Tx descriptor entry. */
1071 entry = np->cur_tx % TX_RING_SIZE;
1072 np->tx_skbuff[entry] = skb;
1073 txdesc = &np->tx_ring[entry];
1074
1075 txdesc->next_desc = 0;
1076 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1077 txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1078 skb->data, skb->len, DMA_TO_DEVICE));
1079 if (dma_mapping_error(&np->pci_dev->dev,
1080 txdesc->frag[0].addr))
1081 goto drop_frame;
1082 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1083
1084 /* Increment cur_tx before tasklet_schedule() */
1085 np->cur_tx++;
1086 mb();
1087 /* Schedule a tx_poll() task */
1088 tasklet_schedule(&np->tx_tasklet);
1089
1090 /* On some architectures: explicitly flush cache lines here. */
1091 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1092 !netif_queue_stopped(dev)) {
1093 /* do nothing */
1094 } else {
1095 netif_stop_queue (dev);
1096 }
1097 if (netif_msg_tx_queued(np)) {
1098 printk (KERN_DEBUG
1099 "%s: Transmit frame #%d queued in slot %d.\n",
1100 dev->name, np->cur_tx, entry);
1101 }
1102 return NETDEV_TX_OK;
1103
1104 drop_frame:
1105 dev_kfree_skb(skb);
1106 np->tx_skbuff[entry] = NULL;
1107 dev->stats.tx_dropped++;
1108 return NETDEV_TX_OK;
1109 }
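/* Note: the drop_frame path above follows the same pattern as init_ring()
 * and refill_rx() -- on dma_mapping_error() the skb is freed and the
 * descriptor is never handed to the chip. */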
1110
1111 /* Reset hardware tx and free all of tx buffers */
1112 static int
1113 reset_tx (struct net_device *dev)
1114 {
1115 struct netdev_private *np = netdev_priv(dev);
1116 void __iomem *ioaddr = np->base;
1117 struct sk_buff *skb;
1118 int i;
1119 int irq = in_interrupt();
1120
1121 /* Reset tx logic, TxListPtr will be cleaned */
1122 iowrite16 (TxDisable, ioaddr + MACCtrl1);
1123 sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1124
1125 /* free all tx skbuff */
1126 for (i = 0; i < TX_RING_SIZE; i++) {
1127 np->tx_ring[i].next_desc = 0;
1128
1129 skb = np->tx_skbuff[i];
1130 if (skb) {
1131 dma_unmap_single(&np->pci_dev->dev,
1132 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1133 skb->len, DMA_TO_DEVICE);
1134 if (irq)
1135 dev_kfree_skb_irq (skb);
1136 else
1137 dev_kfree_skb (skb);
1138 np->tx_skbuff[i] = NULL;
1139 dev->stats.tx_dropped++;
1140 }
1141 }
1142 np->cur_tx = np->dirty_tx = 0;
1143 np->cur_task = 0;
1144
1145 np->last_tx = NULL;
1146 iowrite8(127, ioaddr + TxDMAPollPeriod);
1147
1148 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1149 return 0;
1150 }
1151
1152 /* The interrupt handler cleans up after the Tx thread,
1153 and schedules Rx work via the rx_tasklet. */
1154 static irqreturn_t intr_handler(int irq, void *dev_instance)
1155 {
1156 struct net_device *dev = (struct net_device *)dev_instance;
1157 struct netdev_private *np = netdev_priv(dev);
1158 void __iomem *ioaddr = np->base;
1159 int hw_frame_id;
1160 int tx_cnt;
1161 int tx_status;
1162 int handled = 0;
1163 int i;
1164
1165
1166 do {
1167 int intr_status = ioread16(ioaddr + IntrStatus);
1168 iowrite16(intr_status, ioaddr + IntrStatus);
1169
1170 if (netif_msg_intr(np))
1171 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1172 dev->name, intr_status);
1173
1174 if (!(intr_status & DEFAULT_INTR))
1175 break;
1176
1177 handled = 1;
1178
1179 if (intr_status & (IntrRxDMADone)) {
1180 iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1181 ioaddr + IntrEnable);
1182 if (np->budget < 0)
1183 np->budget = RX_BUDGET;
1184 tasklet_schedule(&np->rx_tasklet);
1185 }
1186 if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1187 tx_status = ioread16 (ioaddr + TxStatus);
1188 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1189 if (netif_msg_tx_done(np))
1190 printk
1191 ("%s: Transmit status is %2.2x.\n",
1192 dev->name, tx_status);
1193 if (tx_status & 0x1e) {
1194 if (netif_msg_tx_err(np))
1195 printk("%s: Transmit error status %4.4x.\n",
1196 dev->name, tx_status);
1197 dev->stats.tx_errors++;
1198 if (tx_status & 0x10)
1199 dev->stats.tx_fifo_errors++;
1200 if (tx_status & 0x08)
1201 dev->stats.collisions++;
1202 if (tx_status & 0x04)
1203 dev->stats.tx_fifo_errors++;
1204 if (tx_status & 0x02)
1205 dev->stats.tx_window_errors++;
1206
1207 /*
1208 ** This reset has been verified on
1209 ** DFE-580TX boards ! phdm@macqel.be.
1210 */
1211 if (tx_status & 0x10) { /* TxUnderrun */
1212 /* Restart Tx FIFO and transmitter */
1213 sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1214 /* No need to reset the Tx pointer here */
1215 }
1216 /* Restart the Tx. Need to make sure tx enabled */
1217 i = 10;
1218 do {
1219 iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1220 if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1221 break;
1222 mdelay(1);
1223 } while (--i);
1224 }
1225 /* Yup, this is a documentation bug. It cost me *hours*. */
1226 iowrite16 (0, ioaddr + TxStatus);
1227 if (tx_cnt < 0) {
1228 iowrite32(5000, ioaddr + DownCounter);
1229 break;
1230 }
1231 tx_status = ioread16 (ioaddr + TxStatus);
1232 }
1233 hw_frame_id = (tx_status >> 8) & 0xff;
1234 } else {
1235 hw_frame_id = ioread8(ioaddr + TxFrameId);
1236 }
1237
1238 if (np->pci_dev->revision >= 0x14) {
1239 spin_lock(&np->lock);
1240 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1241 int entry = np->dirty_tx % TX_RING_SIZE;
1242 struct sk_buff *skb;
1243 int sw_frame_id;
1244 sw_frame_id = (le32_to_cpu(
1245 np->tx_ring[entry].status) >> 2) & 0xff;
1246 if (sw_frame_id == hw_frame_id &&
1247 !(le32_to_cpu(np->tx_ring[entry].status)
1248 & 0x00010000))
1249 break;
1250 if (sw_frame_id == (hw_frame_id + 1) %
1251 TX_RING_SIZE)
1252 break;
1253 skb = np->tx_skbuff[entry];
1254 /* Free the original skb. */
1255 dma_unmap_single(&np->pci_dev->dev,
1256 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1257 skb->len, DMA_TO_DEVICE);
1258 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1259 np->tx_skbuff[entry] = NULL;
1260 np->tx_ring[entry].frag[0].addr = 0;
1261 np->tx_ring[entry].frag[0].length = 0;
1262 }
1263 spin_unlock(&np->lock);
1264 } else {
1265 spin_lock(&np->lock);
1266 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1267 int entry = np->dirty_tx % TX_RING_SIZE;
1268 struct sk_buff *skb;
1269 if (!(le32_to_cpu(np->tx_ring[entry].status)
1270 & 0x00010000))
1271 break;
1272 skb = np->tx_skbuff[entry];
1273 /* Free the original skb. */
1274 dma_unmap_single(&np->pci_dev->dev,
1275 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1276 skb->len, DMA_TO_DEVICE);
1277 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1278 np->tx_skbuff[entry] = NULL;
1279 np->tx_ring[entry].frag[0].addr = 0;
1280 np->tx_ring[entry].frag[0].length = 0;
1281 }
1282 spin_unlock(&np->lock);
1283 }
1284
1285 if (netif_queue_stopped(dev) &&
1286 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1287 /* The ring is no longer full, clear busy flag. */
1288 netif_wake_queue (dev);
1289 }
1290 /* Abnormal error summary/uncommon events handlers. */
1291 if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1292 netdev_error(dev, intr_status);
1293 } while (0);
1294 if (netif_msg_intr(np))
1295 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1296 dev->name, ioread16(ioaddr + IntrStatus));
1297 return IRQ_RETVAL(handled);
1298 }
1299
1300 static void rx_poll(unsigned long data)
1301 {
1302 struct net_device *dev = (struct net_device *)data;
1303 struct netdev_private *np = netdev_priv(dev);
1304 int entry = np->cur_rx % RX_RING_SIZE;
1305 int boguscnt = np->budget;
1306 void __iomem *ioaddr = np->base;
1307 int received = 0;
1308
1309 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1310 while (1) {
1311 struct netdev_desc *desc = &(np->rx_ring[entry]);
1312 u32 frame_status = le32_to_cpu(desc->status);
1313 int pkt_len;
1314
1315 if (--boguscnt < 0) {
1316 goto not_done;
1317 }
1318 if (!(frame_status & DescOwn))
1319 break;
1320 pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1321 if (netif_msg_rx_status(np))
1322 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1323 frame_status);
1324 if (frame_status & 0x001f4000) {
1325 /* There was an error. */
1326 if (netif_msg_rx_err(np))
1327 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1328 frame_status);
1329 dev->stats.rx_errors++;
1330 if (frame_status & 0x00100000)
1331 dev->stats.rx_length_errors++;
1332 if (frame_status & 0x00010000)
1333 dev->stats.rx_fifo_errors++;
1334 if (frame_status & 0x00060000)
1335 dev->stats.rx_frame_errors++;
1336 if (frame_status & 0x00080000)
1337 dev->stats.rx_crc_errors++;
1338 if (frame_status & 0x00100000) {
1339 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1340 " status %8.8x.\n",
1341 dev->name, frame_status);
1342 }
1343 } else {
1344 struct sk_buff *skb;
1345 #ifndef final_version
1346 if (netif_msg_rx_status(np))
1347 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1348 ", bogus_cnt %d.\n",
1349 pkt_len, boguscnt);
1350 #endif
1351 /* Check if the packet is long enough to accept without copying
1352 to a minimally-sized skbuff. */
1353 if (pkt_len < rx_copybreak &&
1354 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1355 skb_reserve(skb, 2); /* 16 byte align the IP header */
1356 dma_sync_single_for_cpu(&np->pci_dev->dev,
1357 le32_to_cpu(desc->frag[0].addr),
1358 np->rx_buf_sz, DMA_FROM_DEVICE);
1359 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1360 dma_sync_single_for_device(&np->pci_dev->dev,
1361 le32_to_cpu(desc->frag[0].addr),
1362 np->rx_buf_sz, DMA_FROM_DEVICE);
1363 skb_put(skb, pkt_len);
1364 } else {
1365 dma_unmap_single(&np->pci_dev->dev,
1366 le32_to_cpu(desc->frag[0].addr),
1367 np->rx_buf_sz, DMA_FROM_DEVICE);
1368 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1369 np->rx_skbuff[entry] = NULL;
1370 }
1371 skb->protocol = eth_type_trans(skb, dev);
1372 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1373 netif_rx(skb);
1374 }
1375 entry = (entry + 1) % RX_RING_SIZE;
1376 received++;
1377 }
1378 np->cur_rx = entry;
1379 refill_rx (dev);
1380 np->budget -= received;
1381 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1382 return;
1383
1384 not_done:
1385 np->cur_rx = entry;
1386 refill_rx (dev);
1387 if (!received)
1388 received = 1;
1389 np->budget -= received;
1390 if (np->budget <= 0)
1391 np->budget = RX_BUDGET;
1392 tasklet_schedule(&np->rx_tasklet);
1393 }
1394
1395 static void refill_rx (struct net_device *dev)
1396 {
1397 struct netdev_private *np = netdev_priv(dev);
1398 int entry;
1399 int cnt = 0;
1400
1401 /* Refill the Rx ring buffers. */
1402 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1403 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1404 struct sk_buff *skb;
1405 entry = np->dirty_rx % RX_RING_SIZE;
1406 if (np->rx_skbuff[entry] == NULL) {
1407 skb = dev_alloc_skb(np->rx_buf_sz);
1408 np->rx_skbuff[entry] = skb;
1409 if (skb == NULL)
1410 break; /* Better luck next round. */
1411 skb->dev = dev; /* Mark as being used by this device. */
1412 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1413 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1414 dma_map_single(&np->pci_dev->dev, skb->data,
1415 np->rx_buf_sz, DMA_FROM_DEVICE));
1416 if (dma_mapping_error(&np->pci_dev->dev,
1417 np->rx_ring[entry].frag[0].addr)) {
1418 dev_kfree_skb_irq(skb);
1419 np->rx_skbuff[entry] = NULL;
1420 break;
1421 }
1422 }
1423 /* Perhaps we need not reset this field. */
1424 np->rx_ring[entry].frag[0].length =
1425 cpu_to_le32(np->rx_buf_sz | LastFrag);
1426 np->rx_ring[entry].status = 0;
1427 cnt++;
1428 }
1429 }
1430 static void netdev_error(struct net_device *dev, int intr_status)
1431 {
1432 struct netdev_private *np = netdev_priv(dev);
1433 void __iomem *ioaddr = np->base;
1434 u16 mii_ctl, mii_advertise, mii_lpa;
1435 int speed;
1436
1437 if (intr_status & LinkChange) {
1438 if (mdio_wait_link(dev, 10) == 0) {
1439 printk(KERN_INFO "%s: Link up\n", dev->name);
1440 if (np->an_enable) {
1441 mii_advertise = mdio_read(dev, np->phys[0],
1442 MII_ADVERTISE);
1443 mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1444 mii_advertise &= mii_lpa;
1445 printk(KERN_INFO "%s: Link changed: ",
1446 dev->name);
1447 if (mii_advertise & ADVERTISE_100FULL) {
1448 np->speed = 100;
1449 printk("100Mbps, full duplex\n");
1450 } else if (mii_advertise & ADVERTISE_100HALF) {
1451 np->speed = 100;
1452 printk("100Mbps, half duplex\n");
1453 } else if (mii_advertise & ADVERTISE_10FULL) {
1454 np->speed = 10;
1455 printk("10Mbps, full duplex\n");
1456 } else if (mii_advertise & ADVERTISE_10HALF) {
1457 np->speed = 10;
1458 printk("10Mbps, half duplex\n");
1459 } else
1460 printk("\n");
1461
1462 } else {
1463 mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1464 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1465 np->speed = speed;
1466 printk(KERN_INFO "%s: Link changed: %dMbps, ",
1467 dev->name, speed);
1468 printk("%s duplex.\n",
1469 (mii_ctl & BMCR_FULLDPLX) ?
1470 "full" : "half");
1471 }
1472 check_duplex(dev);
1473 if (np->flowctrl && np->mii_if.full_duplex) {
1474 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1475 ioaddr + MulticastFilter1+2);
1476 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1477 ioaddr + MACCtrl0);
1478 }
1479 netif_carrier_on(dev);
1480 } else {
1481 printk(KERN_INFO "%s: Link down\n", dev->name);
1482 netif_carrier_off(dev);
1483 }
1484 }
1485 if (intr_status & StatsMax) {
1486 get_stats(dev);
1487 }
1488 if (intr_status & IntrPCIErr) {
1489 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1490 dev->name, intr_status);
1491 /* We must do a global reset of DMA to continue. */
1492 }
1493 }
1494
1495 static struct net_device_stats *get_stats(struct net_device *dev)
1496 {
1497 struct netdev_private *np = netdev_priv(dev);
1498 void __iomem *ioaddr = np->base;
1499 int i;
1500
1501 /* We should lock this segment of code for SMP eventually, although
1502 the vulnerability window is very small and statistics are
1503 non-critical. */
1504 /* The chip only needs to report frames it silently dropped. */
1505 dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1506 dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1507 dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1508 dev->stats.collisions += ioread8(ioaddr + StatsLateColl);
1509 dev->stats.collisions += ioread8(ioaddr + StatsMultiColl);
1510 dev->stats.collisions += ioread8(ioaddr + StatsOneColl);
1511 dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1512 ioread8(ioaddr + StatsTxDefer);
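/* Drain the remaining one-byte statistics registers; presumably they
   are clear-on-read, and their values are not accumulated here. */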
1513 for (i = StatsTxDefer; i <= StatsMcastRx; i++)
1514 ioread8(ioaddr + i);
1515 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1516 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1517 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1518 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1519
1520 return &dev->stats;
1521 }
1522
1523 static void set_rx_mode(struct net_device *dev)
1524 {
1525 struct netdev_private *np = netdev_priv(dev);
1526 void __iomem *ioaddr = np->base;
1527 u16 mc_filter[4]; /* Multicast hash filter */
1528 u32 rx_mode;
1529 int i;
1530
1531 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1532 memset(mc_filter, 0xff, sizeof(mc_filter));
1533 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1534 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1535 (dev->flags & IFF_ALLMULTI)) {
1536 /* Too many to match, or accept all multicasts. */
1537 memset(mc_filter, 0xff, sizeof(mc_filter));
1538 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1539 } else if (!netdev_mc_empty(dev)) {
1540 struct netdev_hw_addr *ha;
1541 int bit;
1542 int index;
1543 int crc;
1544 memset (mc_filter, 0, sizeof (mc_filter));
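/* Hash: the top 6 bits of the little-endian CRC of each address,
   taken MSB first (so the index is bit-reversed), select one bit of
   the 64-bit filter spread across four 16-bit registers. */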
1545 netdev_for_each_mc_addr(ha, dev) {
1546 crc = ether_crc_le(ETH_ALEN, ha->addr);
1547 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1548 if (crc & 0x80000000) index |= 1 << bit;
1549 mc_filter[index/16] |= (1 << (index % 16));
1550 }
1551 rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1552 } else {
1553 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1554 return;
1555 }
1556 if (np->mii_if.full_duplex && np->flowctrl)
1557 mc_filter[3] |= 0x0200;
1558
1559 for (i = 0; i < 4; i++)
1560 iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1561 iowrite8(rx_mode, ioaddr + RxMode);
1562 }
1563
1564 static int __set_mac_addr(struct net_device *dev)
1565 {
1566 struct netdev_private *np = netdev_priv(dev);
1567 u16 addr16;
1568
1569 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1570 iowrite16(addr16, np->base + StationAddr);
1571 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1572 iowrite16(addr16, np->base + StationAddr+2);
1573 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1574 iowrite16(addr16, np->base + StationAddr+4);
1575 return 0;
1576 }
1577
1578 static int check_if_running(struct net_device *dev)
1579 {
1580 if (!netif_running(dev))
1581 return -EINVAL;
1582 return 0;
1583 }
1584
1585 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1586 {
1587 struct netdev_private *np = netdev_priv(dev);
1588 strcpy(info->driver, DRV_NAME);
1589 strcpy(info->version, DRV_VERSION);
1590 strcpy(info->bus_info, pci_name(np->pci_dev));
1591 }
1592
1593 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1594 {
1595 struct netdev_private *np = netdev_priv(dev);
1596 spin_lock_irq(&np->lock);
1597 mii_ethtool_gset(&np->mii_if, ecmd);
1598 spin_unlock_irq(&np->lock);
1599 return 0;
1600 }
1601
1602 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1603 {
1604 struct netdev_private *np = netdev_priv(dev);
1605 int res;
1606 spin_lock_irq(&np->lock);
1607 res = mii_ethtool_sset(&np->mii_if, ecmd);
1608 spin_unlock_irq(&np->lock);
1609 return res;
1610 }
1611
1612 static int nway_reset(struct net_device *dev)
1613 {
1614 struct netdev_private *np = netdev_priv(dev);
1615 return mii_nway_restart(&np->mii_if);
1616 }
1617
1618 static u32 get_link(struct net_device *dev)
1619 {
1620 struct netdev_private *np = netdev_priv(dev);
1621 return mii_link_ok(&np->mii_if);
1622 }
1623
1624 static u32 get_msglevel(struct net_device *dev)
1625 {
1626 struct netdev_private *np = netdev_priv(dev);
1627 return np->msg_enable;
1628 }
1629
1630 static void set_msglevel(struct net_device *dev, u32 val)
1631 {
1632 struct netdev_private *np = netdev_priv(dev);
1633 np->msg_enable = val;
1634 }
1635
1636 static const struct ethtool_ops ethtool_ops = {
1637 .begin = check_if_running,
1638 .get_drvinfo = get_drvinfo,
1639 .get_settings = get_settings,
1640 .set_settings = set_settings,
1641 .nway_reset = nway_reset,
1642 .get_link = get_link,
1643 .get_msglevel = get_msglevel,
1644 .set_msglevel = set_msglevel,
1645 };
1646
1647 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1648 {
1649 struct netdev_private *np = netdev_priv(dev);
1650 int rc;
1651
1652 if (!netif_running(dev))
1653 return -EINVAL;
1654
1655 spin_lock_irq(&np->lock);
1656 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1657 spin_unlock_irq(&np->lock);
1658
1659 return rc;
1660 }
1661
1662 static int netdev_close(struct net_device *dev)
1663 {
1664 struct netdev_private *np = netdev_priv(dev);
1665 void __iomem *ioaddr = np->base;
1666 struct sk_buff *skb;
1667 int i;
1668
1669 /* Wait and kill tasklet */
1670 tasklet_kill(&np->rx_tasklet);
1671 tasklet_kill(&np->tx_tasklet);
1672 np->cur_tx = 0;
1673 np->dirty_tx = 0;
1674 np->cur_task = 0;
1675 np->last_tx = NULL;
1676
1677 netif_stop_queue(dev);
1678
1679 if (netif_msg_ifdown(np)) {
1680 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1681 "Rx %4.4x Int %2.2x.\n",
1682 dev->name, ioread8(ioaddr + TxStatus),
1683 ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1684 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1685 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1686 }
1687
1688 /* Disable interrupts by clearing the interrupt mask. */
1689 iowrite16(0x0000, ioaddr + IntrEnable);
1690
1691 /* Disable Rx and Tx DMA so resources can be released safely */
1692 iowrite32(0x500, ioaddr + DMACtrl);
1693
1694 /* Stop the chip's Tx and Rx processes. */
1695 iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1696
1697 for (i = 2000; i > 0; i--) {
1698 if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1699 break;
1700 mdelay(1);
1701 }
1702
1703 iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1704 ioaddr + ASICCtrl + 2);
1705
1706 for (i = 2000; i > 0; i--) {
1707 if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
1708 break;
1709 mdelay(1);
1710 }
1711
1712 #ifdef __i386__
1713 if (netif_msg_hw(np)) {
1714 printk(KERN_DEBUG " Tx ring at %8.8x:\n",
1715 (int)(np->tx_ring_dma));
1716 for (i = 0; i < TX_RING_SIZE; i++)
1717 printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1718 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1719 np->tx_ring[i].frag[0].length);
1720 printk(KERN_DEBUG " Rx ring %8.8x:\n",
1721 (int)(np->rx_ring_dma));
1722 for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1723 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1724 i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1725 np->rx_ring[i].frag[0].length);
1726 }
1727 }
1728 #endif /* __i386__ debugging only */
1729
1730 free_irq(dev->irq, dev);
1731
1732 del_timer_sync(&np->timer);
1733
1734 /* Free all the skbuffs in the Rx queue. */
1735 for (i = 0; i < RX_RING_SIZE; i++) {
1736 np->rx_ring[i].status = 0;
1737 skb = np->rx_skbuff[i];
1738 if (skb) {
1739 dma_unmap_single(&np->pci_dev->dev,
1740 le32_to_cpu(np->rx_ring[i].frag[0].addr),
1741 np->rx_buf_sz, DMA_FROM_DEVICE);
1742 dev_kfree_skb(skb);
1743 np->rx_skbuff[i] = NULL;
1744 }
1745 np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1746 }
1747 for (i = 0; i < TX_RING_SIZE; i++) {
1748 np->tx_ring[i].next_desc = 0;
1749 skb = np->tx_skbuff[i];
1750 if (skb) {
1751 dma_unmap_single(&np->pci_dev->dev,
1752 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1753 skb->len, DMA_TO_DEVICE);
1754 dev_kfree_skb(skb);
1755 np->tx_skbuff[i] = NULL;
1756 }
1757 }
1758
1759 return 0;
1760 }
1761
1762 static void __devexit sundance_remove1 (struct pci_dev *pdev)
1763 {
1764 struct net_device *dev = pci_get_drvdata(pdev);
1765
1766 if (dev) {
1767 struct netdev_private *np = netdev_priv(dev);
1768 unregister_netdev(dev);
1769 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
1770 np->rx_ring, np->rx_ring_dma);
1771 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
1772 np->tx_ring, np->tx_ring_dma);
1773 pci_iounmap(pdev, np->base);
1774 pci_release_regions(pdev);
1775 free_netdev(dev);
1776 pci_set_drvdata(pdev, NULL);
1777 }
1778 }
1779
1780 #ifdef CONFIG_PM
1781
1782 static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
1783 {
1784 struct net_device *dev = pci_get_drvdata(pci_dev);
1785
1786 if (!netif_running(dev))
1787 return 0;
1788
1789 netdev_close(dev);
1790 netif_device_detach(dev);
1791
1792 pci_save_state(pci_dev);
1793 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
1794
1795 return 0;
1796 }
1797
1798 static int sundance_resume(struct pci_dev *pci_dev)
1799 {
1800 struct net_device *dev = pci_get_drvdata(pci_dev);
1801 int err = 0;
1802
1803 if (!netif_running(dev))
1804 return 0;
1805
1806 pci_set_power_state(pci_dev, PCI_D0);
1807 pci_restore_state(pci_dev);
1808
1809 err = netdev_open(dev);
1810 if (err) {
1811 printk(KERN_ERR "%s: Can't resume interface!\n",
1812 dev->name);
1813 goto out;
1814 }
1815
1816 netif_device_attach(dev);
1817
1818 out:
1819 return err;
1820 }
1821
1822 #endif /* CONFIG_PM */
1823
1824 static struct pci_driver sundance_driver = {
1825 .name = DRV_NAME,
1826 .id_table = sundance_pci_tbl,
1827 .probe = sundance_probe1,
1828 .remove = __devexit_p(sundance_remove1),
1829 #ifdef CONFIG_PM
1830 .suspend = sundance_suspend,
1831 .resume = sundance_resume,
1832 #endif /* CONFIG_PM */
1833 };
1834
1835 static int __init sundance_init(void)
1836 {
1837 /* when a module, this is printed whether or not devices are found in probe */
1838 #ifdef MODULE
1839 printk(version);
1840 #endif
1841 return pci_register_driver(&sundance_driver);
1842 }
1843
1844 static void __exit sundance_exit(void)
1845 {
1846 pci_unregister_driver(&sundance_driver);
1847 }
1848
1849 module_init(sundance_init);
1850 module_exit(sundance_exit);
1851
1852