[PATCH] pcnet32: Handle memory allocation failures cleanly when resizing tx/rx rings
drivers/net/pcnet32.c
1/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */
2/*
3 * Copyright 1996-1999 Thomas Bogendoerfer
4 *
5 * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
6 *
7 * Copyright 1993 United States Government as represented by the
8 * Director, National Security Agency.
9 *
10 * This software may be used and distributed according to the terms
11 * of the GNU General Public License, incorporated herein by reference.
12 *
13 * This driver is for PCnet32 and PCnetPCI based ethercards
14 */
15/**************************************************************************
16 * 23 Oct, 2000.
17 * Fixed a few bugs, related to running the controller in 32bit mode.
18 *
19 * Carsten Langgaard, carstenl@mips.com
20 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
21 *
22 *************************************************************************/
23
24#define DRV_NAME "pcnet32"
25#define DRV_VERSION "1.32"
26#define DRV_RELDATE "18.Mar.2006"
27#define PFX DRV_NAME ": "
28
29static const char *const version =
30 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
31
32#include <linux/module.h>
33#include <linux/kernel.h>
34#include <linux/string.h>
35#include <linux/errno.h>
36#include <linux/ioport.h>
37#include <linux/slab.h>
38#include <linux/interrupt.h>
39#include <linux/pci.h>
40#include <linux/delay.h>
41#include <linux/init.h>
42#include <linux/ethtool.h>
43#include <linux/mii.h>
44#include <linux/crc32.h>
45#include <linux/netdevice.h>
46#include <linux/etherdevice.h>
47#include <linux/skbuff.h>
48#include <linux/spinlock.h>
49#include <linux/moduleparam.h>
50#include <linux/bitops.h>
51
52#include <asm/dma.h>
53#include <asm/io.h>
54#include <asm/uaccess.h>
55#include <asm/irq.h>
56
57/*
58 * PCI device identifiers for "new style" Linux PCI Device Drivers
59 */
60static struct pci_device_id pcnet32_pci_tbl[] = {
f2622a2b
DF
61 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
62 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
4a5e8e29
JG
63
64 /*
65 * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
66 * the incorrect vendor id.
67 */
f2622a2b
DF
68 { PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE),
69 .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, },
4a5e8e29
JG
70
71 { } /* terminate list */
1da177e4
LT
72};
73
4a5e8e29 74MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl);
1da177e4
LT
75
76static int cards_found;
77
78/*
79 * VLB I/O addresses
80 */
81static unsigned int pcnet32_portlist[] __initdata =
4a5e8e29 82 { 0x300, 0x320, 0x340, 0x360, 0 };
1da177e4
LT
83
84static int pcnet32_debug = 0;
4a5e8e29
JG
85static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
86static int pcnet32vlb; /* check for VLB cards ? */
1da177e4
LT
87
88static struct net_device *pcnet32_dev;
89
90static int max_interrupt_work = 2;
91static int rx_copybreak = 200;
92
93#define PCNET32_PORT_AUI 0x00
94#define PCNET32_PORT_10BT 0x01
95#define PCNET32_PORT_GPSI 0x02
96#define PCNET32_PORT_MII 0x03
97
98#define PCNET32_PORT_PORTSEL 0x03
99#define PCNET32_PORT_ASEL 0x04
100#define PCNET32_PORT_100 0x40
101#define PCNET32_PORT_FD 0x80
102
103#define PCNET32_DMA_MASK 0xffffffff
104
105#define PCNET32_WATCHDOG_TIMEOUT (jiffies + (2 * HZ))
106#define PCNET32_BLINK_TIMEOUT (jiffies + (HZ/4))
107
108/*
109 * table to translate option values from tulip
110 * to internal options
111 */
f71e1309 112static const unsigned char options_mapping[] = {
4a5e8e29
JG
113 PCNET32_PORT_ASEL, /* 0 Auto-select */
114 PCNET32_PORT_AUI, /* 1 BNC/AUI */
115 PCNET32_PORT_AUI, /* 2 AUI/BNC */
116 PCNET32_PORT_ASEL, /* 3 not supported */
117 PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */
118 PCNET32_PORT_ASEL, /* 5 not supported */
119 PCNET32_PORT_ASEL, /* 6 not supported */
120 PCNET32_PORT_ASEL, /* 7 not supported */
121 PCNET32_PORT_ASEL, /* 8 not supported */
122 PCNET32_PORT_MII, /* 9 MII 10baseT */
123 PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */
124 PCNET32_PORT_MII, /* 11 MII (autosel) */
125 PCNET32_PORT_10BT, /* 12 10BaseT */
126 PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */
127 /* 14 MII 100BaseTx-FD */
128 PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD,
129 PCNET32_PORT_ASEL /* 15 not supported */
1da177e4
LT
130};
131
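/*
 * For illustration (the mapping itself is unchanged): the per-card module
 * parameter options[n] indexes this table in pcnet32_probe1(), so options=4
 * selects PCNET32_PORT_10BT | PCNET32_PORT_FD (10baseT full duplex) and
 * options=13 selects PCNET32_PORT_MII | PCNET32_PORT_100 (MII 100BaseTx);
 * values outside the table fall back to PCNET32_PORT_ASEL (auto-select).
 */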
132static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = {
4a5e8e29 133 "Loopback test (offline)"
1da177e4 134};
4a5e8e29 135
1da177e4
LT
136#define PCNET32_TEST_LEN (sizeof(pcnet32_gstrings_test) / ETH_GSTRING_LEN)
137
ac62ef04 138#define PCNET32_NUM_REGS 136
1da177e4 139
4a5e8e29 140#define MAX_UNITS 8 /* More are supported, limit only on options */
1da177e4
LT
141static int options[MAX_UNITS];
142static int full_duplex[MAX_UNITS];
143static int homepna[MAX_UNITS];
144
145/*
146 * Theory of Operation
147 *
148 * This driver uses the same software structure as the normal lance
149 * driver. So look for a verbose description in lance.c. The difference
150 * from the normal lance driver is the use of the 32bit mode of PCnet32
151 * and PCnetPCI chips. Because these chips are 32bit chips, there is no
152 * 16MB limitation and we don't need bounce buffers.
153 */
154
1da177e4
LT
155/*
156 * Set the number of Tx and Rx buffers, using Log_2(# buffers).
157 * Reasonable default values are 16 Tx buffers and 32 Rx buffers.
158 * That translates to 4 (16 == 2^4) and 5 (32 == 2^5).
159 */
160#ifndef PCNET32_LOG_TX_BUFFERS
eabf0415
HWL
161#define PCNET32_LOG_TX_BUFFERS 4
162#define PCNET32_LOG_RX_BUFFERS 5
163#define PCNET32_LOG_MAX_TX_BUFFERS 9 /* 2^9 == 512 */
164#define PCNET32_LOG_MAX_RX_BUFFERS 9
1da177e4
LT
165#endif
166
167#define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS))
eabf0415 168#define TX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_TX_BUFFERS))
1da177e4
LT
169
170#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS))
eabf0415 171#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS))
1da177e4
LT
172
173#define PKT_BUF_SZ 1544
174
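/*
 * Worked example, based only on the defines above: the default log2 values
 * of 4 and 5 give TX_RING_SIZE = 1 << 4 = 16 and RX_RING_SIZE = 1 << 5 = 32
 * descriptors, and ethtool may grow either ring up to 1 << 9 = 512 entries.
 * Each receive buffer is PKT_BUF_SZ (1544) bytes, which still holds a
 * maximum-size 1518-byte Ethernet frame after the 2-byte skb_reserve() used
 * for IP header alignment.
 */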
175/* Offsets from base I/O address. */
176#define PCNET32_WIO_RDP 0x10
177#define PCNET32_WIO_RAP 0x12
178#define PCNET32_WIO_RESET 0x14
179#define PCNET32_WIO_BDP 0x16
180
181#define PCNET32_DWIO_RDP 0x10
182#define PCNET32_DWIO_RAP 0x14
183#define PCNET32_DWIO_RESET 0x18
184#define PCNET32_DWIO_BDP 0x1C
185
186#define PCNET32_TOTAL_SIZE 0x20
187
06c87850
DF
188#define CSR0 0
189#define CSR0_INIT 0x1
190#define CSR0_START 0x2
191#define CSR0_STOP 0x4
192#define CSR0_TXPOLL 0x8
193#define CSR0_INTEN 0x40
194#define CSR0_IDON 0x0100
195#define CSR0_NORMAL (CSR0_START | CSR0_INTEN)
196#define PCNET32_INIT_LOW 1
197#define PCNET32_INIT_HIGH 2
198#define CSR3 3
199#define CSR4 4
200#define CSR5 5
201#define CSR5_SUSPEND 0x0001
202#define CSR15 15
203#define PCNET32_MC_FILTER 8
204
1da177e4
LT
205/* The PCNET32 Rx and Tx ring descriptors. */
206struct pcnet32_rx_head {
0b5bf225
JG
207 u32 base;
208 s16 buf_length;
209 s16 status;
210 u32 msg_length;
211 u32 reserved;
1da177e4
LT
212};
213
214struct pcnet32_tx_head {
0b5bf225
JG
215 u32 base;
216 s16 length;
217 s16 status;
218 u32 misc;
219 u32 reserved;
1da177e4
LT
220};
221
222/* The PCNET32 32-Bit initialization block, described in databook. */
223struct pcnet32_init_block {
0b5bf225
JG
224 u16 mode;
225 u16 tlen_rlen;
226 u8 phys_addr[6];
227 u16 reserved;
228 u32 filter[2];
4a5e8e29 229 /* Receive and transmit ring base, along with extra bits. */
0b5bf225
JG
230 u32 rx_ring;
231 u32 tx_ring;
1da177e4
LT
232};
233
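/*
 * A rough sketch of how this block is consumed, mirroring pcnet32_probe1()
 * below: tlen_rlen packs the two ring-length exponents as
 * (log2(tx entries) << 12) | (log2(rx entries) << 4), and the chip fetches
 * the whole block over DMA once its bus address has been written to
 * CSR1/CSR2 (init_dma_addr below stands for lp->dma_addr plus the offset of
 * init_block):
 *
 *	a->write_csr(ioaddr, 1, init_dma_addr & 0xffff);
 *	a->write_csr(ioaddr, 2, init_dma_addr >> 16);
 */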
234/* PCnet32 access functions */
235struct pcnet32_access {
4a5e8e29
JG
236 u16 (*read_csr) (unsigned long, int);
237 void (*write_csr) (unsigned long, int, u16);
238 u16 (*read_bcr) (unsigned long, int);
239 void (*write_bcr) (unsigned long, int, u16);
240 u16 (*read_rap) (unsigned long);
241 void (*write_rap) (unsigned long, u16);
242 void (*reset) (unsigned long);
1da177e4
LT
243};
244
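/*
 * Usage sketch (no new functionality implied): the probe code points lp->a
 * at either pcnet32_wio (16-bit I/O) or pcnet32_dwio (32-bit I/O), after
 * which the rest of the driver stays I/O-width agnostic, for example:
 *
 *	lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
 *	val = lp->a.read_bcr(ioaddr, 2);
 */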
245/*
76209926
HWL
246 * The first field of pcnet32_private is read by the ethernet device
247 * so the structure should be allocated using pci_alloc_consistent().
1da177e4
LT
248 */
249struct pcnet32_private {
4a5e8e29
JG
250 struct pcnet32_init_block init_block;
251 /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
0b5bf225
JG
252 struct pcnet32_rx_head *rx_ring;
253 struct pcnet32_tx_head *tx_ring;
254 dma_addr_t dma_addr;/* DMA address of beginning of this
255 object, returned by pci_alloc_consistent */
256 struct pci_dev *pci_dev;
257 const char *name;
4a5e8e29 258 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
0b5bf225
JG
259 struct sk_buff **tx_skbuff;
260 struct sk_buff **rx_skbuff;
261 dma_addr_t *tx_dma_addr;
262 dma_addr_t *rx_dma_addr;
263 struct pcnet32_access a;
264 spinlock_t lock; /* Guard lock */
265 unsigned int cur_rx, cur_tx; /* The next free ring entry */
266 unsigned int rx_ring_size; /* current rx ring size */
267 unsigned int tx_ring_size; /* current tx ring size */
268 unsigned int rx_mod_mask; /* rx ring modular mask */
269 unsigned int tx_mod_mask; /* tx ring modular mask */
270 unsigned short rx_len_bits;
271 unsigned short tx_len_bits;
272 dma_addr_t rx_ring_dma_addr;
273 dma_addr_t tx_ring_dma_addr;
274 unsigned int dirty_rx, /* ring entries to be freed. */
275 dirty_tx;
276
277 struct net_device_stats stats;
278 char tx_full;
279 char phycount; /* number of phys found */
280 int options;
281 unsigned int shared_irq:1, /* shared irq possible */
282 dxsuflo:1, /* disable transmit stop on uflo */
283 mii:1; /* mii port available */
284 struct net_device *next;
285 struct mii_if_info mii_if;
286 struct timer_list watchdog_timer;
287 struct timer_list blink_timer;
288 u32 msg_enable; /* debug message level */
4a5e8e29
JG
289
290 /* each bit indicates an available PHY */
0b5bf225 291 u32 phymask;
1da177e4
LT
292};
293
4a5e8e29
JG
294static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
295static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
296static int pcnet32_open(struct net_device *);
297static int pcnet32_init_ring(struct net_device *);
298static int pcnet32_start_xmit(struct sk_buff *, struct net_device *);
299static int pcnet32_rx(struct net_device *);
300static void pcnet32_tx_timeout(struct net_device *dev);
1da177e4 301static irqreturn_t pcnet32_interrupt(int, void *, struct pt_regs *);
4a5e8e29 302static int pcnet32_close(struct net_device *);
1da177e4
LT
303static struct net_device_stats *pcnet32_get_stats(struct net_device *);
304static void pcnet32_load_multicast(struct net_device *dev);
305static void pcnet32_set_multicast_list(struct net_device *);
4a5e8e29 306static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
1da177e4
LT
307static void pcnet32_watchdog(struct net_device *);
308static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
4a5e8e29
JG
309static void mdio_write(struct net_device *dev, int phy_id, int reg_num,
310 int val);
1da177e4
LT
311static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
312static void pcnet32_ethtool_test(struct net_device *dev,
4a5e8e29
JG
313 struct ethtool_test *eth_test, u64 * data);
314static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1);
1da177e4
LT
315static int pcnet32_phys_id(struct net_device *dev, u32 data);
316static void pcnet32_led_blink_callback(struct net_device *dev);
317static int pcnet32_get_regs_len(struct net_device *dev);
318static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4a5e8e29 319 void *ptr);
1bcd3153 320static void pcnet32_purge_tx_ring(struct net_device *dev);
a88c844c 321static int pcnet32_alloc_ring(struct net_device *dev, char *name);
eabf0415 322static void pcnet32_free_ring(struct net_device *dev);
ac62ef04 323static void pcnet32_check_media(struct net_device *dev, int verbose);
eabf0415 324
4a5e8e29 325static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
1da177e4 326{
4a5e8e29
JG
327 outw(index, addr + PCNET32_WIO_RAP);
328 return inw(addr + PCNET32_WIO_RDP);
1da177e4
LT
329}
330
4a5e8e29 331static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val)
1da177e4 332{
4a5e8e29
JG
333 outw(index, addr + PCNET32_WIO_RAP);
334 outw(val, addr + PCNET32_WIO_RDP);
1da177e4
LT
335}
336
4a5e8e29 337static u16 pcnet32_wio_read_bcr(unsigned long addr, int index)
1da177e4 338{
4a5e8e29
JG
339 outw(index, addr + PCNET32_WIO_RAP);
340 return inw(addr + PCNET32_WIO_BDP);
1da177e4
LT
341}
342
4a5e8e29 343static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val)
1da177e4 344{
4a5e8e29
JG
345 outw(index, addr + PCNET32_WIO_RAP);
346 outw(val, addr + PCNET32_WIO_BDP);
1da177e4
LT
347}
348
4a5e8e29 349static u16 pcnet32_wio_read_rap(unsigned long addr)
1da177e4 350{
4a5e8e29 351 return inw(addr + PCNET32_WIO_RAP);
1da177e4
LT
352}
353
4a5e8e29 354static void pcnet32_wio_write_rap(unsigned long addr, u16 val)
1da177e4 355{
4a5e8e29 356 outw(val, addr + PCNET32_WIO_RAP);
1da177e4
LT
357}
358
4a5e8e29 359static void pcnet32_wio_reset(unsigned long addr)
1da177e4 360{
4a5e8e29 361 inw(addr + PCNET32_WIO_RESET);
1da177e4
LT
362}
363
4a5e8e29 364static int pcnet32_wio_check(unsigned long addr)
1da177e4 365{
4a5e8e29
JG
366 outw(88, addr + PCNET32_WIO_RAP);
367 return (inw(addr + PCNET32_WIO_RAP) == 88);
1da177e4
LT
368}
369
370static struct pcnet32_access pcnet32_wio = {
4a5e8e29
JG
371 .read_csr = pcnet32_wio_read_csr,
372 .write_csr = pcnet32_wio_write_csr,
373 .read_bcr = pcnet32_wio_read_bcr,
374 .write_bcr = pcnet32_wio_write_bcr,
375 .read_rap = pcnet32_wio_read_rap,
376 .write_rap = pcnet32_wio_write_rap,
377 .reset = pcnet32_wio_reset
1da177e4
LT
378};
379
4a5e8e29 380static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
1da177e4 381{
4a5e8e29
JG
382 outl(index, addr + PCNET32_DWIO_RAP);
383 return (inl(addr + PCNET32_DWIO_RDP) & 0xffff);
1da177e4
LT
384}
385
4a5e8e29 386static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
1da177e4 387{
4a5e8e29
JG
388 outl(index, addr + PCNET32_DWIO_RAP);
389 outl(val, addr + PCNET32_DWIO_RDP);
1da177e4
LT
390}
391
4a5e8e29 392static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
1da177e4 393{
4a5e8e29
JG
394 outl(index, addr + PCNET32_DWIO_RAP);
395 return (inl(addr + PCNET32_DWIO_BDP) & 0xffff);
1da177e4
LT
396}
397
4a5e8e29 398static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
1da177e4 399{
4a5e8e29
JG
400 outl(index, addr + PCNET32_DWIO_RAP);
401 outl(val, addr + PCNET32_DWIO_BDP);
1da177e4
LT
402}
403
4a5e8e29 404static u16 pcnet32_dwio_read_rap(unsigned long addr)
1da177e4 405{
4a5e8e29 406 return (inl(addr + PCNET32_DWIO_RAP) & 0xffff);
1da177e4
LT
407}
408
4a5e8e29 409static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
1da177e4 410{
4a5e8e29 411 outl(val, addr + PCNET32_DWIO_RAP);
1da177e4
LT
412}
413
4a5e8e29 414static void pcnet32_dwio_reset(unsigned long addr)
1da177e4 415{
4a5e8e29 416 inl(addr + PCNET32_DWIO_RESET);
1da177e4
LT
417}
418
4a5e8e29 419static int pcnet32_dwio_check(unsigned long addr)
1da177e4 420{
4a5e8e29
JG
421 outl(88, addr + PCNET32_DWIO_RAP);
422 return ((inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88);
1da177e4
LT
423}
424
425static struct pcnet32_access pcnet32_dwio = {
4a5e8e29
JG
426 .read_csr = pcnet32_dwio_read_csr,
427 .write_csr = pcnet32_dwio_write_csr,
428 .read_bcr = pcnet32_dwio_read_bcr,
429 .write_bcr = pcnet32_dwio_write_bcr,
430 .read_rap = pcnet32_dwio_read_rap,
431 .write_rap = pcnet32_dwio_write_rap,
432 .reset = pcnet32_dwio_reset
1da177e4
LT
433};
434
435static void pcnet32_netif_stop(struct net_device *dev)
436{
437 dev->trans_start = jiffies;
438 netif_poll_disable(dev);
439 netif_tx_disable(dev);
440}
441
442static void pcnet32_netif_start(struct net_device *dev)
443{
444 netif_wake_queue(dev);
445 netif_poll_enable(dev);
446}
447
448/*
449 * Allocate space for the new sized tx ring.
450 * Free old resources
451 * Save new resources.
452 * Any failure keeps old resources.
453 * Must be called with lp->lock held.
454 */
455static void pcnet32_realloc_tx_ring(struct net_device *dev,
456 struct pcnet32_private *lp,
457 unsigned int size)
458{
459 dma_addr_t new_ring_dma_addr;
460 dma_addr_t *new_dma_addr_list;
461 struct pcnet32_tx_head *new_tx_ring;
462 struct sk_buff **new_skb_list;
463
464 pcnet32_purge_tx_ring(dev);
465
466 new_tx_ring = pci_alloc_consistent(lp->pci_dev,
467 sizeof(struct pcnet32_tx_head) *
468 (1 << size),
469 &new_ring_dma_addr);
470 if (new_tx_ring == NULL) {
471 if (netif_msg_drv(lp))
472 printk("\n" KERN_ERR
473 "%s: Consistent memory allocation failed.\n",
474 dev->name);
475 return;
476 }
477 memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
478
479 new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
480 GFP_ATOMIC);
481 if (!new_dma_addr_list) {
482 if (netif_msg_drv(lp))
483 printk("\n" KERN_ERR
484 "%s: Memory allocation failed.\n", dev->name);
485 goto free_new_tx_ring;
486 }
487
488 new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
489 GFP_ATOMIC);
490 if (!new_skb_list) {
491 if (netif_msg_drv(lp))
492 printk("\n" KERN_ERR
493 "%s: Memory allocation failed.\n", dev->name);
494 goto free_new_lists;
495 }
496
497 kfree(lp->tx_skbuff);
498 kfree(lp->tx_dma_addr);
499 pci_free_consistent(lp->pci_dev,
500 sizeof(struct pcnet32_tx_head) *
501 lp->tx_ring_size, lp->tx_ring,
502 lp->tx_ring_dma_addr);
503
504 lp->tx_ring_size = (1 << size);
505 lp->tx_mod_mask = lp->tx_ring_size - 1;
506 lp->tx_len_bits = (size << 12);
507 lp->tx_ring = new_tx_ring;
508 lp->tx_ring_dma_addr = new_ring_dma_addr;
509 lp->tx_dma_addr = new_dma_addr_list;
510 lp->tx_skbuff = new_skb_list;
511 return;
512
513 free_new_lists:
514 kfree(new_dma_addr_list);
515 free_new_tx_ring:
516 pci_free_consistent(lp->pci_dev,
517 sizeof(struct pcnet32_tx_head) *
518 (1 << size),
519 new_tx_ring,
520 new_ring_dma_addr);
521 return;
522}
523
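/*
 * Note on the error handling above (a description, not a new code path):
 * every new resource is allocated first -- the descriptor ring with
 * pci_alloc_consistent() and the two bookkeeping arrays with
 * kcalloc(..., GFP_ATOMIC), since lp->lock is held -- and only after all
 * three succeed are the old ring and arrays freed and the lp->tx_* fields
 * updated.  A failure at any step unwinds just the pieces allocated here
 * and returns, leaving the device running on its previous ring.
 */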
524/*
525 * Allocate space for the new sized rx ring.
526 * Re-use old receive buffers.
527 * alloc extra buffers
528 * free unneeded buffers
530 * Save new resources.
531 * Any failure keeps old resources.
532 * Must be called with lp->lock held.
533 */
534static void pcnet32_realloc_rx_ring(struct net_device *dev,
535 struct pcnet32_private *lp,
536 unsigned int size)
537{
538 dma_addr_t new_ring_dma_addr;
539 dma_addr_t *new_dma_addr_list;
540 struct pcnet32_rx_head *new_rx_ring;
541 struct sk_buff **new_skb_list;
542 int new, overlap;
543
544 new_rx_ring = pci_alloc_consistent(lp->pci_dev,
545 sizeof(struct pcnet32_rx_head) *
546 (1 << size),
547 &new_ring_dma_addr);
548 if (new_rx_ring == NULL) {
549 if (netif_msg_drv(lp))
550 printk("\n" KERN_ERR
551 "%s: Consistent memory allocation failed.\n",
552 dev->name);
553 return;
554 }
555 memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
556
557 new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
558 GFP_ATOMIC);
559 if (!new_dma_addr_list) {
560 if (netif_msg_drv(lp))
561 printk("\n" KERN_ERR
562 "%s: Memory allocation failed.\n", dev->name);
563 goto free_new_rx_ring;
564 }
565
566 new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
567 GFP_ATOMIC);
568 if (!new_skb_list) {
569 if (netif_msg_drv(lp))
570 printk("\n" KERN_ERR
571 "%s: Memory allocation failed.\n", dev->name);
572 goto free_new_lists;
573 }
574
575 /* first copy the current receive buffers */
576 overlap = min(1U << size, lp->rx_ring_size);
577 for (new = 0; new < overlap; new++) {
578 new_rx_ring[new] = lp->rx_ring[new];
579 new_dma_addr_list[new] = lp->rx_dma_addr[new];
580 new_skb_list[new] = lp->rx_skbuff[new];
581 }
582 /* now allocate any new buffers needed */
583 for (; new < (1 << size); new++) {
584 struct sk_buff *rx_skbuff;
585 new_skb_list[new] = dev_alloc_skb(PKT_BUF_SZ);
586 if (!(rx_skbuff = new_skb_list[new])) {
587 /* keep the original lists and buffers */
588 if (netif_msg_drv(lp))
589 printk(KERN_ERR
590 "%s: pcnet32_realloc_rx_ring dev_alloc_skb failed.\n",
591 dev->name);
592 goto free_all_new;
593 }
594 skb_reserve(rx_skbuff, 2);
595
596 new_dma_addr_list[new] =
597 pci_map_single(lp->pci_dev, rx_skbuff->data,
598 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
599 new_rx_ring[new].base = (u32) le32_to_cpu(new_dma_addr_list[new]);
600 new_rx_ring[new].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
601 new_rx_ring[new].status = le16_to_cpu(0x8000);
602 }
603 /* and free any unneeded buffers */
604 for (; new < lp->rx_ring_size; new++) {
605 if (lp->rx_skbuff[new]) {
606 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
607 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
608 dev_kfree_skb(lp->rx_skbuff[new]);
609 }
610 }
611
612 kfree(lp->rx_skbuff);
613 kfree(lp->rx_dma_addr);
614 pci_free_consistent(lp->pci_dev,
615 sizeof(struct pcnet32_rx_head) *
616 lp->rx_ring_size, lp->rx_ring,
617 lp->rx_ring_dma_addr);
618
619 lp->rx_ring_size = (1 << size);
620 lp->rx_mod_mask = lp->rx_ring_size - 1;
621 lp->rx_len_bits = (size << 4);
622 lp->rx_ring = new_rx_ring;
623 lp->rx_ring_dma_addr = new_ring_dma_addr;
624 lp->rx_dma_addr = new_dma_addr_list;
625 lp->rx_skbuff = new_skb_list;
626 return;
627
628 free_all_new:
629 for (; --new >= lp->rx_ring_size; ) {
630 if (new_skb_list[new]) {
631 pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
632 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
633 dev_kfree_skb(new_skb_list[new]);
634 }
635 }
636 kfree(new_skb_list);
637 free_new_lists:
638 kfree(new_dma_addr_list);
639 free_new_rx_ring:
640 pci_free_consistent(lp->pci_dev,
641 sizeof(struct pcnet32_rx_head) *
642 (1 << size),
643 new_rx_ring,
644 new_ring_dma_addr);
645 return;
646}
647
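/*
 * Worked example for the copy/alloc/free logic above: growing the rx ring
 * from 32 to 128 entries copies descriptors, DMA addresses and skbs 0..31
 * into the new ring, allocates and maps fresh PKT_BUF_SZ skbs for entries
 * 32..127 (handing them to the chip by setting the 0x8000 OWN bit), and
 * frees nothing the old ring still owns.  Shrinking from 128 to 32 copies
 * entries 0..31 and then unmaps and frees the no longer needed skbs 32..127
 * of the old ring.  If an skb allocation fails, only the entries added for
 * the new ring are unmapped and freed, so the adapter keeps the old ring.
 */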
648#ifdef CONFIG_NET_POLL_CONTROLLER
649static void pcnet32_poll_controller(struct net_device *dev)
650{
4a5e8e29
JG
651 disable_irq(dev->irq);
652 pcnet32_interrupt(0, dev, NULL);
653 enable_irq(dev->irq);
1da177e4
LT
654}
655#endif
656
1da177e4
LT
657static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
658{
4a5e8e29
JG
659 struct pcnet32_private *lp = dev->priv;
660 unsigned long flags;
661 int r = -EOPNOTSUPP;
1da177e4 662
4a5e8e29
JG
663 if (lp->mii) {
664 spin_lock_irqsave(&lp->lock, flags);
665 mii_ethtool_gset(&lp->mii_if, cmd);
666 spin_unlock_irqrestore(&lp->lock, flags);
667 r = 0;
668 }
669 return r;
1da177e4
LT
670}
671
672static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
673{
4a5e8e29
JG
674 struct pcnet32_private *lp = dev->priv;
675 unsigned long flags;
676 int r = -EOPNOTSUPP;
1da177e4 677
4a5e8e29
JG
678 if (lp->mii) {
679 spin_lock_irqsave(&lp->lock, flags);
680 r = mii_ethtool_sset(&lp->mii_if, cmd);
681 spin_unlock_irqrestore(&lp->lock, flags);
682 }
683 return r;
1da177e4
LT
684}
685
4a5e8e29
JG
686static void pcnet32_get_drvinfo(struct net_device *dev,
687 struct ethtool_drvinfo *info)
1da177e4 688{
4a5e8e29
JG
689 struct pcnet32_private *lp = dev->priv;
690
691 strcpy(info->driver, DRV_NAME);
692 strcpy(info->version, DRV_VERSION);
693 if (lp->pci_dev)
694 strcpy(info->bus_info, pci_name(lp->pci_dev));
695 else
696 sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr);
1da177e4
LT
697}
698
699static u32 pcnet32_get_link(struct net_device *dev)
700{
4a5e8e29
JG
701 struct pcnet32_private *lp = dev->priv;
702 unsigned long flags;
703 int r;
1da177e4 704
4a5e8e29
JG
705 spin_lock_irqsave(&lp->lock, flags);
706 if (lp->mii) {
707 r = mii_link_ok(&lp->mii_if);
708 } else {
709 ulong ioaddr = dev->base_addr; /* card base I/O address */
710 r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
711 }
712 spin_unlock_irqrestore(&lp->lock, flags);
713
714 return r;
1da177e4
LT
715}
716
717static u32 pcnet32_get_msglevel(struct net_device *dev)
718{
4a5e8e29
JG
719 struct pcnet32_private *lp = dev->priv;
720 return lp->msg_enable;
1da177e4
LT
721}
722
723static void pcnet32_set_msglevel(struct net_device *dev, u32 value)
724{
4a5e8e29
JG
725 struct pcnet32_private *lp = dev->priv;
726 lp->msg_enable = value;
1da177e4
LT
727}
728
729static int pcnet32_nway_reset(struct net_device *dev)
730{
4a5e8e29
JG
731 struct pcnet32_private *lp = dev->priv;
732 unsigned long flags;
733 int r = -EOPNOTSUPP;
1da177e4 734
4a5e8e29
JG
735 if (lp->mii) {
736 spin_lock_irqsave(&lp->lock, flags);
737 r = mii_nway_restart(&lp->mii_if);
738 spin_unlock_irqrestore(&lp->lock, flags);
739 }
740 return r;
1da177e4
LT
741}
742
4a5e8e29
JG
743static void pcnet32_get_ringparam(struct net_device *dev,
744 struct ethtool_ringparam *ering)
1da177e4 745{
4a5e8e29 746 struct pcnet32_private *lp = dev->priv;
1da177e4 747
6dcd60c2
DF
748 ering->tx_max_pending = TX_MAX_RING_SIZE;
749 ering->tx_pending = lp->tx_ring_size;
750 ering->rx_max_pending = RX_MAX_RING_SIZE;
751 ering->rx_pending = lp->rx_ring_size;
eabf0415
HWL
752}
753
4a5e8e29
JG
754static int pcnet32_set_ringparam(struct net_device *dev,
755 struct ethtool_ringparam *ering)
756{
757 struct pcnet32_private *lp = dev->priv;
758 unsigned long flags;
759 unsigned int size;
760 ulong ioaddr = dev->base_addr;
761 int i;
762
763 if (ering->rx_mini_pending || ering->rx_jumbo_pending)
764 return -EINVAL;
765
766 if (netif_running(dev))
767 pcnet32_netif_stop(dev);
768
769 spin_lock_irqsave(&lp->lock, flags);
770 lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
771
772 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
773
774 /* set the minimum ring size to 4, to allow the loopback test to work
775 * unchanged.
776 */
777 for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
778 if (size <= (1 << i))
779 break;
780 }
781 if ((1 << i) != lp->tx_ring_size)
782 pcnet32_realloc_tx_ring(dev, lp, i);
783
784 size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
785 for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
786 if (size <= (1 << i))
787 break;
788 }
789 if ((1 << i) != lp->rx_ring_size)
790 pcnet32_realloc_rx_ring(dev, lp, i);
791
792 dev->weight = lp->rx_ring_size / 2;
793
794 if (netif_running(dev)) {
795 pcnet32_netif_start(dev);
796 pcnet32_restart(dev, CSR0_NORMAL);
797 }
798
799 spin_unlock_irqrestore(&lp->lock, flags);
800
801 if (netif_msg_drv(lp))
802 printk(KERN_INFO
803 "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name,
804 lp->rx_ring_size, lp->tx_ring_size);
805
806 return 0;
807}
808
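/*
 * Worked example for the rounding above: a request of ering->tx_pending =
 * 100 is clamped to TX_MAX_RING_SIZE (512), then the loop picks the
 * smallest power of two of at least 4 that holds it -- 1 << 7 = 128, since
 * 1 << 6 = 64 is too small -- and pcnet32_realloc_tx_ring() is only called
 * if that differs from the current lp->tx_ring_size.  The rx request is
 * rounded the same way, and dev->weight is refreshed to track the new rx
 * ring size.
 */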
4a5e8e29
JG
809static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
810 u8 * data)
1da177e4 811{
4a5e8e29 812 memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
1da177e4
LT
813}
814
815static int pcnet32_self_test_count(struct net_device *dev)
816{
4a5e8e29 817 return PCNET32_TEST_LEN;
1da177e4
LT
818}
819
820static void pcnet32_ethtool_test(struct net_device *dev,
4a5e8e29 821 struct ethtool_test *test, u64 * data)
1da177e4 822{
4a5e8e29
JG
823 struct pcnet32_private *lp = dev->priv;
824 int rc;
825
826 if (test->flags == ETH_TEST_FL_OFFLINE) {
827 rc = pcnet32_loopback_test(dev, data);
828 if (rc) {
829 if (netif_msg_hw(lp))
830 printk(KERN_DEBUG "%s: Loopback test failed.\n",
831 dev->name);
832 test->flags |= ETH_TEST_FL_FAILED;
833 } else if (netif_msg_hw(lp))
834 printk(KERN_DEBUG "%s: Loopback test passed.\n",
835 dev->name);
1da177e4 836 } else if (netif_msg_hw(lp))
4a5e8e29
JG
837 printk(KERN_DEBUG
838 "%s: No tests to run (specify 'Offline' on ethtool).",
839 dev->name);
840} /* end pcnet32_ethtool_test */
1da177e4 841
4a5e8e29 842static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
1da177e4 843{
4a5e8e29
JG
844 struct pcnet32_private *lp = dev->priv;
845 struct pcnet32_access *a = &lp->a; /* access to registers */
846 ulong ioaddr = dev->base_addr; /* card base I/O address */
847 struct sk_buff *skb; /* sk buff */
848 int x, i; /* counters */
849 int numbuffs = 4; /* number of TX/RX buffers and descs */
850 u16 status = 0x8300; /* TX ring status */
851 u16 teststatus; /* test of ring status */
852 int rc; /* return code */
853 int size; /* size of packets */
854 unsigned char *packet; /* source packet data */
855 static const int data_len = 60; /* length of source packets */
856 unsigned long flags;
857 unsigned long ticks;
858
859 *data1 = 1; /* status of test, default to fail */
860 rc = 1; /* default to fail */
861
862 if (netif_running(dev))
863 pcnet32_close(dev);
864
865 spin_lock_irqsave(&lp->lock, flags);
866
867 /* Reset the PCNET32 */
868 lp->a.reset(ioaddr);
869
870 /* switch pcnet32 to 32bit mode */
871 lp->a.write_bcr(ioaddr, 20, 2);
872
873 lp->init_block.mode =
874 le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
875 lp->init_block.filter[0] = 0;
876 lp->init_block.filter[1] = 0;
877
878 /* purge & init rings but don't actually restart */
879 pcnet32_restart(dev, 0x0000);
880
881 lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */
882
883 /* Initialize Transmit buffers. */
884 size = data_len + 15;
885 for (x = 0; x < numbuffs; x++) {
886 if (!(skb = dev_alloc_skb(size))) {
887 if (netif_msg_hw(lp))
888 printk(KERN_DEBUG
889 "%s: Cannot allocate skb at line: %d!\n",
890 dev->name, __LINE__);
891 goto clean_up;
892 } else {
893 packet = skb->data;
894 skb_put(skb, size); /* create space for data */
895 lp->tx_skbuff[x] = skb;
896 lp->tx_ring[x].length = le16_to_cpu(-skb->len);
897 lp->tx_ring[x].misc = 0;
898
899 /* put DA and SA into the skb */
900 for (i = 0; i < 6; i++)
901 *packet++ = dev->dev_addr[i];
902 for (i = 0; i < 6; i++)
903 *packet++ = dev->dev_addr[i];
904 /* type */
905 *packet++ = 0x08;
906 *packet++ = 0x06;
907 /* packet number */
908 *packet++ = x;
909 /* fill packet with data */
910 for (i = 0; i < data_len; i++)
911 *packet++ = i;
912
913 lp->tx_dma_addr[x] =
914 pci_map_single(lp->pci_dev, skb->data, skb->len,
915 PCI_DMA_TODEVICE);
916 lp->tx_ring[x].base =
917 (u32) le32_to_cpu(lp->tx_dma_addr[x]);
918 wmb(); /* Make sure owner changes after all others are visible */
919 lp->tx_ring[x].status = le16_to_cpu(status);
920 }
1da177e4 921 }
1da177e4 922
4a5e8e29
JG
923 x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */
924 x = x | 0x0002;
925 a->write_bcr(ioaddr, 32, x);
926
927 lp->a.write_csr(ioaddr, 15, 0x0044); /* set int loopback in CSR15 */
928
929 teststatus = le16_to_cpu(0x8000);
930 lp->a.write_csr(ioaddr, 0, 0x0002); /* Set STRT bit */
931
932 /* Check status of descriptors */
933 for (x = 0; x < numbuffs; x++) {
934 ticks = 0;
935 rmb();
936 while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
937 spin_unlock_irqrestore(&lp->lock, flags);
938 mdelay(1);
939 spin_lock_irqsave(&lp->lock, flags);
940 rmb();
941 ticks++;
942 }
943 if (ticks == 200) {
944 if (netif_msg_hw(lp))
945 printk("%s: Desc %d failed to reset!\n",
946 dev->name, x);
947 break;
948 }
949 }
950
951 lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */
952 wmb();
953 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
954 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
955
956 for (x = 0; x < numbuffs; x++) {
957 printk(KERN_DEBUG "%s: Packet %d:\n", dev->name, x);
958 skb = lp->rx_skbuff[x];
959 for (i = 0; i < size; i++) {
960 printk("%02x ", *(skb->data + i));
961 }
962 printk("\n");
963 }
964 }
1da177e4 965
4a5e8e29
JG
966 x = 0;
967 rc = 0;
968 while (x < numbuffs && !rc) {
969 skb = lp->rx_skbuff[x];
970 packet = lp->tx_skbuff[x]->data;
971 for (i = 0; i < size; i++) {
972 if (*(skb->data + i) != packet[i]) {
973 if (netif_msg_hw(lp))
974 printk(KERN_DEBUG
975 "%s: Error in compare! %2x - %02x %02x\n",
976 dev->name, i, *(skb->data + i),
977 packet[i]);
978 rc = 1;
979 break;
980 }
981 }
982 x++;
983 }
984 if (!rc) {
985 *data1 = 0;
986 }
1da177e4 987
4a5e8e29
JG
988 clean_up:
989 pcnet32_purge_tx_ring(dev);
990 x = a->read_csr(ioaddr, 15) & 0xFFFF;
991 a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */
1da177e4 992
4a5e8e29
JG
993 x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
994 x = x & ~0x0002;
995 a->write_bcr(ioaddr, 32, x);
1da177e4 996
4a5e8e29
JG
997 spin_unlock_irqrestore(&lp->lock, flags);
998
999 if (netif_running(dev)) {
1000 pcnet32_open(dev);
1001 } else {
1002 lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
1003 }
1004
1005 return (rc);
1006} /* end pcnet32_loopback_test */
1da177e4
LT
1007
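/*
 * Layout of the test frames built above, for reference: each of the
 * numbuffs transmit buffers is size = data_len + 15 = 75 bytes --
 * destination and source MAC both set to dev->dev_addr (12 bytes), the
 * 0x08 0x06 type bytes, the buffer number (1 byte) and 60 bytes of
 * incrementing filler.  The descriptor length is the two's complement of
 * the byte count, as the LANCE-style hardware expects, and status 0x8300
 * sets OWN plus the start- and end-of-packet bits so each frame occupies a
 * single buffer.
 */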
1008static void pcnet32_led_blink_callback(struct net_device *dev)
1009{
4a5e8e29
JG
1010 struct pcnet32_private *lp = dev->priv;
1011 struct pcnet32_access *a = &lp->a;
1012 ulong ioaddr = dev->base_addr;
1013 unsigned long flags;
1014 int i;
1015
1016 spin_lock_irqsave(&lp->lock, flags);
1017 for (i = 4; i < 8; i++) {
1018 a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
1019 }
1020 spin_unlock_irqrestore(&lp->lock, flags);
1021
1022 mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT);
1da177e4
LT
1023}
1024
1025static int pcnet32_phys_id(struct net_device *dev, u32 data)
1026{
4a5e8e29
JG
1027 struct pcnet32_private *lp = dev->priv;
1028 struct pcnet32_access *a = &lp->a;
1029 ulong ioaddr = dev->base_addr;
1030 unsigned long flags;
1031 int i, regs[4];
1032
1033 if (!lp->blink_timer.function) {
1034 init_timer(&lp->blink_timer);
1035 lp->blink_timer.function = (void *)pcnet32_led_blink_callback;
1036 lp->blink_timer.data = (unsigned long)dev;
1037 }
1038
1039 /* Save the current value of the bcrs */
1040 spin_lock_irqsave(&lp->lock, flags);
1041 for (i = 4; i < 8; i++) {
1042 regs[i - 4] = a->read_bcr(ioaddr, i);
1043 }
1044 spin_unlock_irqrestore(&lp->lock, flags);
1045
1046 mod_timer(&lp->blink_timer, jiffies);
1047 set_current_state(TASK_INTERRUPTIBLE);
1048
1049 if ((!data) || (data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ)))
1050 data = (u32) (MAX_SCHEDULE_TIMEOUT / HZ);
1051
1052 msleep_interruptible(data * 1000);
1053 del_timer_sync(&lp->blink_timer);
1054
1055 /* Restore the original value of the bcrs */
1056 spin_lock_irqsave(&lp->lock, flags);
1057 for (i = 4; i < 8; i++) {
1058 a->write_bcr(ioaddr, i, regs[i - 4]);
1059 }
1060 spin_unlock_irqrestore(&lp->lock, flags);
1061
1062 return 0;
1da177e4
LT
1063}
1064
ac62ef04
DF
1065#define PCNET32_REGS_PER_PHY 32
1066#define PCNET32_MAX_PHYS 32
1da177e4
LT
1067static int pcnet32_get_regs_len(struct net_device *dev)
1068{
4a5e8e29
JG
1069 struct pcnet32_private *lp = dev->priv;
1070 int j = lp->phycount * PCNET32_REGS_PER_PHY;
ac62ef04 1071
4a5e8e29 1072 return ((PCNET32_NUM_REGS + j) * sizeof(u16));
1da177e4
LT
1073}
1074
1075static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4a5e8e29 1076 void *ptr)
1da177e4 1077{
4a5e8e29
JG
1078 int i, csr0;
1079 u16 *buff = ptr;
1080 struct pcnet32_private *lp = dev->priv;
1081 struct pcnet32_access *a = &lp->a;
1082 ulong ioaddr = dev->base_addr;
1083 int ticks;
1084 unsigned long flags;
1085
1086 spin_lock_irqsave(&lp->lock, flags);
1087
1088 csr0 = a->read_csr(ioaddr, 0);
1089 if (!(csr0 & 0x0004)) { /* If not stopped */
1090 /* set SUSPEND (SPND) - CSR5 bit 0 */
1091 a->write_csr(ioaddr, 5, 0x0001);
1092
1093 /* poll waiting for bit to be set */
1094 ticks = 0;
1095 while (!(a->read_csr(ioaddr, 5) & 0x0001)) {
1096 spin_unlock_irqrestore(&lp->lock, flags);
1097 mdelay(1);
1098 spin_lock_irqsave(&lp->lock, flags);
1099 ticks++;
1100 if (ticks > 200) {
1101 if (netif_msg_hw(lp))
1102 printk(KERN_DEBUG
1103 "%s: Error getting into suspend!\n",
1104 dev->name);
1105 break;
1106 }
ac62ef04 1107 }
1da177e4 1108 }
1da177e4 1109
4a5e8e29
JG
1110 /* read address PROM */
1111 for (i = 0; i < 16; i += 2)
1112 *buff++ = inw(ioaddr + i);
1113
1114 /* read control and status registers */
1115 for (i = 0; i < 90; i++) {
1116 *buff++ = a->read_csr(ioaddr, i);
1117 }
1118
1119 *buff++ = a->read_csr(ioaddr, 112);
1120 *buff++ = a->read_csr(ioaddr, 114);
1da177e4 1121
4a5e8e29
JG
1122 /* read bus configuration registers */
1123 for (i = 0; i < 30; i++) {
1124 *buff++ = a->read_bcr(ioaddr, i);
1125 }
1126 *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */
1127 for (i = 31; i < 36; i++) {
1128 *buff++ = a->read_bcr(ioaddr, i);
1129 }
1130
1131 /* read mii phy registers */
1132 if (lp->mii) {
1133 int j;
1134 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
1135 if (lp->phymask & (1 << j)) {
1136 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
1137 lp->a.write_bcr(ioaddr, 33,
1138 (j << 5) | i);
1139 *buff++ = lp->a.read_bcr(ioaddr, 34);
1140 }
1141 }
1142 }
1143 }
1144
1145 if (!(csr0 & 0x0004)) { /* If not stopped */
1146 /* clear SUSPEND (SPND) - CSR5 bit 0 */
1147 a->write_csr(ioaddr, 5, 0x0000);
1148 }
1149
1150 spin_unlock_irqrestore(&lp->lock, flags);
1da177e4
LT
1151}
1152
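/*
 * The arithmetic behind PCNET32_NUM_REGS, as dumped above: 8 words of
 * address PROM, CSR0-CSR89, CSR112 and CSR114, BCR0-BCR29, one zero
 * placeholder for the skipped BCR30, and BCR31-BCR35, i.e.
 * 8 + 90 + 2 + 30 + 1 + 5 = 136 words.  With the per-PHY MII registers
 * added, pcnet32_get_regs_len() reports
 * (136 + phycount * 32) * sizeof(u16) bytes -- 336 bytes for a single PHY.
 */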
1153static struct ethtool_ops pcnet32_ethtool_ops = {
4a5e8e29
JG
1154 .get_settings = pcnet32_get_settings,
1155 .set_settings = pcnet32_set_settings,
1156 .get_drvinfo = pcnet32_get_drvinfo,
1157 .get_msglevel = pcnet32_get_msglevel,
1158 .set_msglevel = pcnet32_set_msglevel,
1159 .nway_reset = pcnet32_nway_reset,
1160 .get_link = pcnet32_get_link,
1161 .get_ringparam = pcnet32_get_ringparam,
1162 .set_ringparam = pcnet32_set_ringparam,
1163 .get_tx_csum = ethtool_op_get_tx_csum,
1164 .get_sg = ethtool_op_get_sg,
1165 .get_tso = ethtool_op_get_tso,
1166 .get_strings = pcnet32_get_strings,
1167 .self_test_count = pcnet32_self_test_count,
1168 .self_test = pcnet32_ethtool_test,
1169 .phys_id = pcnet32_phys_id,
1170 .get_regs_len = pcnet32_get_regs_len,
1171 .get_regs = pcnet32_get_regs,
1172 .get_perm_addr = ethtool_op_get_perm_addr,
1da177e4
LT
1173};
1174
1175/* only probes for non-PCI devices, the rest are handled by
1176 * pci_register_driver via pcnet32_probe_pci */
1177
dcaf9769 1178static void __devinit pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
1da177e4 1179{
4a5e8e29
JG
1180 unsigned int *port, ioaddr;
1181
1182 /* search for PCnet32 VLB cards at known addresses */
1183 for (port = pcnet32_portlist; (ioaddr = *port); port++) {
1184 if (request_region
1185 (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) {
1186 /* check if there is really a pcnet chip on that ioaddr */
1187 if ((inb(ioaddr + 14) == 0x57)
1188 && (inb(ioaddr + 15) == 0x57)) {
1189 pcnet32_probe1(ioaddr, 0, NULL);
1190 } else {
1191 release_region(ioaddr, PCNET32_TOTAL_SIZE);
1192 }
1193 }
1194 }
1da177e4
LT
1195}
1196
1da177e4
LT
1197static int __devinit
1198pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
1199{
4a5e8e29
JG
1200 unsigned long ioaddr;
1201 int err;
1202
1203 err = pci_enable_device(pdev);
1204 if (err < 0) {
1205 if (pcnet32_debug & NETIF_MSG_PROBE)
1206 printk(KERN_ERR PFX
1207 "failed to enable device -- err=%d\n", err);
1208 return err;
1209 }
1210 pci_set_master(pdev);
1211
1212 ioaddr = pci_resource_start(pdev, 0);
1213 if (!ioaddr) {
1214 if (pcnet32_debug & NETIF_MSG_PROBE)
1215 printk(KERN_ERR PFX
1216 "card has no PCI IO resources, aborting\n");
1217 return -ENODEV;
1218 }
1da177e4 1219
4a5e8e29
JG
1220 if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
1221 if (pcnet32_debug & NETIF_MSG_PROBE)
1222 printk(KERN_ERR PFX
1223 "architecture does not support 32bit PCI busmaster DMA\n");
1224 return -ENODEV;
1225 }
1226 if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") ==
1227 NULL) {
1228 if (pcnet32_debug & NETIF_MSG_PROBE)
1229 printk(KERN_ERR PFX
1230 "io address range already allocated\n");
1231 return -EBUSY;
1232 }
1da177e4 1233
4a5e8e29
JG
1234 err = pcnet32_probe1(ioaddr, 1, pdev);
1235 if (err < 0) {
1236 pci_disable_device(pdev);
1237 }
1238 return err;
1da177e4
LT
1239}
1240
1da177e4
LT
1241/* pcnet32_probe1
1242 * Called from both pcnet32_probe_vlbus and pcnet_probe_pci.
1243 * pdev will be NULL when called from pcnet32_probe_vlbus.
1244 */
1245static int __devinit
1246pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1247{
4a5e8e29
JG
1248 struct pcnet32_private *lp;
1249 dma_addr_t lp_dma_addr;
1250 int i, media;
1251 int fdx, mii, fset, dxsuflo;
1252 int chip_version;
1253 char *chipname;
1254 struct net_device *dev;
1255 struct pcnet32_access *a = NULL;
1256 u8 promaddr[6];
1257 int ret = -ENODEV;
1258
1259 /* reset the chip */
1260 pcnet32_wio_reset(ioaddr);
1261
1262 /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
1263 if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
1264 a = &pcnet32_wio;
1265 } else {
1266 pcnet32_dwio_reset(ioaddr);
1267 if (pcnet32_dwio_read_csr(ioaddr, 0) == 4
1268 && pcnet32_dwio_check(ioaddr)) {
1269 a = &pcnet32_dwio;
1270 } else
1271 goto err_release_region;
1272 }
1273
1274 chip_version =
1275 a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
1276 if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
1277 printk(KERN_INFO " PCnet chip version is %#x.\n",
1278 chip_version);
1279 if ((chip_version & 0xfff) != 0x003) {
1280 if (pcnet32_debug & NETIF_MSG_PROBE)
1281 printk(KERN_INFO PFX "Unsupported chip version.\n");
1282 goto err_release_region;
1283 }
1284
1285 /* initialize variables */
1286 fdx = mii = fset = dxsuflo = 0;
1287 chip_version = (chip_version >> 12) & 0xffff;
1288
1289 switch (chip_version) {
1290 case 0x2420:
1291 chipname = "PCnet/PCI 79C970"; /* PCI */
1292 break;
1293 case 0x2430:
1294 if (shared)
1295 chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */
1296 else
1297 chipname = "PCnet/32 79C965"; /* 486/VL bus */
1298 break;
1299 case 0x2621:
1300 chipname = "PCnet/PCI II 79C970A"; /* PCI */
1301 fdx = 1;
1302 break;
1303 case 0x2623:
1304 chipname = "PCnet/FAST 79C971"; /* PCI */
1305 fdx = 1;
1306 mii = 1;
1307 fset = 1;
1308 break;
1309 case 0x2624:
1310 chipname = "PCnet/FAST+ 79C972"; /* PCI */
1311 fdx = 1;
1312 mii = 1;
1313 fset = 1;
1314 break;
1315 case 0x2625:
1316 chipname = "PCnet/FAST III 79C973"; /* PCI */
1317 fdx = 1;
1318 mii = 1;
1319 break;
1320 case 0x2626:
1321 chipname = "PCnet/Home 79C978"; /* PCI */
1322 fdx = 1;
1323 /*
1324 * This is based on specs published at www.amd.com. This section
1325 * assumes that a card with a 79C978 wants to go into standard
1326 * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode,
1327 * and the module option homepna=1 can select this instead.
1328 */
1329 media = a->read_bcr(ioaddr, 49);
1330 media &= ~3; /* default to 10Mb ethernet */
1331 if (cards_found < MAX_UNITS && homepna[cards_found])
1332 media |= 1; /* switch to home wiring mode */
1333 if (pcnet32_debug & NETIF_MSG_PROBE)
1334 printk(KERN_DEBUG PFX "media set to %sMbit mode.\n",
1335 (media & 1) ? "1" : "10");
1336 a->write_bcr(ioaddr, 49, media);
1337 break;
1338 case 0x2627:
1339 chipname = "PCnet/FAST III 79C975"; /* PCI */
1340 fdx = 1;
1341 mii = 1;
1342 break;
1343 case 0x2628:
1344 chipname = "PCnet/PRO 79C976";
1345 fdx = 1;
1346 mii = 1;
1347 break;
1348 default:
1349 if (pcnet32_debug & NETIF_MSG_PROBE)
1350 printk(KERN_INFO PFX
1351 "PCnet version %#x, no PCnet32 chip.\n",
1352 chip_version);
1353 goto err_release_region;
1354 }
1355
1da177e4 1356 /*
4a5e8e29
JG
1357 * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
1358 * starting until the packet is loaded. Strike one for reliability, lose
1359 * one for latency - although on PCI this isn't a big loss. Older chips
1360 * have FIFOs smaller than a packet, so you can't do this.
1361 * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
1da177e4 1362 */
4a5e8e29
JG
1363
1364 if (fset) {
1365 a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
1366 a->write_csr(ioaddr, 80,
1367 (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
1368 dxsuflo = 1;
1369 }
1370
1371 dev = alloc_etherdev(0);
1372 if (!dev) {
1373 if (pcnet32_debug & NETIF_MSG_PROBE)
1374 printk(KERN_ERR PFX "Memory allocation failed.\n");
1375 ret = -ENOMEM;
1376 goto err_release_region;
1377 }
1378 SET_NETDEV_DEV(dev, &pdev->dev);
1379
1da177e4 1380 if (pcnet32_debug & NETIF_MSG_PROBE)
4a5e8e29
JG
1381 printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr);
1382
1383 /* In most chips, after a chip reset, the ethernet address is read from the
1384 * station address PROM at the base address and programmed into the
1385 * "Physical Address Registers" CSR12-14.
1386 * As a precautionary measure, we read the PROM values and complain if
bc0e1fc9
LV
1387 * they disagree with the CSRs. If they miscompare, and the PROM addr
1388 * is valid, then the PROM addr is used.
4a5e8e29
JG
1389 */
1390 for (i = 0; i < 3; i++) {
1391 unsigned int val;
1392 val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
1393 /* There may be endianness issues here. */
1394 dev->dev_addr[2 * i] = val & 0x0ff;
1395 dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
1396 }
1397
1398 /* read PROM address and compare with CSR address */
1da177e4 1399 for (i = 0; i < 6; i++)
4a5e8e29
JG
1400 promaddr[i] = inb(ioaddr + i);
1401
1402 if (memcmp(promaddr, dev->dev_addr, 6)
1403 || !is_valid_ether_addr(dev->dev_addr)) {
1404 if (is_valid_ether_addr(promaddr)) {
1405 if (pcnet32_debug & NETIF_MSG_PROBE) {
1406 printk(" warning: CSR address invalid,\n");
1407 printk(KERN_INFO
1408 " using instead PROM address of");
1409 }
1410 memcpy(dev->dev_addr, promaddr, 6);
1411 }
1412 }
1413 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1414
1415 /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
1416 if (!is_valid_ether_addr(dev->perm_addr))
1417 memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
1418
1419 if (pcnet32_debug & NETIF_MSG_PROBE) {
1420 for (i = 0; i < 6; i++)
1421 printk(" %2.2x", dev->dev_addr[i]);
1422
1423 /* Version 0x2623 and 0x2624 */
1424 if (((chip_version + 1) & 0xfffe) == 0x2624) {
1425 i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
1426 printk("\n" KERN_INFO " tx_start_pt(0x%04x):", i);
1427 switch (i >> 10) {
1428 case 0:
1429 printk(" 20 bytes,");
1430 break;
1431 case 1:
1432 printk(" 64 bytes,");
1433 break;
1434 case 2:
1435 printk(" 128 bytes,");
1436 break;
1437 case 3:
1438 printk("~220 bytes,");
1439 break;
1440 }
1441 i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
1442 printk(" BCR18(%x):", i & 0xffff);
1443 if (i & (1 << 5))
1444 printk("BurstWrEn ");
1445 if (i & (1 << 6))
1446 printk("BurstRdEn ");
1447 if (i & (1 << 7))
1448 printk("DWordIO ");
1449 if (i & (1 << 11))
1450 printk("NoUFlow ");
1451 i = a->read_bcr(ioaddr, 25);
1452 printk("\n" KERN_INFO " SRAMSIZE=0x%04x,", i << 8);
1453 i = a->read_bcr(ioaddr, 26);
1454 printk(" SRAM_BND=0x%04x,", i << 8);
1455 i = a->read_bcr(ioaddr, 27);
1456 if (i & (1 << 14))
1457 printk("LowLatRx");
1458 }
1459 }
1460
1461 dev->base_addr = ioaddr;
1462 /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
1463 if ((lp =
1464 pci_alloc_consistent(pdev, sizeof(*lp), &lp_dma_addr)) == NULL) {
1465 if (pcnet32_debug & NETIF_MSG_PROBE)
1466 printk(KERN_ERR PFX
1467 "Consistent memory allocation failed.\n");
1468 ret = -ENOMEM;
1469 goto err_free_netdev;
1470 }
1471
1472 memset(lp, 0, sizeof(*lp));
1473 lp->dma_addr = lp_dma_addr;
1474 lp->pci_dev = pdev;
1475
1476 spin_lock_init(&lp->lock);
1477
1478 SET_MODULE_OWNER(dev);
1479 SET_NETDEV_DEV(dev, &pdev->dev);
1480 dev->priv = lp;
1481 lp->name = chipname;
1482 lp->shared_irq = shared;
1483 lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
1484 lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */
1485 lp->tx_mod_mask = lp->tx_ring_size - 1;
1486 lp->rx_mod_mask = lp->rx_ring_size - 1;
1487 lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
1488 lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
1489 lp->mii_if.full_duplex = fdx;
1490 lp->mii_if.phy_id_mask = 0x1f;
1491 lp->mii_if.reg_num_mask = 0x1f;
1492 lp->dxsuflo = dxsuflo;
1493 lp->mii = mii;
1494 lp->msg_enable = pcnet32_debug;
1495 if ((cards_found >= MAX_UNITS)
1496 || (options[cards_found] >= sizeof(options_mapping)))
1497 lp->options = PCNET32_PORT_ASEL;
1498 else
1499 lp->options = options_mapping[options[cards_found]];
1500 lp->mii_if.dev = dev;
1501 lp->mii_if.mdio_read = mdio_read;
1502 lp->mii_if.mdio_write = mdio_write;
1503
1504 if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
1505 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
1506 lp->options |= PCNET32_PORT_FD;
1507
1508 if (!a) {
1509 if (pcnet32_debug & NETIF_MSG_PROBE)
1510 printk(KERN_ERR PFX "No access methods\n");
1511 ret = -ENODEV;
1512 goto err_free_consistent;
1513 }
1514 lp->a = *a;
1515
1516 /* prior to register_netdev, dev->name is not yet correct */
1517 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
1518 ret = -ENOMEM;
1519 goto err_free_ring;
1520 }
1521 /* detect special T1/E1 WAN card by checking for MAC address */
1522 if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0
1da177e4 1523 && dev->dev_addr[2] == 0x75)
4a5e8e29 1524 lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
1da177e4 1525
4a5e8e29
JG
1526 lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */
1527 lp->init_block.tlen_rlen =
1528 le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
1529 for (i = 0; i < 6; i++)
1530 lp->init_block.phys_addr[i] = dev->dev_addr[i];
1531 lp->init_block.filter[0] = 0x00000000;
1532 lp->init_block.filter[1] = 0x00000000;
1533 lp->init_block.rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr);
1534 lp->init_block.tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr);
1535
1536 /* switch pcnet32 to 32bit mode */
1537 a->write_bcr(ioaddr, 20, 2);
1538
1539 a->write_csr(ioaddr, 1, (lp->dma_addr + offsetof(struct pcnet32_private,
1540 init_block)) & 0xffff);
1541 a->write_csr(ioaddr, 2, (lp->dma_addr + offsetof(struct pcnet32_private,
1542 init_block)) >> 16);
1543
1544 if (pdev) { /* use the IRQ provided by PCI */
1545 dev->irq = pdev->irq;
1546 if (pcnet32_debug & NETIF_MSG_PROBE)
1547 printk(" assigned IRQ %d.\n", dev->irq);
1548 } else {
1549 unsigned long irq_mask = probe_irq_on();
1550
1551 /*
1552 * To auto-IRQ we enable the initialization-done and DMA error
1553 * interrupts. For ISA boards we get a DMA error, but VLB and PCI
1554 * boards will work.
1555 */
1556 /* Trigger an initialization just for the interrupt. */
1557 a->write_csr(ioaddr, 0, 0x41);
1558 mdelay(1);
1559
1560 dev->irq = probe_irq_off(irq_mask);
1561 if (!dev->irq) {
1562 if (pcnet32_debug & NETIF_MSG_PROBE)
1563 printk(", failed to detect IRQ line.\n");
1564 ret = -ENODEV;
1565 goto err_free_ring;
1566 }
1567 if (pcnet32_debug & NETIF_MSG_PROBE)
1568 printk(", probed IRQ %d.\n", dev->irq);
1569 }
1da177e4 1570
4a5e8e29
JG
1571 /* Set the mii phy_id so that we can query the link state */
1572 if (lp->mii) {
1573 /* lp->phycount and lp->phymask are set to 0 by memset above */
1574
1575 lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
1576 /* scan for PHYs */
1577 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
1578 unsigned short id1, id2;
1579
1580 id1 = mdio_read(dev, i, MII_PHYSID1);
1581 if (id1 == 0xffff)
1582 continue;
1583 id2 = mdio_read(dev, i, MII_PHYSID2);
1584 if (id2 == 0xffff)
1585 continue;
1586 if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624)
1587 continue; /* 79C971 & 79C972 have phantom phy at id 31 */
1588 lp->phycount++;
1589 lp->phymask |= (1 << i);
1590 lp->mii_if.phy_id = i;
1591 if (pcnet32_debug & NETIF_MSG_PROBE)
1592 printk(KERN_INFO PFX
1593 "Found PHY %04x:%04x at address %d.\n",
1594 id1, id2, i);
1595 }
1596 lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
1597 if (lp->phycount > 1) {
1598 lp->options |= PCNET32_PORT_MII;
1599 }
1da177e4 1600 }
4a5e8e29
JG
1601
1602 init_timer(&lp->watchdog_timer);
1603 lp->watchdog_timer.data = (unsigned long)dev;
1604 lp->watchdog_timer.function = (void *)&pcnet32_watchdog;
1605
1606 /* The PCNET32-specific entries in the device structure. */
1607 dev->open = &pcnet32_open;
1608 dev->hard_start_xmit = &pcnet32_start_xmit;
1609 dev->stop = &pcnet32_close;
1610 dev->get_stats = &pcnet32_get_stats;
1611 dev->set_multicast_list = &pcnet32_set_multicast_list;
1612 dev->do_ioctl = &pcnet32_ioctl;
1613 dev->ethtool_ops = &pcnet32_ethtool_ops;
1614 dev->tx_timeout = pcnet32_tx_timeout;
1615 dev->watchdog_timeo = (5 * HZ);
1da177e4
LT
1616
1617#ifdef CONFIG_NET_POLL_CONTROLLER
4a5e8e29 1618 dev->poll_controller = pcnet32_poll_controller;
1da177e4
LT
1619#endif
1620
4a5e8e29
JG
1621 /* Fill in the generic fields of the device structure. */
1622 if (register_netdev(dev))
1623 goto err_free_ring;
1624
1625 if (pdev) {
1626 pci_set_drvdata(pdev, dev);
1627 } else {
1628 lp->next = pcnet32_dev;
1629 pcnet32_dev = dev;
1630 }
1631
1632 if (pcnet32_debug & NETIF_MSG_PROBE)
1633 printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name);
1634 cards_found++;
1635
1636 /* enable LED writes */
1637 a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);
1da177e4 1638
4a5e8e29
JG
1639 return 0;
1640
1641 err_free_ring:
1642 pcnet32_free_ring(dev);
1643 err_free_consistent:
1644 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
1645 err_free_netdev:
1646 free_netdev(dev);
1647 err_release_region:
1648 release_region(ioaddr, PCNET32_TOTAL_SIZE);
1649 return ret;
1650}
1da177e4 1651
a88c844c
DF
1652/* if any allocation fails, caller must also call pcnet32_free_ring */
1653static int pcnet32_alloc_ring(struct net_device *dev, char *name)
eabf0415 1654{
4a5e8e29 1655 struct pcnet32_private *lp = dev->priv;
eabf0415 1656
4a5e8e29
JG
1657 lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
1658 sizeof(struct pcnet32_tx_head) *
1659 lp->tx_ring_size,
1660 &lp->tx_ring_dma_addr);
1661 if (lp->tx_ring == NULL) {
12fa30f3 1662 if (netif_msg_drv(lp))
4a5e8e29
JG
1663 printk("\n" KERN_ERR PFX
1664 "%s: Consistent memory allocation failed.\n",
1665 name);
1666 return -ENOMEM;
1667 }
eabf0415 1668
4a5e8e29
JG
1669 lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
1670 sizeof(struct pcnet32_rx_head) *
1671 lp->rx_ring_size,
1672 &lp->rx_ring_dma_addr);
1673 if (lp->rx_ring == NULL) {
12fa30f3 1674 if (netif_msg_drv(lp))
4a5e8e29
JG
1675 printk("\n" KERN_ERR PFX
1676 "%s: Consistent memory allocation failed.\n",
1677 name);
1678 return -ENOMEM;
1679 }
eabf0415 1680
12fa30f3 1681 lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
4a5e8e29
JG
1682 GFP_ATOMIC);
1683 if (!lp->tx_dma_addr) {
12fa30f3 1684 if (netif_msg_drv(lp))
4a5e8e29
JG
1685 printk("\n" KERN_ERR PFX
1686 "%s: Memory allocation failed.\n", name);
1687 return -ENOMEM;
1688 }
4a5e8e29 1689
12fa30f3 1690 lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
4a5e8e29
JG
1691 GFP_ATOMIC);
1692 if (!lp->rx_dma_addr) {
12fa30f3 1693 if (netif_msg_drv(lp))
4a5e8e29
JG
1694 printk("\n" KERN_ERR PFX
1695 "%s: Memory allocation failed.\n", name);
1696 return -ENOMEM;
1697 }
4a5e8e29 1698
12fa30f3 1699 lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
4a5e8e29
JG
1700 GFP_ATOMIC);
1701 if (!lp->tx_skbuff) {
12fa30f3 1702 if (netif_msg_drv(lp))
4a5e8e29
JG
1703 printk("\n" KERN_ERR PFX
1704 "%s: Memory allocation failed.\n", name);
1705 return -ENOMEM;
1706 }
4a5e8e29 1707
12fa30f3 1708 lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
4a5e8e29
JG
1709 GFP_ATOMIC);
1710 if (!lp->rx_skbuff) {
12fa30f3 1711 if (netif_msg_drv(lp))
4a5e8e29
JG
1712 printk("\n" KERN_ERR PFX
1713 "%s: Memory allocation failed.\n", name);
1714 return -ENOMEM;
1715 }
4a5e8e29
JG
1716
1717 return 0;
1718}
eabf0415
HWL
1719
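/*
 * Why the early returns above are safe: pcnet32_probe1() reacts to a failed
 * pcnet32_alloc_ring() by jumping to its err_free_ring label, and
 * pcnet32_free_ring() below copes with a partially built ring -- kfree(NULL)
 * is a no-op and the descriptor rings are only passed to
 * pci_free_consistent() when non-NULL -- so pcnet32_alloc_ring() may bail
 * out with -ENOMEM at any step without cleaning up after itself.
 */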
1720static void pcnet32_free_ring(struct net_device *dev)
1721{
4a5e8e29 1722 struct pcnet32_private *lp = dev->priv;
eabf0415 1723
4a5e8e29
JG
1724 kfree(lp->tx_skbuff);
1725 lp->tx_skbuff = NULL;
eabf0415 1726
4a5e8e29
JG
1727 kfree(lp->rx_skbuff);
1728 lp->rx_skbuff = NULL;
eabf0415 1729
4a5e8e29
JG
1730 kfree(lp->tx_dma_addr);
1731 lp->tx_dma_addr = NULL;
eabf0415 1732
4a5e8e29
JG
1733 kfree(lp->rx_dma_addr);
1734 lp->rx_dma_addr = NULL;
eabf0415 1735
4a5e8e29
JG
1736 if (lp->tx_ring) {
1737 pci_free_consistent(lp->pci_dev,
1738 sizeof(struct pcnet32_tx_head) *
1739 lp->tx_ring_size, lp->tx_ring,
1740 lp->tx_ring_dma_addr);
1741 lp->tx_ring = NULL;
1742 }
eabf0415 1743
4a5e8e29
JG
1744 if (lp->rx_ring) {
1745 pci_free_consistent(lp->pci_dev,
1746 sizeof(struct pcnet32_rx_head) *
1747 lp->rx_ring_size, lp->rx_ring,
1748 lp->rx_ring_dma_addr);
1749 lp->rx_ring = NULL;
1750 }
eabf0415
HWL
1751}
1752
4a5e8e29 1753static int pcnet32_open(struct net_device *dev)
1da177e4 1754{
4a5e8e29
JG
1755 struct pcnet32_private *lp = dev->priv;
1756 unsigned long ioaddr = dev->base_addr;
1757 u16 val;
1758 int i;
1759 int rc;
1760 unsigned long flags;
1761
1762 if (request_irq(dev->irq, &pcnet32_interrupt,
1fb9df5d 1763 lp->shared_irq ? IRQF_SHARED : 0, dev->name,
4a5e8e29
JG
1764 (void *)dev)) {
1765 return -EAGAIN;
1766 }
1767
1768 spin_lock_irqsave(&lp->lock, flags);
1769 /* Check for a valid station address */
1770 if (!is_valid_ether_addr(dev->dev_addr)) {
1771 rc = -EINVAL;
1772 goto err_free_irq;
1773 }
1774
1775 /* Reset the PCNET32 */
1776 lp->a.reset(ioaddr);
1777
1778 /* switch pcnet32 to 32bit mode */
1779 lp->a.write_bcr(ioaddr, 20, 2);
1780
1781 if (netif_msg_ifup(lp))
1782 printk(KERN_DEBUG
1783 "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
1784 dev->name, dev->irq, (u32) (lp->tx_ring_dma_addr),
1785 (u32) (lp->rx_ring_dma_addr),
1786 (u32) (lp->dma_addr +
1787 offsetof(struct pcnet32_private, init_block)));
1788
1789 /* set/reset autoselect bit */
1790 val = lp->a.read_bcr(ioaddr, 2) & ~2;
1791 if (lp->options & PCNET32_PORT_ASEL)
1da177e4 1792 val |= 2;
4a5e8e29
JG
1793 lp->a.write_bcr(ioaddr, 2, val);
1794
1795 /* handle full duplex setting */
1796 if (lp->mii_if.full_duplex) {
1797 val = lp->a.read_bcr(ioaddr, 9) & ~3;
1798 if (lp->options & PCNET32_PORT_FD) {
1799 val |= 1;
1800 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
1801 val |= 2;
1802 } else if (lp->options & PCNET32_PORT_ASEL) {
1803 /* workaround for the xSeries 250, turn on for 79C975 only */
1804 i = ((lp->a.read_csr(ioaddr, 88) |
1805 (lp->a.
1806 read_csr(ioaddr, 89) << 16)) >> 12) & 0xffff;
1807 if (i == 0x2627)
1808 val |= 3;
1809 }
1810 lp->a.write_bcr(ioaddr, 9, val);
1811 }
1812
1813 /* set/reset GPSI bit in test register */
1814 val = lp->a.read_csr(ioaddr, 124) & ~0x10;
1815 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
1816 val |= 0x10;
1817 lp->a.write_csr(ioaddr, 124, val);
1818
1819 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
1820 if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT &&
2964bbd7
DF
1821 (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
1822 lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
ac62ef04 1823 if (lp->options & PCNET32_PORT_ASEL) {
4a5e8e29
JG
1824 lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
1825 if (netif_msg_link(lp))
1826 printk(KERN_DEBUG
1827 "%s: Setting 100Mb-Full Duplex.\n",
1828 dev->name);
1829 }
1830 }
1831 if (lp->phycount < 2) {
1832 /*
1833 * 24 Jun 2004: according to AMD, in order to change the PHY,
1834 * DANAS (or DISPM for 79C976) must be set; then select the speed,
1835 * duplex, and/or enable auto negotiation, and clear DANAS
1836 */
1837 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
1838 lp->a.write_bcr(ioaddr, 32,
1839 lp->a.read_bcr(ioaddr, 32) | 0x0080);
1840 /* disable Auto Negotiation, set 10Mbps, HD */
1841 val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
1842 if (lp->options & PCNET32_PORT_FD)
1843 val |= 0x10;
1844 if (lp->options & PCNET32_PORT_100)
1845 val |= 0x08;
1846 lp->a.write_bcr(ioaddr, 32, val);
1847 } else {
1848 if (lp->options & PCNET32_PORT_ASEL) {
1849 lp->a.write_bcr(ioaddr, 32,
1850 lp->a.read_bcr(ioaddr,
1851 32) | 0x0080);
1852 /* enable auto negotiate, setup, disable fd */
1853 val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
1854 val |= 0x20;
1855 lp->a.write_bcr(ioaddr, 32, val);
1856 }
1857 }
1858 } else {
1859 int first_phy = -1;
1860 u16 bmcr;
1861 u32 bcr9;
1862 struct ethtool_cmd ecmd;
1863
1864 /*
1865 * There is really no good way to handle multiple PHYs
1866 * other than turning off all the automatics
1867 */
1868 val = lp->a.read_bcr(ioaddr, 2);
1869 lp->a.write_bcr(ioaddr, 2, val & ~2);
1870 val = lp->a.read_bcr(ioaddr, 32);
1871 lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
1872
1873 if (!(lp->options & PCNET32_PORT_ASEL)) {
1874 /* setup ecmd */
1875 ecmd.port = PORT_MII;
1876 ecmd.transceiver = XCVR_INTERNAL;
1877 ecmd.autoneg = AUTONEG_DISABLE;
1878 ecmd.speed =
1879 lp->
1880 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
1881 bcr9 = lp->a.read_bcr(ioaddr, 9);
1882
1883 if (lp->options & PCNET32_PORT_FD) {
1884 ecmd.duplex = DUPLEX_FULL;
1885 bcr9 |= (1 << 0);
1886 } else {
1887 ecmd.duplex = DUPLEX_HALF;
1888 bcr9 &= ~(1 << 0); /* half duplex: clear the full-duplex bit */
1889 }
1890 lp->a.write_bcr(ioaddr, 9, bcr9);
ac62ef04 1891 }
4a5e8e29
JG
1892
1893 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
1894 if (lp->phymask & (1 << i)) {
1895 /* isolate all but the first PHY */
1896 bmcr = mdio_read(dev, i, MII_BMCR);
1897 if (first_phy == -1) {
1898 first_phy = i;
1899 mdio_write(dev, i, MII_BMCR,
1900 bmcr & ~BMCR_ISOLATE);
1901 } else {
1902 mdio_write(dev, i, MII_BMCR,
1903 bmcr | BMCR_ISOLATE);
1904 }
1905 /* use mii_ethtool_sset to setup PHY */
1906 lp->mii_if.phy_id = i;
1907 ecmd.phy_address = i;
1908 if (lp->options & PCNET32_PORT_ASEL) {
1909 mii_ethtool_gset(&lp->mii_if, &ecmd);
1910 ecmd.autoneg = AUTONEG_ENABLE;
1911 }
1912 mii_ethtool_sset(&lp->mii_if, &ecmd);
1913 }
1914 }
1915 lp->mii_if.phy_id = first_phy;
1916 if (netif_msg_link(lp))
1917 printk(KERN_INFO "%s: Using PHY number %d.\n",
1918 dev->name, first_phy);
1919 }
1da177e4
LT
1920
1921#ifdef DO_DXSUFLO
4a5e8e29
JG
1922 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
1923 val = lp->a.read_csr(ioaddr, 3);
1924 val |= 0x40;
1925 lp->a.write_csr(ioaddr, 3, val);
1926 }
1da177e4
LT
1927#endif
1928
4a5e8e29
JG
1929 lp->init_block.mode =
1930 le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
1931 pcnet32_load_multicast(dev);
1932
1933 if (pcnet32_init_ring(dev)) {
1934 rc = -ENOMEM;
1935 goto err_free_ring;
1936 }
1937
1938 /* Re-initialize the PCNET32, and start it when done. */
1939 lp->a.write_csr(ioaddr, 1, (lp->dma_addr +
1940 offsetof(struct pcnet32_private,
1941 init_block)) & 0xffff);
1942 lp->a.write_csr(ioaddr, 2,
1943 (lp->dma_addr +
1944 offsetof(struct pcnet32_private, init_block)) >> 16);
1945
1946 lp->a.write_csr(ioaddr, 4, 0x0915);
1947 lp->a.write_csr(ioaddr, 0, 0x0001);
1948
1949 netif_start_queue(dev);
1950
1951 /* Print the link status and start the watchdog */
1952 pcnet32_check_media(dev, 1);
1953 mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
1954
1955 i = 0;
1956 while (i++ < 100)
1957 if (lp->a.read_csr(ioaddr, 0) & 0x0100)
1958 break;
1959 /*
1960 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
1961 * reports that doing so triggers a bug in the '974.
1962 */
1963 lp->a.write_csr(ioaddr, 0, 0x0042);
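	/*
	 * Assuming the standard AMD PCnet CSR0 bit layout, 0x0042 is
	 * INEA (0x0040) | STRT (0x0002): enable interrupts and start
	 * the controller in a single write.
	 */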
1964
1965 if (netif_msg_ifup(lp))
1966 printk(KERN_DEBUG
1967 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
1968 dev->name, i,
1969 (u32) (lp->dma_addr +
1970 offsetof(struct pcnet32_private, init_block)),
1971 lp->a.read_csr(ioaddr, 0));
1972
1973 spin_unlock_irqrestore(&lp->lock, flags);
1974
1975 return 0; /* Always succeed */
1976
1977 err_free_ring:
1978 /* free any allocated skbuffs */
1979 for (i = 0; i < lp->rx_ring_size; i++) {
1980 lp->rx_ring[i].status = 0;
1981 if (lp->rx_skbuff[i]) {
1982 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
1983 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
1984 dev_kfree_skb(lp->rx_skbuff[i]);
1985 }
1986 lp->rx_skbuff[i] = NULL;
1987 lp->rx_dma_addr[i] = 0;
1988 }
1989
4a5e8e29
JG
1990 /*
1991 * Switch back to 16bit mode to avoid problems with dumb
1992 * DOS packet driver after a warm reboot
1993 */
1994 lp->a.write_bcr(ioaddr, 20, 4);
1995
1996 err_free_irq:
1997 spin_unlock_irqrestore(&lp->lock, flags);
1998 free_irq(dev->irq, dev);
1999 return rc;
1da177e4
LT
2000}
2001
2002/*
2003 * The LANCE has been halted for one reason or another (busmaster memory
2004 * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
2005 * etc.). Modern LANCE variants always reload their ring-buffer
2006 * configuration when restarted, so we must reinitialize our ring
2007 * context before restarting. As part of this reinitialization,
2008 * find all packets still on the Tx ring and pretend that they had been
2009 * sent (in effect, drop the packets on the floor) - the higher-level
2010 * protocols will time out and retransmit. It'd be better to shuffle
2011 * these skbs to a temp list and then actually re-Tx them after
2012 * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
2013 */
2014
4a5e8e29 2015static void pcnet32_purge_tx_ring(struct net_device *dev)
1da177e4 2016{
4a5e8e29
JG
2017 struct pcnet32_private *lp = dev->priv;
2018 int i;
1da177e4 2019
4a5e8e29
JG
2020 for (i = 0; i < lp->tx_ring_size; i++) {
2021 lp->tx_ring[i].status = 0; /* CPU owns buffer */
2022 wmb(); /* Make sure adapter sees owner change */
2023 if (lp->tx_skbuff[i]) {
2024 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
2025 lp->tx_skbuff[i]->len,
2026 PCI_DMA_TODEVICE);
2027 dev_kfree_skb_any(lp->tx_skbuff[i]);
2028 }
2029 lp->tx_skbuff[i] = NULL;
2030 lp->tx_dma_addr[i] = 0;
2031 }
2032}
1da177e4
LT
2033
2034/* Initialize the PCNET32 Rx and Tx rings. */
4a5e8e29 2035static int pcnet32_init_ring(struct net_device *dev)
1da177e4 2036{
4a5e8e29
JG
2037 struct pcnet32_private *lp = dev->priv;
2038 int i;
2039
2040 lp->tx_full = 0;
2041 lp->cur_rx = lp->cur_tx = 0;
2042 lp->dirty_rx = lp->dirty_tx = 0;
2043
2044 for (i = 0; i < lp->rx_ring_size; i++) {
2045 struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
2046 if (rx_skbuff == NULL) {
2047 if (!
2048 (rx_skbuff = lp->rx_skbuff[i] =
2049 dev_alloc_skb(PKT_BUF_SZ))) {
2050 /* there is not much we can do at this point */
2051 if (pcnet32_debug & NETIF_MSG_DRV)
2052 printk(KERN_ERR
2053 "%s: pcnet32_init_ring dev_alloc_skb failed.\n",
2054 dev->name);
2055 return -1;
2056 }
2057 skb_reserve(rx_skbuff, 2);
2058 }
2059
2060 rmb();
2061 if (lp->rx_dma_addr[i] == 0)
2062 lp->rx_dma_addr[i] =
2063 pci_map_single(lp->pci_dev, rx_skbuff->data,
2064 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
2065 lp->rx_ring[i].base = (u32) le32_to_cpu(lp->rx_dma_addr[i]);
2066 lp->rx_ring[i].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
2067 wmb(); /* Make sure owner changes after all others are visible */
2068 lp->rx_ring[i].status = le16_to_cpu(0x8000);
2069 }
2070 /* The Tx buffer address is filled in as needed, but we do need to clear
2071 * the upper ownership bit. */
2072 for (i = 0; i < lp->tx_ring_size; i++) {
2073 lp->tx_ring[i].status = 0; /* CPU owns buffer */
2074 wmb(); /* Make sure adapter sees owner change */
2075 lp->tx_ring[i].base = 0;
2076 lp->tx_dma_addr[i] = 0;
2077 }
2078
2079 lp->init_block.tlen_rlen =
2080 le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
2081 for (i = 0; i < 6; i++)
2082 lp->init_block.phys_addr[i] = dev->dev_addr[i];
2083 lp->init_block.rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr);
2084 lp->init_block.tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr);
2085 wmb(); /* Make sure all changes are visible */
2086 return 0;
1da177e4
LT
2087}
2088
2089/* The pcnet32 has been issued a stop or reset. Wait for the stop bit,
2090 * then flush the pending transmit operations, re-initialize the ring,
2091 * and tell the chip to initialize.
2092 */
4a5e8e29 2093static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
1da177e4 2094{
4a5e8e29
JG
2095 struct pcnet32_private *lp = dev->priv;
2096 unsigned long ioaddr = dev->base_addr;
2097 int i;
1da177e4 2098
4a5e8e29
JG
2099 /* wait for stop */
2100 for (i = 0; i < 100; i++)
2101 if (lp->a.read_csr(ioaddr, 0) & 0x0004)
2102 break;
1da177e4 2103
4a5e8e29
JG
2104 if (i >= 100 && netif_msg_drv(lp))
2105 printk(KERN_ERR
2106 "%s: pcnet32_restart timed out waiting for stop.\n",
2107 dev->name);
1da177e4 2108
4a5e8e29
JG
2109 pcnet32_purge_tx_ring(dev);
2110 if (pcnet32_init_ring(dev))
2111 return;
1da177e4 2112
4a5e8e29
JG
2113 /* ReInit Ring */
2114 lp->a.write_csr(ioaddr, 0, 1);
2115 i = 0;
2116 while (i++ < 1000)
2117 if (lp->a.read_csr(ioaddr, 0) & 0x0100)
2118 break;
1da177e4 2119
4a5e8e29 2120 lp->a.write_csr(ioaddr, 0, csr0_bits);
1da177e4
LT
2121}
2122
4a5e8e29 2123static void pcnet32_tx_timeout(struct net_device *dev)
1da177e4 2124{
4a5e8e29
JG
2125 struct pcnet32_private *lp = dev->priv;
2126 unsigned long ioaddr = dev->base_addr, flags;
2127
2128 spin_lock_irqsave(&lp->lock, flags);
2129 /* Transmitter timeout, serious problems. */
2130 if (pcnet32_debug & NETIF_MSG_DRV)
2131 printk(KERN_ERR
2132 "%s: transmit timed out, status %4.4x, resetting.\n",
2133 dev->name, lp->a.read_csr(ioaddr, 0));
2134 lp->a.write_csr(ioaddr, 0, 0x0004);
2135 lp->stats.tx_errors++;
2136 if (netif_msg_tx_err(lp)) {
2137 int i;
2138 printk(KERN_DEBUG
2139 " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
2140 lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
2141 lp->cur_rx);
2142 for (i = 0; i < lp->rx_ring_size; i++)
2143 printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
2144 le32_to_cpu(lp->rx_ring[i].base),
2145 (-le16_to_cpu(lp->rx_ring[i].buf_length)) &
2146 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length),
2147 le16_to_cpu(lp->rx_ring[i].status));
2148 for (i = 0; i < lp->tx_ring_size; i++)
2149 printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
2150 le32_to_cpu(lp->tx_ring[i].base),
2151 (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
2152 le32_to_cpu(lp->tx_ring[i].misc),
2153 le16_to_cpu(lp->tx_ring[i].status));
2154 printk("\n");
2155 }
2156 pcnet32_restart(dev, 0x0042);
1da177e4 2157
4a5e8e29
JG
2158 dev->trans_start = jiffies;
2159 netif_wake_queue(dev);
1da177e4 2160
4a5e8e29
JG
2161 spin_unlock_irqrestore(&lp->lock, flags);
2162}
2163
2164static int pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
1da177e4 2165{
4a5e8e29
JG
2166 struct pcnet32_private *lp = dev->priv;
2167 unsigned long ioaddr = dev->base_addr;
2168 u16 status;
2169 int entry;
2170 unsigned long flags;
1da177e4 2171
4a5e8e29 2172 spin_lock_irqsave(&lp->lock, flags);
1da177e4 2173
4a5e8e29
JG
2174 if (netif_msg_tx_queued(lp)) {
2175 printk(KERN_DEBUG
2176 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
2177 dev->name, lp->a.read_csr(ioaddr, 0));
2178 }
1da177e4 2179
4a5e8e29
JG
2180 /* Default status -- will not enable Successful-TxDone
2181 * interrupt when that option is available to us.
2182 */
2183 status = 0x8300;
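	/*
	 * Assuming the standard PCnet Tx descriptor (TMD1) layout, 0x8300 is
	 * OWN (0x8000) | STP (0x0200) | ENP (0x0100): the chip owns the
	 * descriptor and the buffer carries a complete frame (start and end
	 * of packet in one buffer).
	 */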
1da177e4 2184
4a5e8e29 2185 /* Fill in a Tx ring entry */
1da177e4 2186
4a5e8e29
JG
2187 /* Mask to ring buffer boundary. */
2188 entry = lp->cur_tx & lp->tx_mod_mask;
1da177e4 2189
4a5e8e29
JG
2190 /* Caution: the write order is important here, set the status
2191 * with the "ownership" bits last. */
1da177e4 2192
4a5e8e29 2193 lp->tx_ring[entry].length = le16_to_cpu(-skb->len);
1da177e4 2194
4a5e8e29 2195 lp->tx_ring[entry].misc = 0x00000000;
1da177e4 2196
4a5e8e29
JG
2197 lp->tx_skbuff[entry] = skb;
2198 lp->tx_dma_addr[entry] =
2199 pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2200 lp->tx_ring[entry].base = (u32) le32_to_cpu(lp->tx_dma_addr[entry]);
2201 wmb(); /* Make sure owner changes after all others are visible */
2202 lp->tx_ring[entry].status = le16_to_cpu(status);
1da177e4 2203
4a5e8e29
JG
2204 lp->cur_tx++;
2205 lp->stats.tx_bytes += skb->len;
1da177e4 2206
4a5e8e29
JG
2207 /* Trigger an immediate send poll. */
2208 lp->a.write_csr(ioaddr, 0, 0x0048);
1da177e4 2209
4a5e8e29 2210 dev->trans_start = jiffies;
1da177e4 2211
4a5e8e29
JG
2212 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
2213 lp->tx_full = 1;
2214 netif_stop_queue(dev);
2215 }
2216 spin_unlock_irqrestore(&lp->lock, flags);
2217 return 0;
1da177e4
LT
2218}
2219
2220/* The PCNET32 interrupt handler. */
2221static irqreturn_t
4a5e8e29 2222pcnet32_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1da177e4 2223{
4a5e8e29
JG
2224 struct net_device *dev = dev_id;
2225 struct pcnet32_private *lp;
2226 unsigned long ioaddr;
2227 u16 csr0, rap;
2228 int boguscnt = max_interrupt_work;
2229 int must_restart;
2230
2231 if (!dev) {
2232 if (pcnet32_debug & NETIF_MSG_INTR)
2233 printk(KERN_DEBUG "%s(): irq %d for unknown device\n",
2234 __FUNCTION__, irq);
2235 return IRQ_NONE;
1da177e4 2236 }
1da177e4 2237
4a5e8e29
JG
2238 ioaddr = dev->base_addr;
2239 lp = dev->priv;
1da177e4 2240
4a5e8e29
JG
2241 spin_lock(&lp->lock);
2242
2243 rap = lp->a.read_rap(ioaddr);
2244 while ((csr0 = lp->a.read_csr(ioaddr, 0)) & 0x8f00 && --boguscnt >= 0) {
2245 if (csr0 == 0xffff) {
2246 break; /* PCMCIA remove happened */
2247 }
2248 /* Acknowledge all of the current interrupt sources ASAP. */
2249 lp->a.write_csr(ioaddr, 0, csr0 & ~0x004f);
2250
2251 must_restart = 0;
2252
2253 if (netif_msg_intr(lp))
2254 printk(KERN_DEBUG
2255 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
2256 dev->name, csr0, lp->a.read_csr(ioaddr, 0));
2257
2258 if (csr0 & 0x0400) /* Rx interrupt */
2259 pcnet32_rx(dev);
2260
2261 if (csr0 & 0x0200) { /* Tx-done interrupt */
2262 unsigned int dirty_tx = lp->dirty_tx;
2263 int delta;
2264
2265 while (dirty_tx != lp->cur_tx) {
2266 int entry = dirty_tx & lp->tx_mod_mask;
2267 int status =
2268 (short)le16_to_cpu(lp->tx_ring[entry].
2269 status);
2270
2271 if (status < 0)
2272 break; /* It still hasn't been Txed */
2273
2274 lp->tx_ring[entry].base = 0;
2275
2276 if (status & 0x4000) {
2277 /* There was a major error, log it. */
2278 int err_status =
2279 le32_to_cpu(lp->tx_ring[entry].
2280 misc);
2281 lp->stats.tx_errors++;
2282 if (netif_msg_tx_err(lp))
2283 printk(KERN_ERR
2284 "%s: Tx error status=%04x err_status=%08x\n",
2285 dev->name, status,
2286 err_status);
2287 if (err_status & 0x04000000)
2288 lp->stats.tx_aborted_errors++;
2289 if (err_status & 0x08000000)
2290 lp->stats.tx_carrier_errors++;
2291 if (err_status & 0x10000000)
2292 lp->stats.tx_window_errors++;
1da177e4 2293#ifndef DO_DXSUFLO
4a5e8e29
JG
2294 if (err_status & 0x40000000) {
2295 lp->stats.tx_fifo_errors++;
2296 /* Ackk! On FIFO errors the Tx unit is turned off! */
2297 /* Remove this verbosity later! */
2298 if (netif_msg_tx_err(lp))
2299 printk(KERN_ERR
2300 "%s: Tx FIFO error! CSR0=%4.4x\n",
2301 dev->name, csr0);
2302 must_restart = 1;
2303 }
1da177e4 2304#else
4a5e8e29
JG
2305 if (err_status & 0x40000000) {
2306 lp->stats.tx_fifo_errors++;
2307 if (!lp->dxsuflo) { /* If controller doesn't recover ... */
2308 /* Ackk! On FIFO errors the Tx unit is turned off! */
2309 /* Remove this verbosity later! */
2310 if (netif_msg_tx_err
2311 (lp))
2312 printk(KERN_ERR
2313 "%s: Tx FIFO error! CSR0=%4.4x\n",
2314 dev->
2315 name,
2316 csr0);
2317 must_restart = 1;
2318 }
2319 }
1da177e4 2320#endif
4a5e8e29
JG
2321 } else {
2322 if (status & 0x1800)
2323 lp->stats.collisions++;
2324 lp->stats.tx_packets++;
2325 }
2326
2327 /* We must free the original skb */
2328 if (lp->tx_skbuff[entry]) {
2329 pci_unmap_single(lp->pci_dev,
2330 lp->tx_dma_addr[entry],
2331 lp->tx_skbuff[entry]->
2332 len, PCI_DMA_TODEVICE);
2333 dev_kfree_skb_irq(lp->tx_skbuff[entry]);
2334 lp->tx_skbuff[entry] = NULL;
2335 lp->tx_dma_addr[entry] = 0;
2336 }
2337 dirty_tx++;
2338 }
2339
2340 delta =
2341 (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask +
2342 lp->tx_ring_size);
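			/*
			 * Assuming tx_mod_mask is tx_ring_size - 1 (power-of-two
			 * rings), the mask above is 2 * tx_ring_size - 1, so the
			 * difference is taken modulo twice the ring size; a value
			 * larger than tx_ring_size then flags the out-of-sync
			 * dirty pointer handled below.
			 */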
2343 if (delta > lp->tx_ring_size) {
2344 if (netif_msg_drv(lp))
2345 printk(KERN_ERR
2346 "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
2347 dev->name, dirty_tx, lp->cur_tx,
2348 lp->tx_full);
2349 dirty_tx += lp->tx_ring_size;
2350 delta -= lp->tx_ring_size;
2351 }
2352
2353 if (lp->tx_full &&
2354 netif_queue_stopped(dev) &&
2355 delta < lp->tx_ring_size - 2) {
2356 /* The ring is no longer full, clear tbusy. */
2357 lp->tx_full = 0;
2358 netif_wake_queue(dev);
2359 }
2360 lp->dirty_tx = dirty_tx;
2361 }
2362
2363 /* Log misc errors. */
2364 if (csr0 & 0x4000)
2365 lp->stats.tx_errors++; /* Tx babble. */
2366 if (csr0 & 0x1000) {
2367 /*
2368 * this happens when our receive ring is full. This shouldn't
2369 * be a problem as we will see normal rx interrupts for the frames
2370 * in the receive ring. But there are some PCI chipsets (I can
2371 * reproduce this on SP3G with the Intel Saturn chipset) which
2372 * sometimes have problems and will fill up the receive ring with
2373 * error descriptors. In this situation we don't get an rx
2374 * interrupt, but a missed frame interrupt sooner or later.
2375 * So we try to clean up our receive ring here.
2376 */
2377 pcnet32_rx(dev);
2378 lp->stats.rx_errors++; /* Missed a Rx frame. */
2379 }
2380 if (csr0 & 0x0800) {
2381 if (netif_msg_drv(lp))
2382 printk(KERN_ERR
2383 "%s: Bus master arbitration failure, status %4.4x.\n",
2384 dev->name, csr0);
2385 /* unlike for the lance, there is no restart needed */
1da177e4
LT
2386 }
2387
4a5e8e29
JG
2388 if (must_restart) {
2389 /* reset the chip to clear the error condition, then restart */
2390 lp->a.reset(ioaddr);
2391 lp->a.write_csr(ioaddr, 4, 0x0915);
2392 pcnet32_restart(dev, 0x0002);
2393 netif_wake_queue(dev);
1da177e4 2394 }
4a5e8e29
JG
2395 }
2396
2397 /* Set interrupt enable. */
2398 lp->a.write_csr(ioaddr, 0, 0x0040);
2399 lp->a.write_rap(ioaddr, rap);
2400
2401 if (netif_msg_intr(lp))
2402 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
2403 dev->name, lp->a.read_csr(ioaddr, 0));
2404
2405 spin_unlock(&lp->lock);
2406
2407 return IRQ_HANDLED;
1da177e4
LT
2408}
2409
4a5e8e29 2410static int pcnet32_rx(struct net_device *dev)
1da177e4 2411{
4a5e8e29
JG
2412 struct pcnet32_private *lp = dev->priv;
2413 int entry = lp->cur_rx & lp->rx_mod_mask;
2414 int boguscnt = lp->rx_ring_size / 2;
2415
2416 /* If we own the next entry, it's a new packet. Send it up. */
2417 while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
2418 int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8;
2419
2420 if (status != 0x03) { /* There was an error. */
2421 /*
2422 * There is a tricky error noted by John Murphy,
2423 * <murf@perftech.com> to Russ Nelson: Even with full-sized
2424 * buffers it's possible for a jabber packet to use two
2425 * buffers, with only the last correctly noting the error.
2426 */
2427 if (status & 0x01) /* Only count a general error at the */
2428 lp->stats.rx_errors++; /* end of a packet. */
2429 if (status & 0x20)
2430 lp->stats.rx_frame_errors++;
2431 if (status & 0x10)
2432 lp->stats.rx_over_errors++;
2433 if (status & 0x08)
2434 lp->stats.rx_crc_errors++;
2435 if (status & 0x04)
2436 lp->stats.rx_fifo_errors++;
2437 lp->rx_ring[entry].status &= le16_to_cpu(0x03ff);
1da177e4 2438 } else {
4a5e8e29
JG
2439 /* Malloc up new buffer, compatible with net-2e. */
2440 short pkt_len =
2441 (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff)
2442 - 4;
2443 struct sk_buff *skb;
2444
2445 /* Discard oversize frames. */
2446 if (unlikely(pkt_len > PKT_BUF_SZ - 2)) {
2447 if (netif_msg_drv(lp))
2448 printk(KERN_ERR
2449 "%s: Impossible packet size %d!\n",
2450 dev->name, pkt_len);
2451 lp->stats.rx_errors++;
2452 } else if (pkt_len < 60) {
2453 if (netif_msg_rx_err(lp))
2454 printk(KERN_ERR "%s: Runt packet!\n",
2455 dev->name);
2456 lp->stats.rx_errors++;
2457 } else {
2458 int rx_in_place = 0;
2459
2460 if (pkt_len > rx_copybreak) {
2461 struct sk_buff *newskb;
2462
2463 if ((newskb =
2464 dev_alloc_skb(PKT_BUF_SZ))) {
2465 skb_reserve(newskb, 2);
2466 skb = lp->rx_skbuff[entry];
2467 pci_unmap_single(lp->pci_dev,
2468 lp->
2469 rx_dma_addr
2470 [entry],
2471 PKT_BUF_SZ - 2,
2472 PCI_DMA_FROMDEVICE);
2473 skb_put(skb, pkt_len);
2474 lp->rx_skbuff[entry] = newskb;
2475 newskb->dev = dev;
2476 lp->rx_dma_addr[entry] =
2477 pci_map_single(lp->pci_dev,
2478 newskb->data,
2479 PKT_BUF_SZ -
2480 2,
2481 PCI_DMA_FROMDEVICE);
2482 lp->rx_ring[entry].base =
2483 le32_to_cpu(lp->
2484 rx_dma_addr
2485 [entry]);
2486 rx_in_place = 1;
2487 } else
2488 skb = NULL;
2489 } else {
2490 skb = dev_alloc_skb(pkt_len + 2);
2491 }
2492
2493 if (skb == NULL) {
2494 int i;
2495 if (netif_msg_drv(lp))
2496 printk(KERN_ERR
2497 "%s: Memory squeeze, deferring packet.\n",
2498 dev->name);
2499 for (i = 0; i < lp->rx_ring_size; i++)
2500 if ((short)
2501 le16_to_cpu(lp->
2502 rx_ring[(entry +
2503 i)
2504 & lp->
2505 rx_mod_mask].
2506 status) < 0)
2507 break;
2508
2509 if (i > lp->rx_ring_size - 2) {
2510 lp->stats.rx_dropped++;
2511 lp->rx_ring[entry].status |=
2512 le16_to_cpu(0x8000);
2513 wmb(); /* Make sure adapter sees owner change */
2514 lp->cur_rx++;
2515 }
2516 break;
2517 }
2518 skb->dev = dev;
2519 if (!rx_in_place) {
2520 skb_reserve(skb, 2); /* 16 byte align */
2521 skb_put(skb, pkt_len); /* Make room */
2522 pci_dma_sync_single_for_cpu(lp->pci_dev,
2523 lp->
2524 rx_dma_addr
2525 [entry],
2526 PKT_BUF_SZ -
2527 2,
2528 PCI_DMA_FROMDEVICE);
2529 eth_copy_and_sum(skb,
2530 (unsigned char *)(lp->
2531 rx_skbuff
2532 [entry]->
2533 data),
2534 pkt_len, 0);
2535 pci_dma_sync_single_for_device(lp->
2536 pci_dev,
2537 lp->
2538 rx_dma_addr
2539 [entry],
2540 PKT_BUF_SZ
2541 - 2,
2542 PCI_DMA_FROMDEVICE);
2543 }
2544 lp->stats.rx_bytes += skb->len;
2545 skb->protocol = eth_type_trans(skb, dev);
2546 netif_rx(skb);
2547 dev->last_rx = jiffies;
2548 lp->stats.rx_packets++;
2549 }
1da177e4 2550 }
4a5e8e29
JG
2551 /*
2552 * The docs say that the buffer length isn't touched, but Andrew Boyd
2553 * of QNX reports that some revs of the 79C965 clear it.
2554 */
2555 lp->rx_ring[entry].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
2556 wmb(); /* Make sure owner changes after all others are visible */
2557 lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
2558 entry = (++lp->cur_rx) & lp->rx_mod_mask;
2559 if (--boguscnt <= 0)
2560 break; /* don't stay in loop forever */
1da177e4 2561 }
4a5e8e29
JG
2562
2563 return 0;
1da177e4
LT
2564}
2565
4a5e8e29 2566static int pcnet32_close(struct net_device *dev)
1da177e4 2567{
4a5e8e29
JG
2568 unsigned long ioaddr = dev->base_addr;
2569 struct pcnet32_private *lp = dev->priv;
2570 int i;
2571 unsigned long flags;
1da177e4 2572
4a5e8e29 2573 del_timer_sync(&lp->watchdog_timer);
1da177e4 2574
4a5e8e29 2575 netif_stop_queue(dev);
1da177e4 2576
4a5e8e29 2577 spin_lock_irqsave(&lp->lock, flags);
1da177e4 2578
4a5e8e29 2579 lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
1da177e4 2580
4a5e8e29
JG
2581 if (netif_msg_ifdown(lp))
2582 printk(KERN_DEBUG
2583 "%s: Shutting down ethercard, status was %2.2x.\n",
2584 dev->name, lp->a.read_csr(ioaddr, 0));
1da177e4 2585
4a5e8e29
JG
2586 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
2587 lp->a.write_csr(ioaddr, 0, 0x0004);
1da177e4 2588
4a5e8e29
JG
2589 /*
2590 * Switch back to 16bit mode to avoid problems with dumb
2591 * DOS packet driver after a warm reboot
2592 */
2593 lp->a.write_bcr(ioaddr, 20, 4);
1da177e4 2594
4a5e8e29 2595 spin_unlock_irqrestore(&lp->lock, flags);
1da177e4 2596
4a5e8e29 2597 free_irq(dev->irq, dev);
1da177e4 2598
4a5e8e29 2599 spin_lock_irqsave(&lp->lock, flags);
1da177e4 2600
4a5e8e29
JG
2601 /* free all allocated skbuffs */
2602 for (i = 0; i < lp->rx_ring_size; i++) {
2603 lp->rx_ring[i].status = 0;
2604 wmb(); /* Make sure adapter sees owner change */
2605 if (lp->rx_skbuff[i]) {
2606 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
2607 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
2608 dev_kfree_skb(lp->rx_skbuff[i]);
2609 }
2610 lp->rx_skbuff[i] = NULL;
2611 lp->rx_dma_addr[i] = 0;
1da177e4 2612 }
1da177e4 2613
4a5e8e29
JG
2614 for (i = 0; i < lp->tx_ring_size; i++) {
2615 lp->tx_ring[i].status = 0; /* CPU owns buffer */
2616 wmb(); /* Make sure adapter sees owner change */
2617 if (lp->tx_skbuff[i]) {
2618 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
2619 lp->tx_skbuff[i]->len,
2620 PCI_DMA_TODEVICE);
2621 dev_kfree_skb(lp->tx_skbuff[i]);
2622 }
2623 lp->tx_skbuff[i] = NULL;
2624 lp->tx_dma_addr[i] = 0;
1da177e4 2625 }
1da177e4 2626
4a5e8e29 2627 spin_unlock_irqrestore(&lp->lock, flags);
1da177e4 2628
4a5e8e29 2629 return 0;
1da177e4
LT
2630}
2631
4a5e8e29 2632static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
1da177e4 2633{
4a5e8e29
JG
2634 struct pcnet32_private *lp = dev->priv;
2635 unsigned long ioaddr = dev->base_addr;
2636 u16 saved_addr;
2637 unsigned long flags;
2638
2639 spin_lock_irqsave(&lp->lock, flags);
2640 saved_addr = lp->a.read_rap(ioaddr);
2641 lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
2642 lp->a.write_rap(ioaddr, saved_addr);
2643 spin_unlock_irqrestore(&lp->lock, flags);
2644
2645 return &lp->stats;
1da177e4
LT
2646}
2647
2648/* taken from the sunlance driver, which it took from the depca driver */
4a5e8e29 2649static void pcnet32_load_multicast(struct net_device *dev)
1da177e4 2650{
4a5e8e29
JG
2651 struct pcnet32_private *lp = dev->priv;
2652 volatile struct pcnet32_init_block *ib = &lp->init_block;
2653 volatile u16 *mcast_table = (u16 *) & ib->filter;
2654 struct dev_mc_list *dmi = dev->mc_list;
2655 char *addrs;
2656 int i;
2657 u32 crc;
2658
2659 /* set all multicast bits */
2660 if (dev->flags & IFF_ALLMULTI) {
2661 ib->filter[0] = 0xffffffff;
2662 ib->filter[1] = 0xffffffff;
2663 return;
2664 }
2665 /* clear the multicast filter */
2666 ib->filter[0] = 0;
2667 ib->filter[1] = 0;
2668
2669 /* Add addresses */
2670 for (i = 0; i < dev->mc_count; i++) {
2671 addrs = dmi->dmi_addr;
2672 dmi = dmi->next;
2673
2674 /* multicast address? */
2675 if (!(*addrs & 1))
2676 continue;
2677
2678 crc = ether_crc_le(6, addrs);
2679 crc = crc >> 26;
2680 mcast_table[crc >> 4] =
2681 le16_to_cpu(le16_to_cpu(mcast_table[crc >> 4]) |
2682 (1 << (crc & 0xf)));
2683 }
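	/*
	 * Worked example (hypothetical address): if the top six bits of the
	 * little-endian CRC come out as 0x27, the code above sets bit
	 * (0x27 & 0xf) = 7 in 16-bit filter word (0x27 >> 4) = 2.
	 */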
1da177e4 2684 return;
1da177e4
LT
2685}
2686
1da177e4
LT
2687/*
2688 * Set or clear the multicast filter for this adaptor.
2689 */
2690static void pcnet32_set_multicast_list(struct net_device *dev)
2691{
4a5e8e29
JG
2692 unsigned long ioaddr = dev->base_addr, flags;
2693 struct pcnet32_private *lp = dev->priv;
2694
2695 spin_lock_irqsave(&lp->lock, flags);
2696 if (dev->flags & IFF_PROMISC) {
2697 /* Log any net taps. */
2698 if (netif_msg_hw(lp))
2699 printk(KERN_INFO "%s: Promiscuous mode enabled.\n",
2700 dev->name);
2701 lp->init_block.mode =
2702 le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
2703 7);
2704 } else {
2705 lp->init_block.mode =
2706 le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
2707 pcnet32_load_multicast(dev);
2708 }
2709
2710 lp->a.write_csr(ioaddr, 0, 0x0004); /* Temporarily stop the lance. */
2711 pcnet32_restart(dev, 0x0042); /* Resume normal operation */
2712 netif_wake_queue(dev);
2713
2714 spin_unlock_irqrestore(&lp->lock, flags);
1da177e4
LT
2715}
2716
2717/* This routine assumes that the lp->lock is held */
2718static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
2719{
4a5e8e29
JG
2720 struct pcnet32_private *lp = dev->priv;
2721 unsigned long ioaddr = dev->base_addr;
2722 u16 val_out;
1da177e4 2723
4a5e8e29
JG
2724 if (!lp->mii)
2725 return 0;
1da177e4 2726
4a5e8e29
JG
2727 lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
2728 val_out = lp->a.read_bcr(ioaddr, 34);
1da177e4 2729
4a5e8e29 2730 return val_out;
1da177e4
LT
2731}
2732
2733/* This routine assumes that the lp->lock is held */
2734static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
2735{
4a5e8e29
JG
2736 struct pcnet32_private *lp = dev->priv;
2737 unsigned long ioaddr = dev->base_addr;
1da177e4 2738
4a5e8e29
JG
2739 if (!lp->mii)
2740 return;
1da177e4 2741
4a5e8e29
JG
2742 lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
2743 lp->a.write_bcr(ioaddr, 34, val);
1da177e4
LT
2744}
2745
2746static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2747{
4a5e8e29
JG
2748 struct pcnet32_private *lp = dev->priv;
2749 int rc;
2750 unsigned long flags;
1da177e4 2751
4a5e8e29
JG
2752 /* SIOC[GS]MIIxxx ioctls */
2753 if (lp->mii) {
2754 spin_lock_irqsave(&lp->lock, flags);
2755 rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
2756 spin_unlock_irqrestore(&lp->lock, flags);
2757 } else {
2758 rc = -EOPNOTSUPP;
2759 }
1da177e4 2760
4a5e8e29 2761 return rc;
1da177e4
LT
2762}
2763
ac62ef04
DF
2764static int pcnet32_check_otherphy(struct net_device *dev)
2765{
4a5e8e29
JG
2766 struct pcnet32_private *lp = dev->priv;
2767 struct mii_if_info mii = lp->mii_if;
2768 u16 bmcr;
2769 int i;
ac62ef04 2770
4a5e8e29
JG
2771 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
2772 if (i == lp->mii_if.phy_id)
2773 continue; /* skip active phy */
2774 if (lp->phymask & (1 << i)) {
2775 mii.phy_id = i;
2776 if (mii_link_ok(&mii)) {
2777 /* found PHY with active link */
2778 if (netif_msg_link(lp))
2779 printk(KERN_INFO
2780 "%s: Using PHY number %d.\n",
2781 dev->name, i);
2782
2783 /* isolate inactive phy */
2784 bmcr =
2785 mdio_read(dev, lp->mii_if.phy_id, MII_BMCR);
2786 mdio_write(dev, lp->mii_if.phy_id, MII_BMCR,
2787 bmcr | BMCR_ISOLATE);
2788
2789 /* de-isolate new phy */
2790 bmcr = mdio_read(dev, i, MII_BMCR);
2791 mdio_write(dev, i, MII_BMCR,
2792 bmcr & ~BMCR_ISOLATE);
2793
2794 /* set new phy address */
2795 lp->mii_if.phy_id = i;
2796 return 1;
2797 }
2798 }
ac62ef04 2799 }
4a5e8e29 2800 return 0;
ac62ef04
DF
2801}
2802
2803/*
2804 * Show the status of the media. Similar to mii_check_media, however it
2805 * correctly shows the link speed for all (tested) pcnet32 variants.
2806 * Devices with no mii just report link state without speed.
2807 *
2808 * Caller is assumed to hold and release the lp->lock.
2809 */
2810
2811static void pcnet32_check_media(struct net_device *dev, int verbose)
2812{
4a5e8e29
JG
2813 struct pcnet32_private *lp = dev->priv;
2814 int curr_link;
2815 int prev_link = netif_carrier_ok(dev) ? 1 : 0;
2816 u32 bcr9;
2817
ac62ef04 2818 if (lp->mii) {
4a5e8e29 2819 curr_link = mii_link_ok(&lp->mii_if);
ac62ef04 2820 } else {
4a5e8e29
JG
2821 ulong ioaddr = dev->base_addr; /* card base I/O address */
2822 curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
2823 }
2824 if (!curr_link) {
2825 if (prev_link || verbose) {
2826 netif_carrier_off(dev);
2827 if (netif_msg_link(lp))
2828 printk(KERN_INFO "%s: link down\n", dev->name);
2829 }
2830 if (lp->phycount > 1) {
2831 curr_link = pcnet32_check_otherphy(dev);
2832 prev_link = 0;
2833 }
2834 } else if (verbose || !prev_link) {
2835 netif_carrier_on(dev);
2836 if (lp->mii) {
2837 if (netif_msg_link(lp)) {
2838 struct ethtool_cmd ecmd;
2839 mii_ethtool_gset(&lp->mii_if, &ecmd);
2840 printk(KERN_INFO
2841 "%s: link up, %sMbps, %s-duplex\n",
2842 dev->name,
2843 (ecmd.speed == SPEED_100) ? "100" : "10",
2844 (ecmd.duplex ==
2845 DUPLEX_FULL) ? "full" : "half");
2846 }
2847 bcr9 = lp->a.read_bcr(dev->base_addr, 9);
2848 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
2849 if (lp->mii_if.full_duplex)
2850 bcr9 |= (1 << 0);
2851 else
2852 bcr9 &= ~(1 << 0);
2853 lp->a.write_bcr(dev->base_addr, 9, bcr9);
2854 }
2855 } else {
2856 if (netif_msg_link(lp))
2857 printk(KERN_INFO "%s: link up\n", dev->name);
2858 }
ac62ef04 2859 }
ac62ef04
DF
2860}
2861
2862/*
2863 * Check for loss of link and link establishment.
2864 * Cannot use mii_check_media because it does nothing if the mode is forced.
2865 */
2866
1da177e4
LT
2867static void pcnet32_watchdog(struct net_device *dev)
2868{
4a5e8e29
JG
2869 struct pcnet32_private *lp = dev->priv;
2870 unsigned long flags;
1da177e4 2871
4a5e8e29
JG
2872 /* Print the link status if it has changed */
2873 spin_lock_irqsave(&lp->lock, flags);
2874 pcnet32_check_media(dev, 0);
2875 spin_unlock_irqrestore(&lp->lock, flags);
1da177e4 2876
4a5e8e29 2877 mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
1da177e4
LT
2878}
2879
2880static void __devexit pcnet32_remove_one(struct pci_dev *pdev)
2881{
4a5e8e29
JG
2882 struct net_device *dev = pci_get_drvdata(pdev);
2883
2884 if (dev) {
2885 struct pcnet32_private *lp = dev->priv;
2886
2887 unregister_netdev(dev);
2888 pcnet32_free_ring(dev);
2889 release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
2890 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
2891 free_netdev(dev);
2892 pci_disable_device(pdev);
2893 pci_set_drvdata(pdev, NULL);
2894 }
1da177e4
LT
2895}
2896
2897static struct pci_driver pcnet32_driver = {
4a5e8e29
JG
2898 .name = DRV_NAME,
2899 .probe = pcnet32_probe_pci,
2900 .remove = __devexit_p(pcnet32_remove_one),
2901 .id_table = pcnet32_pci_tbl,
1da177e4
LT
2902};
2903
2904/* An additional parameter that may be passed in... */
2905static int debug = -1;
2906static int tx_start_pt = -1;
2907static int pcnet32_have_pci;
2908
2909module_param(debug, int, 0);
2910MODULE_PARM_DESC(debug, DRV_NAME " debug level");
2911module_param(max_interrupt_work, int, 0);
4a5e8e29
JG
2912MODULE_PARM_DESC(max_interrupt_work,
2913 DRV_NAME " maximum events handled per interrupt");
1da177e4 2914module_param(rx_copybreak, int, 0);
4a5e8e29
JG
2915MODULE_PARM_DESC(rx_copybreak,
2916 DRV_NAME " copy breakpoint for copy-only-tiny-frames");
1da177e4
LT
2917module_param(tx_start_pt, int, 0);
2918MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)");
2919module_param(pcnet32vlb, int, 0);
2920MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)");
2921module_param_array(options, int, NULL, 0);
2922MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)");
2923module_param_array(full_duplex, int, NULL, 0);
2924MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)");
2925/* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */
2926module_param_array(homepna, int, NULL, 0);
4a5e8e29
JG
2927MODULE_PARM_DESC(homepna,
2928 DRV_NAME
2929 " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet");
1da177e4
LT
2930
2931MODULE_AUTHOR("Thomas Bogendoerfer");
2932MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards");
2933MODULE_LICENSE("GPL");
2934
2935#define PCNET32_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
2936
2937static int __init pcnet32_init_module(void)
2938{
4a5e8e29 2939 printk(KERN_INFO "%s", version);
1da177e4 2940
4a5e8e29 2941 pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
1da177e4 2942
4a5e8e29
JG
2943 if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
2944 tx_start = tx_start_pt;
1da177e4 2945
4a5e8e29
JG
2946 /* find the PCI devices */
2947 if (!pci_module_init(&pcnet32_driver))
2948 pcnet32_have_pci = 1;
1da177e4 2949
4a5e8e29
JG
2950 /* should we find any remaining VLbus devices ? */
2951 if (pcnet32vlb)
dcaf9769 2952 pcnet32_probe_vlbus(pcnet32_portlist);
1da177e4 2953
4a5e8e29
JG
2954 if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
2955 printk(KERN_INFO PFX "%d cards found.\n", cards_found);
1da177e4 2956
4a5e8e29 2957 return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
1da177e4
LT
2958}
2959
2960static void __exit pcnet32_cleanup_module(void)
2961{
4a5e8e29
JG
2962 struct net_device *next_dev;
2963
2964 while (pcnet32_dev) {
2965 struct pcnet32_private *lp = pcnet32_dev->priv;
2966 next_dev = lp->next;
2967 unregister_netdev(pcnet32_dev);
2968 pcnet32_free_ring(pcnet32_dev);
2969 release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
2970 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
2971 free_netdev(pcnet32_dev);
2972 pcnet32_dev = next_dev;
2973 }
1da177e4 2974
4a5e8e29
JG
2975 if (pcnet32_have_pci)
2976 pci_unregister_driver(&pcnet32_driver);
1da177e4
LT
2977}
2978
2979module_init(pcnet32_init_module);
2980module_exit(pcnet32_cleanup_module);
2981
2982/*
2983 * Local variables:
2984 * c-indent-level: 4
2985 * tab-width: 8
2986 * End:
2987 */