net: use netdev_mc_count and netdev_mc_empty when appropriate
drivers/net/tulip/tulip_core.c
1 /* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux.
2
3 Copyright 2000,2001 The Linux Kernel Team
4 Written/copyright 1994-2001 by Donald Becker.
5
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
8
9 Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
10 for more information on this driver.
11
12 Please submit bugs to http://bugzilla.kernel.org/ .
13 */
14
15
16 #define DRV_NAME "tulip"
17 #ifdef CONFIG_TULIP_NAPI
18 #define DRV_VERSION "1.1.15-NAPI" /* Keep at least for test */
19 #else
20 #define DRV_VERSION "1.1.15"
21 #endif
22 #define DRV_RELDATE "Feb 27, 2007"
23
24
25 #include <linux/module.h>
26 #include <linux/pci.h>
27 #include "tulip.h"
28 #include <linux/init.h>
29 #include <linux/etherdevice.h>
30 #include <linux/delay.h>
31 #include <linux/mii.h>
32 #include <linux/ethtool.h>
33 #include <linux/crc32.h>
34 #include <asm/unaligned.h>
35 #include <asm/uaccess.h>
36
37 #ifdef CONFIG_SPARC
38 #include <asm/prom.h>
39 #endif
40
41 static char version[] __devinitdata =
42 "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
43
44 /* A few user-configurable values. */
45
46 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
47 static unsigned int max_interrupt_work = 25;
48
49 #define MAX_UNITS 8
50 /* Used to pass the full-duplex flag, etc. */
51 static int full_duplex[MAX_UNITS];
52 static int options[MAX_UNITS];
53 static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */
54
55 /* The possible media types that can be set in options[] are: */
56 const char * const medianame[32] = {
57 "10baseT", "10base2", "AUI", "100baseTx",
58 "10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
59 "100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
60 "10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
61 "MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
62 "","","","", "","","","", "","","","Transceiver reset",
63 };
64
65 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
66 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
67 defined(CONFIG_SPARC) || defined(__ia64__) || \
68 defined(__sh__) || defined(__mips__)
69 static int rx_copybreak = 1518;
70 #else
71 static int rx_copybreak = 100;
72 #endif
73
74 /*
75 Set the bus performance register.
76 Typical: Set 16 longword cache alignment, no burst limit.
77 Cache alignment (bits 15:14)          Burst length (bits 13:8)
78 0000  no alignment                    0000  unlimited       0800  8 longwords
79 4000  8 longwords                     0100  1 longword      1000  16 longwords
80 8000  16 longwords                    0200  2 longwords     2000  32 longwords
81 C000  32 longwords                    0400  4 longwords
82 Warning: many older 486 systems are broken and require setting 0x00A04800
83 8 longword cache alignment, 8 longword burst.
84 ToDo: Non-Intel setting could be better.
85 */
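/* A minimal sketch (not used by the driver) of how the per-architecture
   defaults below are put together: the upper bits select the PCI feature
   bits (the MRM/MRL/MWI bits that tulip_mwi_config() also manipulates),
   and the low half packs the two fields from the table above via the
   CALShift (14) and BurstLenShift (8) constants this file already uses. */
#if 0
static inline int tulip_make_csr0(int feature_bits, int cal_code, int burst)
{
	/* cal_code: 0 = no alignment, 1 = 8, 2 = 16, 3 = 32 longwords;
	   burst:    0 = unlimited, else 1/2/4/8/16/32 longwords.
	   E.g. tulip_make_csr0(0, 2, 0) == 0x8000 -- 16-longword alignment
	   with no burst limit, the value OR-ed into the x86 default below. */
	return feature_bits | (cal_code << CALShift) | (burst << BurstLenShift);
}
#endif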
86
87 #if defined(__alpha__) || defined(__ia64__)
88 static int csr0 = 0x01A00000 | 0xE000;
89 #elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
90 static int csr0 = 0x01A00000 | 0x8000;
91 #elif defined(CONFIG_SPARC) || defined(__hppa__)
92 /* The UltraSparc PCI controllers will disconnect at every 64-byte
93 * crossing anyways so it makes no sense to tell Tulip to burst
94 * any more than that.
95 */
96 static int csr0 = 0x01A00000 | 0x9000;
97 #elif defined(__arm__) || defined(__sh__)
98 static int csr0 = 0x01A00000 | 0x4800;
99 #elif defined(__mips__)
100 static int csr0 = 0x00200000 | 0x4000;
101 #else
102 #warning Processor architecture undefined!
103 static int csr0 = 0x00A00000 | 0x4800;
104 #endif
105
106 /* Operational parameters that usually are not changed. */
107 /* Time in jiffies before concluding the transmitter is hung. */
108 #define TX_TIMEOUT (4*HZ)
109
110
111 MODULE_AUTHOR("The Linux Kernel Team");
112 MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
113 MODULE_LICENSE("GPL");
114 MODULE_VERSION(DRV_VERSION);
115 module_param(tulip_debug, int, 0);
116 module_param(max_interrupt_work, int, 0);
117 module_param(rx_copybreak, int, 0);
118 module_param(csr0, int, 0);
119 module_param_array(options, int, NULL, 0);
120 module_param_array(full_duplex, int, NULL, 0);
121
122 #define PFX DRV_NAME ": "
123
124 #ifdef TULIP_DEBUG
125 int tulip_debug = TULIP_DEBUG;
126 #else
127 int tulip_debug = 1;
128 #endif
129
130 static void tulip_timer(unsigned long data)
131 {
132 struct net_device *dev = (struct net_device *)data;
133 struct tulip_private *tp = netdev_priv(dev);
134
135 if (netif_running(dev))
136 schedule_work(&tp->media_work);
137 }
138
139 /*
140 * This table is used during operation for capabilities and the media timer.
141 *
142 * It is indexed via the values in 'enum chips'
143 */
144
145 struct tulip_chip_table tulip_tbl[] = {
146 { }, /* placeholder for array, slot unused currently */
147 { }, /* placeholder for array, slot unused currently */
148
149 /* DC21140 */
150 { "Digital DS21140 Tulip", 128, 0x0001ebef,
151 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
152 tulip_media_task },
153
154 /* DC21142, DC21143 */
155 { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
156 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
157 | HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },
158
159 /* LC82C168 */
160 { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
161 HAS_MII | HAS_PNICNWAY, pnic_timer, },
162
163 /* MX98713 */
164 { "Macronix 98713 PMAC", 128, 0x0001ebef,
165 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
166
167 /* MX98715 */
168 { "Macronix 98715 PMAC", 256, 0x0001ebef,
169 HAS_MEDIA_TABLE, mxic_timer, },
170
171 /* MX98725 */
172 { "Macronix 98725 PMAC", 256, 0x0001ebef,
173 HAS_MEDIA_TABLE, mxic_timer, },
174
175 /* AX88140 */
176 { "ASIX AX88140", 128, 0x0001fbff,
177 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
178 | IS_ASIX, tulip_timer, tulip_media_task },
179
180 /* PNIC2 */
181 { "Lite-On PNIC-II", 256, 0x0801fbff,
182 HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },
183
184 /* COMET */
185 { "ADMtek Comet", 256, 0x0001abef,
186 HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },
187
188 /* COMPEX9881 */
189 { "Compex 9881 PMAC", 128, 0x0001ebef,
190 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
191
192 /* I21145 */
193 { "Intel DS21145 Tulip", 128, 0x0801fbff,
194 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
195 | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
196
197 /* DM910X */
198 #ifdef CONFIG_TULIP_DM910X
199 { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
200 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
201 tulip_timer, tulip_media_task },
202 #else
203 { NULL },
204 #endif
205
206 /* RS7112 */
207 { "Conexant LANfinity", 256, 0x0001ebef,
208 HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },
209
210 };
211
212
213 static DEFINE_PCI_DEVICE_TABLE(tulip_pci_tbl) = {
214 { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
215 { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
216 { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
217 { 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
218 { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
219 /* { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
220 { 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
221 { 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
222 { 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
223 { 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
224 { 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
225 { 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
226 { 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
227 { 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
228 { 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
229 { 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
230 { 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
231 { 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
232 { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
233 { 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
234 #ifdef CONFIG_TULIP_DM910X
235 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
236 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
237 #endif
238 { 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
239 { 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
240 { 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
241 { 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
242 { 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
243 { 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
244 { 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
245 { 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
246 { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
247 { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
248 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
249 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
250 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
251 { 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
252 { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
253 { } /* terminate list */
254 };
255 MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
256
257
258 /* A full-duplex map for media types. */
259 const char tulip_media_cap[32] =
260 {0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, };
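/* Each entry above is a bitmask of the MediaIs* capability flags used in
   this file: 1 = full duplex (MediaIsFD), 2 = always full duplex
   (MediaAlwaysFD), 4 = MII (MediaIsMII), 16 = 100 Mbit (MediaIs100), and
   8 marks the 100baseFx entries.  For example entry 5 ("100baseTx-FDX")
   is 19 = 16 + 2 + 1. */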
261
262 static void tulip_tx_timeout(struct net_device *dev);
263 static void tulip_init_ring(struct net_device *dev);
264 static void tulip_free_ring(struct net_device *dev);
265 static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
266 struct net_device *dev);
267 static int tulip_open(struct net_device *dev);
268 static int tulip_close(struct net_device *dev);
269 static void tulip_up(struct net_device *dev);
270 static void tulip_down(struct net_device *dev);
271 static struct net_device_stats *tulip_get_stats(struct net_device *dev);
272 static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
273 static void set_rx_mode(struct net_device *dev);
274 #ifdef CONFIG_NET_POLL_CONTROLLER
275 static void poll_tulip(struct net_device *dev);
276 #endif
277
278 static void tulip_set_power_state (struct tulip_private *tp,
279 int sleep, int snooze)
280 {
281 if (tp->flags & HAS_ACPI) {
282 u32 tmp, newtmp;
283 pci_read_config_dword (tp->pdev, CFDD, &tmp);
284 newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
285 if (sleep)
286 newtmp |= CFDD_Sleep;
287 else if (snooze)
288 newtmp |= CFDD_Snooze;
289 if (tmp != newtmp)
290 pci_write_config_dword (tp->pdev, CFDD, newtmp);
291 }
292
293 }
294
295
296 static void tulip_up(struct net_device *dev)
297 {
298 struct tulip_private *tp = netdev_priv(dev);
299 void __iomem *ioaddr = tp->base_addr;
300 int next_tick = 3*HZ;
301 u32 reg;
302 int i;
303
304 #ifdef CONFIG_TULIP_NAPI
305 napi_enable(&tp->napi);
306 #endif
307
308 /* Wake the chip from sleep/snooze mode. */
309 tulip_set_power_state (tp, 0, 0);
310
311 /* On some chip revs we must set the MII/SYM port before the reset!? */
312 if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
313 iowrite32(0x00040000, ioaddr + CSR6);
314
315 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
316 iowrite32(0x00000001, ioaddr + CSR0);
317 pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg); /* flush write */
318 udelay(100);
319
320 /* Deassert reset.
321 Wait the specified 50 PCI cycles after a reset by initializing
322 Tx and Rx queues and the address filter list. */
323 iowrite32(tp->csr0, ioaddr + CSR0);
324 pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg); /* flush write */
325 udelay(100);
326
327 if (tulip_debug > 1)
328 printk(KERN_DEBUG "%s: tulip_up(), irq==%d\n",
329 dev->name, dev->irq);
330
331 iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
332 iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
333 tp->cur_rx = tp->cur_tx = 0;
334 tp->dirty_rx = tp->dirty_tx = 0;
335
336 if (tp->flags & MC_HASH_ONLY) {
337 u32 addr_low = get_unaligned_le32(dev->dev_addr);
338 u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
339 if (tp->chip_id == AX88140) {
340 iowrite32(0, ioaddr + CSR13);
341 iowrite32(addr_low, ioaddr + CSR14);
342 iowrite32(1, ioaddr + CSR13);
343 iowrite32(addr_high, ioaddr + CSR14);
344 } else if (tp->flags & COMET_MAC_ADDR) {
345 iowrite32(addr_low, ioaddr + 0xA4);
346 iowrite32(addr_high, ioaddr + 0xA8);
347 iowrite32(0, ioaddr + 0xAC);
348 iowrite32(0, ioaddr + 0xB0);
349 }
350 } else {
351 /* This is set_rx_mode(), but without starting the transmitter. */
352 u16 *eaddrs = (u16 *)dev->dev_addr;
353 u16 *setup_frm = &tp->setup_frame[15*6];
354 dma_addr_t mapping;
355
356 /* 21140 bug: you must add the broadcast address. */
357 memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
358 /* Fill the final entry of the table with our physical address. */
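/* Each 16-bit word is written twice because only the low-order half of
   each 32-bit setup-frame word is used by the chip; doubling the value
   keeps the frame layout endian-independent (see the note in
   set_rx_mode() below). */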
359 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
360 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
361 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
362
363 mapping = pci_map_single(tp->pdev, tp->setup_frame,
364 sizeof(tp->setup_frame),
365 PCI_DMA_TODEVICE);
366 tp->tx_buffers[tp->cur_tx].skb = NULL;
367 tp->tx_buffers[tp->cur_tx].mapping = mapping;
368
369 /* Put the setup frame on the Tx list. */
370 tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
371 tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
372 tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
373
374 tp->cur_tx++;
375 }
376
377 tp->saved_if_port = dev->if_port;
378 if (dev->if_port == 0)
379 dev->if_port = tp->default_port;
380
381 /* Allow selecting a default media. */
382 i = 0;
383 if (tp->mtable == NULL)
384 goto media_picked;
385 if (dev->if_port) {
386 int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
387 (dev->if_port == 12 ? 0 : dev->if_port);
388 for (i = 0; i < tp->mtable->leafcount; i++)
389 if (tp->mtable->mleaf[i].media == looking_for) {
390 dev_info(&dev->dev,
391 "Using user-specified media %s\n",
392 medianame[dev->if_port]);
393 goto media_picked;
394 }
395 }
396 if ((tp->mtable->defaultmedia & 0x0800) == 0) {
397 int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
398 for (i = 0; i < tp->mtable->leafcount; i++)
399 if (tp->mtable->mleaf[i].media == looking_for) {
400 dev_info(&dev->dev,
401 "Using EEPROM-set media %s\n",
402 medianame[looking_for]);
403 goto media_picked;
404 }
405 }
406 /* Start sensing first non-full-duplex media. */
407 for (i = tp->mtable->leafcount - 1;
408 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
409 ;
410 media_picked:
411
412 tp->csr6 = 0;
413 tp->cur_index = i;
414 tp->nwayset = 0;
415
416 if (dev->if_port) {
417 if (tp->chip_id == DC21143 &&
418 (tulip_media_cap[dev->if_port] & MediaIsMII)) {
419 /* We must reset the media CSRs when we force-select MII mode. */
420 iowrite32(0x0000, ioaddr + CSR13);
421 iowrite32(0x0000, ioaddr + CSR14);
422 iowrite32(0x0008, ioaddr + CSR15);
423 }
424 tulip_select_media(dev, 1);
425 } else if (tp->chip_id == DC21142) {
426 if (tp->mii_cnt) {
427 tulip_select_media(dev, 1);
428 if (tulip_debug > 1)
429 dev_info(&dev->dev,
430 "Using MII transceiver %d, status %04x\n",
431 tp->phys[0],
432 tulip_mdio_read(dev, tp->phys[0], 1));
433 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
434 tp->csr6 = csr6_mask_hdcap;
435 dev->if_port = 11;
436 iowrite32(0x0000, ioaddr + CSR13);
437 iowrite32(0x0000, ioaddr + CSR14);
438 } else
439 t21142_start_nway(dev);
440 } else if (tp->chip_id == PNIC2) {
441 /* for initial startup advertise 10/100 Full and Half */
442 tp->sym_advertise = 0x01E0;
443 /* enable autonegotiate end interrupt */
444 iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
445 iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
446 pnic2_start_nway(dev);
447 } else if (tp->chip_id == LC82C168 && ! tp->medialock) {
448 if (tp->mii_cnt) {
449 dev->if_port = 11;
450 tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
451 iowrite32(0x0001, ioaddr + CSR15);
452 } else if (ioread32(ioaddr + CSR5) & TPLnkPass)
453 pnic_do_nway(dev);
454 else {
455 /* Start with 10mbps to do autonegotiation. */
456 iowrite32(0x32, ioaddr + CSR12);
457 tp->csr6 = 0x00420000;
458 iowrite32(0x0001B078, ioaddr + 0xB8);
459 iowrite32(0x0201B078, ioaddr + 0xB8);
460 next_tick = 1*HZ;
461 }
462 } else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
463 ! tp->medialock) {
464 dev->if_port = 0;
465 tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
466 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
467 } else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
468 /* Provided by BOLO, Macronix - 12/10/1998. */
469 dev->if_port = 0;
470 tp->csr6 = 0x01a80200;
471 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
472 iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
473 } else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
474 /* Enable automatic Tx underrun recovery. */
475 iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
476 dev->if_port = tp->mii_cnt ? 11 : 0;
477 tp->csr6 = 0x00040000;
478 } else if (tp->chip_id == AX88140) {
479 tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
480 } else
481 tulip_select_media(dev, 1);
482
483 /* Start the chip's Tx to process setup frame. */
484 tulip_stop_rxtx(tp);
485 barrier();
486 udelay(5);
487 iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);
488
489 /* Enable interrupts by setting the interrupt mask. */
490 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
491 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
492 tulip_start_rxtx(tp);
493 iowrite32(0, ioaddr + CSR2); /* Rx poll demand */
494
495 if (tulip_debug > 2) {
496 printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
497 dev->name, ioread32(ioaddr + CSR0),
498 ioread32(ioaddr + CSR5),
499 ioread32(ioaddr + CSR6));
500 }
501
502 /* Set the timer to check for link beat and perhaps switch
503 to an alternate media type. */
504 tp->timer.expires = RUN_AT(next_tick);
505 add_timer(&tp->timer);
506 #ifdef CONFIG_TULIP_NAPI
507 init_timer(&tp->oom_timer);
508 tp->oom_timer.data = (unsigned long)dev;
509 tp->oom_timer.function = oom_timer;
510 #endif
511 }
512
513 static int
514 tulip_open(struct net_device *dev)
515 {
516 int retval;
517
518 tulip_init_ring (dev);
519
520 retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev);
521 if (retval)
522 goto free_ring;
523
524 tulip_up (dev);
525
526 netif_start_queue (dev);
527
528 return 0;
529
530 free_ring:
531 tulip_free_ring (dev);
532 return retval;
533 }
534
535
536 static void tulip_tx_timeout(struct net_device *dev)
537 {
538 struct tulip_private *tp = netdev_priv(dev);
539 void __iomem *ioaddr = tp->base_addr;
540 unsigned long flags;
541
542 spin_lock_irqsave (&tp->lock, flags);
543
544 if (tulip_media_cap[dev->if_port] & MediaIsMII) {
545 /* Do nothing -- the media monitor should handle this. */
546 if (tulip_debug > 1)
547 dev_warn(&dev->dev,
548 "Transmit timeout using MII device\n");
549 } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
550 tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
551 tp->chip_id == DM910X) {
552 dev_warn(&dev->dev,
553 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
554 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
555 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
556 ioread32(ioaddr + CSR15));
557 tp->timeout_recovery = 1;
558 schedule_work(&tp->media_work);
559 goto out_unlock;
560 } else if (tp->chip_id == PNIC2) {
561 dev_warn(&dev->dev,
562 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
563 (int)ioread32(ioaddr + CSR5),
564 (int)ioread32(ioaddr + CSR6),
565 (int)ioread32(ioaddr + CSR7),
566 (int)ioread32(ioaddr + CSR12));
567 } else {
568 dev_warn(&dev->dev,
569 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
570 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
571 dev->if_port = 0;
572 }
573
574 #if defined(way_too_many_messages)
575 if (tulip_debug > 3) {
576 int i;
577 for (i = 0; i < RX_RING_SIZE; i++) {
578 u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
579 int j;
580 printk(KERN_DEBUG
581 "%2d: %08x %08x %08x %08x %02x %02x %02x\n",
582 i,
583 (unsigned int)tp->rx_ring[i].status,
584 (unsigned int)tp->rx_ring[i].length,
585 (unsigned int)tp->rx_ring[i].buffer1,
586 (unsigned int)tp->rx_ring[i].buffer2,
587 buf[0], buf[1], buf[2]);
588 for (j = 0; buf[j] != 0xee && j < 1600; j++)
589 if (j < 100)
590 pr_cont(" %02x", buf[j]);
591 pr_cont(" j=%d\n", j);
592 }
593 printk(KERN_DEBUG " Rx ring %08x: ", (int)tp->rx_ring);
594 for (i = 0; i < RX_RING_SIZE; i++)
595 pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
596 printk(KERN_DEBUG " Tx ring %08x: ", (int)tp->tx_ring);
597 for (i = 0; i < TX_RING_SIZE; i++)
598 pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
599 pr_cont("\n");
600 }
601 #endif
602
603 tulip_tx_timeout_complete(tp, ioaddr);
604
605 out_unlock:
606 spin_unlock_irqrestore (&tp->lock, flags);
607 dev->trans_start = jiffies;
608 netif_wake_queue (dev);
609 }
610
611
612 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
613 static void tulip_init_ring(struct net_device *dev)
614 {
615 struct tulip_private *tp = netdev_priv(dev);
616 int i;
617
618 tp->susp_rx = 0;
619 tp->ttimer = 0;
620 tp->nir = 0;
621
622 for (i = 0; i < RX_RING_SIZE; i++) {
623 tp->rx_ring[i].status = 0x00000000;
624 tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
625 tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
626 tp->rx_buffers[i].skb = NULL;
627 tp->rx_buffers[i].mapping = 0;
628 }
629 /* Mark the last entry as wrapping the ring. */
630 tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
631 tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
632
633 for (i = 0; i < RX_RING_SIZE; i++) {
634 dma_addr_t mapping;
635
636 /* Note the receive buffer must be longword aligned.
637 dev_alloc_skb() provides 16 byte alignment. But do *not*
638 use skb_reserve() to align the IP header! */
639 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
640 tp->rx_buffers[i].skb = skb;
641 if (skb == NULL)
642 break;
643 mapping = pci_map_single(tp->pdev, skb->data,
644 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
645 tp->rx_buffers[i].mapping = mapping;
646 skb->dev = dev; /* Mark as being used by this device. */
647 tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */
648 tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
649 }
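/* If any allocation above failed, dirty_rx is left lagging cur_rx by the
   number of missing buffers (as an unsigned wrap-around) so that
   tulip_refill_rx() can populate those slots later; with a fully
   allocated ring it is simply 0. */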
650 tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
651
652 /* The Tx buffer descriptor is filled in as needed, but we
653 do need to clear the ownership bit. */
654 for (i = 0; i < TX_RING_SIZE; i++) {
655 tp->tx_buffers[i].skb = NULL;
656 tp->tx_buffers[i].mapping = 0;
657 tp->tx_ring[i].status = 0x00000000;
658 tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
659 }
660 tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
661 }
662
663 static netdev_tx_t
664 tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
665 {
666 struct tulip_private *tp = netdev_priv(dev);
667 int entry;
668 u32 flag;
669 dma_addr_t mapping;
670 unsigned long flags;
671
672 spin_lock_irqsave(&tp->lock, flags);
673
674 /* Calculate the next Tx descriptor entry. */
675 entry = tp->cur_tx % TX_RING_SIZE;
676
677 tp->tx_buffers[entry].skb = skb;
678 mapping = pci_map_single(tp->pdev, skb->data,
679 skb->len, PCI_DMA_TODEVICE);
680 tp->tx_buffers[entry].mapping = mapping;
681 tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
682
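/* Descriptor flag words used below: 0x60000000 marks a complete
   (first + last segment) buffer with no completion interrupt, while
   0xe0000000 additionally sets the interrupt-on-completion bit.  A Tx-done
   interrupt is only requested once the ring starts to fill, which keeps
   the interrupt rate down on the typical path. */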
683 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
684 flag = 0x60000000; /* No interrupt */
685 } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
686 flag = 0xe0000000; /* Tx-done intr. */
687 } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
688 flag = 0x60000000; /* No Tx-done intr. */
689 } else { /* Leave room for set_rx_mode() to fill entries. */
690 flag = 0xe0000000; /* Tx-done intr. */
691 netif_stop_queue(dev);
692 }
693 if (entry == TX_RING_SIZE-1)
694 flag = 0xe0000000 | DESC_RING_WRAP;
695
696 tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
697 /* if we were using Transmit Automatic Polling, we would need a
698 * wmb() here. */
699 tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
700 wmb();
701
702 tp->cur_tx++;
703
704 /* Trigger an immediate transmit demand. */
705 iowrite32(0, tp->base_addr + CSR1);
706
707 spin_unlock_irqrestore(&tp->lock, flags);
708
709 dev->trans_start = jiffies;
710
711 return NETDEV_TX_OK;
712 }
713
714 static void tulip_clean_tx_ring(struct tulip_private *tp)
715 {
716 unsigned int dirty_tx;
717
718 for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
719 dirty_tx++) {
720 int entry = dirty_tx % TX_RING_SIZE;
721 int status = le32_to_cpu(tp->tx_ring[entry].status);
722
723 if (status < 0) {
724 tp->stats.tx_errors++; /* It wasn't Txed */
725 tp->tx_ring[entry].status = 0;
726 }
727
728 /* Check for Tx filter setup frames. */
729 if (tp->tx_buffers[entry].skb == NULL) {
730 /* test because dummy frames not mapped */
731 if (tp->tx_buffers[entry].mapping)
732 pci_unmap_single(tp->pdev,
733 tp->tx_buffers[entry].mapping,
734 sizeof(tp->setup_frame),
735 PCI_DMA_TODEVICE);
736 continue;
737 }
738
739 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
740 tp->tx_buffers[entry].skb->len,
741 PCI_DMA_TODEVICE);
742
743 /* Free the original skb. */
744 dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
745 tp->tx_buffers[entry].skb = NULL;
746 tp->tx_buffers[entry].mapping = 0;
747 }
748 }
749
750 static void tulip_down (struct net_device *dev)
751 {
752 struct tulip_private *tp = netdev_priv(dev);
753 void __iomem *ioaddr = tp->base_addr;
754 unsigned long flags;
755
756 cancel_work_sync(&tp->media_work);
757
758 #ifdef CONFIG_TULIP_NAPI
759 napi_disable(&tp->napi);
760 #endif
761
762 del_timer_sync (&tp->timer);
763 #ifdef CONFIG_TULIP_NAPI
764 del_timer_sync (&tp->oom_timer);
765 #endif
766 spin_lock_irqsave (&tp->lock, flags);
767
768 /* Disable interrupts by clearing the interrupt mask. */
769 iowrite32 (0x00000000, ioaddr + CSR7);
770
771 /* Stop the Tx and Rx processes. */
772 tulip_stop_rxtx(tp);
773
774 /* prepare receive buffers */
775 tulip_refill_rx(dev);
776
777 /* release any unconsumed transmit buffers */
778 tulip_clean_tx_ring(tp);
779
780 if (ioread32 (ioaddr + CSR6) != 0xffffffff)
781 tp->stats.rx_missed_errors += ioread32 (ioaddr + CSR8) & 0xffff;
782
783 spin_unlock_irqrestore (&tp->lock, flags);
784
785 init_timer(&tp->timer);
786 tp->timer.data = (unsigned long)dev;
787 tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
788
789 dev->if_port = tp->saved_if_port;
790
791 /* Leave the driver in snooze, not sleep, mode. */
792 tulip_set_power_state (tp, 0, 1);
793 }
794
795 static void tulip_free_ring (struct net_device *dev)
796 {
797 struct tulip_private *tp = netdev_priv(dev);
798 int i;
799
800 /* Free all the skbuffs in the Rx queue. */
801 for (i = 0; i < RX_RING_SIZE; i++) {
802 struct sk_buff *skb = tp->rx_buffers[i].skb;
803 dma_addr_t mapping = tp->rx_buffers[i].mapping;
804
805 tp->rx_buffers[i].skb = NULL;
806 tp->rx_buffers[i].mapping = 0;
807
808 tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */
809 tp->rx_ring[i].length = 0;
810 /* An invalid address. */
811 tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
812 if (skb) {
813 pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
814 PCI_DMA_FROMDEVICE);
815 dev_kfree_skb (skb);
816 }
817 }
818
819 for (i = 0; i < TX_RING_SIZE; i++) {
820 struct sk_buff *skb = tp->tx_buffers[i].skb;
821
822 if (skb != NULL) {
823 pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
824 skb->len, PCI_DMA_TODEVICE);
825 dev_kfree_skb (skb);
826 }
827 tp->tx_buffers[i].skb = NULL;
828 tp->tx_buffers[i].mapping = 0;
829 }
830 }
831
832 static int tulip_close (struct net_device *dev)
833 {
834 struct tulip_private *tp = netdev_priv(dev);
835 void __iomem *ioaddr = tp->base_addr;
836
837 netif_stop_queue (dev);
838
839 tulip_down (dev);
840
841 if (tulip_debug > 1)
842 dev_printk(KERN_DEBUG, &dev->dev,
843 "Shutting down ethercard, status was %02x\n",
844 ioread32 (ioaddr + CSR5));
845
846 free_irq (dev->irq, dev);
847
848 tulip_free_ring (dev);
849
850 return 0;
851 }
852
853 static struct net_device_stats *tulip_get_stats(struct net_device *dev)
854 {
855 struct tulip_private *tp = netdev_priv(dev);
856 void __iomem *ioaddr = tp->base_addr;
857
858 if (netif_running(dev)) {
859 unsigned long flags;
860
861 spin_lock_irqsave (&tp->lock, flags);
862
863 tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
864
865 spin_unlock_irqrestore(&tp->lock, flags);
866 }
867
868 return &tp->stats;
869 }
870
871
872 static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
873 {
874 struct tulip_private *np = netdev_priv(dev);
875 strcpy(info->driver, DRV_NAME);
876 strcpy(info->version, DRV_VERSION);
877 strcpy(info->bus_info, pci_name(np->pdev));
878 }
879
880 static const struct ethtool_ops ops = {
881 .get_drvinfo = tulip_get_drvinfo
882 };
883
884 /* Provide ioctl() calls to examine the MII xcvr state. */
885 static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
886 {
887 struct tulip_private *tp = netdev_priv(dev);
888 void __iomem *ioaddr = tp->base_addr;
889 struct mii_ioctl_data *data = if_mii(rq);
890 const unsigned int phy_idx = 0;
891 int phy = tp->phys[phy_idx] & 0x1f;
892 unsigned int regnum = data->reg_num;
893
894 switch (cmd) {
895 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
896 if (tp->mii_cnt)
897 data->phy_id = phy;
898 else if (tp->flags & HAS_NWAY)
899 data->phy_id = 32;
900 else if (tp->chip_id == COMET)
901 data->phy_id = 1;
902 else
903 return -ENODEV;
904
905 case SIOCGMIIREG: /* Read MII PHY register. */
906 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
907 int csr12 = ioread32 (ioaddr + CSR12);
908 int csr14 = ioread32 (ioaddr + CSR14);
909 switch (regnum) {
910 case 0:
911 if (((csr14<<5) & 0x1000) ||
912 (dev->if_port == 5 && tp->nwayset))
913 data->val_out = 0x1000;
914 else
915 data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
916 | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
917 break;
918 case 1:
919 data->val_out =
920 0x1848 +
921 ((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
922 ((csr12&0x06) == 6 ? 0 : 4);
923 data->val_out |= 0x6048;
924 break;
925 case 4:
926 /* Advertised value, bogus 10baseTx-FD value from CSR6. */
927 data->val_out =
928 ((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
929 ((csr14 >> 1) & 0x20) + 1;
930 data->val_out |= ((csr14 >> 9) & 0x03C0);
931 break;
932 case 5: data->val_out = tp->lpar; break;
933 default: data->val_out = 0; break;
934 }
935 } else {
936 data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
937 }
938 return 0;
939
940 case SIOCSMIIREG: /* Write MII PHY register. */
941 if (regnum & ~0x1f)
942 return -EINVAL;
943 if (data->phy_id == phy) {
944 u16 value = data->val_in;
945 switch (regnum) {
946 case 0: /* Check for autonegotiation on or reset. */
947 tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
948 if (tp->full_duplex_lock)
949 tp->full_duplex = (value & 0x0100) ? 1 : 0;
950 break;
951 case 4:
952 tp->advertising[phy_idx] =
953 tp->mii_advertise = data->val_in;
954 break;
955 }
956 }
957 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
958 u16 value = data->val_in;
959 if (regnum == 0) {
960 if ((value & 0x1200) == 0x1200) {
961 if (tp->chip_id == PNIC2) {
962 pnic2_start_nway (dev);
963 } else {
964 t21142_start_nway (dev);
965 }
966 }
967 } else if (regnum == 4)
968 tp->sym_advertise = value;
969 } else {
970 tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
971 }
972 return 0;
973 default:
974 return -EOPNOTSUPP;
975 }
976
977 return -EOPNOTSUPP;
978 }
979
980
981 /* Set or clear the multicast filter for this adaptor.
982 Note that we only use exclusion around actually queueing the
983 new frame, not around filling tp->setup_frame. This is non-deterministic
984 when re-entered but still correct. */
985
986 #undef set_bit_le
987 #define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
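/* Example: set_bit_le(255, hash_table) sets bit 255 % 8 = 7 of byte
   255 / 8 = 31, i.e. the broadcast slot of the 512-bit (64-byte) hash
   table that build_setup_frame_hash() fills in below. */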
988
989 static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
990 {
991 struct tulip_private *tp = netdev_priv(dev);
992 u16 hash_table[32];
993 struct dev_mc_list *mclist;
994 int i;
995 u16 *eaddrs;
996
997 memset(hash_table, 0, sizeof(hash_table));
998 set_bit_le(255, hash_table); /* Broadcast entry */
999 /* This should work on big-endian machines as well. */
1000 for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
1001 i++, mclist = mclist->next) {
1002 int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
1003
1004 set_bit_le(index, hash_table);
1005
1006 }
1007 for (i = 0; i < 32; i++) {
1008 *setup_frm++ = hash_table[i];
1009 *setup_frm++ = hash_table[i];
1010 }
1011 setup_frm = &tp->setup_frame[13*6];
1012
1013 /* Fill the final entry with our physical address. */
1014 eaddrs = (u16 *)dev->dev_addr;
1015 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1016 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1017 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1018 }
1019
1020 static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
1021 {
1022 struct tulip_private *tp = netdev_priv(dev);
1023 struct dev_mc_list *mclist;
1024 int i;
1025 u16 *eaddrs;
1026
1027 /* We have <= 14 addresses so we can use the wonderful
1028 16 address perfect filtering of the Tulip. */
1029 for (i = 0, mclist = dev->mc_list; i < netdev_mc_count(dev);
1030 i++, mclist = mclist->next) {
1031 eaddrs = (u16 *)mclist->dmi_addr;
1032 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1033 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1034 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1035 }
1036 /* Fill the unused entries with the broadcast address. */
1037 memset(setup_frm, 0xff, (15-i)*12);
1038 setup_frm = &tp->setup_frame[15*6];
1039
1040 /* Fill the final entry with our physical address. */
1041 eaddrs = (u16 *)dev->dev_addr;
1042 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1043 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1044 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1045 }
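/* Setup-frame layout shared by the two builders above: 192 bytes, viewed
   as 16 perfect-filter entries of 12 bytes; each entry holds one Ethernet
   address as three 32-bit words with a 16-bit half of the address repeated
   in each word.  build_setup_frame_perfect() writes one entry per multicast
   address (at most 14), pads the rest with the broadcast address and puts
   the station address in the final entry, while build_setup_frame_hash()
   starts with the 32 x 16-bit hash table (each word written twice) and
   stores the station address at entry 13. */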
1046
1047
1048 static void set_rx_mode(struct net_device *dev)
1049 {
1050 struct tulip_private *tp = netdev_priv(dev);
1051 void __iomem *ioaddr = tp->base_addr;
1052 int csr6;
1053
1054 csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;
1055
1056 tp->csr6 &= ~0x00D5;
1057 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1058 tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
1059 csr6 |= AcceptAllMulticast | AcceptAllPhys;
1060 } else if ((netdev_mc_count(dev) > 1000) ||
1061 (dev->flags & IFF_ALLMULTI)) {
1062 /* Too many to filter well -- accept all multicasts. */
1063 tp->csr6 |= AcceptAllMulticast;
1064 csr6 |= AcceptAllMulticast;
1065 } else if (tp->flags & MC_HASH_ONLY) {
1066 /* Some work-alikes have only a 64-entry hash filter table. */
1067 /* Should verify correctness on big-endian/__powerpc__ */
1068 struct dev_mc_list *mclist;
1069 int i;
1070 if (netdev_mc_count(dev) > 64) {
1071 /* Arbitrary non-effective limit. */
1072 tp->csr6 |= AcceptAllMulticast;
1073 csr6 |= AcceptAllMulticast;
1074 } else {
1075 u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
1076 int filterbit;
1077 for (i = 0, mclist = dev->mc_list;
1078 mclist && i < netdev_mc_count(dev);
1079 i++, mclist = mclist->next) {
1080 if (tp->flags & COMET_MAC_ADDR)
1081 filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
1082 else
1083 filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1084 filterbit &= 0x3f;
1085 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1086 if (tulip_debug > 2)
1087 dev_info(&dev->dev,
1088 "Added filter for %pM %08x bit %d\n",
1089 mclist->dmi_addr,
1090 ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
1091 }
1092 if (mc_filter[0] == tp->mc_filter[0] &&
1093 mc_filter[1] == tp->mc_filter[1])
1094 ; /* No change. */
1095 else if (tp->flags & IS_ASIX) {
1096 iowrite32(2, ioaddr + CSR13);
1097 iowrite32(mc_filter[0], ioaddr + CSR14);
1098 iowrite32(3, ioaddr + CSR13);
1099 iowrite32(mc_filter[1], ioaddr + CSR14);
1100 } else if (tp->flags & COMET_MAC_ADDR) {
1101 iowrite32(mc_filter[0], ioaddr + 0xAC);
1102 iowrite32(mc_filter[1], ioaddr + 0xB0);
1103 }
1104 tp->mc_filter[0] = mc_filter[0];
1105 tp->mc_filter[1] = mc_filter[1];
1106 }
1107 } else {
1108 unsigned long flags;
1109 u32 tx_flags = 0x08000000 | 192;
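/* 0x08000000 marks the descriptor as a setup frame and 192 is the fixed
   setup-frame length; the hash case below ORs in 0x00400000 to select
   imperfect (hash) filtering instead of the 16-entry perfect filter. */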
1110
1111 /* Note that only the low-address shortword of setup_frame is valid!
1112 The values are doubled for big-endian architectures. */
1113 if (netdev_mc_count(dev) > 14) {
1114 /* Must use a multicast hash table. */
1115 build_setup_frame_hash(tp->setup_frame, dev);
1116 tx_flags = 0x08400000 | 192;
1117 } else {
1118 build_setup_frame_perfect(tp->setup_frame, dev);
1119 }
1120
1121 spin_lock_irqsave(&tp->lock, flags);
1122
1123 if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1124 /* The ring is full -- a setup frame queued earlier has not been consumed yet, so we need not add another. */
1125 } else {
1126 unsigned int entry;
1127 int dummy = -1;
1128
1129 /* Now add this frame to the Tx list. */
1130
1131 entry = tp->cur_tx++ % TX_RING_SIZE;
1132
1133 if (entry != 0) {
1134 /* Avoid a chip errata by prefixing a dummy entry. */
1135 tp->tx_buffers[entry].skb = NULL;
1136 tp->tx_buffers[entry].mapping = 0;
1137 tp->tx_ring[entry].length =
1138 (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
1139 tp->tx_ring[entry].buffer1 = 0;
1140 /* Must set DescOwned later to avoid race with chip */
1141 dummy = entry;
1142 entry = tp->cur_tx++ % TX_RING_SIZE;
1143
1144 }
1145
1146 tp->tx_buffers[entry].skb = NULL;
1147 tp->tx_buffers[entry].mapping =
1148 pci_map_single(tp->pdev, tp->setup_frame,
1149 sizeof(tp->setup_frame),
1150 PCI_DMA_TODEVICE);
1151 /* Put the setup frame on the Tx list. */
1152 if (entry == TX_RING_SIZE-1)
1153 tx_flags |= DESC_RING_WRAP; /* Wrap ring. */
1154 tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
1155 tp->tx_ring[entry].buffer1 =
1156 cpu_to_le32(tp->tx_buffers[entry].mapping);
1157 tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
1158 if (dummy >= 0)
1159 tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
1160 if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
1161 netif_stop_queue(dev);
1162
1163 /* Trigger an immediate transmit demand. */
1164 iowrite32(0, ioaddr + CSR1);
1165 }
1166
1167 spin_unlock_irqrestore(&tp->lock, flags);
1168 }
1169
1170 iowrite32(csr6, ioaddr + CSR6);
1171 }
1172
1173 #ifdef CONFIG_TULIP_MWI
1174 static void __devinit tulip_mwi_config (struct pci_dev *pdev,
1175 struct net_device *dev)
1176 {
1177 struct tulip_private *tp = netdev_priv(dev);
1178 u8 cache;
1179 u16 pci_command;
1180 u32 csr0;
1181
1182 if (tulip_debug > 3)
1183 printk(KERN_DEBUG "%s: tulip_mwi_config()\n", pci_name(pdev));
1184
1185 tp->csr0 = csr0 = 0;
1186
1187 /* if we have any cache line size at all, we can do MRM and MWI */
1188 csr0 |= MRM | MWI;
1189
1190 /* Enable MWI in the standard PCI command bit.
1191 * Check for the case where MWI is desired but not available
1192 */
1193 pci_try_set_mwi(pdev);
1194
1195 /* read result from hardware (in case bit refused to enable) */
1196 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
1197 if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
1198 csr0 &= ~MWI;
1199
1200 /* if cache line size hardwired to zero, no MWI */
1201 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
1202 if ((csr0 & MWI) && (cache == 0)) {
1203 csr0 &= ~MWI;
1204 pci_clear_mwi(pdev);
1205 }
1206
1207 /* assign per-cacheline-size cache alignment and
1208 * burst length values
1209 */
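/* PCI_CACHE_LINE_SIZE is reported in 32-bit words, so the cases below
   correspond to 32-, 64- and 128-byte cache lines, each mapped to the
   matching cache-alignment code plus a 16- or 32-longword burst limit. */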
1210 switch (cache) {
1211 case 8:
1212 csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
1213 break;
1214 case 16:
1215 csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
1216 break;
1217 case 32:
1218 csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
1219 break;
1220 default:
1221 cache = 0;
1222 break;
1223 }
1224
1225 /* if we have a good cache line size, we by now have a good
1226 * csr0, so save it and exit
1227 */
1228 if (cache)
1229 goto out;
1230
1231 /* we don't have a good csr0 or cache line size, disable MWI */
1232 if (csr0 & MWI) {
1233 pci_clear_mwi(pdev);
1234 csr0 &= ~MWI;
1235 }
1236
1237 /* sane defaults for burst length and cache alignment
1238 * originally from de4x5 driver
1239 */
1240 csr0 |= (8 << BurstLenShift) | (1 << CALShift);
1241
1242 out:
1243 tp->csr0 = csr0;
1244 if (tulip_debug > 2)
1245 printk(KERN_DEBUG "%s: MWI config cacheline=%d, csr0=%08x\n",
1246 pci_name(pdev), cache, csr0);
1247 }
1248 #endif
1249
1250 /*
1251 * Chips that have the MRM/reserved-bit quirk and the burst quirk. That
1252 * is the DM910X and the on-chip ULi devices.
1253 */
1254
1255 static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1256 {
1257 if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1258 return 1;
1259 return 0;
1260 }
1261
1262 static const struct net_device_ops tulip_netdev_ops = {
1263 .ndo_open = tulip_open,
1264 .ndo_start_xmit = tulip_start_xmit,
1265 .ndo_tx_timeout = tulip_tx_timeout,
1266 .ndo_stop = tulip_close,
1267 .ndo_get_stats = tulip_get_stats,
1268 .ndo_do_ioctl = private_ioctl,
1269 .ndo_set_multicast_list = set_rx_mode,
1270 .ndo_change_mtu = eth_change_mtu,
1271 .ndo_set_mac_address = eth_mac_addr,
1272 .ndo_validate_addr = eth_validate_addr,
1273 #ifdef CONFIG_NET_POLL_CONTROLLER
1274 .ndo_poll_controller = poll_tulip,
1275 #endif
1276 };
1277
1278 static int __devinit tulip_init_one (struct pci_dev *pdev,
1279 const struct pci_device_id *ent)
1280 {
1281 struct tulip_private *tp;
1282 /* See note below on the multiport cards. */
1283 static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
1284 static struct pci_device_id early_486_chipsets[] = {
1285 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
1286 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
1287 { },
1288 };
1289 static int last_irq;
1290 static int multiport_cnt; /* For four-port boards w/one EEPROM */
1291 int i, irq;
1292 unsigned short sum;
1293 unsigned char *ee_data;
1294 struct net_device *dev;
1295 void __iomem *ioaddr;
1296 static int board_idx = -1;
1297 int chip_idx = ent->driver_data;
1298 const char *chip_name = tulip_tbl[chip_idx].chip_name;
1299 unsigned int eeprom_missing = 0;
1300 unsigned int force_csr0 = 0;
1301
1302 #ifndef MODULE
1303 if (tulip_debug > 0)
1304 printk_once(KERN_INFO "%s", version);
1305 #endif
1306
1307 board_idx++;
1308
1309 /*
1310 * LanMedia cards wire a tulip chip to a WAN interface; they need a very
1311 * different driver (the lmc driver).
1312 */
1313
1314 if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1315 pr_err(PFX "skipping LMC card\n");
1316 return -ENODEV;
1317 }
1318
1319 /*
1320 * DM910x chips should be handled by the dmfe driver, except
1321 * on-board chips on SPARC systems. Also, early DM9100s need
1322 * software CRC which only the dmfe driver supports.
1323 */
1324
1325 #ifdef CONFIG_TULIP_DM910X
1326 if (chip_idx == DM910X) {
1327 struct device_node *dp;
1328
1329 if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
1330 pdev->revision < 0x30) {
1331 pr_info(PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
1332 return -ENODEV;
1333 }
1334
1335 dp = pci_device_to_OF_node(pdev);
1336 if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
1337 pr_info(PFX "skipping DM910x expansion card (use dmfe)\n");
1338 return -ENODEV;
1339 }
1340 }
1341 #endif
1342
1343 /*
1344 * Looks for early PCI chipsets where people report hangs
1345 * without the workarounds being on.
1346 */
1347
1348 /* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
1349 aligned. Aries might need this too. The Saturn errata are not
1350 pretty reading but thankfully it's an old 486 chipset.
1351
1352 2. The dreaded SiS496 486 chipset. Same workaround as Intel
1353 Saturn.
1354 */
1355
1356 if (pci_dev_present(early_486_chipsets)) {
1357 csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
1358 force_csr0 = 1;
1359 }
1360
1361 /* bugfix: the ASIX must have a burst limit or horrible things happen. */
1362 if (chip_idx == AX88140) {
1363 if ((csr0 & 0x3f00) == 0)
1364 csr0 |= 0x2000;
1365 }
1366
1367 /* PNIC doesn't have MWI/MRL/MRM... */
1368 if (chip_idx == LC82C168)
1369 csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
1370
1371 /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
1372 if (tulip_uli_dm_quirk(pdev)) {
1373 csr0 &= ~0x01f100ff;
1374 #if defined(CONFIG_SPARC)
1375 csr0 = (csr0 & ~0xff00) | 0xe000;
1376 #endif
1377 }
1378 /*
1379 * And back to business
1380 */
1381
1382 i = pci_enable_device(pdev);
1383 if (i) {
1384 pr_err(PFX "Cannot enable tulip board #%d, aborting\n",
1385 board_idx);
1386 return i;
1387 }
1388
1389 irq = pdev->irq;
1390
1391 /* alloc_etherdev ensures aligned and zeroed private structures */
1392 dev = alloc_etherdev (sizeof (*tp));
1393 if (!dev) {
1394 pr_err(PFX "ether device alloc failed, aborting\n");
1395 return -ENOMEM;
1396 }
1397
1398 SET_NETDEV_DEV(dev, &pdev->dev);
1399 if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1400 pr_err(PFX "%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
1401 pci_name(pdev),
1402 (unsigned long long)pci_resource_len (pdev, 0),
1403 (unsigned long long)pci_resource_start (pdev, 0));
1404 goto err_out_free_netdev;
1405 }
1406
1407 /* grab all resources from both PIO and MMIO regions, as we
1408 * don't want anyone else messing around with our hardware */
1409 if (pci_request_regions (pdev, DRV_NAME))
1410 goto err_out_free_netdev;
1411
1412 ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
1413
1414 if (!ioaddr)
1415 goto err_out_free_res;
1416
1417 /*
1418 * initialize private data structure 'tp'
1419 * it is zeroed and aligned in alloc_etherdev
1420 */
1421 tp = netdev_priv(dev);
1422 tp->dev = dev;
1423
1424 tp->rx_ring = pci_alloc_consistent(pdev,
1425 sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
1426 sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
1427 &tp->rx_ring_dma);
1428 if (!tp->rx_ring)
1429 goto err_out_mtable;
1430 tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
1431 tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
1432
1433 tp->chip_id = chip_idx;
1434 tp->flags = tulip_tbl[chip_idx].flags;
1435 tp->pdev = pdev;
1436 tp->base_addr = ioaddr;
1437 tp->revision = pdev->revision;
1438 tp->csr0 = csr0;
1439 spin_lock_init(&tp->lock);
1440 spin_lock_init(&tp->mii_lock);
1441 init_timer(&tp->timer);
1442 tp->timer.data = (unsigned long)dev;
1443 tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
1444
1445 INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
1446
1447 dev->base_addr = (unsigned long)ioaddr;
1448
1449 #ifdef CONFIG_TULIP_MWI
1450 if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1451 tulip_mwi_config (pdev, dev);
1452 #endif
1453
1454 /* Stop the chip's Tx and Rx processes. */
1455 tulip_stop_rxtx(tp);
1456
1457 pci_set_master(pdev);
1458
1459 #ifdef CONFIG_GSC
1460 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
1461 switch (pdev->subsystem_device) {
1462 default:
1463 break;
1464 case 0x1061:
1465 case 0x1062:
1466 case 0x1063:
1467 case 0x1098:
1468 case 0x1099:
1469 case 0x10EE:
1470 tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
1471 chip_name = "GSC DS21140 Tulip";
1472 }
1473 }
1474 #endif
1475
1476 /* Clear the missed-packet counter. */
1477 ioread32(ioaddr + CSR8);
1478
1479 /* The station address ROM is read byte serially. The register must
1480 be polled, waiting for the value to be read bit serially from the
1481 EEPROM.
1482 */
1483 ee_data = tp->eeprom;
1484 memset(ee_data, 0, sizeof(tp->eeprom));
1485 sum = 0;
1486 if (chip_idx == LC82C168) {
1487 for (i = 0; i < 3; i++) {
1488 int value, boguscnt = 100000;
1489 iowrite32(0x600 | i, ioaddr + 0x98);
1490 do {
1491 value = ioread32(ioaddr + CSR9);
1492 } while (value < 0 && --boguscnt > 0);
1493 put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
1494 sum += value & 0xffff;
1495 }
1496 } else if (chip_idx == COMET) {
1497 /* No need to read the EEPROM. */
1498 put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
1499 put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
1500 for (i = 0; i < 6; i ++)
1501 sum += dev->dev_addr[i];
1502 } else {
1503 /* A serial EEPROM interface, we read now and sort it out later. */
1504 int sa_offset = 0;
1505 int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
1506 int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);
1507
1508 if (ee_max_addr > sizeof(tp->eeprom))
1509 ee_max_addr = sizeof(tp->eeprom);
1510
1511 for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
1512 u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
1513 ee_data[i] = data & 0xff;
1514 ee_data[i + 1] = data >> 8;
1515 }
1516
1517 /* DEC now has a specification (see Notes) but early board makers
1518 just put the address in the first EEPROM locations. */
1519 /* This does memcmp(ee_data, ee_data+16, 8) */
1520 for (i = 0; i < 8; i ++)
1521 if (ee_data[i] != ee_data[16+i])
1522 sa_offset = 20;
1523 if (chip_idx == CONEXANT) {
1524 /* Check that the tuple type and length is correct. */
1525 if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6)
1526 sa_offset = 0x19A;
1527 } else if (ee_data[0] == 0xff && ee_data[1] == 0xff &&
1528 ee_data[2] == 0) {
1529 sa_offset = 2; /* Grrr, damn Matrox boards. */
1530 multiport_cnt = 4;
1531 }
1532 #ifdef CONFIG_MIPS_COBALT
1533 if ((pdev->bus->number == 0) &&
1534 ((PCI_SLOT(pdev->devfn) == 7) ||
1535 (PCI_SLOT(pdev->devfn) == 12))) {
1536 /* Cobalt MAC address in first EEPROM locations. */
1537 sa_offset = 0;
1538 /* Ensure our media table fixup gets applied */
1539 memcpy(ee_data + 16, ee_data, 8);
1540 }
1541 #endif
1542 #ifdef CONFIG_GSC
1543 /* Check to see if we have a broken srom */
1544 if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
1545 /* pci_vendor_id and subsystem_id are swapped */
1546 ee_data[0] = ee_data[2];
1547 ee_data[1] = ee_data[3];
1548 ee_data[2] = 0x61;
1549 ee_data[3] = 0x10;
1550
1551 /* HSC-PCI boards need to be byte-swapped and shifted
1552 * up 1 word. This shift needs to happen at the end
1553 * of the MAC first because of the 2 byte overlap.
1554 */
1555 for (i = 4; i >= 0; i -= 2) {
1556 ee_data[17 + i + 3] = ee_data[17 + i];
1557 ee_data[16 + i + 5] = ee_data[16 + i];
1558 }
1559 }
1560 #endif
1561
1562 for (i = 0; i < 6; i ++) {
1563 dev->dev_addr[i] = ee_data[i + sa_offset];
1564 sum += ee_data[i + sa_offset];
1565 }
1566 }
1567 /* Lite-On boards have the address byte-swapped. */
1568 if ((dev->dev_addr[0] == 0xA0 ||
1569 dev->dev_addr[0] == 0xC0 ||
1570 dev->dev_addr[0] == 0x02) &&
1571 dev->dev_addr[1] == 0x00)
1572 for (i = 0; i < 6; i+=2) {
1573 char tmp = dev->dev_addr[i];
1574 dev->dev_addr[i] = dev->dev_addr[i+1];
1575 dev->dev_addr[i+1] = tmp;
1576 }
1577 /* On the Zynx 315 Etherarray and other multiport boards only the
1578 first Tulip has an EEPROM.
1579 On Sparc systems the mac address is held in the OBP property
1580 "local-mac-address".
1581 The addresses of the subsequent ports are derived from the first.
1582 Many PCI BIOSes also incorrectly report the IRQ line, so we correct
1583 that here as well. */
1584 if (sum == 0 || sum == 6*0xff) {
1585 #if defined(CONFIG_SPARC)
1586 struct device_node *dp = pci_device_to_OF_node(pdev);
1587 const unsigned char *addr;
1588 int len;
1589 #endif
1590 eeprom_missing = 1;
1591 for (i = 0; i < 5; i++)
1592 dev->dev_addr[i] = last_phys_addr[i];
1593 dev->dev_addr[i] = last_phys_addr[i] + 1;
1594 #if defined(CONFIG_SPARC)
1595 addr = of_get_property(dp, "local-mac-address", &len);
1596 if (addr && len == 6)
1597 memcpy(dev->dev_addr, addr, 6);
1598 #endif
1599 #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
1600 if (last_irq)
1601 irq = last_irq;
1602 #endif
1603 }
1604
1605 for (i = 0; i < 6; i++)
1606 last_phys_addr[i] = dev->dev_addr[i];
1607 last_irq = irq;
1608 dev->irq = irq;
1609
1610 /* The lower four bits are the media type. */
1611 if (board_idx >= 0 && board_idx < MAX_UNITS) {
1612 if (options[board_idx] & MEDIA_MASK)
1613 tp->default_port = options[board_idx] & MEDIA_MASK;
1614 if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
1615 tp->full_duplex = 1;
1616 if (mtu[board_idx] > 0)
1617 dev->mtu = mtu[board_idx];
1618 }
1619 if (dev->mem_start & MEDIA_MASK)
1620 tp->default_port = dev->mem_start & MEDIA_MASK;
1621 if (tp->default_port) {
1622 pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
1623 board_idx, medianame[tp->default_port & MEDIA_MASK]);
1624 tp->medialock = 1;
1625 if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
1626 tp->full_duplex = 1;
1627 }
1628 if (tp->full_duplex)
1629 tp->full_duplex_lock = 1;
1630
1631 if (tulip_media_cap[tp->default_port] & MediaIsMII) {
1632 u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
1633 tp->mii_advertise = media2advert[tp->default_port - 9];
1634 tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
1635 }
1636
1637 if (tp->flags & HAS_MEDIA_TABLE) {
1638 sprintf(dev->name, DRV_NAME "%d", board_idx); /* hack */
1639 tulip_parse_eeprom(dev);
1640 strcpy(dev->name, "eth%d"); /* un-hack */
1641 }
1642
1643 if ((tp->flags & ALWAYS_CHECK_MII) ||
1644 (tp->mtable && tp->mtable->has_mii) ||
1645 ( ! tp->mtable && (tp->flags & HAS_MII))) {
1646 if (tp->mtable && tp->mtable->has_mii) {
1647 for (i = 0; i < tp->mtable->leafcount; i++)
1648 if (tp->mtable->mleaf[i].media == 11) {
1649 tp->cur_index = i;
1650 tp->saved_if_port = dev->if_port;
1651 tulip_select_media(dev, 2);
1652 dev->if_port = tp->saved_if_port;
1653 break;
1654 }
1655 }
1656
1657 /* Find the connected MII xcvrs.
1658 Doing this in open() would allow detecting external xcvrs
1659 later, but takes much time. */
1660 tulip_find_mii (dev, board_idx);
1661 }
1662
1663 /* The Tulip-specific entries in the device structure. */
1664 dev->netdev_ops = &tulip_netdev_ops;
1665 dev->watchdog_timeo = TX_TIMEOUT;
1666 #ifdef CONFIG_TULIP_NAPI
1667 netif_napi_add(dev, &tp->napi, tulip_poll, 16);
1668 #endif
1669 SET_ETHTOOL_OPS(dev, &ops);
1670
1671 if (register_netdev(dev))
1672 goto err_out_free_ring;
1673
1674 pci_set_drvdata(pdev, dev);
1675
1676 dev_info(&dev->dev,
1677 #ifdef CONFIG_TULIP_MMIO
1678 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
1679 #else
1680 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
1681 #endif
1682 chip_name, pdev->revision,
1683 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
1684 eeprom_missing ? " EEPROM not present," : "",
1685 dev->dev_addr, irq);
1686
1687 if (tp->chip_id == PNIC2)
1688 tp->link_change = pnic2_lnk_change;
1689 else if (tp->flags & HAS_NWAY)
1690 tp->link_change = t21142_lnk_change;
1691 else if (tp->flags & HAS_PNICNWAY)
1692 tp->link_change = pnic_lnk_change;
1693
1694 /* Reset the xcvr interface and turn on heartbeat. */
1695 switch (chip_idx) {
1696 case DC21140:
1697 case DM910X:
1698 default:
1699 if (tp->mtable)
1700 iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
1701 break;
1702 case DC21142:
1703 if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) {
1704 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
1705 iowrite32(0x0000, ioaddr + CSR13);
1706 iowrite32(0x0000, ioaddr + CSR14);
1707 iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
1708 } else
1709 t21142_start_nway(dev);
1710 break;
1711 case PNIC2:
1712 /* just do a reset for sanity sake */
1713 iowrite32(0x0000, ioaddr + CSR13);
1714 iowrite32(0x0000, ioaddr + CSR14);
1715 break;
1716 case LC82C168:
1717 if ( ! tp->mii_cnt) {
1718 tp->nway = 1;
1719 tp->nwayset = 0;
1720 iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
1721 iowrite32(0x30, ioaddr + CSR12);
1722 iowrite32(0x0001F078, ioaddr + CSR6);
1723 iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
1724 }
1725 break;
1726 case MX98713:
1727 case COMPEX9881:
1728 iowrite32(0x00000000, ioaddr + CSR6);
1729 iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
1730 iowrite32(0x00000001, ioaddr + CSR13);
1731 break;
1732 case MX98715:
1733 case MX98725:
1734 iowrite32(0x01a80000, ioaddr + CSR6);
1735 iowrite32(0xFFFFFFFF, ioaddr + CSR14);
1736 iowrite32(0x00001000, ioaddr + CSR12);
1737 break;
1738 case COMET:
1739 /* No initialization necessary. */
1740 break;
1741 }
1742
1743 /* put the chip in snooze mode until opened */
1744 tulip_set_power_state (tp, 0, 1);
1745
1746 return 0;
1747
1748 err_out_free_ring:
1749 pci_free_consistent (pdev,
1750 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1751 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1752 tp->rx_ring, tp->rx_ring_dma);
1753
1754 err_out_mtable:
1755 kfree (tp->mtable);
1756 pci_iounmap(pdev, ioaddr);
1757
1758 err_out_free_res:
1759 pci_release_regions (pdev);
1760
1761 err_out_free_netdev:
1762 free_netdev (dev);
1763 return -ENODEV;
1764 }
1765
1766
1767 #ifdef CONFIG_PM
1768
1769 static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1770 {
1771 struct net_device *dev = pci_get_drvdata(pdev);
1772
1773 if (!dev)
1774 return -EINVAL;
1775
1776 if (!netif_running(dev))
1777 goto save_state;
1778
1779 tulip_down(dev);
1780
1781 netif_device_detach(dev);
1782 free_irq(dev->irq, dev);
1783
1784 save_state:
1785 pci_save_state(pdev);
1786 pci_disable_device(pdev);
1787 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1788
1789 return 0;
1790 }
1791
1792
1793 static int tulip_resume(struct pci_dev *pdev)
1794 {
1795 struct net_device *dev = pci_get_drvdata(pdev);
1796 int retval;
1797
1798 if (!dev)
1799 return -EINVAL;
1800
1801 pci_set_power_state(pdev, PCI_D0);
1802 pci_restore_state(pdev);
1803
1804 if (!netif_running(dev))
1805 return 0;
1806
1807 if ((retval = pci_enable_device(pdev))) {
1808 pr_err(PFX "pci_enable_device failed in resume\n");
1809 return retval;
1810 }
1811
1812 if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
1813 pr_err(PFX "request_irq failed in resume\n");
1814 return retval;
1815 }
1816
1817 netif_device_attach(dev);
1818
1819 if (netif_running(dev))
1820 tulip_up(dev);
1821
1822 return 0;
1823 }
1824
1825 #endif /* CONFIG_PM */
1826
1827
1828 static void __devexit tulip_remove_one (struct pci_dev *pdev)
1829 {
1830 struct net_device *dev = pci_get_drvdata (pdev);
1831 struct tulip_private *tp;
1832
1833 if (!dev)
1834 return;
1835
1836 tp = netdev_priv(dev);
1837 unregister_netdev(dev);
1838 pci_free_consistent (pdev,
1839 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1840 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1841 tp->rx_ring, tp->rx_ring_dma);
1842 kfree (tp->mtable);
1843 pci_iounmap(pdev, tp->base_addr);
1844 free_netdev (dev);
1845 pci_release_regions (pdev);
1846 pci_set_drvdata (pdev, NULL);
1847
1848 /* pci_power_off (pdev, -1); */
1849 }
1850
1851 #ifdef CONFIG_NET_POLL_CONTROLLER
1852 /*
1853 * Polling 'interrupt' - used by things like netconsole to send skbs
1854 * without having to re-enable interrupts. It's not called while
1855 * the interrupt routine is executing.
1856 */
1857
1858 static void poll_tulip (struct net_device *dev)
1859 {
1860 /* disable_irq here is not very nice, but with the lockless
1861 interrupt handler we have no other choice. */
1862 disable_irq(dev->irq);
1863 tulip_interrupt (dev->irq, dev);
1864 enable_irq(dev->irq);
1865 }
1866 #endif
1867
1868 static struct pci_driver tulip_driver = {
1869 .name = DRV_NAME,
1870 .id_table = tulip_pci_tbl,
1871 .probe = tulip_init_one,
1872 .remove = __devexit_p(tulip_remove_one),
1873 #ifdef CONFIG_PM
1874 .suspend = tulip_suspend,
1875 .resume = tulip_resume,
1876 #endif /* CONFIG_PM */
1877 };
1878
1879
1880 static int __init tulip_init (void)
1881 {
1882 #ifdef MODULE
1883 pr_info("%s", version);
1884 #endif
1885
1886 /* copy module parms into globals */
1887 tulip_rx_copybreak = rx_copybreak;
1888 tulip_max_interrupt_work = max_interrupt_work;
1889
1890 /* probe for and init boards */
1891 return pci_register_driver(&tulip_driver);
1892 }
1893
1894
1895 static void __exit tulip_cleanup (void)
1896 {
1897 pci_unregister_driver (&tulip_driver);
1898 }
1899
1900
1901 module_init(tulip_init);
1902 module_exit(tulip_cleanup);