Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (c) 1997-2000 LAN Media Corporation (LMC) | |
3 | * All rights reserved. www.lanmedia.com | |
64bef763 | 4 | * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl> |
1da177e4 LT |
5 | * |
6 | * This code is written by: | |
7 | * Andrew Stanley-Jones (asj@cban.com) | |
8 | * Rob Braun (bbraun@vix.com), | |
9 | * Michael Graff (explorer@vix.com) and | |
10 | * Matt Thomas (matt@3am-software.com). | |
11 | * | |
12 | * With Help By: | |
13 | * David Boggs | |
14 | * Ron Crane | |
15 | * Alan Cox | |
16 | * | |
17 | * This software may be used and distributed according to the terms | |
18 | * of the GNU General Public License version 2, incorporated herein by reference. | |
19 | * | |
20 | * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards. | |
21 | * | |
22 | * To control link specific options lmcctl is required. | |
23 | * It can be obtained from ftp.lanmedia.com. | |
24 | * | |
25 | * Linux driver notes: | |
26 | * Linux uses the device struct lmc_private to pass private information | |
27 | * around. | |
28 | * | |
29 | * The initialization portion of this driver consists of the lmc_reset() | |
30 | * and lmc_dec_reset() functions, as well as the LED controls and the | |
31 | * lmc_initcsrs() function. | |
32 | * | |
33 | * The watchdog function runs every second and checks to see if | |
34 | * we still have link, and that the timing source is what we expected | |
35 | * it to be. If link is lost, the interface is marked down, and | |
36 | * we can no longer transmit. | |
37 | * | |
38 | */ | |
39 | ||
1da177e4 LT |
40 | #include <linux/kernel.h> |
41 | #include <linux/module.h> | |
42 | #include <linux/string.h> | |
43 | #include <linux/timer.h> | |
44 | #include <linux/ptrace.h> | |
45 | #include <linux/errno.h> | |
46 | #include <linux/ioport.h> | |
47 | #include <linux/slab.h> | |
48 | #include <linux/interrupt.h> | |
49 | #include <linux/pci.h> | |
50 | #include <linux/delay.h> | |
64bef763 | 51 | #include <linux/hdlc.h> |
1da177e4 LT |
52 | #include <linux/init.h> |
53 | #include <linux/in.h> | |
54 | #include <linux/if_arp.h> | |
55 | #include <linux/netdevice.h> | |
56 | #include <linux/etherdevice.h> | |
57 | #include <linux/skbuff.h> | |
58 | #include <linux/inet.h> | |
59 | #include <linux/bitops.h> | |
1da177e4 LT |
60 | #include <asm/processor.h> /* Processor type for cache alignment. */ |
61 | #include <asm/io.h> | |
62 | #include <asm/dma.h> | |
63 | #include <asm/uaccess.h> | |
64 | //#include <asm/spinlock.h> | |
65 | ||
66 | #define DRIVER_MAJOR_VERSION 1 | |
67 | #define DRIVER_MINOR_VERSION 34 | |
68 | #define DRIVER_SUB_VERSION 0 | |
69 | ||
70 | #define DRIVER_VERSION ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION) | |
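 | /* e.g. version 1.34 packs to (1 << 8) + 34 = 0x0122 */ | |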
71 | ||
72 | #include "lmc.h" | |
73 | #include "lmc_var.h" | |
74 | #include "lmc_ioctl.h" | |
75 | #include "lmc_debug.h" | |
76 | #include "lmc_proto.h" | |
77 | ||
1da177e4 LT |
78 | static int LMC_PKT_BUF_SZ = 1542; |
79 | ||
a3aa1884 | 80 | static DEFINE_PCI_DEVICE_TABLE(lmc_pci_tbl) = { |
1da177e4 LT |
81 | { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST, |
82 | PCI_VENDOR_ID_LMC, PCI_ANY_ID }, | |
83 | { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST, | |
84 | PCI_ANY_ID, PCI_VENDOR_ID_LMC }, | |
85 | { 0 } | |
86 | }; | |
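 | /* | |
 |  * Both orderings are listed because some boards carry the LMC ID in the | |
 |  * PCI subsystem vendor field and others in the subsystem device field; | |
 |  * lmc_init_one() applies the same swap when identifying the card. | |
 |  */ | |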
87 | ||
88 | MODULE_DEVICE_TABLE(pci, lmc_pci_tbl); | |
64bef763 | 89 | MODULE_LICENSE("GPL v2"); |
1da177e4 LT |
90 | |
91 | ||
d71a6749 SH |
92 | static netdev_tx_t lmc_start_xmit(struct sk_buff *skb, |
93 | struct net_device *dev); | |
1da177e4 LT |
94 | static int lmc_rx (struct net_device *dev); |
95 | static int lmc_open(struct net_device *dev); | |
96 | static int lmc_close(struct net_device *dev); | |
97 | static struct net_device_stats *lmc_get_stats(struct net_device *dev); | |
7d12e780 | 98 | static irqreturn_t lmc_interrupt(int irq, void *dev_instance); |
1da177e4 LT |
99 | static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size); |
100 | static void lmc_softreset(lmc_softc_t * const); | |
101 | static void lmc_running_reset(struct net_device *dev); | |
102 | static int lmc_ifdown(struct net_device * const); | |
103 | static void lmc_watchdog(unsigned long data); | |
104 | static void lmc_reset(lmc_softc_t * const sc); | |
105 | static void lmc_dec_reset(lmc_softc_t * const sc); | |
106 | static void lmc_driver_timeout(struct net_device *dev); | |
107 | ||
108 | /* | |
109 | * Linux reserves 16 device-specific IOCTLs. We call them | |
110 | * LMCIOC* to control various bits of our world. | |
111 | */ | |
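 | /* | |
 |  * A minimal userspace sketch (illustrative only; it assumes the LMCIOC* | |
 |  * values from lmc_ioctl.h are based on SIOCDEVPRIVATE and that "hdlc0" | |
 |  * is the interface name): | |
 |  * | |
 |  *     lmc_ctl_t ctl; | |
 |  *     struct ifreq ifr; | |
 |  *     int fd = socket(AF_INET, SOCK_DGRAM, 0); | |
 |  *     strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ); | |
 |  *     ifr.ifr_data = (char *)&ctl; | |
 |  *     ioctl(fd, LMCIOCGINFO, &ifr);   -- fills ctl with the current state | |
 |  */ | |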
64bef763 | 112 | int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ |
1da177e4 | 113 | { |
64bef763 | 114 | lmc_softc_t *sc = dev_to_sc(dev); |
1da177e4 | 115 | lmc_ctl_t ctl; |
64bef763 KH |
116 | int ret = -EOPNOTSUPP; |
117 | u16 regVal; | |
1da177e4 LT |
118 | unsigned long flags; |
119 | ||
1da177e4 LT |
120 | lmc_trace(dev, "lmc_ioctl in"); |
121 | ||
122 | /* | |
123 | * Most functions mess with the structure | |
124 | * Disable interrupts while we do the polling | |
125 | */ | |
1da177e4 LT |
126 | |
127 | switch (cmd) { | |
128 | /* | |
129 | * Return current driver state. Since we keep this up | |
130 | * to date internally, just copy this out to the user. | |
131 | */ | |
132 | case LMCIOCGINFO: /*fold01*/ | |
e3376dca RK |
133 | if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof(lmc_ctl_t))) |
134 | ret = -EFAULT; | |
135 | else | |
136 | ret = 0; | |
1da177e4 LT |
137 | break; |
138 | ||
139 | case LMCIOCSINFO: /*fold01*/ | |
1da177e4 LT |
140 | if (!capable(CAP_NET_ADMIN)) { |
141 | ret = -EPERM; | |
142 | break; | |
143 | } | |
144 | ||
145 | if(dev->flags & IFF_UP){ | |
146 | ret = -EBUSY; | |
147 | break; | |
148 | } | |
149 | ||
e3376dca RK |
150 | if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) { |
151 | ret = -EFAULT; | |
152 | break; | |
153 | } | |
1da177e4 | 154 | |
8ab7b667 | 155 | spin_lock_irqsave(&sc->lmc_lock, flags); |
1da177e4 LT |
156 | sc->lmc_media->set_status (sc, &ctl); |
157 | ||
158 | if(ctl.crc_length != sc->ictl.crc_length) { | |
159 | sc->lmc_media->set_crc_length(sc, ctl.crc_length); | |
160 | if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) | |
161 | sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE; | |
162 | else | |
163 | sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE; | |
164 | } | |
8ab7b667 | 165 | spin_unlock_irqrestore(&sc->lmc_lock, flags); |
1da177e4 | 166 | |
1da177e4 LT |
167 | ret = 0; |
168 | break; | |
169 | ||
170 | case LMCIOCIFTYPE: /*fold01*/ | |
171 | { | |
64bef763 KH |
172 | u16 old_type = sc->if_type; |
173 | u16 new_type; | |
1da177e4 LT |
174 | |
175 | if (!capable(CAP_NET_ADMIN)) { | |
176 | ret = -EPERM; | |
177 | break; | |
178 | } | |
179 | ||
64bef763 | 180 | if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) { |
e3376dca RK |
181 | ret = -EFAULT; |
182 | break; | |
183 | } | |
1da177e4 LT |
184 | |
185 | ||
186 | if (new_type == old_type) | |
187 | { | |
188 | ret = 0 ; | |
189 | break; /* no change */ | |
190 | } | |
191 | ||
8ab7b667 | 192 | spin_lock_irqsave(&sc->lmc_lock, flags); |
1da177e4 | 193 | lmc_proto_close(sc); |
1da177e4 LT |
194 | |
195 | sc->if_type = new_type; | |
1da177e4 | 196 | lmc_proto_attach(sc); |
64bef763 | 197 | ret = lmc_proto_open(sc); |
8ab7b667 | 198 | spin_unlock_irqrestore(&sc->lmc_lock, flags); |
64bef763 | 199 | break; |
1da177e4 LT |
200 | } |
201 | ||
202 | case LMCIOCGETXINFO: /*fold01*/ | |
8ab7b667 | 203 | spin_lock_irqsave(&sc->lmc_lock, flags); |
1da177e4 LT |
204 | sc->lmc_xinfo.Magic0 = 0xBEEFCAFE; |
205 | ||
206 | sc->lmc_xinfo.PciCardType = sc->lmc_cardtype; | |
207 | sc->lmc_xinfo.PciSlotNumber = 0; | |
208 | sc->lmc_xinfo.DriverMajorVersion = DRIVER_MAJOR_VERSION; | |
209 | sc->lmc_xinfo.DriverMinorVersion = DRIVER_MINOR_VERSION; | |
210 | sc->lmc_xinfo.DriverSubVersion = DRIVER_SUB_VERSION; | |
211 | sc->lmc_xinfo.XilinxRevisionNumber = | |
212 | lmc_mii_readreg (sc, 0, 3) & 0xf; | |
213 | sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ; | |
214 | sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status (sc); | |
215 | sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg (sc, 0, 16); | |
8ab7b667 | 216 | spin_unlock_irqrestore(&sc->lmc_lock, flags); |
1da177e4 LT |
217 | |
218 | sc->lmc_xinfo.Magic1 = 0xDEADBEEF; | |
219 | ||
220 | if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo, | |
5c41542b | 221 | sizeof(struct lmc_xinfo))) |
e3376dca RK |
222 | ret = -EFAULT; |
223 | else | |
224 | ret = 0; | |
1da177e4 LT |
225 | |
226 | break; | |
227 | ||
64bef763 | 228 | case LMCIOCGETLMCSTATS: |
8ab7b667 | 229 | spin_lock_irqsave(&sc->lmc_lock, flags); |
64bef763 KH |
230 | if (sc->lmc_cardtype == LMC_CARDTYPE_T1) { |
231 | lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB); | |
232 | sc->extra_stats.framingBitErrorCount += | |
233 | lmc_mii_readreg(sc, 0, 18) & 0xff; | |
234 | lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB); | |
235 | sc->extra_stats.framingBitErrorCount += | |
236 | (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8; | |
237 | lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB); | |
238 | sc->extra_stats.lineCodeViolationCount += | |
239 | lmc_mii_readreg(sc, 0, 18) & 0xff; | |
240 | lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB); | |
241 | sc->extra_stats.lineCodeViolationCount += | |
242 | (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8; | |
243 | lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR); | |
244 | regVal = lmc_mii_readreg(sc, 0, 18) & 0xff; | |
245 | ||
246 | sc->extra_stats.lossOfFrameCount += | |
247 | (regVal & T1FRAMER_LOF_MASK) >> 4; | |
248 | sc->extra_stats.changeOfFrameAlignmentCount += | |
249 | (regVal & T1FRAMER_COFA_MASK) >> 2; | |
250 | sc->extra_stats.severelyErroredFrameCount += | |
251 | regVal & T1FRAMER_SEF_MASK; | |
252 | } | |
8ab7b667 | 253 | spin_unlock_irqrestore(&sc->lmc_lock, flags); |
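 | /* | |
 |  * The user buffer is filled with struct net_device_stats immediately | |
 |  * followed by the driver's extra_stats, so the caller must supply a | |
 |  * buffer large enough for both. | |
 |  */ | |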
64bef763 KH |
254 | if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats, |
255 | sizeof(sc->lmc_device->stats)) || | |
256 | copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats), | |
257 | &sc->extra_stats, sizeof(sc->extra_stats))) | |
258 | ret = -EFAULT; | |
259 | else | |
260 | ret = 0; | |
261 | break; | |
1da177e4 | 262 | |
64bef763 KH |
263 | case LMCIOCCLEARLMCSTATS: |
264 | if (!capable(CAP_NET_ADMIN)) { | |
265 | ret = -EPERM; | |
266 | break; | |
267 | } | |
1da177e4 | 268 | |
8ab7b667 | 269 | spin_lock_irqsave(&sc->lmc_lock, flags); |
64bef763 KH |
270 | memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats)); |
271 | memset(&sc->extra_stats, 0, sizeof(sc->extra_stats)); | |
272 | sc->extra_stats.check = STATCHECK; | |
273 | sc->extra_stats.version_size = (DRIVER_VERSION << 16) + | |
274 | sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats); | |
275 | sc->extra_stats.lmc_cardtype = sc->lmc_cardtype; | |
8ab7b667 | 276 | spin_unlock_irqrestore(&sc->lmc_lock, flags); |
64bef763 KH |
277 | ret = 0; |
278 | break; | |
1da177e4 LT |
279 | |
280 | case LMCIOCSETCIRCUIT: /*fold01*/ | |
281 | if (!capable(CAP_NET_ADMIN)){ | |
282 | ret = -EPERM; | |
283 | break; | |
284 | } | |
285 | ||
286 | if(dev->flags & IFF_UP){ | |
287 | ret = -EBUSY; | |
288 | break; | |
289 | } | |
290 | ||
e3376dca RK |
291 | if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) { |
292 | ret = -EFAULT; | |
293 | break; | |
294 | } | |
8ab7b667 | 295 | spin_lock_irqsave(&sc->lmc_lock, flags); |
1da177e4 LT |
296 | sc->lmc_media->set_circuit_type(sc, ctl.circuit_type); |
297 | sc->ictl.circuit_type = ctl.circuit_type; | |
8ab7b667 | 298 | spin_unlock_irqrestore(&sc->lmc_lock, flags); |
1da177e4 LT |
299 | ret = 0; |
300 | ||
301 | break; | |
302 | ||
303 | case LMCIOCRESET: /*fold01*/ | |
304 | if (!capable(CAP_NET_ADMIN)){ | |
305 | ret = -EPERM; | |
306 | break; | |
307 | } | |
308 | ||
8ab7b667 | 309 | spin_lock_irqsave(&sc->lmc_lock, flags); |
1da177e4 LT |
310 | /* Reset driver and bring back to current state */ |
311 | printk (" REG16 before reset +%04x\n", lmc_mii_readreg (sc, 0, 16)); | |
312 | lmc_running_reset (dev); | |
313 | printk (" REG16 after reset +%04x\n", lmc_mii_readreg (sc, 0, 16)); | |
314 | ||
315 | LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16)); | |
8ab7b667 | 316 | spin_unlock_irqrestore(&sc->lmc_lock, flags); |
1da177e4 LT |
317 | |
318 | ret = 0; | |
319 | break; | |
320 | ||
321 | #ifdef DEBUG | |
322 | case LMCIOCDUMPEVENTLOG: | |
e3376dca RK |
323 | if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof(u32))) { |
324 | ret = -EFAULT; | |
325 | break; | |
326 | } | |
867240f7 KH |
327 | if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf, |
328 | sizeof(lmcEventLogBuf))) | |
e3376dca RK |
329 | ret = -EFAULT; |
330 | else | |
331 | ret = 0; | |
1da177e4 | 332 | |
1da177e4 LT |
333 | break; |
334 | #endif /* end ifdef DEBUG */ | |
335 | case LMCIOCT1CONTROL: /*fold01*/ | |
336 | if (sc->lmc_cardtype != LMC_CARDTYPE_T1){ | |
337 | ret = -EOPNOTSUPP; | |
338 | break; | |
339 | } | |
340 | break; | |
341 | case LMCIOCXILINX: /*fold01*/ | |
342 | { | |
343 | struct lmc_xilinx_control xc; /*fold02*/ | |
344 | ||
345 | if (!capable(CAP_NET_ADMIN)){ | |
346 | ret = -EPERM; | |
347 | break; | |
348 | } | |
349 | ||
350 | /* | |
351 | * Stop the transmitter while we restart the hardware | |
352 | */ | |
353 | netif_stop_queue(dev); | |
354 | ||
8ab7b667 | 355 | if (copy_from_user(&xc, ifr->ifr_data, sizeof(struct lmc_xilinx_control))) { |
e3376dca RK |
356 | ret = -EFAULT; |
357 | break; | |
8ab7b667 | 358 | } |
1da177e4 LT |
359 | switch(xc.command){ |
360 | case lmc_xilinx_reset: /*fold02*/ | |
361 | { | |
362 | u16 mii; | |
8ab7b667 | 363 | spin_lock_irqsave(&sc->lmc_lock, flags); |
1da177e4 LT |
364 | mii = lmc_mii_readreg (sc, 0, 16); |
365 | ||
366 | /* | |
367 | * Make all of them 0 and make input | |
368 | */ | |
369 | lmc_gpio_mkinput(sc, 0xff); | |
370 | ||
371 | /* | |
372 | * make the reset output | |
373 | */ | |
374 | lmc_gpio_mkoutput(sc, LMC_GEP_RESET); | |
375 | ||
376 | /* | |
377 | * RESET low to force configuration. This also forces | |
378 | * the transmitter clock to be internal, but we expect to reset | |
379 | * that later anyway. | |
380 | */ | |
381 | ||
382 | sc->lmc_gpio &= ~LMC_GEP_RESET; | |
383 | LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); | |
384 | ||
385 | ||
386 | /* | |
387 | * hold for more than 10 microseconds | |
388 | */ | |
389 | udelay(50); | |
390 | ||
391 | sc->lmc_gpio |= LMC_GEP_RESET; | |
392 | LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); | |
393 | ||
394 | ||
395 | /* | |
396 | * stop driving Xilinx-related signals | |
397 | */ | |
398 | lmc_gpio_mkinput(sc, 0xff); | |
399 | ||
400 | /* Reset the framer hardware */ | |
401 | sc->lmc_media->set_link_status (sc, 1); | |
402 | sc->lmc_media->set_status (sc, NULL); | |
403 | // lmc_softreset(sc); | |
404 | ||
405 | { | |
406 | int i; | |
407 | for(i = 0; i < 5; i++){ | |
408 | lmc_led_on(sc, LMC_DS3_LED0); | |
409 | mdelay(100); | |
410 | lmc_led_off(sc, LMC_DS3_LED0); | |
411 | lmc_led_on(sc, LMC_DS3_LED1); | |
412 | mdelay(100); | |
413 | lmc_led_off(sc, LMC_DS3_LED1); | |
414 | lmc_led_on(sc, LMC_DS3_LED3); | |
415 | mdelay(100); | |
416 | lmc_led_off(sc, LMC_DS3_LED3); | |
417 | lmc_led_on(sc, LMC_DS3_LED2); | |
418 | mdelay(100); | |
419 | lmc_led_off(sc, LMC_DS3_LED2); | |
420 | } | |
421 | } | |
8ab7b667 | 422 | spin_unlock_irqrestore(&sc->lmc_lock, flags); |
1da177e4 LT |
423 | |
424 | ||
425 | ||
426 | ret = 0x0; | |
427 | ||
428 | } | |
429 | ||
430 | break; | |
431 | case lmc_xilinx_load_prom: /*fold02*/ | |
432 | { | |
433 | u16 mii; | |
434 | int timeout = 500000; | |
8ab7b667 | 435 | spin_lock_irqsave(&sc->lmc_lock, flags); |
1da177e4 LT |
436 | mii = lmc_mii_readreg (sc, 0, 16); |
437 | ||
438 | /* | |
439 | * Make all of them 0 and make input | |
440 | */ | |
441 | lmc_gpio_mkinput(sc, 0xff); | |
442 | ||
443 | /* | |
444 | * make the reset output | |
445 | */ | |
446 | lmc_gpio_mkoutput(sc, LMC_GEP_DP | LMC_GEP_RESET); | |
447 | ||
448 | /* | |
449 | * RESET low to force configuration. This also forces | |
450 | * the transmitter clock to be internal, but we expect to reset | |
451 | * that later anyway. | |
452 | */ | |
453 | ||
454 | sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP); | |
455 | LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); | |
456 | ||
457 | ||
458 | /* | |
459 | * hold for more than 10 microseconds | |
460 | */ | |
461 | udelay(50); | |
462 | ||
463 | sc->lmc_gpio |= LMC_GEP_DP | LMC_GEP_RESET; | |
464 | LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); | |
465 | ||
466 | /* | |
467 | * busy wait for the chip to reset | |
468 | */ | |
469 | while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 && | |
470 | (timeout-- > 0)) | |
8ab7b667 | 471 | cpu_relax(); |
1da177e4 LT |
472 | |
473 | ||
474 | /* | |
475 | * stop driving Xilinx-related signals | |
476 | */ | |
477 | lmc_gpio_mkinput(sc, 0xff); | |
8ab7b667 | 478 | spin_unlock_irqrestore(&sc->lmc_lock, flags); |
1da177e4 LT |
479 | |
480 | ret = 0x0; | |
481 | ||
482 | ||
483 | break; | |
484 | ||
485 | } | |
486 | ||
487 | case lmc_xilinx_load: /*fold02*/ | |
488 | { | |
489 | char *data; | |
490 | int pos; | |
491 | int timeout = 500000; | |
492 | ||
79ea13ce | 493 | if (!xc.data) { |
1da177e4 LT |
494 | ret = -EINVAL; |
495 | break; | |
496 | } | |
497 | ||
498 | data = kmalloc(xc.len, GFP_KERNEL); | |
79ea13ce | 499 | if (!data) { |
1da177e4 LT |
500 | printk(KERN_WARNING "%s: Failed to allocate memory for copy\n", dev->name); |
501 | ret = -ENOMEM; | |
502 | break; | |
503 | } | |
504 | ||
505 | if(copy_from_user(data, xc.data, xc.len)) | |
506 | { | |
507 | kfree(data); | |
508 | ret = -EFAULT; | |
509 | break; | |
510 | } | |
511 | ||
512 | printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data); | |
513 | ||
8ab7b667 | 514 | spin_lock_irqsave(&sc->lmc_lock, flags); |
1da177e4 LT |
515 | lmc_gpio_mkinput(sc, 0xff); |
516 | ||
517 | /* | |
518 | * Clear the Xilinx and start programming from the DEC | |
519 | */ | |
520 | ||
521 | /* | |
522 | * Set output as: | |
523 | * Reset: 0 (active) | |
524 | * DP: 0 (active) | |
525 | * Mode: 1 | |
526 | * | |
527 | */ | |
528 | sc->lmc_gpio = 0x00; | |
529 | sc->lmc_gpio &= ~LMC_GEP_DP; | |
530 | sc->lmc_gpio &= ~LMC_GEP_RESET; | |
531 | sc->lmc_gpio |= LMC_GEP_MODE; | |
532 | LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); | |
533 | ||
534 | lmc_gpio_mkoutput(sc, LMC_GEP_MODE | LMC_GEP_DP | LMC_GEP_RESET); | |
535 | ||
536 | /* | |
537 | * Wait at least 10 us; 20 us to be safe | |
538 | */ | |
539 | udelay(50); | |
540 | ||
541 | /* | |
542 | * Clear reset and activate programming lines | |
543 | * Reset: Input | |
544 | * DP: Input | |
545 | * Clock: Output | |
546 | * Data: Output | |
547 | * Mode: Output | |
548 | */ | |
549 | lmc_gpio_mkinput(sc, LMC_GEP_DP | LMC_GEP_RESET); | |
550 | ||
551 | /* | |
552 | * Set LOAD, DATA, Clock to 1 | |
553 | */ | |
554 | sc->lmc_gpio = 0x00; | |
555 | sc->lmc_gpio |= LMC_GEP_MODE; | |
556 | sc->lmc_gpio |= LMC_GEP_DATA; | |
557 | sc->lmc_gpio |= LMC_GEP_CLK; | |
558 | LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); | |
559 | ||
560 | lmc_gpio_mkoutput(sc, LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_MODE ); | |
561 | ||
562 | /* | |
563 | * busy wait for the chip to reset | |
564 | */ | |
565 | while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 && | |
566 | (timeout-- > 0)) | |
8ab7b667 | 567 | cpu_relax(); |
1da177e4 LT |
568 | |
569 | printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear it's memory\n", dev->name, 500000-timeout); | |
570 | ||
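 | /* | |
 |  * Bit-bang the bitstream into the FPGA: put each bit on DATA, drop CLK | |
 |  * and raise it again (slave-serial configuration samples DATA on the | |
 |  * rising clock edge), keeping MODE asserted throughout. | |
 |  */ | |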
571 | for(pos = 0; pos < xc.len; pos++){ | |
572 | switch(data[pos]){ | |
573 | case 0: | |
574 | sc->lmc_gpio &= ~LMC_GEP_DATA; /* Data is 0 */ | |
575 | break; | |
576 | case 1: | |
577 | sc->lmc_gpio |= LMC_GEP_DATA; /* Data is 1 */ | |
578 | break; | |
579 | default: | |
580 | printk(KERN_WARNING "%s Bad data in xilinx programming data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]); | |
581 | sc->lmc_gpio |= LMC_GEP_DATA; /* Assume it's 1 */ | |
582 | } | |
583 | sc->lmc_gpio &= ~LMC_GEP_CLK; /* Clock to zero */ | |
584 | sc->lmc_gpio |= LMC_GEP_MODE; | |
585 | LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); | |
586 | udelay(1); | |
587 | ||
588 | sc->lmc_gpio |= LMC_GEP_CLK; /* Put the clock back to one */ | |
589 | sc->lmc_gpio |= LMC_GEP_MODE; | |
590 | LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); | |
591 | udelay(1); | |
592 | } | |
593 | if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0){ | |
594 | printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name); | |
595 | } | |
596 | else if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0){ | |
597 | printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name); | |
598 | } | |
599 | else { | |
600 | printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos); | |
601 | } | |
602 | ||
603 | lmc_gpio_mkinput(sc, 0xff); | |
604 | ||
605 | sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET; | |
606 | lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16); | |
607 | ||
608 | sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET; | |
609 | lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16); | |
8ab7b667 | 610 | spin_unlock_irqrestore(&sc->lmc_lock, flags); |
1da177e4 LT |
611 | |
612 | kfree(data); | |
613 | ||
614 | ret = 0; | |
615 | ||
616 | break; | |
617 | } | |
618 | default: /*fold02*/ | |
619 | ret = -EBADE; | |
620 | break; | |
621 | } | |
622 | ||
623 | netif_wake_queue(dev); | |
624 | sc->lmc_txfull = 0; | |
625 | ||
626 | } | |
627 | break; | |
628 | default: /*fold01*/ | |
629 | /* If we don't know what to do, give the protocol a shot. */ | |
630 | ret = lmc_proto_ioctl (sc, ifr, cmd); | |
631 | break; | |
632 | } | |
633 | ||
1da177e4 LT |
634 | lmc_trace(dev, "lmc_ioctl out"); |
635 | ||
636 | return ret; | |
637 | } | |
638 | ||
639 | ||
640 | /* the watchdog process that cruises around */ | |
641 | static void lmc_watchdog (unsigned long data) /*fold00*/ | |
642 | { | |
64bef763 KH |
643 | struct net_device *dev = (struct net_device *)data; |
644 | lmc_softc_t *sc = dev_to_sc(dev); | |
1da177e4 | 645 | int link_status; |
867240f7 | 646 | u32 ticks; |
1da177e4 LT |
647 | unsigned long flags; |
648 | ||
1da177e4 LT |
649 | lmc_trace(dev, "lmc_watchdog in"); |
650 | ||
651 | spin_lock_irqsave(&sc->lmc_lock, flags); | |
652 | ||
653 | if(sc->check != 0xBEAFCAFE){ | |
32357988 | 654 | printk("LMC: Corrupt net_device struct, breaking out\n"); |
1da177e4 LT |
655 | spin_unlock_irqrestore(&sc->lmc_lock, flags); |
656 | return; | |
657 | } | |
658 | ||
659 | ||
660 | /* Make sure the tx jabber and rx watchdog are off, | |
661 | * and the transmit and receive processes are running. | |
662 | */ | |
663 | ||
664 | LMC_CSR_WRITE (sc, csr_15, 0x00000011); | |
665 | sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN; | |
666 | LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode); | |
667 | ||
668 | if (sc->lmc_ok == 0) | |
669 | goto kick_timer; | |
670 | ||
671 | LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16)); | |
672 | ||
673 | /* --- begin time out check ----------------------------------- | |
674 | * check for a transmit interrupt timeout | |
675 | * Has the packets-transmitted vs. transmits-serviced threshold been exceeded? */ | |
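 | /* | |
 |  * Two-pass detection: the first watchdog tick that sees the threshold | |
 |  * exceeded only sets tx_TimeoutInd; if it is still exceeded on the next | |
 |  * tick, the DEC chip is assumed stuck and a running reset is issued. | |
 |  */ | |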
676 | if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && | |
64bef763 KH |
677 | sc->lmc_device->stats.tx_packets > sc->lasttx_packets && |
678 | sc->tx_TimeoutInd == 0) | |
1da177e4 LT |
679 | { |
680 | ||
681 | /* wait for the watchdog to come around again */ | |
682 | sc->tx_TimeoutInd = 1; | |
683 | } | |
684 | else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && | |
64bef763 KH |
685 | sc->lmc_device->stats.tx_packets > sc->lasttx_packets && |
686 | sc->tx_TimeoutInd) | |
1da177e4 LT |
687 | { |
688 | ||
689 | LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0); | |
690 | ||
691 | sc->tx_TimeoutDisplay = 1; | |
64bef763 | 692 | sc->extra_stats.tx_TimeoutCnt++; |
1da177e4 LT |
693 | |
694 | /* DEC chip is stuck, hit it with a RESET!!!! */ | |
695 | lmc_running_reset (dev); | |
696 | ||
697 | ||
698 | /* look at receive & transmit process state to make sure they are running */ | |
699 | LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0); | |
700 | ||
701 | /* look at: DSR - 02 for Reg 16 | |
702 | * CTS - 08 | |
703 | * DCD - 10 | |
704 | * RI - 20 | |
705 | * for Reg 17 | |
706 | */ | |
707 | LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg (sc, 0, 16), lmc_mii_readreg (sc, 0, 17)); | |
708 | ||
709 | /* reset the transmit timeout detection flag */ | |
710 | sc->tx_TimeoutInd = 0; | |
711 | sc->lastlmc_taint_tx = sc->lmc_taint_tx; | |
64bef763 KH |
712 | sc->lasttx_packets = sc->lmc_device->stats.tx_packets; |
713 | } else { | |
1da177e4 LT |
714 | sc->tx_TimeoutInd = 0; |
715 | sc->lastlmc_taint_tx = sc->lmc_taint_tx; | |
64bef763 | 716 | sc->lasttx_packets = sc->lmc_device->stats.tx_packets; |
1da177e4 LT |
717 | } |
718 | ||
719 | /* --- end time out check ----------------------------------- */ | |
720 | ||
721 | ||
722 | link_status = sc->lmc_media->get_link_status (sc); | |
723 | ||
724 | /* | |
725 | * hardware level link lost, but the interface is marked as up. | |
726 | * Mark it as down. | |
727 | */ | |
728 | if ((link_status == 0) && (sc->last_link_status != 0)) { | |
729 | printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name); | |
730 | sc->last_link_status = 0; | |
731 | /* lmc_reset (sc); Why reset??? The link can go down ok */ | |
732 | ||
733 | /* Inform the world that link has been lost */ | |
7d17c1d6 | 734 | netif_carrier_off(dev); |
1da177e4 LT |
735 | } |
736 | ||
737 | /* | |
738 | * hardware link is up, but the interface is marked as down. | |
739 | * Bring it back up again. | |
740 | */ | |
741 | if (link_status != 0 && sc->last_link_status == 0) { | |
742 | printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name); | |
743 | sc->last_link_status = 1; | |
744 | /* lmc_reset (sc); Again why reset??? */ | |
745 | ||
7d17c1d6 | 746 | netif_carrier_on(dev); |
1da177e4 LT |
747 | } |
748 | ||
749 | /* Call media specific watchdog functions */ | |
750 | sc->lmc_media->watchdog(sc); | |
751 | ||
752 | /* | |
753 | * Poke the transmitter to make sure it | |
754 | * never stops, even if we run out of mem | |
755 | */ | |
756 | LMC_CSR_WRITE(sc, csr_rxpoll, 0); | |
757 | ||
758 | /* | |
759 | * Check for code that failed | |
760 | * and try and fix it as appropriate | |
761 | */ | |
762 | if(sc->failed_ring == 1){ | |
763 | /* | |
764 | * Failed to set up the recv/xmit ring | |
765 | * Try again | |
766 | */ | |
767 | sc->failed_ring = 0; | |
768 | lmc_softreset(sc); | |
769 | } | |
770 | if(sc->failed_recv_alloc == 1){ | |
771 | /* | |
772 | * We failed to alloc mem in the | |
773 | * interrupt handler, go through the rings | |
774 | * and rebuild them | |
775 | */ | |
776 | sc->failed_recv_alloc = 0; | |
777 | lmc_softreset(sc); | |
778 | } | |
779 | ||
780 | ||
781 | /* | |
782 | * remember the timer value | |
783 | */ | |
784 | kick_timer: | |
785 | ||
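 | /* | |
 |  * The general-purpose timer counts down; reload it with all ones and | |
 |  * record how far the low 16 bits have fallen since the last reload. | |
 |  */ | |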
786 | ticks = LMC_CSR_READ (sc, csr_gp_timer); | |
787 | LMC_CSR_WRITE (sc, csr_gp_timer, 0xffffffffUL); | |
788 | sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff); | |
789 | ||
790 | /* | |
791 | * restart this timer. | |
792 | */ | |
793 | sc->timer.expires = jiffies + (HZ); | |
794 | add_timer (&sc->timer); | |
795 | ||
796 | spin_unlock_irqrestore(&sc->lmc_lock, flags); | |
797 | ||
798 | lmc_trace(dev, "lmc_watchdog out"); | |
799 | ||
800 | } | |
801 | ||
64bef763 KH |
802 | static int lmc_attach(struct net_device *dev, unsigned short encoding, |
803 | unsigned short parity) | |
1da177e4 | 804 | { |
64bef763 KH |
805 | if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) |
806 | return 0; | |
807 | return -EINVAL; | |
1da177e4 LT |
808 | } |
809 | ||
991990a1 KH |
810 | static const struct net_device_ops lmc_ops = { |
811 | .ndo_open = lmc_open, | |
812 | .ndo_stop = lmc_close, | |
813 | .ndo_change_mtu = hdlc_change_mtu, | |
814 | .ndo_start_xmit = hdlc_start_xmit, | |
815 | .ndo_do_ioctl = lmc_ioctl, | |
816 | .ndo_tx_timeout = lmc_driver_timeout, | |
817 | .ndo_get_stats = lmc_get_stats, | |
818 | }; | |
819 | ||
1da177e4 LT |
820 | static int __devinit lmc_init_one(struct pci_dev *pdev, |
821 | const struct pci_device_id *ent) | |
822 | { | |
64bef763 KH |
823 | lmc_softc_t *sc; |
824 | struct net_device *dev; | |
825 | u16 subdevice; | |
826 | u16 AdapModelNum; | |
827 | int err; | |
828 | static int cards_found; | |
829 | ||
830 | /* lmc_trace(dev, "lmc_init_one in"); */ | |
831 | ||
832 | err = pci_enable_device(pdev); | |
833 | if (err) { | |
834 | printk(KERN_ERR "lmc: pci enable failed: %d\n", err); | |
835 | return err; | |
836 | } | |
1da177e4 | 837 | |
64bef763 KH |
838 | err = pci_request_regions(pdev, "lmc"); |
839 | if (err) { | |
840 | printk(KERN_ERR "lmc: pci_request_region failed\n"); | |
841 | goto err_req_io; | |
842 | } | |
1da177e4 | 843 | |
64bef763 KH |
844 | /* |
845 | * Allocate our own device structure | |
846 | */ | |
847 | sc = kzalloc(sizeof(lmc_softc_t), GFP_KERNEL); | |
848 | if (!sc) { | |
849 | err = -ENOMEM; | |
850 | goto err_kzalloc; | |
851 | } | |
1da177e4 | 852 | |
64bef763 KH |
853 | dev = alloc_hdlcdev(sc); |
854 | if (!dev) { | |
855 | printk(KERN_ERR "lmc:alloc_netdev for device failed\n"); | |
856 | goto err_hdlcdev; | |
857 | } | |
1da177e4 | 858 | |
1da177e4 | 859 | |
64bef763 KH |
860 | dev->type = ARPHRD_HDLC; |
861 | dev_to_hdlc(dev)->xmit = lmc_start_xmit; | |
862 | dev_to_hdlc(dev)->attach = lmc_attach; | |
991990a1 | 863 | dev->netdev_ops = &lmc_ops; |
64bef763 KH |
864 | dev->watchdog_timeo = HZ; /* 1 second */ |
865 | dev->tx_queue_len = 100; | |
866 | sc->lmc_device = dev; | |
867 | sc->name = dev->name; | |
868 | sc->if_type = LMC_PPP; | |
869 | sc->check = 0xBEAFCAFE; | |
870 | dev->base_addr = pci_resource_start(pdev, 0); | |
871 | dev->irq = pdev->irq; | |
872 | pci_set_drvdata(pdev, dev); | |
873 | SET_NETDEV_DEV(dev, &pdev->dev); | |
874 | ||
875 | /* | |
876 | * This will get the protocol layer ready and do any one-time init. | |
877 | * Must have a valid sc and dev structure. | |
878 | */ | |
879 | lmc_proto_attach(sc); | |
880 | ||
881 | /* Init the spin lock so we can use it later */ | |
882 | ||
883 | spin_lock_init(&sc->lmc_lock); | |
884 | pci_set_master(pdev); | |
885 | ||
886 | printk(KERN_INFO "%s: detected at %lx, irq %d\n", dev->name, | |
887 | dev->base_addr, dev->irq); | |
888 | ||
889 | err = register_hdlc_device(dev); | |
890 | if (err) { | |
891 | printk(KERN_ERR "%s: register_netdev failed.\n", dev->name); | |
892 | free_netdev(dev); | |
893 | goto err_hdlcdev; | |
894 | } | |
1da177e4 LT |
895 | |
896 | sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN; | |
897 | sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT; | |
898 | ||
899 | /* | |
900 | * | |
901 | * Check either the subvendor or the subdevice; some systems reverse | |
902 | * the setting in the BIOS, which seems to be version- and arch-dependent. | |
903 | * Fix the error by exchanging the two values. | |
904 | */ | |
905 | if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC) | |
906 | subdevice = pdev->subsystem_vendor; | |
907 | ||
908 | switch (subdevice) { | |
909 | case PCI_DEVICE_ID_LMC_HSSI: | |
64bef763 | 910 | printk(KERN_INFO "%s: LMC HSSI\n", dev->name); |
1da177e4 LT |
911 | sc->lmc_cardtype = LMC_CARDTYPE_HSSI; |
912 | sc->lmc_media = &lmc_hssi_media; | |
913 | break; | |
914 | case PCI_DEVICE_ID_LMC_DS3: | |
64bef763 | 915 | printk(KERN_INFO "%s: LMC DS3\n", dev->name); |
1da177e4 LT |
916 | sc->lmc_cardtype = LMC_CARDTYPE_DS3; |
917 | sc->lmc_media = &lmc_ds3_media; | |
918 | break; | |
919 | case PCI_DEVICE_ID_LMC_SSI: | |
64bef763 | 920 | printk(KERN_INFO "%s: LMC SSI\n", dev->name); |
1da177e4 LT |
921 | sc->lmc_cardtype = LMC_CARDTYPE_SSI; |
922 | sc->lmc_media = &lmc_ssi_media; | |
923 | break; | |
924 | case PCI_DEVICE_ID_LMC_T1: | |
64bef763 | 925 | printk(KERN_INFO "%s: LMC T1\n", dev->name); |
1da177e4 LT |
926 | sc->lmc_cardtype = LMC_CARDTYPE_T1; |
927 | sc->lmc_media = &lmc_t1_media; | |
928 | break; | |
929 | default: | |
af901ca1 | 930 | printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name); |
1da177e4 LT |
931 | break; |
932 | } | |
933 | ||
934 | lmc_initcsrs (sc, dev->base_addr, 8); | |
935 | ||
936 | lmc_gpio_mkinput (sc, 0xff); | |
937 | sc->lmc_gpio = 0; /* drive no signals yet */ | |
938 | ||
939 | sc->lmc_media->defaults (sc); | |
940 | ||
941 | sc->lmc_media->set_link_status (sc, LMC_LINK_UP); | |
942 | ||
943 | /* verify that the PCI Sub System ID matches the Adapter Model number | |
944 | * from the MII register | |
945 | */ | |
946 | AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4; | |
947 | ||
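 | /* | |
 |  * Warn unless the model number read back from the MII agrees with the | |
 |  * PCI subsystem ID for at least one known card type. | |
 |  */ | |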
64bef763 KH |
948 | if ((AdapModelNum != LMC_ADAP_T1 || /* detect LMC1200 */ |
949 | subdevice != PCI_DEVICE_ID_LMC_T1) && | |
950 | (AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */ | |
951 | subdevice != PCI_DEVICE_ID_LMC_SSI) && | |
952 | (AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */ | |
953 | subdevice != PCI_DEVICE_ID_LMC_DS3) && | |
954 | (AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */ | |
955 | subdevice != PCI_DEVICE_ID_LMC_HSSI)) | |
956 | printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI" | |
957 | " Subsystem ID = 0x%04x\n", | |
958 | dev->name, AdapModelNum, subdevice); | |
1da177e4 | 959 | |
1da177e4 LT |
960 | /* |
961 | * reset clock | |
962 | */ | |
963 | LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL); | |
964 | ||
965 | sc->board_idx = cards_found++; | |
64bef763 KH |
966 | sc->extra_stats.check = STATCHECK; |
967 | sc->extra_stats.version_size = (DRIVER_VERSION << 16) + | |
968 | sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats); | |
969 | sc->extra_stats.lmc_cardtype = sc->lmc_cardtype; | |
1da177e4 LT |
970 | |
971 | sc->lmc_ok = 0; | |
972 | sc->last_link_status = 0; | |
973 | ||
974 | lmc_trace(dev, "lmc_init_one out"); | |
975 | return 0; | |
976 | ||
64bef763 KH |
977 | err_hdlcdev: |
978 | pci_set_drvdata(pdev, NULL); | |
979 | kfree(sc); | |
980 | err_kzalloc: | |
981 | pci_release_regions(pdev); | |
982 | err_req_io: | |
983 | pci_disable_device(pdev); | |
984 | return err; | |
1da177e4 LT |
985 | } |
986 | ||
987 | /* | |
988 | * Called from pci when removing module. | |
989 | */ | |
64bef763 | 990 | static void __devexit lmc_remove_one(struct pci_dev *pdev) |
1da177e4 | 991 | { |
64bef763 KH |
992 | struct net_device *dev = pci_get_drvdata(pdev); |
993 | ||
994 | if (dev) { | |
995 | printk(KERN_DEBUG "%s: removing...\n", dev->name); | |
996 | unregister_hdlc_device(dev); | |
997 | free_netdev(dev); | |
998 | pci_release_regions(pdev); | |
999 | pci_disable_device(pdev); | |
1000 | pci_set_drvdata(pdev, NULL); | |
1001 | } | |
1da177e4 LT |
1002 | } |
1003 | ||
1004 | /* After this is called, packets can be sent. | |
1005 | * Does not initialize the addresses | |
1006 | */ | |
64bef763 | 1007 | static int lmc_open(struct net_device *dev) |
1da177e4 | 1008 | { |
64bef763 KH |
1009 | lmc_softc_t *sc = dev_to_sc(dev); |
1010 | int err; | |
1da177e4 LT |
1011 | |
1012 | lmc_trace(dev, "lmc_open in"); | |
1013 | ||
1014 | lmc_led_on(sc, LMC_DS3_LED0); | |
1015 | ||
64bef763 KH |
1016 | lmc_dec_reset(sc); |
1017 | lmc_reset(sc); | |
1da177e4 | 1018 | |
64bef763 KH |
1019 | LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0); |
1020 | LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16), | |
1021 | lmc_mii_readreg(sc, 0, 17)); | |
1da177e4 LT |
1022 | |
1023 | if (sc->lmc_ok){ | |
1024 | lmc_trace(dev, "lmc_open lmc_ok out"); | |
1025 | return (0); | |
1026 | } | |
1027 | ||
1028 | lmc_softreset (sc); | |
1029 | ||
1030 | /* Since we have to use PCI bus, this should work on x86,alpha,ppc */ | |
a0607fd3 | 1031 | if (request_irq (dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)){ |
1da177e4 LT |
1032 | printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq); |
1033 | lmc_trace(dev, "lmc_open irq failed out"); | |
1034 | return -EAGAIN; | |
1035 | } | |
1036 | sc->got_irq = 1; | |
1037 | ||
1038 | /* Assert Terminal Active */ | |
1039 | sc->lmc_miireg16 |= LMC_MII16_LED_ALL; | |
1040 | sc->lmc_media->set_link_status (sc, LMC_LINK_UP); | |
1041 | ||
1042 | /* | |
1043 | * reset to last state. | |
1044 | */ | |
1045 | sc->lmc_media->set_status (sc, NULL); | |
1046 | ||
1047 | /* setup default bits to be used in tulip_desc_t transmit descriptor | |
1048 | * -baz */ | |
1049 | sc->TxDescriptControlInit = ( | |
1050 | LMC_TDES_INTERRUPT_ON_COMPLETION | |
1051 | | LMC_TDES_FIRST_SEGMENT | |
1052 | | LMC_TDES_LAST_SEGMENT | |
1053 | | LMC_TDES_SECOND_ADDR_CHAINED | |
1054 | | LMC_TDES_DISABLE_PADDING | |
1055 | ); | |
1056 | ||
1057 | if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) { | |
1058 | /* disable 32 bit CRC generated by ASIC */ | |
1059 | sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE; | |
1060 | } | |
1061 | sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length); | |
1062 | /* Acknowledge the Terminal Active and light LEDs */ | |
1063 | ||
1064 | /* dev->flags |= IFF_UP; */ | |
1065 | ||
64bef763 KH |
1066 | if ((err = lmc_proto_open(sc)) != 0) |
1067 | return err; | |
1da177e4 | 1068 | |
1da177e4 | 1069 | netif_start_queue(dev); |
64bef763 | 1070 | sc->extra_stats.tx_tbusy0++; |
1da177e4 LT |
1071 | |
1072 | /* | |
1073 | * select what interrupts we want to get | |
1074 | */ | |
1075 | sc->lmc_intrmask = 0; | |
1076 | /* Should be using the default interrupt mask defined in the .h file. */ | |
1077 | sc->lmc_intrmask |= (TULIP_STS_NORMALINTR | |
1078 | | TULIP_STS_RXINTR | |
1079 | | TULIP_STS_TXINTR | |
1080 | | TULIP_STS_ABNRMLINTR | |
1081 | | TULIP_STS_SYSERROR | |
1082 | | TULIP_STS_TXSTOPPED | |
1083 | | TULIP_STS_TXUNDERFLOW | |
1084 | | TULIP_STS_RXSTOPPED | |
1085 | | TULIP_STS_RXNOBUF | |
1086 | ); | |
1087 | LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask); | |
1088 | ||
1089 | sc->lmc_cmdmode |= TULIP_CMD_TXRUN; | |
1090 | sc->lmc_cmdmode |= TULIP_CMD_RXRUN; | |
1091 | LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode); | |
1092 | ||
1093 | sc->lmc_ok = 1; /* Run watchdog */ | |
1094 | ||
1095 | /* | |
1096 | * Set the if up now - pfb | |
1097 | */ | |
1098 | ||
1099 | sc->last_link_status = 1; | |
1100 | ||
1101 | /* | |
1102 | * Setup a timer for the watchdog on probe, and start it running. | |
1103 | * Since lmc_ok == 0, it will be a NOP for now. | |
1104 | */ | |
1105 | init_timer (&sc->timer); | |
1106 | sc->timer.expires = jiffies + HZ; | |
1107 | sc->timer.data = (unsigned long) dev; | |
1108 | sc->timer.function = &lmc_watchdog; | |
1109 | add_timer (&sc->timer); | |
1110 | ||
1111 | lmc_trace(dev, "lmc_open out"); | |
1112 | ||
1113 | return (0); | |
1114 | } | |
1115 | ||
1116 | /* Total reset to compensate for the AdTran DSU doing bad things | |
1117 | * under heavy load | |
1118 | */ | |
1119 | ||
1120 | static void lmc_running_reset (struct net_device *dev) /*fold00*/ | |
1121 | { | |
64bef763 | 1122 | lmc_softc_t *sc = dev_to_sc(dev); |
1da177e4 LT |
1123 | |
1124 | lmc_trace(dev, "lmc_runnig_reset in"); | |
1125 | ||
1126 | /* stop interrupts */ | |
1127 | /* Clear the interrupt mask */ | |
1128 | LMC_CSR_WRITE (sc, csr_intr, 0x00000000); | |
1129 | ||
1130 | lmc_dec_reset (sc); | |
1131 | lmc_reset (sc); | |
1132 | lmc_softreset (sc); | |
1133 | /* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */ | |
1134 | sc->lmc_media->set_link_status (sc, 1); | |
1135 | sc->lmc_media->set_status (sc, NULL); | |
1136 | ||
1da177e4 LT |
1137 | netif_wake_queue(dev); |
1138 | ||
1139 | sc->lmc_txfull = 0; | |
64bef763 | 1140 | sc->extra_stats.tx_tbusy0++; |
1da177e4 LT |
1141 | |
1142 | sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK; | |
1143 | LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask); | |
1144 | ||
1145 | sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN); | |
1146 | LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode); | |
1147 | ||
1148 | lmc_trace(dev, "lmc_runnin_reset_out"); | |
1149 | } | |
1150 | ||
1151 | ||
1152 | /* This is what is called when you ifconfig down a device. | |
1153 | * This disables the timer for the watchdog and keepalives, | |
1154 | * and disables the irq for dev. | |
1155 | */ | |
64bef763 | 1156 | static int lmc_close(struct net_device *dev) |
1da177e4 LT |
1157 | { |
1158 | /* not calling release_region() as we should */ | |
64bef763 | 1159 | lmc_softc_t *sc = dev_to_sc(dev); |
1da177e4 LT |
1160 | |
1161 | lmc_trace(dev, "lmc_close in"); | |
64bef763 | 1162 | |
1da177e4 LT |
1163 | sc->lmc_ok = 0; |
1164 | sc->lmc_media->set_link_status (sc, 0); | |
1165 | del_timer (&sc->timer); | |
1166 | lmc_proto_close(sc); | |
1167 | lmc_ifdown (dev); | |
1168 | ||
1169 | lmc_trace(dev, "lmc_close out"); | |
64bef763 | 1170 | |
1da177e4 LT |
1171 | return 0; |
1172 | } | |
1173 | ||
1174 | /* Ends the transfer of packets */ | |
1175 | /* When the interface goes down, this is called */ | |
1176 | static int lmc_ifdown (struct net_device *dev) /*fold00*/ | |
1177 | { | |
64bef763 | 1178 | lmc_softc_t *sc = dev_to_sc(dev); |
1da177e4 LT |
1179 | u32 csr6; |
1180 | int i; | |
1181 | ||
1182 | lmc_trace(dev, "lmc_ifdown in"); | |
64bef763 | 1183 | |
1da177e4 LT |
1184 | /* Don't let anything else go on right now */ |
1185 | // dev->start = 0; | |
1186 | netif_stop_queue(dev); | |
64bef763 | 1187 | sc->extra_stats.tx_tbusy1++; |
1da177e4 LT |
1188 | |
1189 | /* stop interrupts */ | |
1190 | /* Clear the interrupt mask */ | |
1191 | LMC_CSR_WRITE (sc, csr_intr, 0x00000000); | |
1192 | ||
1193 | /* Stop Tx and Rx on the chip */ | |
1194 | csr6 = LMC_CSR_READ (sc, csr_command); | |
1195 | csr6 &= ~LMC_DEC_ST; /* Turn off the Transmission bit */ | |
1196 | csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */ | |
1197 | LMC_CSR_WRITE (sc, csr_command, csr6); | |
1198 | ||
64bef763 KH |
1199 | sc->lmc_device->stats.rx_missed_errors += |
1200 | LMC_CSR_READ(sc, csr_missed_frames) & 0xffff; | |
1da177e4 LT |
1201 | |
1202 | /* release the interrupt */ | |
1203 | if(sc->got_irq == 1){ | |
1204 | free_irq (dev->irq, dev); | |
1205 | sc->got_irq = 0; | |
1206 | } | |
1207 | ||
1208 | /* free skbuffs in the Rx queue */ | |
1209 | for (i = 0; i < LMC_RXDESCS; i++) | |
1210 | { | |
1211 | struct sk_buff *skb = sc->lmc_rxq[i]; | |
1212 | sc->lmc_rxq[i] = NULL; | |
1213 | sc->lmc_rxring[i].status = 0; | |
1214 | sc->lmc_rxring[i].length = 0; | |
1215 | sc->lmc_rxring[i].buffer1 = 0xDEADBEEF; | |
1216 | if (skb != NULL) | |
1217 | dev_kfree_skb(skb); | |
1218 | sc->lmc_rxq[i] = NULL; | |
1219 | } | |
1220 | ||
1221 | for (i = 0; i < LMC_TXDESCS; i++) | |
1222 | { | |
1223 | if (sc->lmc_txq[i] != NULL) | |
1224 | dev_kfree_skb(sc->lmc_txq[i]); | |
1225 | sc->lmc_txq[i] = NULL; | |
1226 | } | |
1227 | ||
1228 | lmc_led_off (sc, LMC_MII16_LED_ALL); | |
1229 | ||
1230 | netif_wake_queue(dev); | |
64bef763 | 1231 | sc->extra_stats.tx_tbusy0++; |
1da177e4 LT |
1232 | |
1233 | lmc_trace(dev, "lmc_ifdown out"); | |
1234 | ||
1235 | return 0; | |
1236 | } | |
1237 | ||
1238 | /* Interrupt handling routine. This will take an incoming packet, or clean | |
1239 | * up after a transmit. | |
1240 | */ | |
7d12e780 | 1241 | static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ |
1da177e4 LT |
1242 | { |
1243 | struct net_device *dev = (struct net_device *) dev_instance; | |
64bef763 | 1244 | lmc_softc_t *sc = dev_to_sc(dev); |
1da177e4 LT |
1245 | u32 csr; |
1246 | int i; | |
1247 | s32 stat; | |
1248 | unsigned int badtx; | |
1249 | u32 firstcsr; | |
1250 | int max_work = LMC_RXDESCS; | |
1251 | int handled = 0; | |
1252 | ||
1253 | lmc_trace(dev, "lmc_interrupt in"); | |
1254 | ||
1da177e4 LT |
1255 | spin_lock(&sc->lmc_lock); |
1256 | ||
1257 | /* | |
1258 | * Read the csr to find what interrupts we have (if any) | |
1259 | */ | |
1260 | csr = LMC_CSR_READ (sc, csr_status); | |
1261 | ||
1262 | /* | |
1263 | * Make sure this is our interrupt | |
1264 | */ | |
1265 | if ( ! (csr & sc->lmc_intrmask)) { | |
1266 | goto lmc_int_fail_out; | |
1267 | } | |
1268 | ||
1269 | firstcsr = csr; | |
1270 | ||
1271 | /* always go through this loop at least once */ | |
1272 | while (csr & sc->lmc_intrmask) { | |
1273 | handled = 1; | |
1274 | ||
1275 | /* | |
1276 | * Clear interrupt bits; we handle all cases below | |
1277 | */ | |
1278 | LMC_CSR_WRITE (sc, csr_status, csr); | |
1279 | ||
1280 | /* | |
1281 | * One of | |
1282 | * - Transmit process timed out CSR5<1> | |
1283 | * - Transmit jabber timeout CSR5<3> | |
1284 | * - Transmit underflow CSR5<5> | |
1285 | * - Receive buffer unavailable CSR5<7> | |
1286 | * - Receive process stopped CSR5<8> | |
1287 | * - Receive watchdog timeout CSR5<9> | |
1288 | * - Early transmit interrupt CSR5<10> | |
1289 | * | |
1290 | * Is this really right? Should we do a running reset for jabber? | |
1291 | * (being a WAN card and all) | |
1292 | */ | |
1293 | if (csr & TULIP_STS_ABNRMLINTR){ | |
1294 | lmc_running_reset (dev); | |
1295 | break; | |
1296 | } | |
1297 | ||
1298 | if (csr & TULIP_STS_RXINTR){ | |
1299 | lmc_trace(dev, "rx interrupt"); | |
1300 | lmc_rx (dev); | |
1301 | ||
1302 | } | |
1303 | if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) { | |
1304 | ||
1305 | int n_compl = 0 ; | |
1306 | /* reset the transmit timeout detection flag -baz */ | |
64bef763 | 1307 | sc->extra_stats.tx_NoCompleteCnt = 0; |
1da177e4 LT |
1308 | |
1309 | badtx = sc->lmc_taint_tx; | |
1310 | i = badtx % LMC_TXDESCS; | |
1311 | ||
1312 | while ((badtx < sc->lmc_next_tx)) { | |
1313 | stat = sc->lmc_txring[i].status; | |
1314 | ||
1315 | LMC_EVENT_LOG (LMC_EVENT_XMTINT, stat, | |
1316 | sc->lmc_txring[i].length); | |
1317 | /* | |
1318 | * If bit 31 is 1 the tulip owns it break out of the loop | |
1319 | */ | |
1320 | if (stat & 0x80000000) | |
1321 | break; | |
1322 | ||
1323 | n_compl++ ; /* i.e., have an empty slot in ring */ | |
1324 | /* | |
1325 | * If we have no skbuff or have already cleared it, | |
1326 | * continue to the next buffer | |
1327 | */ | |
1328 | if (sc->lmc_txq[i] == NULL) | |
1329 | continue; | |
1330 | ||
64bef763 KH |
1331 | /* |
1332 | * Check the total error summary to look for any errors | |
1333 | */ | |
1334 | if (stat & 0x8000) { | |
1335 | sc->lmc_device->stats.tx_errors++; | |
1336 | if (stat & 0x4104) | |
1337 | sc->lmc_device->stats.tx_aborted_errors++; | |
1338 | if (stat & 0x0C00) | |
1339 | sc->lmc_device->stats.tx_carrier_errors++; | |
1340 | if (stat & 0x0200) | |
1341 | sc->lmc_device->stats.tx_window_errors++; | |
1342 | if (stat & 0x0002) | |
1343 | sc->lmc_device->stats.tx_fifo_errors++; | |
1344 | } else { | |
1345 | sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff; | |
1346 | ||
1347 | sc->lmc_device->stats.tx_packets++; | |
1da177e4 | 1348 | } |
64bef763 | 1349 | |
1da177e4 LT |
1350 | // dev_kfree_skb(sc->lmc_txq[i]); |
1351 | dev_kfree_skb_irq(sc->lmc_txq[i]); | |
1352 | sc->lmc_txq[i] = NULL; | |
1353 | ||
1354 | badtx++; | |
1355 | i = badtx % LMC_TXDESCS; | |
1356 | } | |
1357 | ||
1358 | if (sc->lmc_next_tx - badtx > LMC_TXDESCS) | |
1359 | { | |
1360 | printk ("%s: out of sync pointer\n", dev->name); | |
1361 | badtx += LMC_TXDESCS; | |
1362 | } | |
1363 | LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0); | |
1364 | sc->lmc_txfull = 0; | |
1365 | netif_wake_queue(dev); | |
64bef763 | 1366 | sc->extra_stats.tx_tbusy0++; |
1da177e4 LT |
1367 | |
1368 | ||
1369 | #ifdef DEBUG | |
64bef763 KH |
1370 | sc->extra_stats.dirtyTx = badtx; |
1371 | sc->extra_stats.lmc_next_tx = sc->lmc_next_tx; | |
1372 | sc->extra_stats.lmc_txfull = sc->lmc_txfull; | |
1da177e4 LT |
1373 | #endif |
1374 | sc->lmc_taint_tx = badtx; | |
1375 | ||
1376 | /* | |
1377 | * Why was there a break here??? | |
1378 | */ | |
1379 | } /* end handle transmit interrupt */ | |
1380 | ||
1381 | if (csr & TULIP_STS_SYSERROR) { | |
1382 | u32 error; | |
1383 | printk (KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr); | |
1384 | error = csr>>23 & 0x7; | |
1385 | switch(error){ | |
1386 | case 0x000: | |
1387 | printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name); | |
1388 | break; | |
1389 | case 0x001: | |
1390 | printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name); | |
1391 | break; | |
1392 | case 0x010: | |
1393 | printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name); | |
1394 | break; | |
1395 | default: | |
1396 | printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name); | |
1397 | } | |
1398 | lmc_dec_reset (sc); | |
1399 | lmc_reset (sc); | |
1400 | LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0); | |
1401 | LMC_EVENT_LOG(LMC_EVENT_RESET2, | |
1402 | lmc_mii_readreg (sc, 0, 16), | |
1403 | lmc_mii_readreg (sc, 0, 17)); | |
1404 | ||
1405 | } | |
1406 | ||
1407 | ||
1408 | if(max_work-- <= 0) | |
1409 | break; | |
1410 | ||
1411 | /* | |
1412 | * Get current csr status to make sure | |
1413 | * we've cleared all interrupts | |
1414 | */ | |
1415 | csr = LMC_CSR_READ (sc, csr_status); | |
1416 | } /* end interrupt loop */ | |
1417 | LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr); | |
1418 | ||
1419 | lmc_int_fail_out: | |
1420 | ||
1421 | spin_unlock(&sc->lmc_lock); | |
1422 | ||
1423 | lmc_trace(dev, "lmc_interrupt out"); | |
1424 | return IRQ_RETVAL(handled); | |
1425 | } | |
1426 | ||
d71a6749 SH |
1427 | static netdev_tx_t lmc_start_xmit(struct sk_buff *skb, |
1428 | struct net_device *dev) | |
1da177e4 | 1429 | { |
64bef763 | 1430 | lmc_softc_t *sc = dev_to_sc(dev); |
1da177e4 LT |
1431 | u32 flag; |
1432 | int entry; | |
1da177e4 LT |
1433 | unsigned long flags; |
1434 | ||
1435 | lmc_trace(dev, "lmc_start_xmit in"); | |
1436 | ||
1da177e4 LT |
1437 | spin_lock_irqsave(&sc->lmc_lock, flags); |
1438 | ||
1439 | /* normal path, tbusy known to be zero */ | |
1440 | ||
1441 | entry = sc->lmc_next_tx % LMC_TXDESCS; | |
1442 | ||
1443 | sc->lmc_txq[entry] = skb; | |
1444 | sc->lmc_txring[entry].buffer1 = virt_to_bus (skb->data); | |
1445 | ||
1446 | LMC_CONSOLE_LOG("xmit", skb->data, skb->len); | |
1447 | ||
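 | /* | |
 |  * Completion-interrupt coalescing (non-GCOM path below): only the | |
 |  * descriptors at the half-full and completely-full marks request an | |
 |  * interrupt (0xe0000000); the rest complete silently (0x60000000) to | |
 |  * keep the interrupt rate down. | |
 |  */ | |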
1448 | #ifndef GCOM | |
1449 | /* If the queue is less than half full, don't interrupt */ | |
1450 | if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2) | |
1451 | { | |
1452 | /* Do not interrupt on completion of this packet */ | |
1453 | flag = 0x60000000; | |
1454 | netif_wake_queue(dev); | |
1455 | } | |
1456 | else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2) | |
1457 | { | |
1458 | /* This generates an interrupt on completion of this packet */ | |
1459 | flag = 0xe0000000; | |
1460 | netif_wake_queue(dev); | |
1461 | } | |
1462 | else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1) | |
1463 | { | |
1464 | /* Do not interrupt on completion of this packet */ | |
1465 | flag = 0x60000000; | |
1466 | netif_wake_queue(dev); | |
1467 | } | |
1468 | else | |
1469 | { | |
1470 | /* This generates an interrupt on completion of this packet */ | |
1471 | flag = 0xe0000000; | |
1472 | sc->lmc_txfull = 1; | |
1473 | netif_stop_queue(dev); | |
1474 | } | |
1475 | #else | |
1476 | flag = LMC_TDES_INTERRUPT_ON_COMPLETION; | |
1477 | ||
1478 | if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1) | |
1479 | { /* ring full, go busy */ | |
1480 | sc->lmc_txfull = 1; | |
64bef763 KH |
1481 | netif_stop_queue(dev); |
1482 | sc->extra_stats.tx_tbusy1++; | |
1da177e4 LT |
1483 | LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0); |
1484 | } | |
1485 | #endif | |
1486 | ||
1487 | ||
1488 | if (entry == LMC_TXDESCS - 1) /* last descriptor in ring */ | |
1489 | flag |= LMC_TDES_END_OF_RING; /* flag as such for Tulip */ | |
1490 | ||
1491 | /* don't pad small packets either */ | |
1492 | flag = sc->lmc_txring[entry].length = (skb->len) | flag | | |
1493 | sc->TxDescriptControlInit; | |
1494 | ||
1495 | /* set the transmit timeout flag to be checked in | |
1496 | * the watchdog timer handler. -baz | |
1497 | */ | |
1498 | ||
64bef763 | 1499 | sc->extra_stats.tx_NoCompleteCnt++; |
1da177e4 LT |
1500 | sc->lmc_next_tx++; |
1501 | ||
1502 | /* give ownership to the chip */ | |
1503 | LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry); | |
1504 | sc->lmc_txring[entry].status = 0x80000000; | |
1505 | ||
1506 | /* send now! */ | |
1507 | LMC_CSR_WRITE (sc, csr_txpoll, 0); | |
1508 | ||
1da177e4 LT |
1509 | spin_unlock_irqrestore(&sc->lmc_lock, flags); |
1510 | ||
1511 | lmc_trace(dev, "lmc_start_xmit_out"); | |
d71a6749 | 1512 | return NETDEV_TX_OK; |
1da177e4 LT |
1513 | } |
1514 | ||
1515 | ||
64bef763 | 1516 | static int lmc_rx(struct net_device *dev) |
1da177e4 | 1517 | { |
64bef763 | 1518 | lmc_softc_t *sc = dev_to_sc(dev); |
1da177e4 LT |
1519 | int i; |
1520 | int rx_work_limit = LMC_RXDESCS; | |
1521 | unsigned int next_rx; | |
1522 | int rxIntLoopCnt; /* debug -baz */ | |
1523 | int localLengthErrCnt = 0; | |
1524 | long stat; | |
1525 | struct sk_buff *skb, *nsb; | |
1526 | u16 len; | |
1527 | ||
1528 | lmc_trace(dev, "lmc_rx in"); | |
1529 | ||
1da177e4 LT |
1530 | lmc_led_on(sc, LMC_DS3_LED3); |
1531 | ||
1532 | rxIntLoopCnt = 0; /* debug -baz */ | |
1533 | ||
1534 | i = sc->lmc_next_rx % LMC_RXDESCS; | |
1535 | next_rx = sc->lmc_next_rx; | |
1536 | ||
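 | /* | |
 |  * Walk the receive ring until we reach a descriptor the DC21x4 still | |
 |  * owns; each completed descriptor is handed up (or dropped on error) | |
 |  * and then given back to the chip at skip_packet. | |
 |  */ | |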
1537 | while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4) | |
1538 | { | |
1539 | rxIntLoopCnt++; /* debug -baz */ | |
1540 | len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER); | |
1541 | if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */ | |
64bef763 KH |
1542 | if ((stat & 0x0000ffff) != 0x7fff) { |
1543 | /* Oversized frame */ | |
1544 | sc->lmc_device->stats.rx_length_errors++; | |
1545 | goto skip_packet; | |
1546 | } | |
1547 | } | |
1da177e4 | 1548 | |
64bef763 KH |
1549 | if (stat & 0x00000008) { /* Catch a dribbling bit error */ |
1550 | sc->lmc_device->stats.rx_errors++; | |
1551 | sc->lmc_device->stats.rx_frame_errors++; | |
1552 | goto skip_packet; | |
1553 | } | |
1da177e4 | 1554 | |
1da177e4 | 1555 | |
64bef763 KH |
1556 | if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */ |
1557 | sc->lmc_device->stats.rx_errors++; | |
1558 | sc->lmc_device->stats.rx_crc_errors++; | |
1559 | goto skip_packet; | |
1560 | } | |
1da177e4 | 1561 | |
64bef763 KH |
1562 | if (len > LMC_PKT_BUF_SZ) { |
1563 | sc->lmc_device->stats.rx_length_errors++; | |
1564 | localLengthErrCnt++; | |
1565 | goto skip_packet; | |
1566 | } | |
1da177e4 | 1567 | |
64bef763 KH |
1568 | if (len < sc->lmc_crcSize + 2) { |
1569 | sc->lmc_device->stats.rx_length_errors++; | |
1570 | sc->extra_stats.rx_SmallPktCnt++; | |
1571 | localLengthErrCnt++; | |
1572 | goto skip_packet; | |
1573 | } | |
1da177e4 LT |
1574 | |
1575 | if(stat & 0x00004000){ | |
1576 | printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name); | |
1577 | } | |
1578 | ||
1579 | len -= sc->lmc_crcSize; | |
1580 | ||
1581 | skb = sc->lmc_rxq[i]; | |
1582 | ||
1583 | /* | |
1584 | * We ran out of memory at some point | |
1585 | * just allocate an skb buff and continue. | |
1586 | */ | |
1587 | ||
79ea13ce | 1588 | if (!skb) { |
1da177e4 LT |
1589 | nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2); |
1590 | if (nsb) { | |
1591 | sc->lmc_rxq[i] = nsb; | |
1592 | nsb->dev = dev; | |
27a884dc | 1593 | sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb)); |
1da177e4 LT |
1594 | } |
1595 | sc->failed_recv_alloc = 1; | |
1596 | goto skip_packet; | |
1597 | } | |
1598 | ||
64bef763 KH |
1599 | sc->lmc_device->stats.rx_packets++; |
1600 | sc->lmc_device->stats.rx_bytes += len; | |
1da177e4 LT |
1601 | |
1602 | LMC_CONSOLE_LOG("recv", skb->data, len); | |
1603 | ||
1604 | /* | |
1605 | * I'm not sure of the sanity of this | |
1606 | * Packets could be arriving at a constant | |
1607 | * 44.210 Mbit/s and we're going to copy | |
1608 | * them into a new buffer?? | |
1609 | */ | |
1610 | ||
1611 | if(len > (LMC_MTU - (LMC_MTU>>2))){ /* len > LMC_MTU * 0.75 */ | |
1612 | /* | |
1613 | * If it's a large packet don't copy it just hand it up | |
1614 | */ | |
1615 | give_it_anyways: | |
1616 | ||
1617 | sc->lmc_rxq[i] = NULL; | |
1618 | sc->lmc_rxring[i].buffer1 = 0x0; | |
1619 | ||
1620 | skb_put (skb, len); | |
1621 | skb->protocol = lmc_proto_type(sc, skb); | |
459a98ed | 1622 | skb_reset_mac_header(skb); |
c1d2bbe1 | 1623 | /* skb_reset_network_header(skb); */ |
1da177e4 LT |
1624 | skb->dev = dev; |
1625 | lmc_proto_netif(sc, skb); | |
1626 | ||
1627 | /* | |
1628 | * This skb will be destroyed by the upper layers, make a new one | |
1629 | */ | |
1630 | nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2); | |
1631 | if (nsb) { | |
1632 | sc->lmc_rxq[i] = nsb; | |
1633 | nsb->dev = dev; | |
27a884dc | 1634 | sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb)); |
1da177e4 LT |
1635 | /* Transferred to 21140 below */ |
1636 | } | |
1637 | else { | |
1638 | /* | |
1639 | * We've run out of memory, stop trying to allocate | |
1640 | * memory and exit the interrupt handler | |
1641 | * | |
1642 | * The chip may run out of receive buffers and stop, | |
1643 | * in which case we'll try to allocate the buffer | |
1644 | * again (once a second). | |
1645 | */ | |
64bef763 | 1646 | sc->extra_stats.rx_BuffAllocErr++; |
1da177e4 LT |
1647 | LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len); |
1648 | sc->failed_recv_alloc = 1; | |
1649 | goto skip_out_of_mem; | |
1650 | } | |
1651 | } | |
1652 | else { | |
1653 | nsb = dev_alloc_skb(len); | |
1654 | if(!nsb) { | |
1655 | goto give_it_anyways; | |
1656 | } | |
d626f62b | 1657 | skb_copy_from_linear_data(skb, skb_put(nsb, len), len); |
1da177e4 | 1658 | |
38edb5b8 | 1659 | nsb->protocol = lmc_proto_type(sc, nsb); |
459a98ed | 1660 | skb_reset_mac_header(nsb); |
c1d2bbe1 | 1661 | /* skb_reset_network_header(nsb); */ |
1da177e4 LT |
1662 | nsb->dev = dev; |
1663 | lmc_proto_netif(sc, nsb); | |
1664 | } | |
1665 | ||
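/*
 * Common exit path for handled and dropped packets: hand the
 * descriptor back to the 21140, advance to the next slot and
 * honour the per-interrupt work limit.
 */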
1666 | skip_packet: | |
1667 | LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len); | |
1668 | sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4; | |
1669 | ||
1670 | sc->lmc_next_rx++; | |
1671 | i = sc->lmc_next_rx % LMC_RXDESCS; | |
1672 | rx_work_limit--; | |
1673 | if (rx_work_limit < 0) | |
1674 | break; | |
1675 | } | |
1676 | ||
1677 | /* detect condition for LMC1000 where DSU cable attaches and fills | |
1678 | * descriptors with bogus packets | |
1679 | * | |
1680 | if (localLengthErrCnt > LMC_RXDESCS - 3) { | |
64bef763 KH |
1681 | sc->extra_stats.rx_BadPktSurgeCnt++; |
1682 | LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt, | |
1683 | sc->extra_stats.rx_BadPktSurgeCnt); | |
1da177e4 LT |
1684 | } */ |
1685 | ||
1686 | /* save max count of receive descriptors serviced */ | |
64bef763 KH |
1687 | if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt) |
1688 | sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */ | |
1da177e4 LT |
1689 | |
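/*
 * Debug aid: if the interrupt fired but no descriptors were serviced,
 * count how many descriptors the host still owns and log it.
 */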
1690 | #ifdef DEBUG | |
1691 | if (rxIntLoopCnt == 0) | |
1692 | { | |
1693 | for (i = 0; i < LMC_RXDESCS; i++) | |
1694 | { | |
1695 | if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT) | |
1696 | != DESC_OWNED_BY_DC21X4) | |
1697 | { | |
1698 | rxIntLoopCnt++; | |
1699 | } | |
1700 | } | |
1701 | LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0); | |
1702 | } | |
1703 | #endif | |
1704 | ||
1705 | ||
1706 | lmc_led_off(sc, LMC_DS3_LED3); | |
1707 | ||
1708 | skip_out_of_mem: | |
1709 | ||
1710 | lmc_trace(dev, "lmc_rx out"); | |
1711 | ||
1712 | return 0; | |
1713 | } | |
1714 | ||
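/*
 * Fold the 21140's missed-frames counter into the generic device
 * statistics (under the lock) and return them.
 */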
64bef763 | 1715 | static struct net_device_stats *lmc_get_stats(struct net_device *dev) |
1da177e4 | 1716 | { |
64bef763 | 1717 | lmc_softc_t *sc = dev_to_sc(dev); |
1da177e4 LT |
1718 | unsigned long flags; |
1719 | ||
1720 | lmc_trace(dev, "lmc_get_stats in"); | |
1721 | ||
1da177e4 LT |
1722 | spin_lock_irqsave(&sc->lmc_lock, flags); |
1723 | ||
64bef763 | 1724 | sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff; |
1da177e4 LT |
1725 | |
1726 | spin_unlock_irqrestore(&sc->lmc_lock, flags); | |
1727 | ||
1728 | lmc_trace(dev, "lmc_get_stats out"); | |
1729 | ||
64bef763 | 1730 | return &sc->lmc_device->stats; |
1da177e4 LT |
1731 | } |
1732 | ||
1733 | static struct pci_driver lmc_driver = { | |
1734 | .name = "lmc", | |
1735 | .id_table = lmc_pci_tbl, | |
1736 | .probe = lmc_init_one, | |
1737 | .remove = __devexit_p(lmc_remove_one), | |
1738 | }; | |
1739 | ||
1740 | static int __init init_lmc(void) | |
1741 | { | |
29917620 | 1742 | return pci_register_driver(&lmc_driver); |
1da177e4 LT |
1743 | } |
1744 | ||
1745 | static void __exit exit_lmc(void) | |
1746 | { | |
1747 | pci_unregister_driver(&lmc_driver); | |
1748 | } | |
1749 | ||
1750 | module_init(init_lmc); | |
1751 | module_exit(exit_lmc); | |
1752 | ||
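/*
 * Bit-banged MII read: clock a 16-bit read command out through CSR9,
 * then clock the result (including turnaround) back in and return the
 * 16 data bits.
 */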
1753 | unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/ | |
1754 | { | |
1755 | int i; | |
1756 | int command = (0xf6 << 10) | (devaddr << 5) | regno; | |
1757 | int retval = 0; | |
1758 | ||
1759 | lmc_trace(sc->lmc_device, "lmc_mii_readreg in"); | |
1760 | ||
1761 | LMC_MII_SYNC (sc); | |
1762 | ||
1763 | lmc_trace(sc->lmc_device, "lmc_mii_readreg: done sync"); | |
1764 | ||
1765 | for (i = 15; i >= 0; i--) | |
1766 | { | |
1767 | int dataval = (command & (1 << i)) ? 0x20000 : 0; | |
1768 | ||
1769 | LMC_CSR_WRITE (sc, csr_9, dataval); | |
1770 | lmc_delay (); | |
1771 | /* __SLOW_DOWN_IO; */ | |
1772 | LMC_CSR_WRITE (sc, csr_9, dataval | 0x10000); | |
1773 | lmc_delay (); | |
1774 | /* __SLOW_DOWN_IO; */ | |
1775 | } | |
1776 | ||
1777 | lmc_trace(sc->lmc_device, "lmc_mii_readreg: done1"); | |
1778 | ||
1779 | for (i = 19; i > 0; i--) | |
1780 | { | |
1781 | LMC_CSR_WRITE (sc, csr_9, 0x40000); | |
1782 | lmc_delay (); | |
1783 | /* __SLOW_DOWN_IO; */ | |
1784 | retval = (retval << 1) | ((LMC_CSR_READ (sc, csr_9) & 0x80000) ? 1 : 0); | |
1785 | LMC_CSR_WRITE (sc, csr_9, 0x40000 | 0x10000); | |
1786 | lmc_delay (); | |
1787 | /* __SLOW_DOWN_IO; */ | |
1788 | } | |
1789 | ||
1790 | lmc_trace(sc->lmc_device, "lmc_mii_readreg out"); | |
1791 | ||
1792 | return (retval >> 1) & 0xffff; | |
1793 | } | |
1794 | ||
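/*
 * Bit-banged MII write: shift the 32-bit frame (start/opcode bits, PHY
 * and register address, turnaround, data) out through CSR9, then give
 * a couple of trailing clocks with the data line released.
 */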
1795 | void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/ | |
1796 | { | |
1797 | int i = 32; | |
1798 | int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data; | |
1799 | ||
1800 | lmc_trace(sc->lmc_device, "lmc_mii_writereg in"); | |
1801 | ||
1802 | LMC_MII_SYNC (sc); | |
1803 | ||
1804 | i = 31; | |
1805 | while (i >= 0) | |
1806 | { | |
1807 | int datav; | |
1808 | ||
1809 | if (command & (1 << i)) | |
1810 | datav = 0x20000; | |
1811 | else | |
1812 | datav = 0x00000; | |
1813 | ||
1814 | LMC_CSR_WRITE (sc, csr_9, datav); | |
1815 | lmc_delay (); | |
1816 | /* __SLOW_DOWN_IO; */ | |
1817 | LMC_CSR_WRITE (sc, csr_9, (datav | 0x10000)); | |
1818 | lmc_delay (); | |
1819 | /* __SLOW_DOWN_IO; */ | |
1820 | i--; | |
1821 | } | |
1822 | ||
1823 | i = 2; | |
1824 | while (i > 0) | |
1825 | { | |
1826 | LMC_CSR_WRITE (sc, csr_9, 0x40000); | |
1827 | lmc_delay (); | |
1828 | /* __SLOW_DOWN_IO; */ | |
1829 | LMC_CSR_WRITE (sc, csr_9, 0x50000); | |
1830 | lmc_delay (); | |
1831 | /* __SLOW_DOWN_IO; */ | |
1832 | i--; | |
1833 | } | |
1834 | ||
1835 | lmc_trace(sc->lmc_device, "lmc_mii_writereg out"); | |
1836 | } | |
1837 | ||
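/*
 * Rebuild the receive and transmit descriptor rings: (re)allocate an
 * skb for every RX slot, chain the descriptors into rings, drop
 * anything still queued for TX, and hand the ring base addresses to
 * the 21140.
 */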
1838 | static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/ | |
1839 | { | |
1840 | int i; | |
1841 | ||
1842 | lmc_trace(sc->lmc_device, "lmc_softreset in"); | |
1843 | ||
1844 | /* Initialize the receive rings and buffers. */ | |
1845 | sc->lmc_txfull = 0; | |
1846 | sc->lmc_next_rx = 0; | |
1847 | sc->lmc_next_tx = 0; | |
1848 | sc->lmc_taint_rx = 0; | |
1849 | sc->lmc_taint_tx = 0; | |
1850 | ||
1851 | /* | |
1852 | * Set up each of the receive buffers: | |
1853 | * allocate an skbuff for each one, set up the descriptor table, | |
1854 | * and point each buffer at the next one. | |
1855 | */ | |
1856 | ||
1857 | for (i = 0; i < LMC_RXDESCS; i++) | |
1858 | { | |
1859 | struct sk_buff *skb; | |
1860 | ||
1861 | if (sc->lmc_rxq[i] == NULL) | |
1862 | { | |
1863 | skb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2); | |
1864 | if(skb == NULL){ | |
1865 | printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name); | |
1866 | sc->failed_ring = 1; | |
1867 | break; | |
1868 | } | |
1869 | else{ | |
1870 | sc->lmc_rxq[i] = skb; | |
1871 | } | |
1872 | } | |
1873 | else | |
1874 | { | |
1875 | skb = sc->lmc_rxq[i]; | |
1876 | } | |
1877 | ||
1878 | skb->dev = sc->lmc_device; | |
1879 | ||
1880 | /* owned by 21140 */ | |
1881 | sc->lmc_rxring[i].status = 0x80000000; | |
1882 | ||
1883 | /* used to be PKT_BUF_SZ; now use the skb's tailroom since we lose some to headroom */ | |
d004b8d4 | 1884 | sc->lmc_rxring[i].length = skb_tailroom(skb); |
1da177e4 LT |
1885 | |
1886 | /* used to be tail, which looks odd since you'd wonder why we'd write | |
1887 | * to the end of the packet, but since nothing is in the skb yet, tail == data | |
1888 | */ | |
1889 | sc->lmc_rxring[i].buffer1 = virt_to_bus (skb->data); | |
1890 | ||
1891 | /* This is fair since the structure is static and we have the next address */ | |
1892 | sc->lmc_rxring[i].buffer2 = virt_to_bus (&sc->lmc_rxring[i + 1]); | |
1893 | ||
1894 | } | |
1895 | ||
1896 | /* | |
1897 | * Sets end of ring | |
1898 | */ | |
8dd07086 | 1899 | if (i != 0) { |
1900 | sc->lmc_rxring[i - 1].length |= 0x02000000; /* Set end of buffers flag */ | |
1901 | sc->lmc_rxring[i - 1].buffer2 = virt_to_bus(&sc->lmc_rxring[0]); /* Point back to the start */ | |
1902 | } | |
1da177e4 LT |
1903 | LMC_CSR_WRITE (sc, csr_rxlist, virt_to_bus (sc->lmc_rxring)); /* write base address */ |
1904 | ||
1da177e4 LT |
1905 | /* Initialize the transmit rings and buffers */ |
1906 | for (i = 0; i < LMC_TXDESCS; i++) | |
1907 | { | |
1908 | if (sc->lmc_txq[i] != NULL){ /* have buffer */ | |
1909 | dev_kfree_skb(sc->lmc_txq[i]); /* free it */ | |
64bef763 | 1910 | sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */ |
1da177e4 LT |
1911 | } |
1912 | sc->lmc_txq[i] = NULL; | |
1913 | sc->lmc_txring[i].status = 0x00000000; | |
1914 | sc->lmc_txring[i].buffer2 = virt_to_bus (&sc->lmc_txring[i + 1]); | |
1915 | } | |
1916 | sc->lmc_txring[i - 1].buffer2 = virt_to_bus (&sc->lmc_txring[0]); | |
1917 | LMC_CSR_WRITE (sc, csr_txlist, virt_to_bus (sc->lmc_txring)); | |
1918 | ||
1919 | lmc_trace(sc->lmc_device, "lmc_softreset out"); | |
1920 | } | |
1921 | ||
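/*
 * GPIO direction helpers: clear or set the given bits in the soft copy
 * of the GPIO direction mask and write it to the Tulip GP register.
 */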
867240f7 | 1922 | void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/ |
1da177e4 LT |
1923 | { |
1924 | lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in"); | |
1925 | sc->lmc_gpio_io &= ~bits; | |
1926 | LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io)); | |
1927 | lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out"); | |
1928 | } | |
1929 | ||
867240f7 | 1930 | void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/ |
1da177e4 LT |
1931 | { |
1932 | lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in"); | |
1933 | sc->lmc_gpio_io |= bits; | |
1934 | LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io)); | |
1935 | lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out"); | |
1936 | } | |
1937 | ||
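/*
 * LED control. The LED bits live in MII register 16 and are active
 * low: clearing a bit turns the LED on, setting it turns the LED off.
 */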
867240f7 | 1938 | void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/ |
1da177e4 LT |
1939 | { |
1940 | lmc_trace(sc->lmc_device, "lmc_led_on in"); | |
1941 | if((~sc->lmc_miireg16) & led){ /* Already on! */ | |
1942 | lmc_trace(sc->lmc_device, "lmc_led_on aon out"); | |
1943 | return; | |
1944 | } | |
1945 | ||
1946 | sc->lmc_miireg16 &= ~led; | |
1947 | lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16); | |
1948 | lmc_trace(sc->lmc_device, "lmc_led_on out"); | |
1949 | } | |
1950 | ||
867240f7 | 1951 | void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/ |
1da177e4 LT |
1952 | { |
1953 | lmc_trace(sc->lmc_device, "lmc_led_off in"); | |
1954 | if(sc->lmc_miireg16 & led){ /* Already set don't do anything */ | |
1955 | lmc_trace(sc->lmc_device, "lmc_led_off aoff out"); | |
1956 | return; | |
1957 | } | |
1958 | ||
1959 | sc->lmc_miireg16 |= led; | |
1960 | lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16); | |
1961 | lmc_trace(sc->lmc_device, "lmc_led_off out"); | |
1962 | } | |
1963 | ||
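/*
 * Reset the card-side logic: pulse the FIFO reset bit in MII register
 * 16, drive the GEP reset line low for a while, then let the
 * media-specific init routine reprogram everything.
 */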
1964 | static void lmc_reset(lmc_softc_t * const sc) /*fold00*/ | |
1965 | { | |
1966 | lmc_trace(sc->lmc_device, "lmc_reset in"); | |
1967 | sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET; | |
1968 | lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16); | |
1969 | ||
1970 | sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET; | |
1971 | lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16); | |
1972 | ||
1973 | /* | |
1974 | * make some of the GPIO pins be outputs | |
1975 | */ | |
1976 | lmc_gpio_mkoutput(sc, LMC_GEP_RESET); | |
1977 | ||
1978 | /* | |
1979 | * RESET low to force state reset. This also forces | |
1980 | * the transmitter clock to be internal, but we expect to reset | |
1981 | * that later anyway. | |
1982 | */ | |
1983 | sc->lmc_gpio &= ~(LMC_GEP_RESET); | |
1984 | LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); | |
1985 | ||
1986 | /* | |
1987 | * hold for more than 10 microseconds | |
1988 | */ | |
1989 | udelay(50); | |
1990 | ||
1991 | /* | |
1992 | * stop driving Xilinx-related signals | |
1993 | */ | |
1994 | lmc_gpio_mkinput(sc, LMC_GEP_RESET); | |
1995 | ||
1996 | /* | |
1997 | * Call media specific init routine | |
1998 | */ | |
1999 | sc->lmc_media->init(sc); | |
2000 | ||
64bef763 | 2001 | sc->extra_stats.resetCount++; |
1da177e4 LT |
2002 | lmc_trace(sc->lmc_device, "lmc_reset out"); |
2003 | } | |
2004 | ||
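/*
 * Reset the DEC 21140 itself: mask all interrupts, issue a software
 * reset through the bus-mode CSR, then program the command/mode
 * register for the way this driver uses the chip (promiscuous, full
 * duplex, port select, no store-and-forward or threshold control) and
 * disable the receive watchdog and transmit jabber timers.
 */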
2005 | static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/ | |
2006 | { | |
867240f7 | 2007 | u32 val; |
1da177e4 LT |
2008 | lmc_trace(sc->lmc_device, "lmc_dec_reset in"); |
2009 | ||
2010 | /* | |
2011 | * disable all interrupts | |
2012 | */ | |
2013 | sc->lmc_intrmask = 0; | |
2014 | LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask); | |
2015 | ||
2016 | /* | |
2017 | * Reset the chip with a software reset command. | |
2018 | * Wait 10 microseconds (the chip actually needs 50 PCI cycles, | |
2019 | * which at 33 MHz is only about a microsecond and a half, but | |
2020 | * wait a bit longer anyway). | |
2021 | */ | |
2022 | LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET); | |
2023 | udelay(25); | |
2024 | #ifdef __sparc__ | |
2025 | sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode); | |
2026 | sc->lmc_busmode = 0x00100000; | |
2027 | sc->lmc_busmode &= ~TULIP_BUSMODE_SWRESET; | |
2028 | LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode); | |
2029 | #endif | |
2030 | sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command); | |
2031 | ||
2032 | /* | |
2033 | * We want: | |
2034 | * no ethernet address in frames we write | |
2035 | * disable padding (txdesc, padding disable) | |
2036 | * ignore runt frames (rdes0 bit 15) | |
2037 | * no receiver watchdog or transmitter jabber timer | |
2038 | * (csr15 bit 0,14 == 1) | |
2039 | * if using 16-bit CRC, turn off CRC (trans desc, crc disable) | |
2040 | */ | |
2041 | ||
2042 | sc->lmc_cmdmode |= ( TULIP_CMD_PROMISCUOUS | |
2043 | | TULIP_CMD_FULLDUPLEX | |
2044 | | TULIP_CMD_PASSBADPKT | |
2045 | | TULIP_CMD_NOHEARTBEAT | |
2046 | | TULIP_CMD_PORTSELECT | |
2047 | | TULIP_CMD_RECEIVEALL | |
2048 | | TULIP_CMD_MUSTBEONE | |
2049 | ); | |
2050 | sc->lmc_cmdmode &= ~( TULIP_CMD_OPERMODE | |
2051 | | TULIP_CMD_THRESHOLDCTL | |
2052 | | TULIP_CMD_STOREFWD | |
2053 | | TULIP_CMD_TXTHRSHLDCTL | |
2054 | ); | |
2055 | ||
2056 | LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode); | |
2057 | ||
2058 | /* | |
2059 | * disable receiver watchdog and transmit jabber | |
2060 | */ | |
2061 | val = LMC_CSR_READ(sc, csr_sia_general); | |
2062 | val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE); | |
2063 | LMC_CSR_WRITE(sc, csr_sia_general, val); | |
2064 | ||
2065 | lmc_trace(sc->lmc_device, "lmc_dec_reset out"); | |
2066 | } | |
2067 | ||
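/*
 * Record the address of each 21140 CSR: they sit csr_size bytes apart
 * starting at csr_base.
 */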
2068 | static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/ | |
2069 | size_t csr_size) | |
2070 | { | |
2071 | lmc_trace(sc->lmc_device, "lmc_initcsrs in"); | |
2072 | sc->lmc_csrs.csr_busmode = csr_base + 0 * csr_size; | |
2073 | sc->lmc_csrs.csr_txpoll = csr_base + 1 * csr_size; | |
2074 | sc->lmc_csrs.csr_rxpoll = csr_base + 2 * csr_size; | |
2075 | sc->lmc_csrs.csr_rxlist = csr_base + 3 * csr_size; | |
2076 | sc->lmc_csrs.csr_txlist = csr_base + 4 * csr_size; | |
2077 | sc->lmc_csrs.csr_status = csr_base + 5 * csr_size; | |
2078 | sc->lmc_csrs.csr_command = csr_base + 6 * csr_size; | |
2079 | sc->lmc_csrs.csr_intr = csr_base + 7 * csr_size; | |
2080 | sc->lmc_csrs.csr_missed_frames = csr_base + 8 * csr_size; | |
2081 | sc->lmc_csrs.csr_9 = csr_base + 9 * csr_size; | |
2082 | sc->lmc_csrs.csr_10 = csr_base + 10 * csr_size; | |
2083 | sc->lmc_csrs.csr_11 = csr_base + 11 * csr_size; | |
2084 | sc->lmc_csrs.csr_12 = csr_base + 12 * csr_size; | |
2085 | sc->lmc_csrs.csr_13 = csr_base + 13 * csr_size; | |
2086 | sc->lmc_csrs.csr_14 = csr_base + 14 * csr_size; | |
2087 | sc->lmc_csrs.csr_15 = csr_base + 15 * csr_size; | |
2088 | lmc_trace(sc->lmc_device, "lmc_initcsrs out"); | |
2089 | } | |
2090 | ||
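/*
 * Transmit watchdog. If the transmitter really has been stuck for
 * TX_TIMEOUT, do a running reset of the chip, restart the transmit
 * process and kick the transmit poll demand register.
 */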
64bef763 KH |
2091 | static void lmc_driver_timeout(struct net_device *dev) |
2092 | { | |
2093 | lmc_softc_t *sc = dev_to_sc(dev); | |
1da177e4 LT |
2094 | u32 csr6; |
2095 | unsigned long flags; | |
2096 | ||
2097 | lmc_trace(dev, "lmc_driver_timeout in"); | |
2098 | ||
1da177e4 LT |
2099 | spin_lock_irqsave(&sc->lmc_lock, flags); |
2100 | ||
2101 | printk(KERN_WARNING "%s: Xmitter busy\n", dev->name); | |
2102 | ||
64bef763 | 2103 | sc->extra_stats.tx_tbusy_calls++; |
1ae5dc34 | 2104 | if (jiffies - dev_trans_start(dev) < TX_TIMEOUT) |
64bef763 | 2105 | goto bug_out; |
1da177e4 LT |
2106 | |
2107 | /* | |
2108 | * Chip seems to have locked up | |
2109 | * Reset it | |
2110 | * This wipes out our whole descriptor | |
2111 | * table and starts from scratch. | |
2112 | */ | |
2113 | ||
2114 | LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO, | |
2115 | LMC_CSR_READ (sc, csr_status), | |
64bef763 | 2116 | sc->extra_stats.tx_ProcTimeout); |
1da177e4 LT |
2117 | |
2118 | lmc_running_reset (dev); | |
2119 | ||
2120 | LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0); | |
2121 | LMC_EVENT_LOG(LMC_EVENT_RESET2, | |
2122 | lmc_mii_readreg (sc, 0, 16), | |
2123 | lmc_mii_readreg (sc, 0, 17)); | |
2124 | ||
2125 | /* restart the tx processes */ | |
2126 | csr6 = LMC_CSR_READ (sc, csr_command); | |
2127 | LMC_CSR_WRITE (sc, csr_command, csr6 | 0x0002); | |
2128 | LMC_CSR_WRITE (sc, csr_command, csr6 | 0x2002); | |
2129 | ||
2130 | /* immediate transmit */ | |
2131 | LMC_CSR_WRITE (sc, csr_txpoll, 0); | |
2132 | ||
64bef763 KH |
2133 | sc->lmc_device->stats.tx_errors++; |
2134 | sc->extra_stats.tx_ProcTimeout++; /* -baz */ | |
1da177e4 | 2135 | |
1ae5dc34 | 2136 | dev->trans_start = jiffies; /* prevent tx timeout */ |
1da177e4 LT |
2137 | |
2138 | bug_out: | |
2139 | ||
2140 | spin_unlock_irqrestore(&sc->lmc_lock, flags); | |
2141 | ||
2142 | lmc_trace(dev, "lmc_driver_timeout out"); | |
2143 | ||
2144 | ||
2145 | } |