net: trans_start cleanups
drivers/net/dm9000.c
1 /*
2 * Davicom DM9000 Fast Ethernet driver for Linux.
3 * Copyright (C) 1997 Sten Wang
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
16 *
17 * Additional updates, Copyright:
18 * Ben Dooks <ben@simtec.co.uk>
19 * Sascha Hauer <s.hauer@pengutronix.de>
20 */
21
22 #include <linux/module.h>
23 #include <linux/ioport.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/init.h>
27 #include <linux/skbuff.h>
28 #include <linux/spinlock.h>
29 #include <linux/crc32.h>
30 #include <linux/mii.h>
31 #include <linux/ethtool.h>
32 #include <linux/dm9000.h>
33 #include <linux/delay.h>
34 #include <linux/platform_device.h>
35 #include <linux/irq.h>
36 #include <linux/slab.h>
37
38 #include <asm/delay.h>
39 #include <asm/irq.h>
40 #include <asm/io.h>
41
42 #include "dm9000.h"
43
44 /* Board/System/Debug information/definition ---------------- */
45
46 #define DM9000_PHY 0x40 /* PHY address 0x01 */
47
48 #define CARDNAME "dm9000"
49 #define DRV_VERSION "1.31"
50
51 /*
52 * Transmit timeout, default 5 seconds.
53 */
54 static int watchdog = 5000;
55 module_param(watchdog, int, 0400);
56 MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
57
58 /* DM9000 register address locking.
59 *
60 * The DM9000 uses an address register to control where data written
61 * to the data register goes. This means that the address register
62 * must be preserved over interrupts or similar calls.
63 *
64 * During interrupt and other critical calls, a spinlock is used to
65 * protect the system, but the calls themselves save the address
66 * in the address register in case they are interrupting another
67 * access to the device.
68 *
69 * For general accesses a lock is provided so that calls which are
70 * allowed to sleep are serialised so that the address register does
71 * not need to be saved. This lock also serves to serialise access
72 * to the EEPROM and PHY access registers which are shared between
73 * these two devices.
74 */
75
76 /* The driver supports the original DM9000E, and now the two newer
77 * devices, DM9000A and DM9000B.
78 */
79
80 enum dm9000_type {
81 TYPE_DM9000E, /* original DM9000 */
82 TYPE_DM9000A,
83 TYPE_DM9000B
84 };
85
86 /* Structure/enum declaration ------------------------------- */
87 typedef struct board_info {
88
89 void __iomem *io_addr; /* Register I/O base address */
90 void __iomem *io_data; /* Data I/O address */
91 u16 irq; /* IRQ */
92
93 u16 tx_pkt_cnt;
94 u16 queue_pkt_len;
95 u16 queue_start_addr;
96 u16 queue_ip_summed;
97 u16 dbug_cnt;
98 u8 io_mode; /* 0:word, 2:byte */
99 u8 phy_addr;
100 u8 imr_all;
101
102 unsigned int flags;
103 unsigned int in_suspend :1;
104 unsigned int wake_supported :1;
105 int debug_level;
106
107 enum dm9000_type type;
108
109 void (*inblk)(void __iomem *port, void *data, int length);
110 void (*outblk)(void __iomem *port, void *data, int length);
111 void (*dumpblk)(void __iomem *port, int length);
112
113 struct device *dev; /* parent device */
114
115 struct resource *addr_res; /* resources found */
116 struct resource *data_res;
117 struct resource *addr_req; /* resources requested */
118 struct resource *data_req;
119 struct resource *irq_res;
120
121 int irq_wake;
122
123 struct mutex addr_lock; /* phy and eeprom access lock */
124
125 struct delayed_work phy_poll;
126 struct net_device *ndev;
127
128 spinlock_t lock;
129
130 struct mii_if_info mii;
131 u32 msg_enable;
132 u32 wake_state;
133
134 int rx_csum;
135 int can_csum;
136 int ip_summed;
137 } board_info_t;
138
139 /* debug code */
140
141 #define dm9000_dbg(db, lev, msg...) do { \
142 if ((lev) < CONFIG_DM9000_DEBUGLEVEL && \
143 (lev) < db->debug_level) { \
144 dev_dbg(db->dev, msg); \
145 } \
146 } while (0)
147
148 static inline board_info_t *to_dm9000_board(struct net_device *dev)
149 {
150 return netdev_priv(dev);
151 }
152
153 /* DM9000 network board routine ---------------------------- */
154
155 static void
156 dm9000_reset(board_info_t * db)
157 {
158 dev_dbg(db->dev, "resetting device\n");
159
160 /* RESET device */
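/* select NCR through the address port, then write the reset bit through the data port */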
161 writeb(DM9000_NCR, db->io_addr);
162 udelay(200);
163 writeb(NCR_RST, db->io_data);
164 udelay(200);
165 }
166
167 /*
168 * Read a byte from I/O port
169 */
170 static u8
171 ior(board_info_t * db, int reg)
172 {
173 writeb(reg, db->io_addr);
174 return readb(db->io_data);
175 }
176
177 /*
178 * Write a byte to I/O port
179 */
180
181 static void
182 iow(board_info_t * db, int reg, int value)
183 {
184 writeb(reg, db->io_addr);
185 writeb(value, db->io_data);
186 }
187
188 /* routines for sending block to chip */
189
190 static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
191 {
192 writesb(reg, data, count);
193 }
194
195 static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
196 {
197 writesw(reg, data, (count+1) >> 1);
198 }
199
200 static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
201 {
202 writesl(reg, data, (count+3) >> 2);
203 }
204
205 /* input block from chip to memory */
206
207 static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
208 {
209 readsb(reg, data, count);
210 }
211
212
213 static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
214 {
215 readsw(reg, data, (count+1) >> 1);
216 }
217
218 static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
219 {
220 readsl(reg, data, (count+3) >> 2);
221 }
222
223 /* dump block from chip to null */
224
225 static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
226 {
227 int i;
228 int tmp;
229
230 for (i = 0; i < count; i++)
231 tmp = readb(reg);
232 }
233
234 static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
235 {
236 int i;
237 int tmp;
238
239 count = (count + 1) >> 1;
240
241 for (i = 0; i < count; i++)
242 tmp = readw(reg);
243 }
244
245 static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
246 {
247 int i;
248 int tmp;
249
250 count = (count + 3) >> 2;
251
252 for (i = 0; i < count; i++)
253 tmp = readl(reg);
254 }
255
256 /* dm9000_set_io
257 *
258 * select the specified set of io routines to use with the
259 * device
260 */
261
262 static void dm9000_set_io(struct board_info *db, int byte_width)
263 {
264 /* use the size of the data resource to work out what IO
265 * routines we want to use
266 */
267
268 switch (byte_width) {
269 case 1:
270 db->dumpblk = dm9000_dumpblk_8bit;
271 db->outblk = dm9000_outblk_8bit;
272 db->inblk = dm9000_inblk_8bit;
273 break;
274
275
276 case 3:
277 dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
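/* fall through */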
278 case 2:
279 db->dumpblk = dm9000_dumpblk_16bit;
280 db->outblk = dm9000_outblk_16bit;
281 db->inblk = dm9000_inblk_16bit;
282 break;
283
284 case 4:
285 default:
286 db->dumpblk = dm9000_dumpblk_32bit;
287 db->outblk = dm9000_outblk_32bit;
288 db->inblk = dm9000_inblk_32bit;
289 break;
290 }
291 }
292
293 static void dm9000_schedule_poll(board_info_t *db)
294 {
295 if (db->type == TYPE_DM9000E)
296 schedule_delayed_work(&db->phy_poll, HZ * 2);
297 }
298
299 static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
300 {
301 board_info_t *dm = to_dm9000_board(dev);
302
303 if (!netif_running(dev))
304 return -EINVAL;
305
306 return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
307 }
308
309 static unsigned int
310 dm9000_read_locked(board_info_t *db, int reg)
311 {
312 unsigned long flags;
313 unsigned int ret;
314
315 spin_lock_irqsave(&db->lock, flags);
316 ret = ior(db, reg);
317 spin_unlock_irqrestore(&db->lock, flags);
318
319 return ret;
320 }
321
322 static int dm9000_wait_eeprom(board_info_t *db)
323 {
324 unsigned int status;
325 int timeout = 8; /* wait max 8msec */
326
327 /* The DM9000 data sheets say we should be able to
328 * poll the ERRE bit in EPCR to wait for the EEPROM
329 * operation. From testing several chips, this bit
330 * does not seem to work.
331 *
332 * We attempt to use the bit, but fall back to the
333 * timeout (which is why we do not return an error
334 * on expiry) to say that the EEPROM operation has
335 * completed.
336 */
337
338 while (1) {
339 status = dm9000_read_locked(db, DM9000_EPCR);
340
341 if ((status & EPCR_ERRE) == 0)
342 break;
343
344 msleep(1);
345
346 if (timeout-- < 0) {
347 dev_dbg(db->dev, "timeout waiting EEPROM\n");
348 break;
349 }
350 }
351
352 return 0;
353 }
354
355 /*
356 * Read a word of data from the EEPROM
357 */
358 static void
359 dm9000_read_eeprom(board_info_t *db, int offset, u8 *to)
360 {
361 unsigned long flags;
362
363 if (db->flags & DM9000_PLATF_NO_EEPROM) {
364 to[0] = 0xff;
365 to[1] = 0xff;
366 return;
367 }
368
369 mutex_lock(&db->addr_lock);
370
371 spin_lock_irqsave(&db->lock, flags);
372
373 iow(db, DM9000_EPAR, offset);
374 iow(db, DM9000_EPCR, EPCR_ERPRR);
375
376 spin_unlock_irqrestore(&db->lock, flags);
377
378 dm9000_wait_eeprom(db);
379
380 /* delay for at least 150us */
381 msleep(1);
382
383 spin_lock_irqsave(&db->lock, flags);
384
385 iow(db, DM9000_EPCR, 0x0);
386
387 to[0] = ior(db, DM9000_EPDRL);
388 to[1] = ior(db, DM9000_EPDRH);
389
390 spin_unlock_irqrestore(&db->lock, flags);
391
392 mutex_unlock(&db->addr_lock);
393 }
394
395 /*
396 * Write a word of data to the EEPROM (SROM)
397 */
398 static void
399 dm9000_write_eeprom(board_info_t *db, int offset, u8 *data)
400 {
401 unsigned long flags;
402
403 if (db->flags & DM9000_PLATF_NO_EEPROM)
404 return;
405
406 mutex_lock(&db->addr_lock);
407
408 spin_lock_irqsave(&db->lock, flags);
409 iow(db, DM9000_EPAR, offset);
410 iow(db, DM9000_EPDRH, data[1]);
411 iow(db, DM9000_EPDRL, data[0]);
412 iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
413 spin_unlock_irqrestore(&db->lock, flags);
414
415 dm9000_wait_eeprom(db);
416
417 mdelay(1); /* wait at least 150uS to clear */
418
419 spin_lock_irqsave(&db->lock, flags);
420 iow(db, DM9000_EPCR, 0);
421 spin_unlock_irqrestore(&db->lock, flags);
422
423 mutex_unlock(&db->addr_lock);
424 }
425
426 /* ethtool ops */
427
428 static void dm9000_get_drvinfo(struct net_device *dev,
429 struct ethtool_drvinfo *info)
430 {
431 board_info_t *dm = to_dm9000_board(dev);
432
433 strcpy(info->driver, CARDNAME);
434 strcpy(info->version, DRV_VERSION);
435 strcpy(info->bus_info, to_platform_device(dm->dev)->name);
436 }
437
438 static u32 dm9000_get_msglevel(struct net_device *dev)
439 {
440 board_info_t *dm = to_dm9000_board(dev);
441
442 return dm->msg_enable;
443 }
444
445 static void dm9000_set_msglevel(struct net_device *dev, u32 value)
446 {
447 board_info_t *dm = to_dm9000_board(dev);
448
449 dm->msg_enable = value;
450 }
451
452 static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
453 {
454 board_info_t *dm = to_dm9000_board(dev);
455
456 mii_ethtool_gset(&dm->mii, cmd);
457 return 0;
458 }
459
460 static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
461 {
462 board_info_t *dm = to_dm9000_board(dev);
463
464 return mii_ethtool_sset(&dm->mii, cmd);
465 }
466
467 static int dm9000_nway_reset(struct net_device *dev)
468 {
469 board_info_t *dm = to_dm9000_board(dev);
470 return mii_nway_restart(&dm->mii);
471 }
472
473 static uint32_t dm9000_get_rx_csum(struct net_device *dev)
474 {
475 board_info_t *dm = to_dm9000_board(dev);
476 return dm->rx_csum;
477 }
478
479 static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
480 {
481 board_info_t *dm = to_dm9000_board(dev);
482 unsigned long flags;
483
484 if (dm->can_csum) {
485 dm->rx_csum = data;
486
487 spin_lock_irqsave(&dm->lock, flags);
488 iow(dm, DM9000_RCSR, dm->rx_csum ? RCSR_CSUM : 0);
489 spin_unlock_irqrestore(&dm->lock, flags);
490
491 return 0;
492 }
493
494 return -EOPNOTSUPP;
495 }
496
497 static int dm9000_set_tx_csum(struct net_device *dev, uint32_t data)
498 {
499 board_info_t *dm = to_dm9000_board(dev);
500 int ret = -EOPNOTSUPP;
501
502 if (dm->can_csum)
503 ret = ethtool_op_set_tx_csum(dev, data);
504 return ret;
505 }
506
507 static u32 dm9000_get_link(struct net_device *dev)
508 {
509 board_info_t *dm = to_dm9000_board(dev);
510 u32 ret;
511
512 if (dm->flags & DM9000_PLATF_EXT_PHY)
513 ret = mii_link_ok(&dm->mii);
514 else
515 ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;
516
517 return ret;
518 }
519
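/* the ethtool EEPROM magic value spells "DM9K" in ASCII */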
520 #define DM_EEPROM_MAGIC (0x444D394B)
521
522 static int dm9000_get_eeprom_len(struct net_device *dev)
523 {
524 return 128;
525 }
526
527 static int dm9000_get_eeprom(struct net_device *dev,
528 struct ethtool_eeprom *ee, u8 *data)
529 {
530 board_info_t *dm = to_dm9000_board(dev);
531 int offset = ee->offset;
532 int len = ee->len;
533 int i;
534
535 /* EEPROM access is aligned to two bytes */
536
537 if ((len & 1) != 0 || (offset & 1) != 0)
538 return -EINVAL;
539
540 if (dm->flags & DM9000_PLATF_NO_EEPROM)
541 return -ENOENT;
542
543 ee->magic = DM_EEPROM_MAGIC;
544
545 for (i = 0; i < len; i += 2)
546 dm9000_read_eeprom(dm, (offset + i) / 2, data + i);
547
548 return 0;
549 }
550
551 static int dm9000_set_eeprom(struct net_device *dev,
552 struct ethtool_eeprom *ee, u8 *data)
553 {
554 board_info_t *dm = to_dm9000_board(dev);
555 int offset = ee->offset;
556 int len = ee->len;
557 int i;
558
559 /* EEPROM access is aligned to two bytes */
560
561 if ((len & 1) != 0 || (offset & 1) != 0)
562 return -EINVAL;
563
564 if (dm->flags & DM9000_PLATF_NO_EEPROM)
565 return -ENOENT;
566
567 if (ee->magic != DM_EEPROM_MAGIC)
568 return -EINVAL;
569
570 for (i = 0; i < len; i += 2)
571 dm9000_write_eeprom(dm, (offset + i) / 2, data + i);
572
573 return 0;
574 }
575
576 static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
577 {
578 board_info_t *dm = to_dm9000_board(dev);
579
580 memset(w, 0, sizeof(struct ethtool_wolinfo));
581
582 /* note, we could probably support wake-phy too */
583 w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
584 w->wolopts = dm->wake_state;
585 }
586
587 static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
588 {
589 board_info_t *dm = to_dm9000_board(dev);
590 unsigned long flags;
591 u32 opts = w->wolopts;
592 u32 wcr = 0;
593
594 if (!dm->wake_supported)
595 return -EOPNOTSUPP;
596
597 if (opts & ~WAKE_MAGIC)
598 return -EINVAL;
599
600 if (opts & WAKE_MAGIC)
601 wcr |= WCR_MAGICEN;
602
603 mutex_lock(&dm->addr_lock);
604
605 spin_lock_irqsave(&dm->lock, flags);
606 iow(dm, DM9000_WCR, wcr);
607 spin_unlock_irqrestore(&dm->lock, flags);
608
609 mutex_unlock(&dm->addr_lock);
610
611 if (dm->wake_state != opts) {
612 /* change in wol state, update IRQ state */
613
614 if (!dm->wake_state)
615 set_irq_wake(dm->irq_wake, 1);
616 else if (dm->wake_state && !opts)
617 set_irq_wake(dm->irq_wake, 0);
618 }
619
620 dm->wake_state = opts;
621 return 0;
622 }
623
624 static const struct ethtool_ops dm9000_ethtool_ops = {
625 .get_drvinfo = dm9000_get_drvinfo,
626 .get_settings = dm9000_get_settings,
627 .set_settings = dm9000_set_settings,
628 .get_msglevel = dm9000_get_msglevel,
629 .set_msglevel = dm9000_set_msglevel,
630 .nway_reset = dm9000_nway_reset,
631 .get_link = dm9000_get_link,
632 .get_wol = dm9000_get_wol,
633 .set_wol = dm9000_set_wol,
634 .get_eeprom_len = dm9000_get_eeprom_len,
635 .get_eeprom = dm9000_get_eeprom,
636 .set_eeprom = dm9000_set_eeprom,
637 .get_rx_csum = dm9000_get_rx_csum,
638 .set_rx_csum = dm9000_set_rx_csum,
639 .get_tx_csum = ethtool_op_get_tx_csum,
640 .set_tx_csum = dm9000_set_tx_csum,
641 };
642
643 static void dm9000_show_carrier(board_info_t *db,
644 unsigned carrier, unsigned nsr)
645 {
646 struct net_device *ndev = db->ndev;
647 unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
648
649 if (carrier)
650 dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n",
651 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
652 (ncr & NCR_FDX) ? "full" : "half");
653 else
654 dev_info(db->dev, "%s: link down\n", ndev->name);
655 }
656
657 static void
658 dm9000_poll_work(struct work_struct *w)
659 {
660 struct delayed_work *dw = to_delayed_work(w);
661 board_info_t *db = container_of(dw, board_info_t, phy_poll);
662 struct net_device *ndev = db->ndev;
663
664 if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
665 !(db->flags & DM9000_PLATF_EXT_PHY)) {
666 unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
667 unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
668 unsigned new_carrier;
669
670 new_carrier = (nsr & NSR_LINKST) ? 1 : 0;
671
672 if (old_carrier != new_carrier) {
673 if (netif_msg_link(db))
674 dm9000_show_carrier(db, new_carrier, nsr);
675
676 if (!new_carrier)
677 netif_carrier_off(ndev);
678 else
679 netif_carrier_on(ndev);
680 }
681 } else
682 mii_check_media(&db->mii, netif_msg_link(db), 0);
683
684 if (netif_running(ndev))
685 dm9000_schedule_poll(db);
686 }
687
688 /* dm9000_release_board
689 *
690 * release a board, and any mapped resources
691 */
692
693 static void
694 dm9000_release_board(struct platform_device *pdev, struct board_info *db)
695 {
696 /* unmap our resources */
697
698 iounmap(db->io_addr);
699 iounmap(db->io_data);
700
701 /* release the resources */
702
703 release_resource(db->data_req);
704 kfree(db->data_req);
705
706 release_resource(db->addr_req);
707 kfree(db->addr_req);
708 }
709
710 static unsigned char dm9000_type_to_char(enum dm9000_type type)
711 {
712 switch (type) {
713 case TYPE_DM9000E: return 'e';
714 case TYPE_DM9000A: return 'a';
715 case TYPE_DM9000B: return 'b';
716 }
717
718 return '?';
719 }
720
721 /*
722 * Set DM9000 multicast address
723 */
724 static void
725 dm9000_hash_table(struct net_device *dev)
726 {
727 board_info_t *db = netdev_priv(dev);
728 struct netdev_hw_addr *ha;
729 int i, oft;
730 u32 hash_val;
731 u16 hash_table[4];
732 u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
733 unsigned long flags;
734
735 dm9000_dbg(db, 1, "entering %s\n", __func__);
736
737 spin_lock_irqsave(&db->lock, flags);
738
739 for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
740 iow(db, oft, dev->dev_addr[i]);
741
742 /* Clear Hash Table */
743 for (i = 0; i < 4; i++)
744 hash_table[i] = 0x0;
745
746 /* broadcast address */
747 hash_table[3] = 0x8000;
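/* the broadcast address hashes to bit 63, i.e. bit 15 of word 3 */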
748
749 if (dev->flags & IFF_PROMISC)
750 rcr |= RCR_PRMSC;
751
752 if (dev->flags & IFF_ALLMULTI)
753 rcr |= RCR_ALL;
754
755 /* the multicast address in Hash Table : 64 bits */
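/* the low 6 bits of each address's little-endian CRC select one of the 64 hash bits */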
756 netdev_for_each_mc_addr(ha, dev) {
757 hash_val = ether_crc_le(6, ha->addr) & 0x3f;
758 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
759 }
760
761 /* Write the hash table to MAC MD table */
762 for (i = 0, oft = DM9000_MAR; i < 4; i++) {
763 iow(db, oft++, hash_table[i]);
764 iow(db, oft++, hash_table[i] >> 8);
765 }
766
767 iow(db, DM9000_RCR, rcr);
768 spin_unlock_irqrestore(&db->lock, flags);
769 }
770
771 /*
772 * Initialize dm9000 board
773 */
774 static void
775 dm9000_init_dm9000(struct net_device *dev)
776 {
777 board_info_t *db = netdev_priv(dev);
778 unsigned int imr;
779 unsigned int ncr;
780
781 dm9000_dbg(db, 1, "entering %s\n", __func__);
782
783 /* I/O mode */
784 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
785
786 /* Checksum mode */
787 dm9000_set_rx_csum(dev, db->rx_csum);
788
789 /* GPIO0 on pre-activate PHY */
790 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
791 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
792 iow(db, DM9000_GPR, 0); /* Enable PHY */
793
794 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
795
796 /* if wol is needed, then always set NCR_WAKEEN otherwise we end
797 * up dumping the wake events if we disable this. There is already
798 * a wake-mask in DM9000_WCR */
799 if (db->wake_supported)
800 ncr |= NCR_WAKEEN;
801
802 iow(db, DM9000_NCR, ncr);
803
804 /* Program operating register */
805 iow(db, DM9000_TCR, 0); /* TX Polling clear */
806 iow(db, DM9000_BPTR, 0x3f); /* Less 3Kb, 200us */
807 iow(db, DM9000_FCR, 0xff); /* Flow Control */
808 iow(db, DM9000_SMCR, 0); /* Special Mode */
809 /* clear TX status */
810 iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
811 iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
812
813 /* Set address filter table */
814 dm9000_hash_table(dev);
815
816 imr = IMR_PAR | IMR_PTM | IMR_PRM;
817 if (db->type != TYPE_DM9000E)
818 imr |= IMR_LNKCHNG;
819
820 db->imr_all = imr;
821
822 /* Enable TX/RX interrupt mask */
823 iow(db, DM9000_IMR, imr);
824
825 /* Init Driver variable */
826 db->tx_pkt_cnt = 0;
827 db->queue_pkt_len = 0;
828 dev->trans_start = jiffies;
829 }
830
831 /* Our watchdog timed out. Called by the networking layer */
832 static void dm9000_timeout(struct net_device *dev)
833 {
834 board_info_t *db = netdev_priv(dev);
835 u8 reg_save;
836 unsigned long flags;
837
838 /* Save previous register address */
839 reg_save = readb(db->io_addr);
840 spin_lock_irqsave(&db->lock, flags);
841
842 netif_stop_queue(dev);
843 dm9000_reset(db);
844 dm9000_init_dm9000(dev);
845 /* We can accept TX packets again */
846 dev->trans_start = jiffies; /* prevent tx timeout */
847 netif_wake_queue(dev);
848
849 /* Restore previous register address */
850 writeb(reg_save, db->io_addr);
851 spin_unlock_irqrestore(&db->lock, flags);
852 }
853
854 static void dm9000_send_packet(struct net_device *dev,
855 int ip_summed,
856 u16 pkt_len)
857 {
858 board_info_t *dm = to_dm9000_board(dev);
859
860 /* The DM9000 is not smart enough to leave fragmented packets alone. */
861 if (dm->ip_summed != ip_summed) {
862 if (ip_summed == CHECKSUM_NONE)
863 iow(dm, DM9000_TCCR, 0);
864 else
865 iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
866 dm->ip_summed = ip_summed;
867 }
868
869 /* Set TX length to DM9000 */
870 iow(dm, DM9000_TXPLL, pkt_len);
871 iow(dm, DM9000_TXPLH, pkt_len >> 8);
872
873 /* Issue TX polling command */
874 iow(dm, DM9000_TCR, TCR_TXREQ); /* Cleared after TX complete */
875 }
876
877 /*
878 * Hardware start transmission.
879 * Send a packet to media from the upper layer.
880 */
881 static int
882 dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
883 {
884 unsigned long flags;
885 board_info_t *db = netdev_priv(dev);
886
887 dm9000_dbg(db, 3, "%s:\n", __func__);
888
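/* the driver keeps at most two packets outstanding in the chip's TX SRAM */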
889 if (db->tx_pkt_cnt > 1)
890 return NETDEV_TX_BUSY;
891
892 spin_lock_irqsave(&db->lock, flags);
893
894 /* Move data to DM9000 TX RAM */
895 writeb(DM9000_MWCMD, db->io_addr);
896
897 (db->outblk)(db->io_data, skb->data, skb->len);
898 dev->stats.tx_bytes += skb->len;
899
900 db->tx_pkt_cnt++;
901 /* TX control: send the first packet immediately, queue the second */
902 if (db->tx_pkt_cnt == 1) {
903 dm9000_send_packet(dev, skb->ip_summed, skb->len);
904 } else {
905 /* Second packet */
906 db->queue_pkt_len = skb->len;
907 db->queue_ip_summed = skb->ip_summed;
908 netif_stop_queue(dev);
909 }
910
911 spin_unlock_irqrestore(&db->lock, flags);
912
913 /* free this SKB */
914 dev_kfree_skb(skb);
915
916 return NETDEV_TX_OK;
917 }
918
919 /*
920 * DM9000 interrupt handler
921 * receive the packet to upper layer, free the transmitted packet
922 */
923
924 static void dm9000_tx_done(struct net_device *dev, board_info_t *db)
925 {
926 int tx_status = ior(db, DM9000_NSR); /* Got TX status */
927
928 if (tx_status & (NSR_TX2END | NSR_TX1END)) {
929 /* One packet transmit completed */
930 db->tx_pkt_cnt--;
931 dev->stats.tx_packets++;
932
933 if (netif_msg_tx_done(db))
934 dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);
935
936 /* Queue packet check & send */
937 if (db->tx_pkt_cnt > 0)
938 dm9000_send_packet(dev, db->queue_ip_summed,
939 db->queue_pkt_len);
940 netif_wake_queue(dev);
941 }
942 }
943
944 struct dm9000_rxhdr {
945 u8 RxPktReady;
946 u8 RxStatus;
947 __le16 RxLen;
948 } __attribute__((__packed__));
949
950 /*
951 * Receive a packet and pass it to the upper layer
952 */
953 static void
954 dm9000_rx(struct net_device *dev)
955 {
956 board_info_t *db = netdev_priv(dev);
957 struct dm9000_rxhdr rxhdr;
958 struct sk_buff *skb;
959 u8 rxbyte, *rdptr;
960 bool GoodPacket;
961 int RxLen;
962
963 /* Check packet ready or not */
964 do {
965 ior(db, DM9000_MRCMDX); /* Dummy read */
966
967 /* Get most updated data */
968 rxbyte = readb(db->io_data);
969
970 /* Status check: this byte must be 0 or 1 */
971 if (rxbyte & DM9000_PKT_ERR) {
972 dev_warn(db->dev, "status check fail: %d\n", rxbyte);
973 iow(db, DM9000_RCR, 0x00); /* Stop Device */
974 iow(db, DM9000_ISR, IMR_PAR); /* Stop INT request */
975 return;
976 }
977
978 if (!(rxbyte & DM9000_PKT_RDY))
979 return;
980
981 /* A packet is ready now; get its status and length */
982 GoodPacket = true;
983 writeb(DM9000_MRCMD, db->io_addr);
984
985 (db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));
986
987 RxLen = le16_to_cpu(rxhdr.RxLen);
988
989 if (netif_msg_rx_status(db))
990 dev_dbg(db->dev, "RX: status %02x, length %04x\n",
991 rxhdr.RxStatus, RxLen);
992
993 /* Packet Status check */
994 if (RxLen < 0x40) {
995 GoodPacket = false;
996 if (netif_msg_rx_err(db))
997 dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
998 }
999
1000 if (RxLen > DM9000_PKT_MAX) {
1001 dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
1002 }
1003
1004 /* rxhdr.RxStatus is identical to RSR register. */
1005 if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
1006 RSR_PLE | RSR_RWTO |
1007 RSR_LCS | RSR_RF)) {
1008 GoodPacket = false;
1009 if (rxhdr.RxStatus & RSR_FOE) {
1010 if (netif_msg_rx_err(db))
1011 dev_dbg(db->dev, "fifo error\n");
1012 dev->stats.rx_fifo_errors++;
1013 }
1014 if (rxhdr.RxStatus & RSR_CE) {
1015 if (netif_msg_rx_err(db))
1016 dev_dbg(db->dev, "crc error\n");
1017 dev->stats.rx_crc_errors++;
1018 }
1019 if (rxhdr.RxStatus & RSR_RF) {
1020 if (netif_msg_rx_err(db))
1021 dev_dbg(db->dev, "length error\n");
1022 dev->stats.rx_length_errors++;
1023 }
1024 }
1025
1026 /* Move data from DM9000 */
1027 if (GoodPacket &&
1028 ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) {
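/* reserve 2 bytes so the IP header is aligned; the skb length excludes the trailing 4-byte CRC */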
1029 skb_reserve(skb, 2);
1030 rdptr = (u8 *) skb_put(skb, RxLen - 4);
1031
1032 /* Read received packet from RX SRAM */
1033
1034 (db->inblk)(db->io_data, rdptr, RxLen);
1035 dev->stats.rx_bytes += RxLen;
1036
1037 /* Pass to upper layer */
1038 skb->protocol = eth_type_trans(skb, dev);
1039 if (db->rx_csum) {
1040 if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
1041 skb->ip_summed = CHECKSUM_UNNECESSARY;
1042 else
1043 skb->ip_summed = CHECKSUM_NONE;
1044 }
1045 netif_rx(skb);
1046 dev->stats.rx_packets++;
1047
1048 } else {
1049 /* need to dump the packet's data */
1050
1051 (db->dumpblk)(db->io_data, RxLen);
1052 }
1053 } while (rxbyte & DM9000_PKT_RDY);
1054 }
1055
1056 static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
1057 {
1058 struct net_device *dev = dev_id;
1059 board_info_t *db = netdev_priv(dev);
1060 int int_status;
1061 unsigned long flags;
1062 u8 reg_save;
1063
1064 dm9000_dbg(db, 3, "entering %s\n", __func__);
1065
1066 /* A real interrupt coming */
1067
1068 /* holders of db->lock must always block IRQs */
1069 spin_lock_irqsave(&db->lock, flags);
1070
1071 /* Save previous register address */
1072 reg_save = readb(db->io_addr);
1073
1074 /* Disable all interrupts */
1075 iow(db, DM9000_IMR, IMR_PAR);
1076
1077 /* Got DM9000 interrupt status */
1078 int_status = ior(db, DM9000_ISR); /* Got ISR */
1079 iow(db, DM9000_ISR, int_status); /* Clear ISR status */
1080
1081 if (netif_msg_intr(db))
1082 dev_dbg(db->dev, "interrupt status %02x\n", int_status);
1083
1084 /* Receive incoming packets */
1085 if (int_status & ISR_PRS)
1086 dm9000_rx(dev);
1087
1088 /* Transmit interrupt check */
1089 if (int_status & ISR_PTS)
1090 dm9000_tx_done(dev, db);
1091
1092 if (db->type != TYPE_DM9000E) {
1093 if (int_status & ISR_LNKCHNG) {
1094 /* fire a link-change request */
1095 schedule_delayed_work(&db->phy_poll, 1);
1096 }
1097 }
1098
1099 /* Re-enable interrupt mask */
1100 iow(db, DM9000_IMR, db->imr_all);
1101
1102 /* Restore previous register address */
1103 writeb(reg_save, db->io_addr);
1104
1105 spin_unlock_irqrestore(&db->lock, flags);
1106
1107 return IRQ_HANDLED;
1108 }
1109
1110 static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
1111 {
1112 struct net_device *dev = dev_id;
1113 board_info_t *db = netdev_priv(dev);
1114 unsigned long flags;
1115 unsigned nsr, wcr;
1116
1117 spin_lock_irqsave(&db->lock, flags);
1118
1119 nsr = ior(db, DM9000_NSR);
1120 wcr = ior(db, DM9000_WCR);
1121
1122 dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);
1123
1124 if (nsr & NSR_WAKEST) {
1125 /* clear the wake status so it is not signalled again */
1126 iow(db, DM9000_NSR, NSR_WAKEST);
1127
1128 if (wcr & WCR_LINKST)
1129 dev_info(db->dev, "wake by link status change\n");
1130 if (wcr & WCR_SAMPLEST)
1131 dev_info(db->dev, "wake by sample packet\n");
1132 if (wcr & WCR_MAGICST)
1133 dev_info(db->dev, "wake by magic packet\n");
1134 if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
1135 dev_err(db->dev, "wake signalled with no reason? "
1136 "NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
1137
1138 }
1139
1140 spin_unlock_irqrestore(&db->lock, flags);
1141
1142 return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
1143 }
1144
1145 #ifdef CONFIG_NET_POLL_CONTROLLER
1146 /*
1147 * Used by netconsole
1148 */
1149 static void dm9000_poll_controller(struct net_device *dev)
1150 {
1151 disable_irq(dev->irq);
1152 dm9000_interrupt(dev->irq, dev);
1153 enable_irq(dev->irq);
1154 }
1155 #endif
1156
1157 /*
1158 * Open the interface.
1159 * The interface is opened whenever "ifconfig" activates it.
1160 */
1161 static int
1162 dm9000_open(struct net_device *dev)
1163 {
1164 board_info_t *db = netdev_priv(dev);
1165 unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;
1166
1167 if (netif_msg_ifup(db))
1168 dev_dbg(db->dev, "enabling %s\n", dev->name);
1169
1170 /* If there is no IRQ type specified, default to something that
1171 * may work, and tell the user that this is a problem */
1172
1173 if (irqflags == IRQF_TRIGGER_NONE)
1174 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
1175
1176 irqflags |= IRQF_SHARED;
1177
1178 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
1179 return -EAGAIN;
1180
1181 /* Initialize DM9000 board */
1182 dm9000_reset(db);
1183 dm9000_init_dm9000(dev);
1184
1185 /* Init driver variable */
1186 db->dbug_cnt = 0;
1187
1188 mii_check_media(&db->mii, netif_msg_link(db), 1);
1189 netif_start_queue(dev);
1190
1191 dm9000_schedule_poll(db);
1192
1193 return 0;
1194 }
1195
1196 /*
1197 * Sleep, either by using msleep() or, if we are suspending,
1198 * by busy-waiting with mdelay().
1199 */
1200 static void dm9000_msleep(board_info_t *db, unsigned int ms)
1201 {
1202 if (db->in_suspend)
1203 mdelay(ms);
1204 else
1205 msleep(ms);
1206 }
1207
1208 /*
1209 * Read a word from phyxcer
1210 */
1211 static int
1212 dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
1213 {
1214 board_info_t *db = netdev_priv(dev);
1215 unsigned long flags;
1216 unsigned int reg_save;
1217 int ret;
1218
1219 mutex_lock(&db->addr_lock);
1220
1221 spin_lock_irqsave(&db->lock,flags);
1222
1223 /* Save previous register address */
1224 reg_save = readb(db->io_addr);
1225
1226 /* Fill the phyxcer register into REG_0C */
1227 iow(db, DM9000_EPAR, DM9000_PHY | reg);
1228
1229 iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */
1230
1231 writeb(reg_save, db->io_addr);
1232 spin_unlock_irqrestore(&db->lock,flags);
1233
1234 dm9000_msleep(db, 1); /* Wait read complete */
1235
1236 spin_lock_irqsave(&db->lock,flags);
1237 reg_save = readb(db->io_addr);
1238
1239 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
1240
1241 /* The read data is held in REG_0D & REG_0E */
1242 ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
1243
1244 /* restore the previous address */
1245 writeb(reg_save, db->io_addr);
1246 spin_unlock_irqrestore(&db->lock,flags);
1247
1248 mutex_unlock(&db->addr_lock);
1249
1250 dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
1251 return ret;
1252 }
1253
1254 /*
1255 * Write a word to phyxcer
1256 */
1257 static void
1258 dm9000_phy_write(struct net_device *dev,
1259 int phyaddr_unused, int reg, int value)
1260 {
1261 board_info_t *db = netdev_priv(dev);
1262 unsigned long flags;
1263 unsigned long reg_save;
1264
1265 dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
1266 mutex_lock(&db->addr_lock);
1267
1268 spin_lock_irqsave(&db->lock,flags);
1269
1270 /* Save previous register address */
1271 reg_save = readb(db->io_addr);
1272
1273 /* Fill the phyxcer register into REG_0C */
1274 iow(db, DM9000_EPAR, DM9000_PHY | reg);
1275
1276 /* Fill the written data into REG_0D & REG_0E */
1277 iow(db, DM9000_EPDRL, value);
1278 iow(db, DM9000_EPDRH, value >> 8);
1279
1280 iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */
1281
1282 writeb(reg_save, db->io_addr);
1283 spin_unlock_irqrestore(&db->lock, flags);
1284
1285 dm9000_msleep(db, 1); /* Wait write complete */
1286
1287 spin_lock_irqsave(&db->lock,flags);
1288 reg_save = readb(db->io_addr);
1289
1290 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
1291
1292 /* restore the previous address */
1293 writeb(reg_save, db->io_addr);
1294
1295 spin_unlock_irqrestore(&db->lock, flags);
1296 mutex_unlock(&db->addr_lock);
1297 }
1298
1299 static void
1300 dm9000_shutdown(struct net_device *dev)
1301 {
1302 board_info_t *db = netdev_priv(dev);
1303
1304 /* RESET device */
1305 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
1306 iow(db, DM9000_GPR, 0x01); /* Power-Down PHY */
1307 iow(db, DM9000_IMR, IMR_PAR); /* Disable all interrupt */
1308 iow(db, DM9000_RCR, 0x00); /* Disable RX */
1309 }
1310
1311 /*
1312 * Stop the interface.
1313 * The interface is stopped when it is brought down.
1314 */
1315 static int
1316 dm9000_stop(struct net_device *ndev)
1317 {
1318 board_info_t *db = netdev_priv(ndev);
1319
1320 if (netif_msg_ifdown(db))
1321 dev_dbg(db->dev, "shutting down %s\n", ndev->name);
1322
1323 cancel_delayed_work_sync(&db->phy_poll);
1324
1325 netif_stop_queue(ndev);
1326 netif_carrier_off(ndev);
1327
1328 /* free interrupt */
1329 free_irq(ndev->irq, ndev);
1330
1331 dm9000_shutdown(ndev);
1332
1333 return 0;
1334 }
1335
1336 static const struct net_device_ops dm9000_netdev_ops = {
1337 .ndo_open = dm9000_open,
1338 .ndo_stop = dm9000_stop,
1339 .ndo_start_xmit = dm9000_start_xmit,
1340 .ndo_tx_timeout = dm9000_timeout,
1341 .ndo_set_multicast_list = dm9000_hash_table,
1342 .ndo_do_ioctl = dm9000_ioctl,
1343 .ndo_change_mtu = eth_change_mtu,
1344 .ndo_validate_addr = eth_validate_addr,
1345 .ndo_set_mac_address = eth_mac_addr,
1346 #ifdef CONFIG_NET_POLL_CONTROLLER
1347 .ndo_poll_controller = dm9000_poll_controller,
1348 #endif
1349 };
1350
1351 /*
1352 * Search DM9000 board, allocate space and register it
1353 */
1354 static int __devinit
1355 dm9000_probe(struct platform_device *pdev)
1356 {
1357 struct dm9000_plat_data *pdata = pdev->dev.platform_data;
1358 struct board_info *db; /* Pointer to the board information structure */
1359 struct net_device *ndev;
1360 const unsigned char *mac_src;
1361 int ret = 0;
1362 int iosize;
1363 int i;
1364 u32 id_val;
1365
1366 /* Init network device */
1367 ndev = alloc_etherdev(sizeof(struct board_info));
1368 if (!ndev) {
1369 dev_err(&pdev->dev, "could not allocate device.\n");
1370 return -ENOMEM;
1371 }
1372
1373 SET_NETDEV_DEV(ndev, &pdev->dev);
1374
1375 dev_dbg(&pdev->dev, "dm9000_probe()\n");
1376
1377 /* setup board info structure */
1378 db = netdev_priv(ndev);
1379
1380 db->dev = &pdev->dev;
1381 db->ndev = ndev;
1382
1383 spin_lock_init(&db->lock);
1384 mutex_init(&db->addr_lock);
1385
1386 INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);
1387
1388 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1389 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1390 db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1391
1392 if (db->addr_res == NULL || db->data_res == NULL ||
1393 db->irq_res == NULL) {
1394 dev_err(db->dev, "insufficient resources\n");
1395 ret = -ENOENT;
1396 goto out;
1397 }
1398
1399 db->irq_wake = platform_get_irq(pdev, 1);
1400 if (db->irq_wake >= 0) {
1401 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
1402
1403 ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
1404 IRQF_SHARED, dev_name(db->dev), ndev);
1405 if (ret) {
1406 dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
1407 } else {
1408
1409 /* test to see if irq is really wakeup capable */
1410 ret = set_irq_wake(db->irq_wake, 1);
1411 if (ret) {
1412 dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
1413 db->irq_wake, ret);
1414 ret = 0;
1415 } else {
1416 set_irq_wake(db->irq_wake, 0);
1417 db->wake_supported = 1;
1418 }
1419 }
1420 }
1421
1422 iosize = resource_size(db->addr_res);
1423 db->addr_req = request_mem_region(db->addr_res->start, iosize,
1424 pdev->name);
1425
1426 if (db->addr_req == NULL) {
1427 dev_err(db->dev, "cannot claim address reg area\n");
1428 ret = -EIO;
1429 goto out;
1430 }
1431
1432 db->io_addr = ioremap(db->addr_res->start, iosize);
1433
1434 if (db->io_addr == NULL) {
1435 dev_err(db->dev, "failed to ioremap address reg\n");
1436 ret = -EINVAL;
1437 goto out;
1438 }
1439
1440 iosize = resource_size(db->data_res);
1441 db->data_req = request_mem_region(db->data_res->start, iosize,
1442 pdev->name);
1443
1444 if (db->data_req == NULL) {
1445 dev_err(db->dev, "cannot claim data reg area\n");
1446 ret = -EIO;
1447 goto out;
1448 }
1449
1450 db->io_data = ioremap(db->data_res->start, iosize);
1451
1452 if (db->io_data == NULL) {
1453 dev_err(db->dev, "failed to ioremap data reg\n");
1454 ret = -EINVAL;
1455 goto out;
1456 }
1457
1458 /* fill in parameters for net-dev structure */
1459 ndev->base_addr = (unsigned long)db->io_addr;
1460 ndev->irq = db->irq_res->start;
1461
1462 /* ensure at least we have a default set of IO routines */
1463 dm9000_set_io(db, iosize);
1464
1465 /* check to see if anything is being over-ridden */
1466 if (pdata != NULL) {
1467 /* check to see if the driver wants to over-ride the
1468 * default IO width */
1469
1470 if (pdata->flags & DM9000_PLATF_8BITONLY)
1471 dm9000_set_io(db, 1);
1472
1473 if (pdata->flags & DM9000_PLATF_16BITONLY)
1474 dm9000_set_io(db, 2);
1475
1476 if (pdata->flags & DM9000_PLATF_32BITONLY)
1477 dm9000_set_io(db, 4);
1478
1479 /* check to see if there are any IO routine
1480 * over-rides */
1481
1482 if (pdata->inblk != NULL)
1483 db->inblk = pdata->inblk;
1484
1485 if (pdata->outblk != NULL)
1486 db->outblk = pdata->outblk;
1487
1488 if (pdata->dumpblk != NULL)
1489 db->dumpblk = pdata->dumpblk;
1490
1491 db->flags = pdata->flags;
1492 }
1493
1494 #ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
1495 db->flags |= DM9000_PLATF_SIMPLE_PHY;
1496 #endif
1497
1498 dm9000_reset(db);
1499
1500 /* try multiple times, DM9000 sometimes gets the read wrong */
1501 for (i = 0; i < 8; i++) {
1502 id_val = ior(db, DM9000_VIDL);
1503 id_val |= (u32)ior(db, DM9000_VIDH) << 8;
1504 id_val |= (u32)ior(db, DM9000_PIDL) << 16;
1505 id_val |= (u32)ior(db, DM9000_PIDH) << 24;
1506
1507 if (id_val == DM9000_ID)
1508 break;
1509 dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
1510 }
1511
1512 if (id_val != DM9000_ID) {
1513 dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
1514 ret = -ENODEV;
1515 goto out;
1516 }
1517
1518 /* Identify what type of DM9000 we are working on */
1519
1520 id_val = ior(db, DM9000_CHIPR);
1521 dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);
1522
1523 switch (id_val) {
1524 case CHIPR_DM9000A:
1525 db->type = TYPE_DM9000A;
1526 break;
1527 case CHIPR_DM9000B:
1528 db->type = TYPE_DM9000B;
1529 break;
1530 default:
1531 dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
1532 db->type = TYPE_DM9000E;
1533 }
1534
1535 /* dm9000a/b are capable of hardware checksum offload */
1536 if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
1537 db->can_csum = 1;
1538 db->rx_csum = 1;
1539 ndev->features |= NETIF_F_IP_CSUM;
1540 }
1541
1542 /* from this point we assume that we have found a DM9000 */
1543
1544 /* driver system function */
1545 ether_setup(ndev);
1546
1547 ndev->netdev_ops = &dm9000_netdev_ops;
1548 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
1549 ndev->ethtool_ops = &dm9000_ethtool_ops;
1550
1551 db->msg_enable = NETIF_MSG_LINK;
1552 db->mii.phy_id_mask = 0x1f;
1553 db->mii.reg_num_mask = 0x1f;
1554 db->mii.force_media = 0;
1555 db->mii.full_duplex = 0;
1556 db->mii.dev = ndev;
1557 db->mii.mdio_read = dm9000_phy_read;
1558 db->mii.mdio_write = dm9000_phy_write;
1559
1560 mac_src = "eeprom";
1561
1562 /* try reading the node address from the attached EEPROM */
1563 for (i = 0; i < 6; i += 2)
1564 dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
1565
1566 if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
1567 mac_src = "platform data";
1568 memcpy(ndev->dev_addr, pdata->dev_addr, 6);
1569 }
1570
1571 if (!is_valid_ether_addr(ndev->dev_addr)) {
1572 /* try reading from mac */
1573
1574 mac_src = "chip";
1575 for (i = 0; i < 6; i++)
1576 ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
1577 }
1578
1579 if (!is_valid_ether_addr(ndev->dev_addr))
1580 dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
1581 "set using ifconfig\n", ndev->name);
1582
1583 platform_set_drvdata(pdev, ndev);
1584 ret = register_netdev(ndev);
1585
1586 if (ret == 0)
1587 printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
1588 ndev->name, dm9000_type_to_char(db->type),
1589 db->io_addr, db->io_data, ndev->irq,
1590 ndev->dev_addr, mac_src);
1591 return 0;
1592
1593 out:
1594 dev_err(db->dev, "not found (%d).\n", ret);
1595
1596 dm9000_release_board(pdev, db);
1597 free_netdev(ndev);
1598
1599 return ret;
1600 }
1601
1602 static int
1603 dm9000_drv_suspend(struct device *dev)
1604 {
1605 struct platform_device *pdev = to_platform_device(dev);
1606 struct net_device *ndev = platform_get_drvdata(pdev);
1607 board_info_t *db;
1608
1609 if (ndev) {
1610 db = netdev_priv(ndev);
1611 db->in_suspend = 1;
1612
1613 if (!netif_running(ndev))
1614 return 0;
1615
1616 netif_device_detach(ndev);
1617
1618 /* only shutdown if not using WoL */
1619 if (!db->wake_state)
1620 dm9000_shutdown(ndev);
1621 }
1622 return 0;
1623 }
1624
1625 static int
1626 dm9000_drv_resume(struct device *dev)
1627 {
1628 struct platform_device *pdev = to_platform_device(dev);
1629 struct net_device *ndev = platform_get_drvdata(pdev);
1630 board_info_t *db = netdev_priv(ndev);
1631
1632 if (ndev) {
1633 if (netif_running(ndev)) {
1634 /* reset if we were not in wake mode, to ensure the
1635 * device is in a known state if it was powered off */
1636 if (!db->wake_state) {
1637 dm9000_reset(db);
1638 dm9000_init_dm9000(ndev);
1639 }
1640
1641 netif_device_attach(ndev);
1642 }
1643
1644 db->in_suspend = 0;
1645 }
1646 return 0;
1647 }
1648
1649 static const struct dev_pm_ops dm9000_drv_pm_ops = {
1650 .suspend = dm9000_drv_suspend,
1651 .resume = dm9000_drv_resume,
1652 };
1653
1654 static int __devexit
1655 dm9000_drv_remove(struct platform_device *pdev)
1656 {
1657 struct net_device *ndev = platform_get_drvdata(pdev);
1658
1659 platform_set_drvdata(pdev, NULL);
1660
1661 unregister_netdev(ndev);
1662 dm9000_release_board(pdev, (board_info_t *) netdev_priv(ndev));
1663 free_netdev(ndev); /* free device structure */
1664
1665 dev_dbg(&pdev->dev, "released and freed device\n");
1666 return 0;
1667 }
1668
1669 static struct platform_driver dm9000_driver = {
1670 .driver = {
1671 .name = "dm9000",
1672 .owner = THIS_MODULE,
1673 .pm = &dm9000_drv_pm_ops,
1674 },
1675 .probe = dm9000_probe,
1676 .remove = __devexit_p(dm9000_drv_remove),
1677 };
1678
1679 static int __init
1680 dm9000_init(void)
1681 {
1682 printk(KERN_INFO "%s Ethernet Driver, V%s\n", CARDNAME, DRV_VERSION);
1683
1684 return platform_driver_register(&dm9000_driver);
1685 }
1686
1687 static void __exit
1688 dm9000_cleanup(void)
1689 {
1690 platform_driver_unregister(&dm9000_driver);
1691 }
1692
1693 module_init(dm9000_init);
1694 module_exit(dm9000_cleanup);
1695
1696 MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
1697 MODULE_DESCRIPTION("Davicom DM9000 network driver");
1698 MODULE_LICENSE("GPL");
1699 MODULE_ALIAS("platform:dm9000");