1 /*
2 * Davicom DM9000 Fast Ethernet driver for Linux.
3 * Copyright (C) 1997 Sten Wang
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
16 *
17 * Additional updates, Copyright:
18 * Ben Dooks <ben@simtec.co.uk>
19 * Sascha Hauer <s.hauer@pengutronix.de>
20 */
21
22 #include <linux/module.h>
23 #include <linux/ioport.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/init.h>
27 #include <linux/interrupt.h>
28 #include <linux/skbuff.h>
29 #include <linux/spinlock.h>
30 #include <linux/crc32.h>
31 #include <linux/mii.h>
32 #include <linux/ethtool.h>
33 #include <linux/dm9000.h>
34 #include <linux/delay.h>
35 #include <linux/platform_device.h>
36 #include <linux/irq.h>
37 #include <linux/slab.h>
38
39 #include <asm/delay.h>
40 #include <asm/irq.h>
41 #include <asm/io.h>
42
43 #include "dm9000.h"
44
45 /* Board/System/Debug information/definition ---------------- */
46
47 #define DM9000_PHY 0x40 /* PHY address 0x01 */
48
49 #define CARDNAME "dm9000"
50 #define DRV_VERSION "1.31"
51
52 /*
53 * Transmit timeout, default 5 seconds.
54 */
55 static int watchdog = 5000;
56 module_param(watchdog, int, 0400);
57 MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
58
59 /* DM9000 register address locking.
60 *
61 * The DM9000 uses an address register to control where data written
62 * to the data register goes. This means that the address register
63 * must be preserved over interrupts or similar calls.
64 *
65 * During interrupt and other critical calls, a spinlock is used to
66 * protect the system, but the calls themselves save the address
67 * in the address register in case they are interrupting another
68 * access to the device.
69 *
70 * For general accesses a lock is provided so that calls which are
71 * allowed to sleep are serialised so that the address register does
72 * not need to be saved. This lock also serves to serialise access
73 * to the EEPROM and PHY access registers which are shared between
74 * these two devices.
75 */
76
77 /* The driver supports the original DM9000E, and now the two newer
78 * devices, DM9000A and DM9000B.
79 */
80
81 enum dm9000_type {
82 TYPE_DM9000E, /* original DM9000 */
83 TYPE_DM9000A,
84 TYPE_DM9000B
85 };
86
87 /* Structure/enum declaration ------------------------------- */
88 typedef struct board_info {
89
90 void __iomem *io_addr; /* Register I/O base address */
91 void __iomem *io_data; /* Data I/O address */
92 u16 irq; /* IRQ */
93
94 u16 tx_pkt_cnt;
95 u16 queue_pkt_len;
96 u16 queue_start_addr;
97 u16 queue_ip_summed;
98 u16 dbug_cnt;
99 u8 io_mode; /* 0:word, 2:byte */
100 u8 phy_addr;
101 u8 imr_all;
102
103 unsigned int flags;
104 unsigned int in_suspend :1;
105 unsigned int wake_supported :1;
106 int debug_level;
107
108 enum dm9000_type type;
109
110 void (*inblk)(void __iomem *port, void *data, int length);
111 void (*outblk)(void __iomem *port, void *data, int length);
112 void (*dumpblk)(void __iomem *port, int length);
113
114 struct device *dev; /* parent device */
115
116 struct resource *addr_res; /* resources found */
117 struct resource *data_res;
118 struct resource *addr_req; /* resources requested */
119 struct resource *data_req;
120 struct resource *irq_res;
121
122 int irq_wake;
123
124 struct mutex addr_lock; /* phy and eeprom access lock */
125
126 struct delayed_work phy_poll;
127 struct net_device *ndev;
128
129 spinlock_t lock;
130
131 struct mii_if_info mii;
132 u32 msg_enable;
133 u32 wake_state;
134
135 int ip_summed;
136 } board_info_t;
137
138 /* debug code */
139
140 #define dm9000_dbg(db, lev, msg...) do { \
141 if ((lev) < CONFIG_DM9000_DEBUGLEVEL && \
142 (lev) < db->debug_level) { \
143 dev_dbg(db->dev, msg); \
144 } \
145 } while (0)
146
147 static inline board_info_t *to_dm9000_board(struct net_device *dev)
148 {
149 return netdev_priv(dev);
150 }
151
152 /* DM9000 network board routine ---------------------------- */
153
154 static void
155 dm9000_reset(board_info_t * db)
156 {
157 dev_dbg(db->dev, "resetting device\n");
158
159 /* RESET device */
160 writeb(DM9000_NCR, db->io_addr);
161 udelay(200);
162 writeb(NCR_RST, db->io_data);
163 udelay(200);
164 }
165
166 /*
167 * Read a byte from I/O port
168 */
169 static u8
170 ior(board_info_t * db, int reg)
171 {
172 writeb(reg, db->io_addr);
173 return readb(db->io_data);
174 }
175
176 /*
177 * Write a byte to I/O port
178 */
179
180 static void
181 iow(board_info_t * db, int reg, int value)
182 {
183 writeb(reg, db->io_addr);
184 writeb(value, db->io_data);
185 }
186
187 /* routines for sending block to chip */
188
189 static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
190 {
191 writesb(reg, data, count);
192 }
193
194 static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
195 {
196 writesw(reg, data, (count+1) >> 1);
197 }
198
199 static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
200 {
201 writesl(reg, data, (count+3) >> 2);
202 }
203
204 /* input block from chip to memory */
205
206 static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
207 {
208 readsb(reg, data, count);
209 }
210
211
212 static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
213 {
214 readsw(reg, data, (count+1) >> 1);
215 }
216
217 static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
218 {
219 readsl(reg, data, (count+3) >> 2);
220 }
221
222 /* dump block from chip to null */
223
224 static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
225 {
226 int i;
227 int tmp;
228
229 for (i = 0; i < count; i++)
230 tmp = readb(reg);
231 }
232
233 static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
234 {
235 int i;
236 int tmp;
237
238 count = (count + 1) >> 1;
239
240 for (i = 0; i < count; i++)
241 tmp = readw(reg);
242 }
243
244 static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
245 {
246 int i;
247 int tmp;
248
249 count = (count + 3) >> 2;
250
251 for (i = 0; i < count; i++)
252 tmp = readl(reg);
253 }
254
255 /* dm9000_set_io
256 *
257 * select the specified set of io routines to use with the
258 * device
259 */
260
261 static void dm9000_set_io(struct board_info *db, int byte_width)
262 {
263 /* use the size of the data resource to work out what IO
264 * routines we want to use
265 */
266
267 switch (byte_width) {
268 case 1:
269 db->dumpblk = dm9000_dumpblk_8bit;
270 db->outblk = dm9000_outblk_8bit;
271 db->inblk = dm9000_inblk_8bit;
272 break;
273
274
275 case 3:
276 dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
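/* fall through: treat 3-byte wide IO as 16-bit */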
277 case 2:
278 db->dumpblk = dm9000_dumpblk_16bit;
279 db->outblk = dm9000_outblk_16bit;
280 db->inblk = dm9000_inblk_16bit;
281 break;
282
283 case 4:
284 default:
285 db->dumpblk = dm9000_dumpblk_32bit;
286 db->outblk = dm9000_outblk_32bit;
287 db->inblk = dm9000_inblk_32bit;
288 break;
289 }
290 }
291
292 static void dm9000_schedule_poll(board_info_t *db)
293 {
294 if (db->type == TYPE_DM9000E)
295 schedule_delayed_work(&db->phy_poll, HZ * 2);
296 }
297
298 static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
299 {
300 board_info_t *dm = to_dm9000_board(dev);
301
302 if (!netif_running(dev))
303 return -EINVAL;
304
305 return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
306 }
307
308 static unsigned int
309 dm9000_read_locked(board_info_t *db, int reg)
310 {
311 unsigned long flags;
312 unsigned int ret;
313
314 spin_lock_irqsave(&db->lock, flags);
315 ret = ior(db, reg);
316 spin_unlock_irqrestore(&db->lock, flags);
317
318 return ret;
319 }
320
321 static int dm9000_wait_eeprom(board_info_t *db)
322 {
323 unsigned int status;
324 int timeout = 8; /* wait max 8msec */
325
326 /* The DM9000 data sheets say we should be able to
327 * poll the ERRE bit in EPCR to wait for the EEPROM
328 * operation. From testing several chips, this bit
329 * does not seem to work.
330 *
331 * We attempt to use the bit, but fall back to the
332 * timeout (which is why we do not return an error
333 * on expiry) to say that the EEPROM operation has
334 * completed.
335 */
336
337 while (1) {
338 status = dm9000_read_locked(db, DM9000_EPCR);
339
340 if ((status & EPCR_ERRE) == 0)
341 break;
342
343 msleep(1);
344
345 if (timeout-- < 0) {
346 dev_dbg(db->dev, "timeout waiting EEPROM\n");
347 break;
348 }
349 }
350
351 return 0;
352 }
353
354 /*
355 * Read a word data from EEPROM
356 */
357 static void
358 dm9000_read_eeprom(board_info_t *db, int offset, u8 *to)
359 {
360 unsigned long flags;
361
362 if (db->flags & DM9000_PLATF_NO_EEPROM) {
363 to[0] = 0xff;
364 to[1] = 0xff;
365 return;
366 }
367
368 mutex_lock(&db->addr_lock);
369
370 spin_lock_irqsave(&db->lock, flags);
371
372 iow(db, DM9000_EPAR, offset);
373 iow(db, DM9000_EPCR, EPCR_ERPRR);
374
375 spin_unlock_irqrestore(&db->lock, flags);
376
377 dm9000_wait_eeprom(db);
378
379         /* delay for at least 150us */
380 msleep(1);
381
382 spin_lock_irqsave(&db->lock, flags);
383
384 iow(db, DM9000_EPCR, 0x0);
385
386 to[0] = ior(db, DM9000_EPDRL);
387 to[1] = ior(db, DM9000_EPDRH);
388
389 spin_unlock_irqrestore(&db->lock, flags);
390
391 mutex_unlock(&db->addr_lock);
392 }
393
394 /*
395 * Write a word data to SROM
396 */
397 static void
398 dm9000_write_eeprom(board_info_t *db, int offset, u8 *data)
399 {
400 unsigned long flags;
401
402 if (db->flags & DM9000_PLATF_NO_EEPROM)
403 return;
404
405 mutex_lock(&db->addr_lock);
406
407 spin_lock_irqsave(&db->lock, flags);
408 iow(db, DM9000_EPAR, offset);
409 iow(db, DM9000_EPDRH, data[1]);
410 iow(db, DM9000_EPDRL, data[0]);
411 iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
412 spin_unlock_irqrestore(&db->lock, flags);
413
414 dm9000_wait_eeprom(db);
415
416         mdelay(1);      /* wait at least 150us to clear */
417
418 spin_lock_irqsave(&db->lock, flags);
419 iow(db, DM9000_EPCR, 0);
420 spin_unlock_irqrestore(&db->lock, flags);
421
422 mutex_unlock(&db->addr_lock);
423 }
424
425 /* ethtool ops */
426
427 static void dm9000_get_drvinfo(struct net_device *dev,
428 struct ethtool_drvinfo *info)
429 {
430 board_info_t *dm = to_dm9000_board(dev);
431
432 strcpy(info->driver, CARDNAME);
433 strcpy(info->version, DRV_VERSION);
434 strcpy(info->bus_info, to_platform_device(dm->dev)->name);
435 }
436
437 static u32 dm9000_get_msglevel(struct net_device *dev)
438 {
439 board_info_t *dm = to_dm9000_board(dev);
440
441 return dm->msg_enable;
442 }
443
444 static void dm9000_set_msglevel(struct net_device *dev, u32 value)
445 {
446 board_info_t *dm = to_dm9000_board(dev);
447
448 dm->msg_enable = value;
449 }
450
451 static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
452 {
453 board_info_t *dm = to_dm9000_board(dev);
454
455 mii_ethtool_gset(&dm->mii, cmd);
456 return 0;
457 }
458
459 static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
460 {
461 board_info_t *dm = to_dm9000_board(dev);
462
463 return mii_ethtool_sset(&dm->mii, cmd);
464 }
465
466 static int dm9000_nway_reset(struct net_device *dev)
467 {
468 board_info_t *dm = to_dm9000_board(dev);
469 return mii_nway_restart(&dm->mii);
470 }
471
472 static int dm9000_set_features(struct net_device *dev, u32 features)
473 {
474 board_info_t *dm = to_dm9000_board(dev);
475 u32 changed = dev->features ^ features;
476 unsigned long flags;
477
478 if (!(changed & NETIF_F_RXCSUM))
479 return 0;
480
481 spin_lock_irqsave(&dm->lock, flags);
482 iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
483 spin_unlock_irqrestore(&dm->lock, flags);
484
485 return 0;
486 }
487
488 static u32 dm9000_get_link(struct net_device *dev)
489 {
490 board_info_t *dm = to_dm9000_board(dev);
491 u32 ret;
492
493 if (dm->flags & DM9000_PLATF_EXT_PHY)
494 ret = mii_link_ok(&dm->mii);
495 else
496 ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;
497
498 return ret;
499 }
500
501 #define DM_EEPROM_MAGIC (0x444D394B)
502
503 static int dm9000_get_eeprom_len(struct net_device *dev)
504 {
505 return 128;
506 }
507
508 static int dm9000_get_eeprom(struct net_device *dev,
509 struct ethtool_eeprom *ee, u8 *data)
510 {
511 board_info_t *dm = to_dm9000_board(dev);
512 int offset = ee->offset;
513 int len = ee->len;
514 int i;
515
516 /* EEPROM access is aligned to two bytes */
517
518 if ((len & 1) != 0 || (offset & 1) != 0)
519 return -EINVAL;
520
521 if (dm->flags & DM9000_PLATF_NO_EEPROM)
522 return -ENOENT;
523
524 ee->magic = DM_EEPROM_MAGIC;
525
526 for (i = 0; i < len; i += 2)
527 dm9000_read_eeprom(dm, (offset + i) / 2, data + i);
528
529 return 0;
530 }
531
532 static int dm9000_set_eeprom(struct net_device *dev,
533 struct ethtool_eeprom *ee, u8 *data)
534 {
535 board_info_t *dm = to_dm9000_board(dev);
536 int offset = ee->offset;
537 int len = ee->len;
538 int done;
539
540 /* EEPROM access is aligned to two bytes */
541
542 if (dm->flags & DM9000_PLATF_NO_EEPROM)
543 return -ENOENT;
544
545 if (ee->magic != DM_EEPROM_MAGIC)
546 return -EINVAL;
547
548 while (len > 0) {
549 if (len & 1 || offset & 1) {
550 int which = offset & 1;
551 u8 tmp[2];
552
553 dm9000_read_eeprom(dm, offset / 2, tmp);
554 tmp[which] = *data;
555 dm9000_write_eeprom(dm, offset / 2, tmp);
556
557 done = 1;
558 } else {
559 dm9000_write_eeprom(dm, offset / 2, data);
560 done = 2;
561 }
562
563 data += done;
564 offset += done;
565 len -= done;
566 }
567
568 return 0;
569 }
570
571 static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
572 {
573 board_info_t *dm = to_dm9000_board(dev);
574
575 memset(w, 0, sizeof(struct ethtool_wolinfo));
576
577 /* note, we could probably support wake-phy too */
578 w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
579 w->wolopts = dm->wake_state;
580 }
581
582 static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
583 {
584 board_info_t *dm = to_dm9000_board(dev);
585 unsigned long flags;
586 u32 opts = w->wolopts;
587 u32 wcr = 0;
588
589 if (!dm->wake_supported)
590 return -EOPNOTSUPP;
591
592 if (opts & ~WAKE_MAGIC)
593 return -EINVAL;
594
595 if (opts & WAKE_MAGIC)
596 wcr |= WCR_MAGICEN;
597
598 mutex_lock(&dm->addr_lock);
599
600 spin_lock_irqsave(&dm->lock, flags);
601 iow(dm, DM9000_WCR, wcr);
602 spin_unlock_irqrestore(&dm->lock, flags);
603
604 mutex_unlock(&dm->addr_lock);
605
606 if (dm->wake_state != opts) {
607 /* change in wol state, update IRQ state */
608
609 if (!dm->wake_state)
610 irq_set_irq_wake(dm->irq_wake, 1);
611                 else if (dm->wake_state && !opts)
612 irq_set_irq_wake(dm->irq_wake, 0);
613 }
614
615 dm->wake_state = opts;
616 return 0;
617 }
618
619 static const struct ethtool_ops dm9000_ethtool_ops = {
620 .get_drvinfo = dm9000_get_drvinfo,
621 .get_settings = dm9000_get_settings,
622 .set_settings = dm9000_set_settings,
623 .get_msglevel = dm9000_get_msglevel,
624 .set_msglevel = dm9000_set_msglevel,
625 .nway_reset = dm9000_nway_reset,
626 .get_link = dm9000_get_link,
627 .get_wol = dm9000_get_wol,
628 .set_wol = dm9000_set_wol,
629 .get_eeprom_len = dm9000_get_eeprom_len,
630 .get_eeprom = dm9000_get_eeprom,
631 .set_eeprom = dm9000_set_eeprom,
632 };
633
634 static void dm9000_show_carrier(board_info_t *db,
635 unsigned carrier, unsigned nsr)
636 {
637 struct net_device *ndev = db->ndev;
638 unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
639
640 if (carrier)
641 dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n",
642 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
643 (ncr & NCR_FDX) ? "full" : "half");
644 else
645 dev_info(db->dev, "%s: link down\n", ndev->name);
646 }
647
648 static void
649 dm9000_poll_work(struct work_struct *w)
650 {
651 struct delayed_work *dw = to_delayed_work(w);
652 board_info_t *db = container_of(dw, board_info_t, phy_poll);
653 struct net_device *ndev = db->ndev;
654
655 if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
656 !(db->flags & DM9000_PLATF_EXT_PHY)) {
657 unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
658 unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
659 unsigned new_carrier;
660
661 new_carrier = (nsr & NSR_LINKST) ? 1 : 0;
662
663 if (old_carrier != new_carrier) {
664 if (netif_msg_link(db))
665 dm9000_show_carrier(db, new_carrier, nsr);
666
667 if (!new_carrier)
668 netif_carrier_off(ndev);
669 else
670 netif_carrier_on(ndev);
671 }
672 } else
673 mii_check_media(&db->mii, netif_msg_link(db), 0);
674
675 if (netif_running(ndev))
676 dm9000_schedule_poll(db);
677 }
678
679 /* dm9000_release_board
680 *
681 * release a board, and any mapped resources
682 */
683
684 static void
685 dm9000_release_board(struct platform_device *pdev, struct board_info *db)
686 {
687 /* unmap our resources */
688
689 iounmap(db->io_addr);
690 iounmap(db->io_data);
691
692 /* release the resources */
693
694 release_resource(db->data_req);
695 kfree(db->data_req);
696
697 release_resource(db->addr_req);
698 kfree(db->addr_req);
699 }
700
701 static unsigned char dm9000_type_to_char(enum dm9000_type type)
702 {
703 switch (type) {
704 case TYPE_DM9000E: return 'e';
705 case TYPE_DM9000A: return 'a';
706 case TYPE_DM9000B: return 'b';
707 }
708
709 return '?';
710 }
711
712 /*
713 * Set DM9000 multicast address
714 */
715 static void
716 dm9000_hash_table_unlocked(struct net_device *dev)
717 {
718 board_info_t *db = netdev_priv(dev);
719 struct netdev_hw_addr *ha;
720 int i, oft;
721 u32 hash_val;
722 u16 hash_table[4];
723 u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
724
725 dm9000_dbg(db, 1, "entering %s\n", __func__);
726
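        /* program the station MAC address into the physical address registers (PAR) */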
727 for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
728 iow(db, oft, dev->dev_addr[i]);
729
730 /* Clear Hash Table */
731 for (i = 0; i < 4; i++)
732 hash_table[i] = 0x0;
733
734 /* broadcast address */
735 hash_table[3] = 0x8000;
736
737 if (dev->flags & IFF_PROMISC)
738 rcr |= RCR_PRMSC;
739
740 if (dev->flags & IFF_ALLMULTI)
741 rcr |= RCR_ALL;
742
743 /* the multicast address in Hash Table : 64 bits */
744 netdev_for_each_mc_addr(ha, dev) {
745 hash_val = ether_crc_le(6, ha->addr) & 0x3f;
746 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
747 }
748
749 /* Write the hash table to MAC MD table */
750 for (i = 0, oft = DM9000_MAR; i < 4; i++) {
751 iow(db, oft++, hash_table[i]);
752 iow(db, oft++, hash_table[i] >> 8);
753 }
754
755 iow(db, DM9000_RCR, rcr);
756 }
757
758 static void
759 dm9000_hash_table(struct net_device *dev)
760 {
761 board_info_t *db = netdev_priv(dev);
762 unsigned long flags;
763
764 spin_lock_irqsave(&db->lock, flags);
765 dm9000_hash_table_unlocked(dev);
766 spin_unlock_irqrestore(&db->lock, flags);
767 }
768
769 /*
770 * Initialize dm9000 board
771 */
772 static void
773 dm9000_init_dm9000(struct net_device *dev)
774 {
775 board_info_t *db = netdev_priv(dev);
776 unsigned int imr;
777 unsigned int ncr;
778
779 dm9000_dbg(db, 1, "entering %s\n", __func__);
780
781 /* I/O mode */
782 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
783
784 /* Checksum mode */
785 if (dev->hw_features & NETIF_F_RXCSUM)
786 iow(db, DM9000_RCSR,
787 (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
788
789 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
790
791 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
792
793         /* if wol is needed, always set NCR_WAKEEN, otherwise we end up
794          * discarding the wake events when this is disabled. There is already
795          * a wake-mask in DM9000_WCR */
796 if (db->wake_supported)
797 ncr |= NCR_WAKEEN;
798
799 iow(db, DM9000_NCR, ncr);
800
801 /* Program operating register */
802 iow(db, DM9000_TCR, 0); /* TX Polling clear */
803 iow(db, DM9000_BPTR, 0x3f); /* Less 3Kb, 200us */
804 iow(db, DM9000_FCR, 0xff); /* Flow Control */
805 iow(db, DM9000_SMCR, 0); /* Special Mode */
806 /* clear TX status */
807 iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
808 iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
809
810 /* Set address filter table */
811 dm9000_hash_table_unlocked(dev);
812
813 imr = IMR_PAR | IMR_PTM | IMR_PRM;
814 if (db->type != TYPE_DM9000E)
815 imr |= IMR_LNKCHNG;
816
817 db->imr_all = imr;
818
819 /* Enable TX/RX interrupt mask */
820 iow(db, DM9000_IMR, imr);
821
822 /* Init Driver variable */
823 db->tx_pkt_cnt = 0;
824 db->queue_pkt_len = 0;
825 dev->trans_start = jiffies;
826 }
827
828 /* Our watchdog timed out. Called by the networking layer */
829 static void dm9000_timeout(struct net_device *dev)
830 {
831 board_info_t *db = netdev_priv(dev);
832 u8 reg_save;
833 unsigned long flags;
834
835 /* Save previous register address */
836 spin_lock_irqsave(&db->lock, flags);
837 reg_save = readb(db->io_addr);
838
839 netif_stop_queue(dev);
840 dm9000_reset(db);
841 dm9000_init_dm9000(dev);
842 /* We can accept TX packets again */
843 dev->trans_start = jiffies; /* prevent tx timeout */
844 netif_wake_queue(dev);
845
846 /* Restore previous register address */
847 writeb(reg_save, db->io_addr);
848 spin_unlock_irqrestore(&db->lock, flags);
849 }
850
851 static void dm9000_send_packet(struct net_device *dev,
852 int ip_summed,
853 u16 pkt_len)
854 {
855 board_info_t *dm = to_dm9000_board(dev);
856
857 /* The DM9000 is not smart enough to leave fragmented packets alone. */
858 if (dm->ip_summed != ip_summed) {
859 if (ip_summed == CHECKSUM_NONE)
860 iow(dm, DM9000_TCCR, 0);
861 else
862 iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
863 dm->ip_summed = ip_summed;
864 }
865
866 /* Set TX length to DM9000 */
867 iow(dm, DM9000_TXPLL, pkt_len);
868 iow(dm, DM9000_TXPLH, pkt_len >> 8);
869
870 /* Issue TX polling command */
871 iow(dm, DM9000_TCR, TCR_TXREQ); /* Cleared after TX complete */
872 }
873
874 /*
875 * Hardware start transmission.
876 * Send a packet to media from the upper layer.
877 */
878 static int
879 dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
880 {
881 unsigned long flags;
882 board_info_t *db = netdev_priv(dev);
883
884 dm9000_dbg(db, 3, "%s:\n", __func__);
885
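        /* the DM9000 TX SRAM holds at most two queued packets; refuse a
         * third until a pending transmission completes (see dm9000_tx_done)
         */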
886 if (db->tx_pkt_cnt > 1)
887 return NETDEV_TX_BUSY;
888
889 spin_lock_irqsave(&db->lock, flags);
890
891 /* Move data to DM9000 TX RAM */
892 writeb(DM9000_MWCMD, db->io_addr);
893
894 (db->outblk)(db->io_data, skb->data, skb->len);
895 dev->stats.tx_bytes += skb->len;
896
897 db->tx_pkt_cnt++;
898         /* TX control: send the first packet immediately, queue the second */
899 if (db->tx_pkt_cnt == 1) {
900 dm9000_send_packet(dev, skb->ip_summed, skb->len);
901 } else {
902 /* Second packet */
903 db->queue_pkt_len = skb->len;
904 db->queue_ip_summed = skb->ip_summed;
905 netif_stop_queue(dev);
906 }
907
908 spin_unlock_irqrestore(&db->lock, flags);
909
910 /* free this SKB */
911 dev_kfree_skb(skb);
912
913 return NETDEV_TX_OK;
914 }
915
916 /*
917 * DM9000 interrupt handler
918  * pass received packets to the upper layer, reclaim transmitted packets
919 */
920
921 static void dm9000_tx_done(struct net_device *dev, board_info_t *db)
922 {
923 int tx_status = ior(db, DM9000_NSR); /* Got TX status */
924
925 if (tx_status & (NSR_TX2END | NSR_TX1END)) {
926 /* One packet sent complete */
927 db->tx_pkt_cnt--;
928 dev->stats.tx_packets++;
929
930 if (netif_msg_tx_done(db))
931 dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);
932
933 /* Queue packet check & send */
934 if (db->tx_pkt_cnt > 0)
935 dm9000_send_packet(dev, db->queue_ip_summed,
936 db->queue_pkt_len);
937 netif_wake_queue(dev);
938 }
939 }
940
941 struct dm9000_rxhdr {
942 u8 RxPktReady;
943 u8 RxStatus;
944 __le16 RxLen;
945 } __packed;
946
947 /*
948 * Received a packet and pass to upper layer
949 */
950 static void
951 dm9000_rx(struct net_device *dev)
952 {
953 board_info_t *db = netdev_priv(dev);
954 struct dm9000_rxhdr rxhdr;
955 struct sk_buff *skb;
956 u8 rxbyte, *rdptr;
957 bool GoodPacket;
958 int RxLen;
959
960 /* Check packet ready or not */
961 do {
962 ior(db, DM9000_MRCMDX); /* Dummy read */
963
964 /* Get most updated data */
965 rxbyte = readb(db->io_data);
966
967 /* Status check: this byte must be 0 or 1 */
968 if (rxbyte & DM9000_PKT_ERR) {
969 dev_warn(db->dev, "status check fail: %d\n", rxbyte);
970 iow(db, DM9000_RCR, 0x00); /* Stop Device */
971 iow(db, DM9000_ISR, IMR_PAR); /* Stop INT request */
972 return;
973 }
974
975 if (!(rxbyte & DM9000_PKT_RDY))
976 return;
977
978                 /* A packet is ready now: get its status and length */
979 GoodPacket = true;
980 writeb(DM9000_MRCMD, db->io_addr);
981
982 (db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));
983
984 RxLen = le16_to_cpu(rxhdr.RxLen);
985
986 if (netif_msg_rx_status(db))
987 dev_dbg(db->dev, "RX: status %02x, length %04x\n",
988 rxhdr.RxStatus, RxLen);
989
990 /* Packet Status check */
991 if (RxLen < 0x40) {
992 GoodPacket = false;
993 if (netif_msg_rx_err(db))
994 dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
995 }
996
997 if (RxLen > DM9000_PKT_MAX) {
998 dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
999 }
1000
1001 /* rxhdr.RxStatus is identical to RSR register. */
1002 if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
1003 RSR_PLE | RSR_RWTO |
1004 RSR_LCS | RSR_RF)) {
1005 GoodPacket = false;
1006 if (rxhdr.RxStatus & RSR_FOE) {
1007 if (netif_msg_rx_err(db))
1008 dev_dbg(db->dev, "fifo error\n");
1009 dev->stats.rx_fifo_errors++;
1010 }
1011 if (rxhdr.RxStatus & RSR_CE) {
1012 if (netif_msg_rx_err(db))
1013 dev_dbg(db->dev, "crc error\n");
1014 dev->stats.rx_crc_errors++;
1015 }
1016 if (rxhdr.RxStatus & RSR_RF) {
1017 if (netif_msg_rx_err(db))
1018 dev_dbg(db->dev, "length error\n");
1019 dev->stats.rx_length_errors++;
1020 }
1021 }
1022
1023 /* Move data from DM9000 */
1024 if (GoodPacket &&
1025 ((skb = dev_alloc_skb(RxLen + 4)) != NULL)) {
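                        /* reserve 2 bytes so the IP header is word-aligned;
                         * RxLen includes the trailing 4-byte CRC, which is
                         * read out of the FIFO but not passed up the stack
                         */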
1026 skb_reserve(skb, 2);
1027 rdptr = (u8 *) skb_put(skb, RxLen - 4);
1028
1029 /* Read received packet from RX SRAM */
1030
1031 (db->inblk)(db->io_data, rdptr, RxLen);
1032 dev->stats.rx_bytes += RxLen;
1033
1034 /* Pass to upper layer */
1035 skb->protocol = eth_type_trans(skb, dev);
1036 if (dev->features & NETIF_F_RXCSUM) {
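                                /* with RX checksum offload enabled, bits 2-4 and
                                 * 5-7 of the status byte pair up as per-protocol
                                 * type/failure flags; trust the packet only when
                                 * no failure bit lines up with its type bit
                                 * (interpretation of the expression below - see
                                 * the DM9000A/B datasheet for the exact layout)
                                 */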
1037 if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
1038 skb->ip_summed = CHECKSUM_UNNECESSARY;
1039 else
1040 skb_checksum_none_assert(skb);
1041 }
1042 netif_rx(skb);
1043 dev->stats.rx_packets++;
1044
1045 } else {
1046 /* need to dump the packet's data */
1047
1048 (db->dumpblk)(db->io_data, RxLen);
1049 }
1050 } while (rxbyte & DM9000_PKT_RDY);
1051 }
1052
1053 static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
1054 {
1055 struct net_device *dev = dev_id;
1056 board_info_t *db = netdev_priv(dev);
1057 int int_status;
1058 unsigned long flags;
1059 u8 reg_save;
1060
1061 dm9000_dbg(db, 3, "entering %s\n", __func__);
1062
1063         /* A real interrupt has arrived */
1064
1065 /* holders of db->lock must always block IRQs */
1066 spin_lock_irqsave(&db->lock, flags);
1067
1068 /* Save previous register address */
1069 reg_save = readb(db->io_addr);
1070
1071 /* Disable all interrupts */
1072 iow(db, DM9000_IMR, IMR_PAR);
1073
1074 /* Got DM9000 interrupt status */
1075 int_status = ior(db, DM9000_ISR); /* Got ISR */
1076 iow(db, DM9000_ISR, int_status); /* Clear ISR status */
1077
1078 if (netif_msg_intr(db))
1079 dev_dbg(db->dev, "interrupt status %02x\n", int_status);
1080
1081         /* Receive the incoming packet(s) */
1082 if (int_status & ISR_PRS)
1083 dm9000_rx(dev);
1084
1085         /* Transmit Interrupt check */
1086 if (int_status & ISR_PTS)
1087 dm9000_tx_done(dev, db);
1088
1089 if (db->type != TYPE_DM9000E) {
1090 if (int_status & ISR_LNKCHNG) {
1091 /* fire a link-change request */
1092 schedule_delayed_work(&db->phy_poll, 1);
1093 }
1094 }
1095
1096 /* Re-enable interrupt mask */
1097 iow(db, DM9000_IMR, db->imr_all);
1098
1099 /* Restore previous register address */
1100 writeb(reg_save, db->io_addr);
1101
1102 spin_unlock_irqrestore(&db->lock, flags);
1103
1104 return IRQ_HANDLED;
1105 }
1106
1107 static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
1108 {
1109 struct net_device *dev = dev_id;
1110 board_info_t *db = netdev_priv(dev);
1111 unsigned long flags;
1112 unsigned nsr, wcr;
1113
1114 spin_lock_irqsave(&db->lock, flags);
1115
1116 nsr = ior(db, DM9000_NSR);
1117 wcr = ior(db, DM9000_WCR);
1118
1119 dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);
1120
1121 if (nsr & NSR_WAKEST) {
1122                 /* clear the wake event status so it is not handled again */
1123 iow(db, DM9000_NSR, NSR_WAKEST);
1124
1125 if (wcr & WCR_LINKST)
1126 dev_info(db->dev, "wake by link status change\n");
1127 if (wcr & WCR_SAMPLEST)
1128 dev_info(db->dev, "wake by sample packet\n");
1129                 if (wcr & WCR_MAGICST)
1130 dev_info(db->dev, "wake by magic packet\n");
1131 if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
1132                         dev_err(db->dev, "wake signalled with no reason? "
1133                                 "NSR=0x%02x, WCR=0x%02x\n", nsr, wcr);
1134
1135 }
1136
1137 spin_unlock_irqrestore(&db->lock, flags);
1138
1139 return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
1140 }
1141
1142 #ifdef CONFIG_NET_POLL_CONTROLLER
1143 /*
1144  * Used by netconsole
1145 */
1146 static void dm9000_poll_controller(struct net_device *dev)
1147 {
1148 disable_irq(dev->irq);
1149 dm9000_interrupt(dev->irq, dev);
1150 enable_irq(dev->irq);
1151 }
1152 #endif
1153
1154 /*
1155 * Open the interface.
1156  * The interface is opened whenever "ifconfig" activates it.
1157 */
1158 static int
1159 dm9000_open(struct net_device *dev)
1160 {
1161 board_info_t *db = netdev_priv(dev);
1162 unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;
1163
1164 if (netif_msg_ifup(db))
1165 dev_dbg(db->dev, "enabling %s\n", dev->name);
1166
1167 /* If there is no IRQ type specified, default to something that
1168 * may work, and tell the user that this is a problem */
1169
1170 if (irqflags == IRQF_TRIGGER_NONE)
1171 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
1172
1173 irqflags |= IRQF_SHARED;
1174
1175         /* GPIO0 powers up the internal PHY; register 1Fh (GPR) is not cleared by reset */
1176         iow(db, DM9000_GPR, 0);        /* REG_1F bit0: 0 activates the phyxcer */
1177         mdelay(1);                     /* delay needed by the DM9000B */
1178
1179 /* Initialize DM9000 board */
1180 dm9000_reset(db);
1181 dm9000_init_dm9000(dev);
1182
1183 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
1184 return -EAGAIN;
1185
1186 /* Init driver variable */
1187 db->dbug_cnt = 0;
1188
1189 mii_check_media(&db->mii, netif_msg_link(db), 1);
1190 netif_start_queue(dev);
1191
1192 dm9000_schedule_poll(db);
1193
1194 return 0;
1195 }
1196
1197 /*
1198 * Sleep, either by using msleep() or if we are suspending, then
1199 * use mdelay() to sleep.
1200 */
1201 static void dm9000_msleep(board_info_t *db, unsigned int ms)
1202 {
1203 if (db->in_suspend)
1204 mdelay(ms);
1205 else
1206 msleep(ms);
1207 }
1208
1209 /*
1210 * Read a word from phyxcer
1211 */
1212 static int
1213 dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
1214 {
1215 board_info_t *db = netdev_priv(dev);
1216 unsigned long flags;
1217 unsigned int reg_save;
1218 int ret;
1219
1220 mutex_lock(&db->addr_lock);
1221
1222 spin_lock_irqsave(&db->lock,flags);
1223
1224 /* Save previous register address */
1225 reg_save = readb(db->io_addr);
1226
1227 /* Fill the phyxcer register into REG_0C */
1228 iow(db, DM9000_EPAR, DM9000_PHY | reg);
1229
1230 iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */
1231
1232 writeb(reg_save, db->io_addr);
1233 spin_unlock_irqrestore(&db->lock,flags);
1234
1235 dm9000_msleep(db, 1); /* Wait read complete */
1236
1237 spin_lock_irqsave(&db->lock,flags);
1238 reg_save = readb(db->io_addr);
1239
1240 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
1241
1242         /* The read data is kept in REG_0D & REG_0E */
1243 ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
1244
1245 /* restore the previous address */
1246 writeb(reg_save, db->io_addr);
1247 spin_unlock_irqrestore(&db->lock,flags);
1248
1249 mutex_unlock(&db->addr_lock);
1250
1251 dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
1252 return ret;
1253 }
1254
1255 /*
1256 * Write a word to phyxcer
1257 */
1258 static void
1259 dm9000_phy_write(struct net_device *dev,
1260 int phyaddr_unused, int reg, int value)
1261 {
1262 board_info_t *db = netdev_priv(dev);
1263 unsigned long flags;
1264 unsigned long reg_save;
1265
1266 dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
1267 mutex_lock(&db->addr_lock);
1268
1269 spin_lock_irqsave(&db->lock,flags);
1270
1271 /* Save previous register address */
1272 reg_save = readb(db->io_addr);
1273
1274 /* Fill the phyxcer register into REG_0C */
1275 iow(db, DM9000_EPAR, DM9000_PHY | reg);
1276
1277 /* Fill the written data into REG_0D & REG_0E */
1278 iow(db, DM9000_EPDRL, value);
1279 iow(db, DM9000_EPDRH, value >> 8);
1280
1281 iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */
1282
1283 writeb(reg_save, db->io_addr);
1284 spin_unlock_irqrestore(&db->lock, flags);
1285
1286 dm9000_msleep(db, 1); /* Wait write complete */
1287
1288 spin_lock_irqsave(&db->lock,flags);
1289 reg_save = readb(db->io_addr);
1290
1291 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
1292
1293 /* restore the previous address */
1294 writeb(reg_save, db->io_addr);
1295
1296 spin_unlock_irqrestore(&db->lock, flags);
1297 mutex_unlock(&db->addr_lock);
1298 }
1299
1300 static void
1301 dm9000_shutdown(struct net_device *dev)
1302 {
1303 board_info_t *db = netdev_priv(dev);
1304
1305 /* RESET device */
1306 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
1307 iow(db, DM9000_GPR, 0x01); /* Power-Down PHY */
1308 iow(db, DM9000_IMR, IMR_PAR); /* Disable all interrupt */
1309 iow(db, DM9000_RCR, 0x00); /* Disable RX */
1310 }
1311
1312 /*
1313 * Stop the interface.
1314  * The interface is stopped when it is brought down.
1315 */
1316 static int
1317 dm9000_stop(struct net_device *ndev)
1318 {
1319 board_info_t *db = netdev_priv(ndev);
1320
1321 if (netif_msg_ifdown(db))
1322 dev_dbg(db->dev, "shutting down %s\n", ndev->name);
1323
1324 cancel_delayed_work_sync(&db->phy_poll);
1325
1326 netif_stop_queue(ndev);
1327 netif_carrier_off(ndev);
1328
1329 /* free interrupt */
1330 free_irq(ndev->irq, ndev);
1331
1332 dm9000_shutdown(ndev);
1333
1334 return 0;
1335 }
1336
1337 static const struct net_device_ops dm9000_netdev_ops = {
1338 .ndo_open = dm9000_open,
1339 .ndo_stop = dm9000_stop,
1340 .ndo_start_xmit = dm9000_start_xmit,
1341 .ndo_tx_timeout = dm9000_timeout,
1342 .ndo_set_multicast_list = dm9000_hash_table,
1343 .ndo_do_ioctl = dm9000_ioctl,
1344 .ndo_change_mtu = eth_change_mtu,
1345 .ndo_set_features = dm9000_set_features,
1346 .ndo_validate_addr = eth_validate_addr,
1347 .ndo_set_mac_address = eth_mac_addr,
1348 #ifdef CONFIG_NET_POLL_CONTROLLER
1349 .ndo_poll_controller = dm9000_poll_controller,
1350 #endif
1351 };
1352
1353 /*
1354 * Search DM9000 board, allocate space and register it
1355 */
1356 static int __devinit
1357 dm9000_probe(struct platform_device *pdev)
1358 {
1359 struct dm9000_plat_data *pdata = pdev->dev.platform_data;
1360         struct board_info *db;        /* points to the board information structure */
1361 struct net_device *ndev;
1362 const unsigned char *mac_src;
1363 int ret = 0;
1364 int iosize;
1365 int i;
1366 u32 id_val;
1367
1368 /* Init network device */
1369 ndev = alloc_etherdev(sizeof(struct board_info));
1370 if (!ndev) {
1371 dev_err(&pdev->dev, "could not allocate device.\n");
1372 return -ENOMEM;
1373 }
1374
1375 SET_NETDEV_DEV(ndev, &pdev->dev);
1376
1377 dev_dbg(&pdev->dev, "dm9000_probe()\n");
1378
1379 /* setup board info structure */
1380 db = netdev_priv(ndev);
1381
1382 db->dev = &pdev->dev;
1383 db->ndev = ndev;
1384
1385 spin_lock_init(&db->lock);
1386 mutex_init(&db->addr_lock);
1387
1388 INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);
1389
1390 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1391 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1392 db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1393
1394 if (db->addr_res == NULL || db->data_res == NULL ||
1395 db->irq_res == NULL) {
1396 dev_err(db->dev, "insufficient resources\n");
1397 ret = -ENOENT;
1398 goto out;
1399 }
1400
1401 db->irq_wake = platform_get_irq(pdev, 1);
1402 if (db->irq_wake >= 0) {
1403 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
1404
1405 ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
1406 IRQF_SHARED, dev_name(db->dev), ndev);
1407 if (ret) {
1408 dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
1409 } else {
1410
1411 /* test to see if irq is really wakeup capable */
1412 ret = irq_set_irq_wake(db->irq_wake, 1);
1413 if (ret) {
1414 dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
1415 db->irq_wake, ret);
1416 ret = 0;
1417 } else {
1418 irq_set_irq_wake(db->irq_wake, 0);
1419 db->wake_supported = 1;
1420 }
1421 }
1422 }
1423
1424 iosize = resource_size(db->addr_res);
1425 db->addr_req = request_mem_region(db->addr_res->start, iosize,
1426 pdev->name);
1427
1428 if (db->addr_req == NULL) {
1429 dev_err(db->dev, "cannot claim address reg area\n");
1430 ret = -EIO;
1431 goto out;
1432 }
1433
1434 db->io_addr = ioremap(db->addr_res->start, iosize);
1435
1436 if (db->io_addr == NULL) {
1437 dev_err(db->dev, "failed to ioremap address reg\n");
1438 ret = -EINVAL;
1439 goto out;
1440 }
1441
1442 iosize = resource_size(db->data_res);
1443 db->data_req = request_mem_region(db->data_res->start, iosize,
1444 pdev->name);
1445
1446 if (db->data_req == NULL) {
1447 dev_err(db->dev, "cannot claim data reg area\n");
1448 ret = -EIO;
1449 goto out;
1450 }
1451
1452 db->io_data = ioremap(db->data_res->start, iosize);
1453
1454 if (db->io_data == NULL) {
1455 dev_err(db->dev, "failed to ioremap data reg\n");
1456 ret = -EINVAL;
1457 goto out;
1458 }
1459
1460 /* fill in parameters for net-dev structure */
1461 ndev->base_addr = (unsigned long)db->io_addr;
1462 ndev->irq = db->irq_res->start;
1463
1464 /* ensure at least we have a default set of IO routines */
1465 dm9000_set_io(db, iosize);
1466
1467 /* check to see if anything is being over-ridden */
1468 if (pdata != NULL) {
1469 /* check to see if the driver wants to over-ride the
1470 * default IO width */
1471
1472 if (pdata->flags & DM9000_PLATF_8BITONLY)
1473 dm9000_set_io(db, 1);
1474
1475 if (pdata->flags & DM9000_PLATF_16BITONLY)
1476 dm9000_set_io(db, 2);
1477
1478 if (pdata->flags & DM9000_PLATF_32BITONLY)
1479 dm9000_set_io(db, 4);
1480
1481 /* check to see if there are any IO routine
1482 * over-rides */
1483
1484 if (pdata->inblk != NULL)
1485 db->inblk = pdata->inblk;
1486
1487 if (pdata->outblk != NULL)
1488 db->outblk = pdata->outblk;
1489
1490 if (pdata->dumpblk != NULL)
1491 db->dumpblk = pdata->dumpblk;
1492
1493 db->flags = pdata->flags;
1494 }
1495
1496 #ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
1497 db->flags |= DM9000_PLATF_SIMPLE_PHY;
1498 #endif
1499
1500 dm9000_reset(db);
1501
1502 /* try multiple times, DM9000 sometimes gets the read wrong */
1503 for (i = 0; i < 8; i++) {
1504 id_val = ior(db, DM9000_VIDL);
1505 id_val |= (u32)ior(db, DM9000_VIDH) << 8;
1506 id_val |= (u32)ior(db, DM9000_PIDL) << 16;
1507 id_val |= (u32)ior(db, DM9000_PIDH) << 24;
1508
1509 if (id_val == DM9000_ID)
1510 break;
1511 dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
1512 }
1513
1514 if (id_val != DM9000_ID) {
1515 dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
1516 ret = -ENODEV;
1517 goto out;
1518 }
1519
1520 /* Identify what type of DM9000 we are working on */
1521
1522 id_val = ior(db, DM9000_CHIPR);
1523 dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);
1524
1525 switch (id_val) {
1526 case CHIPR_DM9000A:
1527 db->type = TYPE_DM9000A;
1528 break;
1529 case CHIPR_DM9000B:
1530 db->type = TYPE_DM9000B;
1531 break;
1532 default:
1533 dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
1534 db->type = TYPE_DM9000E;
1535 }
1536
1537 /* dm9000a/b are capable of hardware checksum offload */
1538 if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
1539 ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
1540 ndev->features |= ndev->hw_features;
1541 }
1542
1543 /* from this point we assume that we have found a DM9000 */
1544
1545 /* driver system function */
1546 ether_setup(ndev);
1547
1548 ndev->netdev_ops = &dm9000_netdev_ops;
1549 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
1550 ndev->ethtool_ops = &dm9000_ethtool_ops;
1551
1552 db->msg_enable = NETIF_MSG_LINK;
1553 db->mii.phy_id_mask = 0x1f;
1554 db->mii.reg_num_mask = 0x1f;
1555 db->mii.force_media = 0;
1556 db->mii.full_duplex = 0;
1557 db->mii.dev = ndev;
1558 db->mii.mdio_read = dm9000_phy_read;
1559 db->mii.mdio_write = dm9000_phy_write;
1560
1561 mac_src = "eeprom";
1562
1563 /* try reading the node address from the attached EEPROM */
1564 for (i = 0; i < 6; i += 2)
1565 dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
1566
1567 if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
1568 mac_src = "platform data";
1569 memcpy(ndev->dev_addr, pdata->dev_addr, 6);
1570 }
1571
1572 if (!is_valid_ether_addr(ndev->dev_addr)) {
1573 /* try reading from mac */
1574
1575 mac_src = "chip";
1576 for (i = 0; i < 6; i++)
1577 ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
1578 }
1579
1580 if (!is_valid_ether_addr(ndev->dev_addr)) {
1581 dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
1582 "set using ifconfig\n", ndev->name);
1583
1584 random_ether_addr(ndev->dev_addr);
1585 mac_src = "random";
1586 }
1587
1588
1589 platform_set_drvdata(pdev, ndev);
1590 ret = register_netdev(ndev);
1591
1592 if (ret == 0)
1593 printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
1594 ndev->name, dm9000_type_to_char(db->type),
1595 db->io_addr, db->io_data, ndev->irq,
1596 ndev->dev_addr, mac_src);
1597 return 0;
1598
1599 out:
1600 dev_err(db->dev, "not found (%d).\n", ret);
1601
1602 dm9000_release_board(pdev, db);
1603 free_netdev(ndev);
1604
1605 return ret;
1606 }
1607
1608 static int
1609 dm9000_drv_suspend(struct device *dev)
1610 {
1611 struct platform_device *pdev = to_platform_device(dev);
1612 struct net_device *ndev = platform_get_drvdata(pdev);
1613 board_info_t *db;
1614
1615 if (ndev) {
1616 db = netdev_priv(ndev);
1617 db->in_suspend = 1;
1618
1619 if (!netif_running(ndev))
1620 return 0;
1621
1622 netif_device_detach(ndev);
1623
1624 /* only shutdown if not using WoL */
1625 if (!db->wake_state)
1626 dm9000_shutdown(ndev);
1627 }
1628 return 0;
1629 }
1630
1631 static int
1632 dm9000_drv_resume(struct device *dev)
1633 {
1634 struct platform_device *pdev = to_platform_device(dev);
1635 struct net_device *ndev = platform_get_drvdata(pdev);
1636 board_info_t *db = netdev_priv(ndev);
1637
1638 if (ndev) {
1639 if (netif_running(ndev)) {
1640                         /* reset if we were not in wake mode, to ensure the
1641                          * device is in a known state if it was powered off */
1642 if (!db->wake_state) {
1643 dm9000_reset(db);
1644 dm9000_init_dm9000(ndev);
1645 }
1646
1647 netif_device_attach(ndev);
1648 }
1649
1650 db->in_suspend = 0;
1651 }
1652 return 0;
1653 }
1654
1655 static const struct dev_pm_ops dm9000_drv_pm_ops = {
1656 .suspend = dm9000_drv_suspend,
1657 .resume = dm9000_drv_resume,
1658 };
1659
1660 static int __devexit
1661 dm9000_drv_remove(struct platform_device *pdev)
1662 {
1663 struct net_device *ndev = platform_get_drvdata(pdev);
1664
1665 platform_set_drvdata(pdev, NULL);
1666
1667 unregister_netdev(ndev);
1668 dm9000_release_board(pdev, netdev_priv(ndev));
1669 free_netdev(ndev); /* free device structure */
1670
1671 dev_dbg(&pdev->dev, "released and freed device\n");
1672 return 0;
1673 }
1674
1675 static struct platform_driver dm9000_driver = {
1676 .driver = {
1677 .name = "dm9000",
1678 .owner = THIS_MODULE,
1679 .pm = &dm9000_drv_pm_ops,
1680 },
1681 .probe = dm9000_probe,
1682 .remove = __devexit_p(dm9000_drv_remove),
1683 };
1684
1685 static int __init
1686 dm9000_init(void)
1687 {
1688 printk(KERN_INFO "%s Ethernet Driver, V%s\n", CARDNAME, DRV_VERSION);
1689
1690 return platform_driver_register(&dm9000_driver);
1691 }
1692
1693 static void __exit
1694 dm9000_cleanup(void)
1695 {
1696 platform_driver_unregister(&dm9000_driver);
1697 }
1698
1699 module_init(dm9000_init);
1700 module_exit(dm9000_cleanup);
1701
1702 MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
1703 MODULE_DESCRIPTION("Davicom DM9000 network driver");
1704 MODULE_LICENSE("GPL");
1705 MODULE_ALIAS("platform:dm9000");