/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

#include <asm/cacheflush.h>

#ifndef CONFIG_ARCH_MXC
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#endif

#include "fec.h"

#ifdef CONFIG_ARCH_MXC
#include <mach/hardware.h>
#define FEC_ALIGNMENT	0xf
#else
#define FEC_ALIGNMENT	0x3
#endif
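
/*
 * Illustration (editor's note, not part of the original driver):
 * FEC_ALIGNMENT is a bitmask, so a buffer is suitably aligned for DMA
 * when
 *
 *	(addr & FEC_ALIGNMENT) == 0
 *
 * i.e. 16-byte alignment on i.MX (0xf) and 4-byte alignment on
 * ColdFire (0x3). fec_enet_start_xmit() below uses exactly this test
 * to decide when a packet must be copied into a bounce buffer.
 */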

/*
 * Define the fixed address of the FEC hardware.
 */
#if defined(CONFIG_M5272)
#define HAVE_mii_link_interrupt

static unsigned char fec_mac_default[] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
#elif defined (CONFIG_M5272C3)
#define	FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define FEC_FLASHMAC	0xffc0406b
#else
#define	FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */

/* Forward declarations of some structures to support different PHYs */

typedef struct {
	uint mii_data;
	void (*funct)(uint mii_reg, struct net_device *dev);
} phy_cmd_t;

typedef struct {
	uint id;
	char *name;

	const phy_cmd_t *config;
	const phy_cmd_t *startup;
	const phy_cmd_t *ack_int;
	const phy_cmd_t *shutdown;
} phy_info_t;

/* The number of Tx and Rx buffers. These are allocated from the page
 * pool. The code may assume these are power of two, so it is best
 * to keep them that size.
 * We don't need to allocate pages for the transmitter. We just use
 * the skbuffer directly.
 */
#define FEC_ENET_RX_PAGES	8
#define FEC_ENET_RX_FRSIZE	2048
#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE	2048
#define FEC_ENET_TX_FRPPG	(PAGE_SIZE / FEC_ENET_TX_FRSIZE)
#define TX_RING_SIZE		16	/* Must be power of two */
#define TX_RING_MOD_MASK	15	/*   for this to work */

#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
#error "FEC: descriptor ring size constants too large"
#endif
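
/*
 * Editor's note (illustrative): the power-of-two requirement exists
 * because ring indices are advanced with a cheap AND rather than a
 * modulo, e.g. in fec_enet_start_xmit():
 *
 *	fep->skb_cur = (fep->skb_cur + 1) & TX_RING_MOD_MASK;
 *
 * which only wraps correctly when TX_RING_MOD_MASK == TX_RING_SIZE - 1
 * and TX_RING_SIZE is a power of two.
 */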

/* Interrupt events/masks. */
#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
#define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
#define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */

/* The FEC stores dest/src/type, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE		1518
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1520


/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif
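
/*
 * Concretely, OPT_FRAME_SIZE == 1518 << 16 == 0x05ee0000 on those
 * parts, which lands in the maximum-frame-length field of the receive
 * control register when fec_restart() writes OPT_FRAME_SIZE | 0x04
 * (or | 0x06) to FEC_R_CNTRL.
 */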

/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
 * tx_bd_base always point to the base of the buffer descriptors. The
 * cur_rx and cur_tx point to the currently available buffer.
 * The dirty_tx tracks the current buffer that is being sent by the
 * controller. The cur_tx and dirty_tx are equal under both completely
 * empty and completely full conditions. The empty/ready indicator in
 * the buffer descriptor determines the actual condition.
 */
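/*
 * Worked example of the ambiguity described above (editor's note):
 * with a 4-entry Tx ring, cur_tx == dirty_tx both when 0 and when 4
 * descriptors are queued. The driver disambiguates by testing
 * BD_ENET_TX_READY in the descriptor status, and by setting tx_full
 * when cur_tx catches up with dirty_tx in fec_enet_start_xmit().
 */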
struct fec_enet_private {
	/* Hardware registers of the FEC device */
	void __iomem *hwp;

	struct net_device *netdev;

	struct clk *clk;

	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	unsigned char *tx_bounce[TX_RING_SIZE];
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	ushort	skb_cur;
	ushort	skb_dirty;

	/* CPM dual port RAM relative addresses */
	dma_addr_t	bd_dma;
	/* Address of Rx and Tx buffers */
	struct bufdesc	*rx_bd_base;
	struct bufdesc	*tx_bd_base;
	/* The next free ring entry */
	struct bufdesc	*cur_rx, *cur_tx;
	/* The ring entries to be free()ed */
	struct bufdesc	*dirty_tx;

	uint	tx_full;
	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
	spinlock_t hw_lock;
	/* hold while accessing the mii_list_t() elements */
	spinlock_t mii_lock;

	uint	phy_id;
	uint	phy_id_done;
	uint	phy_status;
	uint	phy_speed;
	phy_info_t const	*phy;
	struct work_struct phy_task;

	uint	sequence_done;
	uint	mii_phy_task_queued;

	uint	phy_addr;

	int	index;
	int	opened;
	int	link;
	int	old_link;
	int	full_duplex;
};

static int fec_enet_open(struct net_device *dev);
static int fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void fec_enet_mii(struct net_device *dev);
static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
static void fec_enet_tx(struct net_device *dev);
static void fec_enet_rx(struct net_device *dev);
static int fec_enet_close(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);
static void fec_set_mac_address(struct net_device *dev);


/* MII processing. We keep this as simple as possible. Requests are
 * placed on the list (if there is room). When the request is finished
 * by the MII, an optional function may be called.
 */
typedef struct mii_list {
	uint	mii_regval;
	void	(*mii_func)(uint val, struct net_device *dev);
	struct	mii_list *mii_next;
} mii_list_t;

#define		NMII	20
static mii_list_t	mii_cmds[NMII];
static mii_list_t	*mii_free;
static mii_list_t	*mii_head;
static mii_list_t	*mii_tail;

static int	mii_queue(struct net_device *dev, int request,
				void (*func)(uint, struct net_device *));

/* Make MII read/write commands for the FEC */
#define mk_mii_read(REG)	(0x60020000 | ((REG & 0x1f) << 18))
#define mk_mii_write(REG, VAL)	(0x50020000 | ((REG & 0x1f) << 18) | \
						(VAL & 0xffff))
#define mk_mii_end	0
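
/*
 * Worked example (editor's note): mk_mii_read(MII_REG_SR), with
 * MII_REG_SR == 1, expands to
 *
 *	0x60020000 | (1 << 18) == 0x60060000
 *
 * i.e. the 01 start-of-frame and 10 read-opcode bits, the register
 * number at bit 18 and the turnaround bits at bit 16; mii_queue()
 * later ORs the PHY address in at bit 23 before the word is written
 * to FEC_MII_DATA.
 */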

/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)

/* Register definitions for the PHY */

#define MII_REG_CR	0	/* Control Register */
#define MII_REG_SR	1	/* Status Register */
#define MII_REG_PHYIR1	2	/* PHY Identification Register 1 */
#define MII_REG_PHYIR2	3	/* PHY Identification Register 2 */
#define MII_REG_ANAR	4	/* A-N Advertisement Register */
#define MII_REG_ANLPAR	5	/* A-N Link Partner Ability Register */
#define MII_REG_ANER	6	/* A-N Expansion Register */
#define MII_REG_ANNPTR	7	/* A-N Next Page Transmit Register */
#define MII_REG_ANLPRNPR	8	/* A-N Link Partner Received Next Page Reg. */

/* values for phy_status */

#define PHY_CONF_ANE	0x0001	/* 1 auto-negotiation enabled */
#define PHY_CONF_LOOP	0x0002	/* 1 loopback mode enabled */
#define PHY_CONF_SPMASK	0x00f0	/* mask for speed */
#define PHY_CONF_10HDX	0x0010	/* 10 Mbit half duplex supported */
#define PHY_CONF_10FDX	0x0020	/* 10 Mbit full duplex supported */
#define PHY_CONF_100HDX	0x0040	/* 100 Mbit half duplex supported */
#define PHY_CONF_100FDX	0x0080	/* 100 Mbit full duplex supported */

#define PHY_STAT_LINK	0x0100	/* 1 up - 0 down */
#define PHY_STAT_FAULT	0x0200	/* 1 remote fault */
#define PHY_STAT_ANC	0x0400	/* 1 auto-negotiation complete */
#define PHY_STAT_SPMASK	0xf000	/* mask for speed */
#define PHY_STAT_10HDX	0x1000	/* 10 Mbit half duplex selected */
#define PHY_STAT_10FDX	0x2000	/* 10 Mbit full duplex selected */
#define PHY_STAT_100HDX	0x4000	/* 100 Mbit half duplex selected */
#define PHY_STAT_100FDX	0x8000	/* 100 Mbit full duplex selected */
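
/*
 * Example decoding (editor's note): phy_status packs the PHY_CONF_*
 * and PHY_STAT_* bits into one word, so a value of 0x2101 means
 * auto-negotiation enabled, link up, 10 Mbit full duplex selected.
 */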


static int
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *bdp;
	unsigned short	status;
	unsigned long flags;

	if (!fep->link) {
		/* Link is down or autonegotiation is in progress. */
		return 1;
	}

	spin_lock_irqsave(&fep->hw_lock, flags);
	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

	status = bdp->cbd_sc;

	if (status & BD_ENET_TX_READY) {
		/* Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since dev->tbusy should be set.
		 */
		printk("%s: tx queue full!\n", dev->name);
		spin_unlock_irqrestore(&fep->hw_lock, flags);
		return 1;
	}

	/* Clear all of the status flags */
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bdp->cbd_bufaddr = __pa(skb->data);
	bdp->cbd_datlen = skb->len;

	/*
	 * On some FEC implementations data must be aligned on
	 * 4-byte boundaries. Use bounce buffers to copy data
	 * and get it aligned. Ugh.
	 */
	if (bdp->cbd_bufaddr & FEC_ALIGNMENT) {
		unsigned int index;
		index = bdp - fep->tx_bd_base;
		memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
		bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]);
	}

	/* Save skb pointer */
	fep->tx_skbuff[fep->skb_cur] = skb;

	dev->stats.tx_bytes += skb->len;
	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;

	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 */
	dma_sync_single(NULL, bdp->cbd_bufaddr,
			bdp->cbd_datlen, DMA_TO_DEVICE);

	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	dev->trans_start = jiffies;

	/* Trigger transmission start */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE);

	/* If this was the last BD in the ring, start at the beginning again. */
	if (status & BD_ENET_TX_WRAP)
		bdp = fep->tx_bd_base;
	else
		bdp++;

	if (bdp == fep->dirty_tx) {
		fep->tx_full = 1;
		netif_stop_queue(dev);
	}

	fep->cur_tx = bdp;

	spin_unlock_irqrestore(&fep->hw_lock, flags);

	return 0;
}

static void
fec_timeout(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	dev->stats.tx_errors++;

	fec_restart(dev, fep->full_duplex);
	netif_wake_queue(dev);
}

static irqreturn_t
fec_enet_interrupt(int irq, void * dev_id)
{
	struct net_device *dev = dev_id;
	struct fec_enet_private *fep = netdev_priv(dev);
	uint	int_events;
	irqreturn_t ret = IRQ_NONE;

	do {
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);

		if (int_events & FEC_ENET_RXF) {
			ret = IRQ_HANDLED;
			fec_enet_rx(dev);
		}

		/* Transmit OK, or non-fatal error. Update the buffer
		 * descriptors. FEC handles all errors, we just discover
		 * them as part of the transmit process.
		 */
		if (int_events & FEC_ENET_TXF) {
			ret = IRQ_HANDLED;
			fec_enet_tx(dev);
		}

		if (int_events & FEC_ENET_MII) {
			ret = IRQ_HANDLED;
			fec_enet_mii(dev);
		}

	} while (int_events);

	return ret;
}


static void
fec_enet_tx(struct net_device *dev)
{
	struct fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;

	fep = netdev_priv(dev);
	spin_lock_irq(&fep->hw_lock);
	bdp = fep->dirty_tx;

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
		if (bdp == fep->cur_tx && fep->tx_full == 0)
			break;

		skb = fep->tx_skbuff[fep->skb_dirty];
		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			dev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				dev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				dev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				dev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				dev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				dev->stats.tx_carrier_errors++;
		} else {
			dev->stats.tx_packets++;
		}

		if (status & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			dev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted */
		if (status & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp++;

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (netif_queue_stopped(dev))
				netif_wake_queue(dev);
		}
	}
	fep->dirty_tx = bdp;
	spin_unlock_irq(&fep->hw_lock);
}


/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static void
fec_enet_rx(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	ushort	pkt_len;
	__u8 *data;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif

	spin_lock_irq(&fep->hw_lock);

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			dev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				dev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				dev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				dev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				dev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			dev->stats.rx_errors++;
			dev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		dev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		dev->stats.rx_bytes += pkt_len;
		data = (__u8*)__va(bdp->cbd_bufaddr);

		dma_sync_single(NULL, (unsigned long)__pa(data),
				pkt_len - 4, DMA_FROM_DEVICE);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);

		if (unlikely(!skb)) {
			printk("%s: Memory squeeze, dropping packet.\n",
					dev->name);
			dev->stats.rx_dropped++;
		} else {
			skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len - 4);	/* Make room */
			skb_copy_to_linear_data(skb, data, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
		}
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		/* Update BD pointer to next entry */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp++;
		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
	}
	fep->cur_rx = bdp;

	spin_unlock_irq(&fep->hw_lock);
}

/* called from interrupt context */
static void
fec_enet_mii(struct net_device *dev)
{
	struct fec_enet_private *fep;
	mii_list_t	*mip;

	fep = netdev_priv(dev);
	spin_lock_irq(&fep->mii_lock);

	if ((mip = mii_head) == NULL) {
		printk("MII and no head!\n");
		goto unlock;
	}

	if (mip->mii_func != NULL)
		(*(mip->mii_func))(readl(fep->hwp + FEC_MII_DATA), dev);

	mii_head = mip->mii_next;
	mip->mii_next = mii_free;
	mii_free = mip;

	if ((mip = mii_head) != NULL)
		writel(mip->mii_regval, fep->hwp + FEC_MII_DATA);

unlock:
	spin_unlock_irq(&fep->mii_lock);
}

static int
mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *))
{
	struct fec_enet_private *fep;
	unsigned long	flags;
	mii_list_t	*mip;
	int	retval;

	/* Add PHY address to register command */
	fep = netdev_priv(dev);
	spin_lock_irqsave(&fep->mii_lock, flags);

	regval |= fep->phy_addr << 23;
	retval = 0;

	if ((mip = mii_free) != NULL) {
		mii_free = mip->mii_next;
		mip->mii_regval = regval;
		mip->mii_func = func;
		mip->mii_next = NULL;
		if (mii_head) {
			mii_tail->mii_next = mip;
			mii_tail = mip;
		} else {
			mii_head = mii_tail = mip;
			writel(regval, fep->hwp + FEC_MII_DATA);
		}
	} else {
		retval = 1;
	}

	spin_unlock_irqrestore(&fep->mii_lock, flags);
	return retval;
}

static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
{
	if (!c)
		return;

	for (; c->mii_data != mk_mii_end; c++)
		mii_queue(dev, c->mii_data, c->funct);
}
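
/*
 * Usage sketch (editor's note): a phy_cmd_t array is a script
 * terminated by mk_mii_end, so e.g. mii_do_cmd(dev, fep->phy->startup)
 * queues each read/write in order, and the optional callback of each
 * entry runs from fec_enet_mii(), in interrupt context, when its MII
 * transaction completes.
 */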

static void mii_parse_sr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);

	if (mii_reg & 0x0004)
		status |= PHY_STAT_LINK;
	if (mii_reg & 0x0010)
		status |= PHY_STAT_FAULT;
	if (mii_reg & 0x0020)
		status |= PHY_STAT_ANC;
	*s = status;
}

static void mii_parse_cr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_CONF_ANE | PHY_CONF_LOOP);

	if (mii_reg & 0x1000)
		status |= PHY_CONF_ANE;
	if (mii_reg & 0x4000)
		status |= PHY_CONF_LOOP;
	*s = status;
}

static void mii_parse_anar(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_CONF_SPMASK);

	if (mii_reg & 0x0020)
		status |= PHY_CONF_10HDX;
	if (mii_reg & 0x0040)
		status |= PHY_CONF_10FDX;
	if (mii_reg & 0x0080)
		status |= PHY_CONF_100HDX;
	if (mii_reg & 0x00100)
		status |= PHY_CONF_100FDX;
	*s = status;
}

/* ------------------------------------------------------------------------- */
/* The Level one LXT970 is used by many boards				     */

#define MII_LXT970_MIRROR	16	/* Mirror register */
#define MII_LXT970_IER		17	/* Interrupt Enable Register */
#define MII_LXT970_ISR		18	/* Interrupt Status Register */
#define MII_LXT970_CONFIG	19	/* Configuration Register */
#define MII_LXT970_CSR		20	/* Chip Status Register */

static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK);
	if (mii_reg & 0x0800) {
		if (mii_reg & 0x1000)
			status |= PHY_STAT_100FDX;
		else
			status |= PHY_STAT_100HDX;
	} else {
		if (mii_reg & 0x1000)
			status |= PHY_STAT_10FDX;
		else
			status |= PHY_STAT_10HDX;
	}
	*s = status;
}

static phy_cmd_t const phy_cmd_lxt970_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_startup[] = { /* enable interrupts */
		{ mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_ack_int[] = {
		/* read SR and ISR to acknowledge */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_LXT970_ISR), NULL },

		/* find out the current status */
		{ mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_lxt970 = {
	.id = 0x07810000,
	.name = "LXT970",
	.config = phy_cmd_lxt970_config,
	.startup = phy_cmd_lxt970_startup,
	.ack_int = phy_cmd_lxt970_ack_int,
	.shutdown = phy_cmd_lxt970_shutdown
};

/* ------------------------------------------------------------------------- */
/* The Level one LXT971 is used on some of my custom boards		     */

/* register definitions for the 971 */

#define MII_LXT971_PCR		16	/* Port Control Register */
#define MII_LXT971_SR2		17	/* Status Register 2 */
#define MII_LXT971_IER		18	/* Interrupt Enable Register */
#define MII_LXT971_ISR		19	/* Interrupt Status Register */
#define MII_LXT971_LCR		20	/* LED Control Register */
#define MII_LXT971_TCR		30	/* Transmit Control Register */

/*
 * I had some nice ideas of running the MDIO faster...
 * The 971 should support 8MHz and I tried it, but things acted really
 * weird, so 2.5 MHz ought to be enough for anyone...
 */

static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);

	if (mii_reg & 0x0400) {
		fep->link = 1;
		status |= PHY_STAT_LINK;
	} else {
		fep->link = 0;
	}
	if (mii_reg & 0x0080)
		status |= PHY_STAT_ANC;
	if (mii_reg & 0x4000) {
		if (mii_reg & 0x0200)
			status |= PHY_STAT_100FDX;
		else
			status |= PHY_STAT_100HDX;
	} else {
		if (mii_reg & 0x0200)
			status |= PHY_STAT_10FDX;
		else
			status |= PHY_STAT_10HDX;
	}
	if (mii_reg & 0x0008)
		status |= PHY_STAT_FAULT;

	*s = status;
}

static phy_cmd_t const phy_cmd_lxt971_config[] = {
		/* limit to 10MBit because my prototype board
		 * doesn't work with 100. */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */
		/* Somehow the 971 reports that the link is down on the
		 * first read after power-up.
		 * Read here to get a valid value in ack_int */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_ack_int[] = {
		/* acknowledge the int before reading status ! */
		{ mk_mii_read(MII_LXT971_ISR), NULL },
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_lxt971 = {
	.id = 0x0001378e,
	.name = "LXT971",
	.config = phy_cmd_lxt971_config,
	.startup = phy_cmd_lxt971_startup,
	.ack_int = phy_cmd_lxt971_ack_int,
	.shutdown = phy_cmd_lxt971_shutdown
};

/* ------------------------------------------------------------------------- */
/* The Quality Semiconductor QS6612 is used on the RPX CLLF		     */

/* register definitions */

#define MII_QS6612_MCR		17	/* Mode Control Register */
#define MII_QS6612_FTR		27	/* Factory Test Register */
#define MII_QS6612_MCO		28	/* Misc. Control Register */
#define MII_QS6612_ISR		29	/* Interrupt Source Register */
#define MII_QS6612_IMR		30	/* Interrupt Mask Register */
#define MII_QS6612_PCR		31	/* 100BaseTx PHY Control Reg. */

static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK);

	switch ((mii_reg >> 2) & 7) {
	case 1: status |= PHY_STAT_10HDX; break;
	case 2: status |= PHY_STAT_100HDX; break;
	case 5: status |= PHY_STAT_10FDX; break;
	case 6: status |= PHY_STAT_100FDX; break;
	}

	*s = status;
}

static phy_cmd_t const phy_cmd_qs6612_config[] = {
		/* The PHY powers up isolated on the RPX,
		 * so send a command to allow operation.
		 */
		{ mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },

		/* parse cr and anar to get some info */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_qs6612_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_qs6612_ack_int[] = {
		/* we need to read ISR, SR and ANER to acknowledge */
		{ mk_mii_read(MII_QS6612_ISR), NULL },
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_REG_ANER), NULL },

		/* read pcr to get info */
		{ mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_qs6612_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_qs6612 = {
	.id = 0x00181440,
	.name = "QS6612",
	.config = phy_cmd_qs6612_config,
	.startup = phy_cmd_qs6612_startup,
	.ack_int = phy_cmd_qs6612_ack_int,
	.shutdown = phy_cmd_qs6612_shutdown
};

/* ------------------------------------------------------------------------- */
/* AMD AM79C874 phy							     */

/* register definitions for the 874 */

#define MII_AM79C874_MFR	16	/* Miscellaneous Feature Register */
#define MII_AM79C874_ICSR	17	/* Interrupt/Status Register */
#define MII_AM79C874_DR		18	/* Diagnostic Register */
#define MII_AM79C874_PMLR	19	/* Power and Loopback Register */
#define MII_AM79C874_MCR	21	/* ModeControl Register */
#define MII_AM79C874_DC		23	/* Disconnect Counter */
#define MII_AM79C874_REC	24	/* Receive Error Counter */

static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_ANC);

	if (mii_reg & 0x0080)
		status |= PHY_STAT_ANC;
	if (mii_reg & 0x0400)
		status |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX);
	else
		status |= ((mii_reg & 0x0800) ? PHY_STAT_10FDX : PHY_STAT_10HDX);

	*s = status;
}

static phy_cmd_t const phy_cmd_am79c874_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_ack_int[] = {
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
		/* we only need to read ISR to acknowledge */
		{ mk_mii_read(MII_AM79C874_ICSR), NULL },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_am79c874 = {
	.id = 0x00022561,
	.name = "AM79C874",
	.config = phy_cmd_am79c874_config,
	.startup = phy_cmd_am79c874_startup,
	.ack_int = phy_cmd_am79c874_ack_int,
	.shutdown = phy_cmd_am79c874_shutdown
};


/* ------------------------------------------------------------------------- */
/* Kendin KS8721BL phy							     */

/* register definitions for the 8721 */

#define MII_KS8721BL_RXERCR	21
#define MII_KS8721BL_ICSR	27
#define MII_KS8721BL_PHYCR	31

static phy_cmd_t const phy_cmd_ks8721bl_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_ack_int[] = {
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		/* we only need to read ISR to acknowledge */
		{ mk_mii_read(MII_KS8721BL_ICSR), NULL },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_ks8721bl = {
	.id = 0x00022161,
	.name = "KS8721BL",
	.config = phy_cmd_ks8721bl_config,
	.startup = phy_cmd_ks8721bl_startup,
	.ack_int = phy_cmd_ks8721bl_ack_int,
	.shutdown = phy_cmd_ks8721bl_shutdown
};

/* ------------------------------------------------------------------------- */
/* register definitions for the DP83848 */

#define MII_DP8384X_PHYSTST	16	/* PHY Status Register */

static void mii_parse_dp8384x_sr2(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);

	*s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);

	/* Link up */
	if (mii_reg & 0x0001) {
		fep->link = 1;
		*s |= PHY_STAT_LINK;
	} else
		fep->link = 0;
	/* Status of link */
	if (mii_reg & 0x0010)	/* Autonegotiation complete */
		*s |= PHY_STAT_ANC;
	if (mii_reg & 0x0002) {	/* 10 Mbps? */
		if (mii_reg & 0x0004)	/* Full Duplex? */
			*s |= PHY_STAT_10FDX;
		else
			*s |= PHY_STAT_10HDX;
	} else {			/* 100 Mbps? */
		if (mii_reg & 0x0004)	/* Full Duplex? */
			*s |= PHY_STAT_100FDX;
		else
			*s |= PHY_STAT_100HDX;
	}
	if (mii_reg & 0x0008)
		*s |= PHY_STAT_FAULT;
}

static phy_info_t phy_info_dp83848= {
	0x020005c9,
	"DP83848",

	(const phy_cmd_t []) {  /* config */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_DP8384X_PHYSTST), mii_parse_dp8384x_sr2 },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* startup - enable interrupts */
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) { /* ack_int - never happens, no interrupt */
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* shutdown */
		{ mk_mii_end, }
	},
};

/* ------------------------------------------------------------------------- */

static phy_info_t const * const phy_info[] = {
	&phy_info_lxt970,
	&phy_info_lxt971,
	&phy_info_qs6612,
	&phy_info_am79c874,
	&phy_info_ks8721bl,
	&phy_info_dp83848,
	NULL
};

/* ------------------------------------------------------------------------- */
#ifdef HAVE_mii_link_interrupt
static irqreturn_t
mii_link_interrupt(int irq, void * dev_id);

/*
 * This is specific to the MII interrupt setup of the M5272EVB.
 */
static void __inline__ fec_request_mii_intr(struct net_device *dev)
{
	if (request_irq(66, mii_link_interrupt, IRQF_DISABLED, "fec(MII)", dev) != 0)
		printk("FEC: Could not allocate fec(MII) IRQ(66)!\n");
}

static void __inline__ fec_disable_phy_intr(void)
{
	volatile unsigned long *icrp;
	icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
	*icrp = 0x08000000;
}

static void __inline__ fec_phy_ack_intr(void)
{
	volatile unsigned long *icrp;
	/* Acknowledge the interrupt */
	icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
	*icrp = 0x0d000000;
}
#endif

#ifdef CONFIG_M5272
static void __inline__ fec_get_mac(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	unsigned char *iap, tmpaddr[ETH_ALEN];

	if (FEC_FLASHMAC) {
		/*
		 * Get MAC address from FLASH.
		 * If it is all 1's or 0's, use the default.
		 */
		iap = (unsigned char *)FEC_FLASHMAC;
		if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
		    (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
			iap = fec_mac_default;
		if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
		    (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
			iap = fec_mac_default;
	} else {
		*((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
		*((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
		iap = &tmpaddr[0];
	}

	memcpy(dev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using default MAC address */
	if (iap == fec_mac_default)
		dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
}
#endif

/* ------------------------------------------------------------------------- */

static void mii_display_status(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);

	if (!fep->link && !fep->old_link) {
		/* Link is still down - don't print anything */
		return;
	}

	printk("%s: status: ", dev->name);

	if (!fep->link) {
		printk("link down");
	} else {
		printk("link up");

		switch (*s & PHY_STAT_SPMASK) {
		case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break;
		case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break;
		case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break;
		case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break;
		default:
			printk(", Unknown speed/duplex");
		}

		if (*s & PHY_STAT_ANC)
			printk(", auto-negotiation complete");
	}

	if (*s & PHY_STAT_FAULT)
		printk(", remote fault");

	printk(".\n");
}

static void mii_display_config(struct work_struct *work)
{
	struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
	struct net_device *dev = fep->netdev;
	uint status = fep->phy_status;

	/*
	** When we get here, phy_task is already removed from
	** the workqueue. It is thus safe to reuse it.
	*/
	fep->mii_phy_task_queued = 0;
	printk("%s: config: auto-negotiation ", dev->name);

	if (status & PHY_CONF_ANE)
		printk("on");
	else
		printk("off");

	if (status & PHY_CONF_100FDX)
		printk(", 100FDX");
	if (status & PHY_CONF_100HDX)
		printk(", 100HDX");
	if (status & PHY_CONF_10FDX)
		printk(", 10FDX");
	if (status & PHY_CONF_10HDX)
		printk(", 10HDX");
	if (!(status & PHY_CONF_SPMASK))
		printk(", No speed/duplex selected?");

	if (status & PHY_CONF_LOOP)
		printk(", loopback enabled");

	printk(".\n");

	fep->sequence_done = 1;
}

static void mii_relink(struct work_struct *work)
{
	struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
	struct net_device *dev = fep->netdev;
	int duplex;

	/*
	** When we get here, phy_task is already removed from
	** the workqueue. It is thus safe to reuse it.
	*/
	fep->mii_phy_task_queued = 0;
	fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
	mii_display_status(dev);
	fep->old_link = fep->link;

	if (fep->link) {
		duplex = 0;
		if (fep->phy_status
		    & (PHY_STAT_100FDX | PHY_STAT_10FDX))
			duplex = 1;
		fec_restart(dev, duplex);
	} else
		fec_stop(dev);
}

/* mii_queue_relink is called in interrupt context from mii_link_interrupt */
static void mii_queue_relink(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/*
	 * We cannot queue phy_task twice in the workqueue. It
	 * would cause an endless loop in the workqueue.
	 * Fortunately, if the last mii_relink entry has not yet been
	 * executed now, it will do the job for the current interrupt,
	 * which is just what we want.
	 */
	if (fep->mii_phy_task_queued)
		return;

	fep->mii_phy_task_queued = 1;
	INIT_WORK(&fep->phy_task, mii_relink);
	schedule_work(&fep->phy_task);
}

/* mii_queue_config is called in interrupt context from fec_enet_mii */
static void mii_queue_config(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	if (fep->mii_phy_task_queued)
		return;

	fep->mii_phy_task_queued = 1;
	INIT_WORK(&fep->phy_task, mii_display_config);
	schedule_work(&fep->phy_task);
}

phy_cmd_t const phy_cmd_relink[] = {
	{ mk_mii_read(MII_REG_CR), mii_queue_relink },
	{ mk_mii_end, }
	};
phy_cmd_t const phy_cmd_config[] = {
	{ mk_mii_read(MII_REG_CR), mii_queue_config },
	{ mk_mii_end, }
	};

/* Read remainder of PHY ID. */
static void
mii_discover_phy3(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep;
	int i;

	fep = netdev_priv(dev);
	fep->phy_id |= (mii_reg & 0xffff);
	printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id);

	for (i = 0; phy_info[i]; i++) {
		if (phy_info[i]->id == (fep->phy_id >> 4))
			break;
	}

	if (phy_info[i])
		printk(" -- %s\n", phy_info[i]->name);
	else
		printk(" -- unknown PHY!\n");

	fep->phy = phy_info[i];
	fep->phy_id_done = 1;
}

/* Scan all of the MII PHY addresses looking for someone to respond
 * with a valid ID.  This usually happens quickly.
 */
static void
mii_discover_phy(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep;
	uint phytype;

	fep = netdev_priv(dev);

	if (fep->phy_addr < 32) {
		if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) {

			/* Got first part of ID, now get remainder */
			fep->phy_id = phytype << 16;
			mii_queue(dev, mk_mii_read(MII_REG_PHYIR2),
							mii_discover_phy3);
		} else {
			fep->phy_addr++;
			mii_queue(dev, mk_mii_read(MII_REG_PHYIR1),
							mii_discover_phy);
		}
	} else {
		printk("FEC: No PHY device found.\n");
		/* Disable external MII interface */
		writel(0, fep->hwp + FEC_MII_SPEED);
		fep->phy_speed = 0;
#ifdef HAVE_mii_link_interrupt
		fec_disable_phy_intr();
#endif
	}
}

/* This interrupt occurs when the PHY detects a link change */
#ifdef HAVE_mii_link_interrupt
static irqreturn_t
mii_link_interrupt(int irq, void * dev_id)
{
	struct net_device *dev = dev_id;
	struct fec_enet_private *fep = netdev_priv(dev);

	fec_phy_ack_intr();

	mii_do_cmd(dev, fep->phy->ack_int);
	mii_do_cmd(dev, phy_cmd_relink);  /* restart and display status */

	return IRQ_HANDLED;
}
#endif

static int
fec_enet_open(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */
	fec_set_mac_address(dev);

	fep->sequence_done = 0;
	fep->link = 0;

	if (fep->phy) {
		mii_do_cmd(dev, fep->phy->ack_int);
		mii_do_cmd(dev, fep->phy->config);
		mii_do_cmd(dev, phy_cmd_config);  /* display configuration */

		/* Poll until the PHY tells us its configuration
		 * (not link state).
		 * Request is initiated by mii_do_cmd above, but answer
		 * comes by interrupt.
		 * This should take about 25 usec per register at 2.5 MHz,
		 * and we read approximately 5 registers.
		 */
		while (!fep->sequence_done)
			schedule();

		mii_do_cmd(dev, fep->phy->startup);

		/* Set the initial link state to true. A lot of hardware
		 * based on this device does not implement a PHY interrupt,
		 * so we are never notified of link change.
		 */
		fep->link = 1;
	} else {
		fep->link = 1; /* lets just try it and see */
		/* no phy, go full duplex, it's most likely a hub chip */
		fec_restart(dev, 1);
	}

	netif_start_queue(dev);
	fep->opened = 1;
	return 0;
}

static int
fec_enet_close(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* Don't know what to do yet. */
	fep->opened = 0;
	netif_stop_queue(dev);
	fec_stop(dev);

	return 0;
}

/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define HASH_BITS	6		/* #bits in hash */
#define CRC32_POLY	0xEDB88320
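
/*
 * Worked example (hypothetical CRC value, editor's note): if the
 * bit-reflected CRC32 of a multicast address came out as 0xdeadbeef,
 * then hash = 0xdeadbeef >> 26 = 55, so set_multicast_list() below
 * would set bit 55 - 32 = 23 of FEC_GRP_HASH_TABLE_HIGH.
 */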

static void set_multicast_list(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct dev_mc_list *dmi;
	unsigned int i, j, bit, data, crc, tmp;
	unsigned char hash;

	if (dev->flags & IFF_PROMISC) {
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
		return;
	}

	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

	if (dev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

		return;
	}

	/* Clear filter and add the addresses in hash register
	 */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

	dmi = dev->mc_list;

	for (j = 0; j < dev->mc_count; j++, dmi = dmi->next) {
		/* Only support group multicast for now */
		if (!(dmi->dmi_addr[0] & 1))
			continue;

		/* calculate crc32 value of mac address */
		crc = 0xffffffff;

		for (i = 0; i < dmi->dmi_addrlen; i++) {
			data = dmi->dmi_addr[i];
			for (bit = 0; bit < 8; bit++, data >>= 1) {
				crc = (crc >> 1) ^
				(((crc ^ data) & 1) ? CRC32_POLY : 0);
			}
		}

		/* only upper 6 bits (HASH_BITS) are used
		 * which point to a specific bit in the hash registers
		 */
		hash = (crc >> (32 - HASH_BITS)) & 0x3f;

		if (hash > 31) {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
			tmp |= 1 << (hash - 32);
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		} else {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
			tmp |= 1 << hash;
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
		}
	}
}

/* Set a MAC change in hardware. */
static void
fec_set_mac_address(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* Set station address. */
	writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
		(dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
		fep->hwp + FEC_ADDR_LOW);
	writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
		fep->hwp + FEC_ADDR_HIGH);
}
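
/*
 * Worked example (hypothetical address 00:11:22:33:44:55, editor's
 * note): the code above writes 0x00112233 to FEC_ADDR_LOW and
 * 0x44550000 to FEC_ADDR_HIGH, i.e. the bytes land most-significant
 * first, in the order the FEC expects.
 */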

/*
 * XXX:  We need to clean up on failure exits here.
 *
 * index is only used in legacy code
 */
int __init fec_enet_init(struct net_device *dev, int index)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	unsigned long	mem_addr;
	struct bufdesc *bdp, *cbd_base;
	int	i, j;

	/* Allocate memory for buffer descriptors. */
	cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
			GFP_KERNEL);
	if (!cbd_base) {
		printk("FEC: allocate descriptor memory failed?\n");
		return -ENOMEM;
	}

	spin_lock_init(&fep->hw_lock);
	spin_lock_init(&fep->mii_lock);

	fep->index = index;
	fep->hwp = (void __iomem *)dev->base_addr;
	fep->netdev = dev;

	/* Set the Ethernet address */
#ifdef CONFIG_M5272
	fec_get_mac(dev);
#else
	{
		unsigned long l;
		l = readl(fep->hwp + FEC_ADDR_LOW);
		dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
		dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
		dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
		dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
		l = readl(fep->hwp + FEC_ADDR_HIGH);
		dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
		dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
	}
#endif

	/* Set receive and transmit descriptor base. */
	fep->rx_bd_base = cbd_base;
	fep->tx_bd_base = cbd_base + RX_RING_SIZE;

	/* Initialize the receive buffer descriptors. */
	bdp = fep->rx_bd_base;
	for (i = 0; i < FEC_ENET_RX_PAGES; i++) {

		/* Allocate a page */
		mem_addr = __get_free_page(GFP_KERNEL);
		/* XXX: missing check for allocation failure */

		/* Initialize the BD for every fragment in the page */
		for (j = 0; j < FEC_ENET_RX_FRPPG; j++) {
			bdp->cbd_sc = BD_ENET_RX_EMPTY;
			bdp->cbd_bufaddr = __pa(mem_addr);
			mem_addr += FEC_ENET_RX_FRSIZE;
			bdp++;
		}
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* ...and the same for transmit */
	bdp = fep->tx_bd_base;
	for (i = 0, j = FEC_ENET_TX_FRPPG; i < TX_RING_SIZE; i++) {
		if (j >= FEC_ENET_TX_FRPPG) {
			mem_addr = __get_free_page(GFP_KERNEL);
			j = 1;
		} else {
			mem_addr += FEC_ENET_TX_FRSIZE;
			j++;
		}
		fep->tx_bounce[i] = (unsigned char *) mem_addr;

		/* Initialize the BD for every fragment in the page */
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

#ifdef HAVE_mii_link_interrupt
	fec_request_mii_intr(dev);
#endif
	/* The FEC Ethernet specific entries in the device structure */
	dev->open = fec_enet_open;
	dev->hard_start_xmit = fec_enet_start_xmit;
	dev->tx_timeout = fec_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->stop = fec_enet_close;
	dev->set_multicast_list = set_multicast_list;

	for (i = 0; i < NMII - 1; i++)
		mii_cmds[i].mii_next = &mii_cmds[i+1];
	mii_free = mii_cmds;

	/* Set MII speed to 2.5 MHz */
	fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
					/ 2500000) / 2) & 0x3F) << 1;
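	/*
	 * Worked example (hypothetical 100 MHz fec_clk, editor's note):
	 * the expression above evaluates to
	 * ((((100000000 / 2 + 4999999) / 2500000) / 2) & 0x3F) << 1
	 * == (21 / 2) << 1 == 0x14, the value later written to
	 * FEC_MII_SPEED by fec_restart().
	 */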
	fec_restart(dev, 0);

	/* Queue up command to detect the PHY and initialize the
	 * remainder of the interface.
	 */
	fep->phy_id_done = 0;
	fep->phy_addr = 0;
	mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);

	return 0;
}

/* This function is called to start or restart the FEC during a link
 * change.  This only happens when switching between half and full
 * duplex.
 */
static void
fec_restart(struct net_device *dev, int duplex)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *bdp;
	int i;

	/* Whack a reset.  We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	/* Clear any outstanding interrupt. */
	writel(0xffc00000, fep->hwp + FEC_IEVENT);

	/* Set station address. */
	fec_set_mac_address(dev);

	/* Reset all multicast. */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	/* Set maximum receive buffer size. */
	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);

	/* Set receive and transmit descriptor base. */
	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
	writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
			fep->hwp + FEC_X_DES_START);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	/* Reset SKB transmit buffers. */
	fep->skb_cur = fep->skb_dirty = 0;
	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
		if (fep->tx_skbuff[i]) {
			dev_kfree_skb_any(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
	}

	/* Initialize the receive buffer descriptors. */
	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = BD_ENET_RX_EMPTY;
		bdp++;
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* ...and the same for transmit */
	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* Enable MII mode */
	if (duplex) {
		/* MII enable / FD enable */
		writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* MII enable / No Rcv on Xmit */
		writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}
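	/*
	 * Register-bit note (editor's assumption, based on the standard
	 * FEC register layout): in FEC_R_CNTRL bit 2 (0x04) selects MII
	 * mode and bit 1 (0x02) disables receive-while-transmitting,
	 * i.e. half duplex; in FEC_X_CNTRL bit 2 (0x04) enables full
	 * duplex.
	 */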
	fep->full_duplex = duplex;

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	/* And last, enable the transmit and receive processing */
	writel(2, fep->hwp + FEC_ECNTRL);
	writel(0, fep->hwp + FEC_R_DES_ACTIVE);

	/* Enable interrupts we wish to service */
	writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII,
			fep->hwp + FEC_IMASK);
}

static void
fec_stop(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			printk("fec_stop : Graceful transmit stop did not complete !\n");
	}

	/* Whack a reset.  We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	/* Clear outstanding MII command interrupts. */
	writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);

	writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
}

static int __devinit
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct net_device *ndev;
	int i, irq, ret = 0;
	struct resource *r;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENXIO;

	r = request_mem_region(r->start, resource_size(r), pdev->name);
	if (!r)
		return -EBUSY;

	/* Init network device */
	ndev = alloc_etherdev(sizeof(struct fec_enet_private));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(ndev);
	memset(fep, 0, sizeof(*fep));

	ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));

	if (!ndev->base_addr) {
		ret = -ENOMEM;
		goto failed_ioremap;
	}

	platform_set_drvdata(pdev, ndev);

	/* This device has up to three irqs on some platforms */
	for (i = 0; i < 3; i++) {
		irq = platform_get_irq(pdev, i);
		if (i && irq < 0)
			break;
		ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
		if (ret) {
			while (i >= 0) {
				irq = platform_get_irq(pdev, i);
				free_irq(irq, ndev);
				i--;
			}
			goto failed_irq;
		}
	}

	fep->clk = clk_get(&pdev->dev, "fec_clk");
	if (IS_ERR(fep->clk)) {
		ret = PTR_ERR(fep->clk);
		goto failed_clk;
	}
	clk_enable(fep->clk);

	ret = fec_enet_init(ndev, 0);
	if (ret)
		goto failed_init;

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	return 0;

failed_register:
failed_init:
	clk_disable(fep->clk);
	clk_put(fep->clk);
failed_clk:
	for (i = 0; i < 3; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq > 0)
			free_irq(irq, ndev);
	}
failed_irq:
	iounmap((void __iomem *)ndev->base_addr);
failed_ioremap:
	free_netdev(ndev);

	return ret;
}

static int __devexit
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	platform_set_drvdata(pdev, NULL);

	fec_stop(ndev);
	clk_disable(fep->clk);
	clk_put(fep->clk);
	iounmap((void __iomem *)ndev->base_addr);
	unregister_netdev(ndev);
	free_netdev(ndev);
	return 0;
}

static int
fec_suspend(struct platform_device *dev, pm_message_t state)
{
	struct net_device *ndev = platform_get_drvdata(dev);
	struct fec_enet_private *fep;

	if (ndev) {
		fep = netdev_priv(ndev);
		if (netif_running(ndev)) {
			netif_device_detach(ndev);
			fec_stop(ndev);
		}
	}
	return 0;
}

static int
fec_resume(struct platform_device *dev)
{
	struct net_device *ndev = platform_get_drvdata(dev);

	if (ndev) {
		if (netif_running(ndev)) {
			fec_enet_init(ndev, 0);
			netif_device_attach(ndev);
		}
	}
	return 0;
}

static struct platform_driver fec_driver = {
	.driver	= {
		.name	= "fec",
		.owner	= THIS_MODULE,
	},
	.probe	= fec_probe,
	.remove	= __devexit_p(fec_drv_remove),
	.suspend = fec_suspend,
	.resume	= fec_resume,
};

static int __init
fec_enet_module_init(void)
{
	printk(KERN_INFO "FEC Ethernet Driver\n");

	return platform_driver_register(&fec_driver);
}

static void __exit
fec_enet_cleanup(void)
{
	platform_driver_unregister(&fec_driver);
}

module_exit(fec_enet_cleanup);
module_init(fec_enet_module_init);

MODULE_LICENSE("GPL");