/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>

#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#include "fs_enet.h"

/*************************************************/

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n";

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
		 "Freescale bitmapped debugging message enable value");

static void fs_set_multicast_list(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	(*fep->ops->set_multicast_list)(dev);
}

/* NAPI receive function */
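/*
 * This is the dev->poll handler of the old (pre-2.6.24) NAPI interface:
 * dev->quota and *budget bound how many frames may be consumed in one call.
 * Returning 1 signals that more work remains; returning 0 means the ring has
 * been drained, netif_rx_complete() has been called and receive interrupts
 * have been re-enabled.
 */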
static int fs_enet_rx_napi(struct net_device *dev, int *budget)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;
	int rx_work_limit = 0;	/* pacify gcc */

	rx_work_limit = min(dev->quota, *budget);

	if (!netif_running(dev))
		return 0;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* clear RX status bits for napi */
	(*fep->ops->napi_clear_rx_event)(dev);

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {

		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s rcv is not +last\n",
			       dev->name);

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
					 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
					 DMA_FROM_DEVICE);

			skbn = skb;

		} else {

			/* napi, got packet but no quota */
			if (--rx_work_limit < 0)
				break;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
					 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
					 DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					memcpy(skbn->data, skb->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);

			if (skbn != NULL) {
				skb->dev = dev;
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_receive_skb(skb);
			} else {
				printk(KERN_WARNING DRV_MODULE_NAME
				       ": %s Memory squeeze, dropping packet.\n",
				       dev->name);
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);
	}

	fep->cur_rx = bdp;

	dev->quota -= received;
	*budget -= received;

	if (rx_work_limit < 0)
		return 1;	/* not done */

	/* done */
	netif_rx_complete(dev);

	(*fep->ops->napi_enable_rx)(dev);

	return 0;
}
221 | ||
222 | /* non NAPI receive function */ | |
223 | static int fs_enet_rx_non_napi(struct net_device *dev) | |
224 | { | |
225 | struct fs_enet_private *fep = netdev_priv(dev); | |
226 | const struct fs_platform_info *fpi = fep->fpi; | |
227 | cbd_t *bdp; | |
228 | struct sk_buff *skb, *skbn, *skbt; | |
229 | int received = 0; | |
230 | u16 pkt_len, sc; | |
231 | int curidx; | |
232 | /* | |
233 | * First, grab all of the stats for the incoming packet. | |
234 | * These get messed up if we get called due to a busy condition. | |
235 | */ | |
236 | bdp = fep->cur_rx; | |
237 | ||
238 | while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) { | |
239 | ||
240 | curidx = bdp - fep->rx_bd_base; | |
241 | ||
242 | /* | |
243 | * Since we have allocated space to hold a complete frame, | |
244 | * the last indicator should be set. | |
245 | */ | |
246 | if ((sc & BD_ENET_RX_LAST) == 0) | |
247 | printk(KERN_WARNING DRV_MODULE_NAME | |
248 | ": %s rcv is not +last\n", | |
249 | dev->name); | |
250 | ||
251 | /* | |
252 | * Check for errors. | |
253 | */ | |
254 | if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL | | |
255 | BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { | |
256 | fep->stats.rx_errors++; | |
257 | /* Frame too long or too short. */ | |
258 | if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) | |
259 | fep->stats.rx_length_errors++; | |
260 | /* Frame alignment */ | |
261 | if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL)) | |
262 | fep->stats.rx_frame_errors++; | |
263 | /* CRC Error */ | |
264 | if (sc & BD_ENET_RX_CR) | |
265 | fep->stats.rx_crc_errors++; | |
266 | /* FIFO overrun */ | |
267 | if (sc & BD_ENET_RX_OV) | |
268 | fep->stats.rx_crc_errors++; | |
269 | ||
270 | skb = fep->rx_skbuff[curidx]; | |
271 | ||
34e30d61 | 272 | dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), |
48257c4f PA |
273 | L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), |
274 | DMA_FROM_DEVICE); | |
275 | ||
276 | skbn = skb; | |
277 | ||
278 | } else { | |
279 | ||
280 | skb = fep->rx_skbuff[curidx]; | |
281 | ||
34e30d61 | 282 | dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), |
48257c4f PA |
283 | L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), |
284 | DMA_FROM_DEVICE); | |
285 | ||
286 | /* | |
287 | * Process the incoming frame. | |
288 | */ | |
289 | fep->stats.rx_packets++; | |
290 | pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */ | |
291 | fep->stats.rx_bytes += pkt_len + 4; | |
292 | ||
293 | if (pkt_len <= fpi->rx_copybreak) { | |
294 | /* +2 to make IP header L1 cache aligned */ | |
295 | skbn = dev_alloc_skb(pkt_len + 2); | |
296 | if (skbn != NULL) { | |
297 | skb_reserve(skbn, 2); /* align IP header */ | |
298 | memcpy(skbn->data, skb->data, pkt_len); | |
299 | /* swap */ | |
300 | skbt = skb; | |
301 | skb = skbn; | |
302 | skbn = skbt; | |
303 | } | |
304 | } else | |
305 | skbn = dev_alloc_skb(ENET_RX_FRSIZE); | |
306 | ||
307 | if (skbn != NULL) { | |
308 | skb->dev = dev; | |
309 | skb_put(skb, pkt_len); /* Make room */ | |
310 | skb->protocol = eth_type_trans(skb, dev); | |
311 | received++; | |
312 | netif_rx(skb); | |
313 | } else { | |
314 | printk(KERN_WARNING DRV_MODULE_NAME | |
315 | ": %s Memory squeeze, dropping packet.\n", | |
316 | dev->name); | |
317 | fep->stats.rx_dropped++; | |
318 | skbn = skb; | |
319 | } | |
320 | } | |
321 | ||
322 | fep->rx_skbuff[curidx] = skbn; | |
323 | CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data, | |
324 | L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), | |
325 | DMA_FROM_DEVICE)); | |
326 | CBDW_DATLEN(bdp, 0); | |
327 | CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY); | |
328 | ||
329 | /* | |
330 | * Update BD pointer to next entry. | |
331 | */ | |
332 | if ((sc & BD_ENET_RX_WRAP) == 0) | |
333 | bdp++; | |
334 | else | |
335 | bdp = fep->rx_bd_base; | |
336 | ||
337 | (*fep->ops->rx_bd_done)(dev); | |
338 | } | |
339 | ||
340 | fep->cur_rx = bdp; | |
341 | ||
342 | return 0; | |
343 | } | |
344 | ||
static void fs_enet_tx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t *bdp;
	struct sk_buff *skb;
	int dirtyidx, do_wake, do_restart;
	u16 sc;

	spin_lock(&fep->lock);
	bdp = fep->dirty_tx;

	do_wake = do_restart = 0;
	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {

		dirtyidx = bdp - fep->tx_bd_base;

		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				fep->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				fep->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				fep->stats.tx_carrier_errors++;

			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				fep->stats.tx_errors++;
				do_restart = 1;
			}
		} else
			fep->stats.tx_packets++;

		if (sc & BD_ENET_TX_READY)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s HEY! Enet xmit interrupt and TX_READY.\n",
			       dev->name);

		/*
		 * Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 skb->len, DMA_TO_DEVICE);

		/*
		 * Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb_irq(skb);
		fep->tx_skbuff[dirtyidx] = NULL;

		/*
		 * Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/*
		 * Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (!fep->tx_free++)
			do_wake = 1;
	}

	fep->dirty_tx = bdp;

	if (do_restart)
		(*fep->ops->tx_restart)(dev);

	spin_unlock(&fep->lock);

	if (do_wake)
		netif_wake_queue(dev);
}

/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	u32 int_events;
	u32 int_clr_events;
	int nr, napi_ok;
	int handled;

	fep = netdev_priv(dev);
	fpi = fep->fpi;

	nr = 0;
	while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {

		nr++;

		int_clr_events = int_events;
		if (fpi->use_napi)
			int_clr_events &= ~fep->ev_napi_rx;

		(*fep->ops->clear_int_events)(dev, int_clr_events);

		if (int_events & fep->ev_err)
			(*fep->ops->ev_error)(dev, int_events);

		if (int_events & fep->ev_rx) {
			if (!fpi->use_napi)
				fs_enet_rx_non_napi(dev);
			else {
				napi_ok = netif_rx_schedule_prep(dev);

				(*fep->ops->napi_disable_rx)(dev);
				(*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

				/* NOTE: it is possible for FCCs in NAPI mode */
				/* to submit a spurious interrupt while in poll */
				if (napi_ok)
					__netif_rx_schedule(dev);
			}
		}

		if (int_events & fep->ev_tx)
			fs_enet_tx(dev);
	}

	handled = nr > 0;
	return IRQ_RETVAL(handled);
}

void fs_init_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t *bdp;
	struct sk_buff *skb;
	int i;

	fs_cleanup_bds(dev);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->tx_free = fep->tx_ring;
	fep->cur_rx = fep->rx_bd_base;

	/*
	 * Initialize the receive buffer descriptors.
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		skb = dev_alloc_skb(ENET_RX_FRSIZE);
		if (skb == NULL) {
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s Memory squeeze, unable to allocate skb\n",
			       dev->name);
			break;
		}
		fep->rx_skbuff[i] = skb;
		skb->dev = dev;
		CBDW_BUFADDR(bdp,
			dma_map_single(fep->dev, skb->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);	/* zero */
		CBDW_SC(bdp, BD_ENET_RX_EMPTY |
			((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
	}
	/*
	 * If an allocation failed, fill up the remainder of the ring
	 * with unused descriptors.
	 */
	for (; i < fep->rx_ring; i++, bdp++) {
		fep->rx_skbuff[i] = NULL;
		CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
	}

	/*
	 * ...and the same for transmit.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		fep->tx_skbuff[i] = NULL;
		CBDW_BUFADDR(bdp, 0);
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
	}
}

void fs_cleanup_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct sk_buff *skb;
	cbd_t *bdp;
	int i;

	/*
	 * Reset SKB transmit buffers.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		if ((skb = fep->tx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 skb->len, DMA_TO_DEVICE);

		fep->tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}

	/*
	 * Reset SKB receive buffers.
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		if ((skb = fep->rx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				 DMA_FROM_DEVICE);

		fep->rx_skbuff[i] = NULL;

		dev_kfree_skb(skb);
	}
}

/**********************************************************************************/

static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t *bdp;
	int curidx;
	u16 sc;
	unsigned long flags;

	spin_lock_irqsave(&fep->tx_lock, flags);

	/*
	 * Fill in a Tx ring entry
	 */
	bdp = fep->cur_tx;

	if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&fep->tx_lock, flags);

		/*
		 * Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since the tx queue should be stopped.
		 */
		printk(KERN_WARNING DRV_MODULE_NAME
		       ": %s tx queue full!\n", dev->name);
		return NETDEV_TX_BUSY;
	}

	curidx = bdp - fep->tx_bd_base;
	/*
	 * Clear all of the status flags.
	 */
	CBDC_SC(bdp, BD_ENET_TX_STATS);

	/*
	 * Save skb pointer.
	 */
	fep->tx_skbuff[curidx] = skb;

	fep->stats.tx_bytes += skb->len;

	/*
	 * Push the data cache so the CPM does not get stale memory data.
	 */
	CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
			skb->data, skb->len, DMA_TO_DEVICE));
	CBDW_DATLEN(bdp, skb->len);

	dev->trans_start = jiffies;

	/*
	 * If this was the last BD in the ring, start at the beginning again.
	 */
	if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
		fep->cur_tx++;
	else
		fep->cur_tx = fep->tx_bd_base;

	if (!--fep->tx_free)
		netif_stop_queue(dev);

	/* Trigger transmission start */
	sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
	     BD_ENET_TX_LAST | BD_ENET_TX_TC;

	/* note that while FEC does not have this bit
	 * it marks it as available for software use
	 * yay for hw reuse :) */
	if (skb->len <= 60)
		sc |= BD_ENET_TX_PAD;
	CBDS_SC(bdp, sc);

	(*fep->ops->tx_kickstart)(dev);

	spin_unlock_irqrestore(&fep->tx_lock, flags);

	return NETDEV_TX_OK;
}

static int fs_request_irq(struct net_device *dev, int irq, const char *name,
			  irqreturn_t (*irqf)(int irq, void *dev_id, struct pt_regs *regs))
{
	struct fs_enet_private *fep = netdev_priv(dev);

	(*fep->ops->pre_request_irq)(dev, irq);
	return request_irq(irq, irqf, SA_SHIRQ, name, dev);
}

static void fs_free_irq(struct net_device *dev, int irq)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	free_irq(irq, dev);
	(*fep->ops->post_free_irq)(dev, irq);
}

/**********************************************************************************/

/* This interrupt occurs when the PHY detects a link change. */
static irqreturn_t
fs_mii_link_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;

	fep = netdev_priv(dev);
	fpi = fep->fpi;

	/*
	 * Acknowledge the interrupt if possible. If we have not
	 * found the PHY yet we can't process or acknowledge the
	 * interrupt now. Instead we ignore this interrupt for now,
	 * which we can do since it is edge triggered. It will be
	 * acknowledged later by fs_enet_open().
	 */
	if (!fep->phy)
		return IRQ_NONE;

	fs_mii_ack_int(dev);
	fs_mii_link_status_change_check(dev, 0);

	return IRQ_HANDLED;
}

static void fs_timeout(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int wake = 0;

	fep->stats.tx_errors++;

	spin_lock_irqsave(&fep->lock, flags);

	if (dev->flags & IFF_UP) {
		(*fep->ops->stop)(dev);
		(*fep->ops->restart)(dev);
	}

	wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (wake)
		netif_wake_queue(dev);
}

static int fs_enet_open(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	int r;

	/* Install our interrupt handler. */
	r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
	if (r != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s Could not allocate FEC IRQ!\n", dev->name);
		return -EINVAL;
	}

	/* Install our phy interrupt handler */
	if (fpi->phy_irq != -1) {

		r = fs_request_irq(dev, fpi->phy_irq, "fs_enet-phy", fs_mii_link_interrupt);
		if (r != 0) {
			printk(KERN_ERR DRV_MODULE_NAME
			       ": %s Could not allocate PHY IRQ!\n", dev->name);
			fs_free_irq(dev, fep->interrupt);
			return -EINVAL;
		}
	}

	fs_mii_startup(dev);
	netif_carrier_off(dev);
	fs_mii_link_status_change_check(dev, 1);

	return 0;
}

static int fs_enet_close(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	unsigned long flags;

	netif_stop_queue(dev);
	netif_carrier_off(dev);
	fs_mii_shutdown(dev);

	spin_lock_irqsave(&fep->lock, flags);
	(*fep->ops->stop)(dev);
	spin_unlock_irqrestore(&fep->lock, flags);

	/* release any irqs */
	if (fpi->phy_irq != -1)
		fs_free_irq(dev, fpi->phy_irq);
	fs_free_irq(dev, fep->interrupt);

	return 0;
}

static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	return &fep->stats;
}

/*************************************************************************/

static void fs_get_drvinfo(struct net_device *dev,
			   struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
}

static int fs_get_regs_len(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return (*fep->ops->get_regs_len)(dev);
}

static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			void *p)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int r, len;

	len = regs->len;

	spin_lock_irqsave(&fep->lock, flags);
	r = (*fep->ops->get_regs)(dev, p, &len);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (r == 0)
		regs->version = 0;
}

static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&fep->lock, flags);
	rc = mii_ethtool_gset(&fep->mii_if, cmd);
	spin_unlock_irqrestore(&fep->lock, flags);

	return rc;
}

static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&fep->lock, flags);
	rc = mii_ethtool_sset(&fep->mii_if, cmd);
	spin_unlock_irqrestore(&fep->lock, flags);

	return rc;
}

static int fs_nway_reset(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	return mii_nway_restart(&fep->mii_if);
}

static u32 fs_get_msglevel(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	return fep->msg_enable;
}

static void fs_set_msglevel(struct net_device *dev, u32 value)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fep->msg_enable = value;
}

static struct ethtool_ops fs_ethtool_ops = {
	.get_drvinfo = fs_get_drvinfo,
	.get_regs_len = fs_get_regs_len,
	.get_settings = fs_get_settings,
	.set_settings = fs_set_settings,
	.nway_reset = fs_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_msglevel = fs_get_msglevel,
	.set_msglevel = fs_set_msglevel,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,	/* local! */
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_regs = fs_get_regs,
};
890 | ||
891 | static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |
892 | { | |
893 | struct fs_enet_private *fep = netdev_priv(dev); | |
894 | struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data; | |
895 | unsigned long flags; | |
896 | int rc; | |
897 | ||
898 | if (!netif_running(dev)) | |
899 | return -EINVAL; | |
900 | ||
901 | spin_lock_irqsave(&fep->lock, flags); | |
902 | rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL); | |
903 | spin_unlock_irqrestore(&fep->lock, flags); | |
904 | return rc; | |
905 | } | |
906 | ||
907 | extern int fs_mii_connect(struct net_device *dev); | |
908 | extern void fs_mii_disconnect(struct net_device *dev); | |
909 | ||
910 | static struct net_device *fs_init_instance(struct device *dev, | |
911 | const struct fs_platform_info *fpi) | |
912 | { | |
913 | struct net_device *ndev = NULL; | |
914 | struct fs_enet_private *fep = NULL; | |
915 | int privsize, i, r, err = 0, registered = 0; | |
916 | ||
917 | /* guard */ | |
918 | if ((unsigned int)fpi->fs_no >= FS_MAX_INDEX) | |
919 | return ERR_PTR(-EINVAL); | |
920 | ||
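	/*
	 * The netdev private area is allocated with room for both skb pointer
	 * rings appended right after struct fs_enet_private; rx_skbuff and
	 * tx_skbuff are pointed into that tail further below.
	 */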
	privsize = sizeof(*fep) + (sizeof(struct sk_buff **) *
				   (fpi->rx_ring + fpi->tx_ring));

	ndev = alloc_etherdev(privsize);
	if (!ndev) {
		err = -ENOMEM;
		goto err;
	}
	SET_MODULE_OWNER(ndev);

	fep = netdev_priv(ndev);
	memset(fep, 0, privsize);	/* clear everything */

	fep->dev = dev;
	dev_set_drvdata(dev, ndev);
	fep->fpi = fpi;
	if (fpi->init_ioports)
		fpi->init_ioports();

#ifdef CONFIG_FS_ENET_HAS_FEC
	if (fs_get_fec_index(fpi->fs_no) >= 0)
		fep->ops = &fs_fec_ops;
#endif

#ifdef CONFIG_FS_ENET_HAS_SCC
	if (fs_get_scc_index(fpi->fs_no) >= 0)
		fep->ops = &fs_scc_ops;
#endif

#ifdef CONFIG_FS_ENET_HAS_FCC
	if (fs_get_fcc_index(fpi->fs_no) >= 0)
		fep->ops = &fs_fcc_ops;
#endif

	if (fep->ops == NULL) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s No matching ops found (%d).\n",
		       ndev->name, fpi->fs_no);
		err = -EINVAL;
		goto err;
	}

	r = (*fep->ops->setup_data)(ndev);
	if (r != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s setup_data failed\n",
		       ndev->name);
		err = r;
		goto err;
	}

	/* point rx_skbuff, tx_skbuff */
	fep->rx_skbuff = (struct sk_buff **)&fep[1];
	fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;

	/* init locks */
	spin_lock_init(&fep->lock);
	spin_lock_init(&fep->tx_lock);

	/*
	 * Set the Ethernet address.
	 */
	for (i = 0; i < 6; i++)
		ndev->dev_addr[i] = fpi->macaddr[i];

	r = (*fep->ops->allocate_bd)(ndev);

	if (fep->ring_base == NULL) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s buffer descriptor alloc failed (%d).\n", ndev->name, r);
		err = r;
		goto err;
	}

	/*
	 * Set receive and transmit descriptor base.
	 */
	fep->rx_bd_base = fep->ring_base;
	fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

	/* initialize ring size variables */
	fep->tx_ring = fpi->tx_ring;
	fep->rx_ring = fpi->rx_ring;

	/*
	 * The FEC Ethernet specific entries in the device structure.
	 */
	ndev->open = fs_enet_open;
	ndev->hard_start_xmit = fs_enet_start_xmit;
	ndev->tx_timeout = fs_timeout;
	ndev->watchdog_timeo = 2 * HZ;
	ndev->stop = fs_enet_close;
	ndev->get_stats = fs_enet_get_stats;
	ndev->set_multicast_list = fs_set_multicast_list;
	if (fpi->use_napi) {
		ndev->poll = fs_enet_rx_napi;
		ndev->weight = fpi->napi_weight;
	}
	ndev->ethtool_ops = &fs_ethtool_ops;
	ndev->do_ioctl = fs_ioctl;

	init_timer(&fep->phy_timer_list);

	netif_carrier_off(ndev);

	err = register_netdev(ndev);
	if (err != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s register_netdev failed.\n", ndev->name);
		goto err;
	}
	registered = 1;

	err = fs_mii_connect(ndev);
	if (err != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s fs_mii_connect failed.\n", ndev->name);
		goto err;
	}

	return ndev;

err:
	if (ndev != NULL) {
		if (registered)
			unregister_netdev(ndev);

		if (fep != NULL && fep->ops != NULL) {
			(*fep->ops->free_bd)(ndev);
			(*fep->ops->cleanup_data)(ndev);
		}

		free_netdev(ndev);
	}

	dev_set_drvdata(dev, NULL);

	return ERR_PTR(err);
}
1061 | ||
1062 | static int fs_cleanup_instance(struct net_device *ndev) | |
1063 | { | |
1064 | struct fs_enet_private *fep; | |
1065 | const struct fs_platform_info *fpi; | |
1066 | struct device *dev; | |
1067 | ||
1068 | if (ndev == NULL) | |
1069 | return -EINVAL; | |
1070 | ||
1071 | fep = netdev_priv(ndev); | |
1072 | if (fep == NULL) | |
1073 | return -EINVAL; | |
1074 | ||
1075 | fpi = fep->fpi; | |
1076 | ||
1077 | fs_mii_disconnect(ndev); | |
1078 | ||
1079 | unregister_netdev(ndev); | |
1080 | ||
1081 | dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), | |
1082 | fep->ring_base, fep->ring_mem_addr); | |
1083 | ||
1084 | /* reset it */ | |
1085 | (*fep->ops->cleanup_data)(ndev); | |
1086 | ||
1087 | dev = fep->dev; | |
1088 | if (dev != NULL) { | |
1089 | dev_set_drvdata(dev, NULL); | |
1090 | fep->dev = NULL; | |
1091 | } | |
1092 | ||
1093 | free_netdev(ndev); | |
1094 | ||
1095 | return 0; | |
1096 | } | |
1097 | ||
1098 | /**************************************************************************************/ | |
1099 | ||
1100 | /* handy pointer to the immap */ | |
1101 | void *fs_enet_immap = NULL; | |
1102 | ||
1103 | static int setup_immap(void) | |
1104 | { | |
1105 | phys_addr_t paddr = 0; | |
1106 | unsigned long size = 0; | |
1107 | ||
1108 | #ifdef CONFIG_CPM1 | |
1109 | paddr = IMAP_ADDR; | |
1110 | size = 0x10000; /* map 64K */ | |
1111 | #endif | |
1112 | ||
1113 | #ifdef CONFIG_CPM2 | |
1114 | paddr = CPM_MAP_ADDR; | |
1115 | size = 0x40000; /* map 256 K */ | |
1116 | #endif | |
1117 | fs_enet_immap = ioremap(paddr, size); | |
1118 | if (fs_enet_immap == NULL) | |
1119 | return -EBADF; /* XXX ahem; maybe just BUG_ON? */ | |
1120 | ||
1121 | return 0; | |
1122 | } | |
1123 | ||
1124 | static void cleanup_immap(void) | |
1125 | { | |
1126 | if (fs_enet_immap != NULL) { | |
1127 | iounmap(fs_enet_immap); | |
1128 | fs_enet_immap = NULL; | |
1129 | } | |
1130 | } | |
1131 | ||
1132 | /**************************************************************************************/ | |
1133 | ||
1134 | static int __devinit fs_enet_probe(struct device *dev) | |
1135 | { | |
1136 | struct net_device *ndev; | |
1137 | ||
1138 | /* no fixup - no device */ | |
1139 | if (dev->platform_data == NULL) { | |
1140 | printk(KERN_INFO "fs_enet: " | |
1141 | "probe called with no platform data; " | |
1142 | "remove unused devices\n"); | |
1143 | return -ENODEV; | |
1144 | } | |
1145 | ||
1146 | ndev = fs_init_instance(dev, dev->platform_data); | |
1147 | if (IS_ERR(ndev)) | |
1148 | return PTR_ERR(ndev); | |
1149 | return 0; | |
1150 | } | |
1151 | ||
1152 | static int fs_enet_remove(struct device *dev) | |
1153 | { | |
1154 | return fs_cleanup_instance(dev_get_drvdata(dev)); | |
1155 | } | |
1156 | ||
static struct device_driver fs_enet_fec_driver = {
	.name = "fsl-cpm-fec",
	.bus = &platform_bus_type,
	.probe = fs_enet_probe,
	.remove = fs_enet_remove,
#ifdef CONFIG_PM
/*	.suspend = fs_enet_suspend,	TODO */
/*	.resume = fs_enet_resume,	TODO */
#endif
};

static struct device_driver fs_enet_scc_driver = {
	.name = "fsl-cpm-scc",
	.bus = &platform_bus_type,
	.probe = fs_enet_probe,
	.remove = fs_enet_remove,
#ifdef CONFIG_PM
/*	.suspend = fs_enet_suspend,	TODO */
/*	.resume = fs_enet_resume,	TODO */
#endif
};

static struct device_driver fs_enet_fcc_driver = {
	.name = "fsl-cpm-fcc",
	.bus = &platform_bus_type,
	.probe = fs_enet_probe,
	.remove = fs_enet_remove,
#ifdef CONFIG_PM
/*	.suspend = fs_enet_suspend,	TODO */
/*	.resume = fs_enet_resume,	TODO */
#endif
};

static int __init fs_init(void)
{
	int r;

	printk(KERN_INFO "%s", version);

	r = setup_immap();
	if (r != 0)
		return r;

	r = driver_register(&fs_enet_fec_driver);
	if (r != 0)
		goto err;

	r = driver_register(&fs_enet_fcc_driver);
	if (r != 0)
		goto err;

	r = driver_register(&fs_enet_scc_driver);
	if (r != 0)
		goto err;

	return 0;
err:
	cleanup_immap();
	return r;
}

static void __exit fs_cleanup(void)
{
	driver_unregister(&fs_enet_fec_driver);
	driver_unregister(&fs_enet_fcc_driver);
	driver_unregister(&fs_enet_scc_driver);
	cleanup_immap();
}

/**************************************************************************************/

module_init(fs_init);
module_exit(fs_cleanup);