/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_net.h>

#include "macb.h"

#define RX_BUFFER_SIZE		128
#define RX_RING_SIZE		512
#define RX_RING_BYTES		(sizeof(struct dma_desc) * RX_RING_SIZE)

/* Make the IP header word-aligned (the ethernet header is 14 bytes) */
#define RX_OFFSET		2

#define TX_RING_SIZE		128
#define DEF_TX_RING_PENDING	(TX_RING_SIZE - 1)
#define TX_RING_BYTES		(sizeof(struct dma_desc) * TX_RING_SIZE)

#define TX_RING_GAP(bp)						\
        (TX_RING_SIZE - (bp)->tx_pending)
#define TX_BUFFS_AVAIL(bp)					\
        (((bp)->tx_tail <= (bp)->tx_head) ?			\
         (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head :	\
         (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp))
#define NEXT_TX(n)		(((n) + 1) & (TX_RING_SIZE - 1))

#define NEXT_RX(n)		(((n) + 1) & (RX_RING_SIZE - 1))

/* minimum number of free TX descriptors before waking up TX process */
#define MACB_TX_WAKEUP_THRESH	(TX_RING_SIZE / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))

static void __macb_set_hwaddr(struct macb *bp)
{
        u32 bottom;
        u16 top;

        bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
        macb_or_gem_writel(bp, SA1B, bottom);
        top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
        macb_or_gem_writel(bp, SA1T, top);
}

static void __init macb_get_hwaddr(struct macb *bp)
{
        u32 bottom;
        u16 top;
        u8 addr[6];

        bottom = macb_or_gem_readl(bp, SA1B);
        top = macb_or_gem_readl(bp, SA1T);

        addr[0] = bottom & 0xff;
        addr[1] = (bottom >> 8) & 0xff;
        addr[2] = (bottom >> 16) & 0xff;
        addr[3] = (bottom >> 24) & 0xff;
        addr[4] = top & 0xff;
        addr[5] = (top >> 8) & 0xff;

        if (is_valid_ether_addr(addr)) {
                memcpy(bp->dev->dev_addr, addr, sizeof(addr));
        } else {
                netdev_info(bp->dev, "invalid hw address, using random\n");
                random_ether_addr(bp->dev->dev_addr);
        }
}

static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
        struct macb *bp = bus->priv;
        int value;

        macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
                              | MACB_BF(RW, MACB_MAN_READ)
                              | MACB_BF(PHYA, mii_id)
                              | MACB_BF(REGA, regnum)
                              | MACB_BF(CODE, MACB_MAN_CODE)));

        /* wait for end of transfer */
        while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
                cpu_relax();

        value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

        return value;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
                           u16 value)
{
        struct macb *bp = bus->priv;

        macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
                              | MACB_BF(RW, MACB_MAN_WRITE)
                              | MACB_BF(PHYA, mii_id)
                              | MACB_BF(REGA, regnum)
                              | MACB_BF(CODE, MACB_MAN_CODE)
                              | MACB_BF(DATA, value)));

        /* wait for end of transfer */
        while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
                cpu_relax();

        return 0;
}

static int macb_mdio_reset(struct mii_bus *bus)
{
        return 0;
}

static void macb_handle_link_change(struct net_device *dev)
{
        struct macb *bp = netdev_priv(dev);
        struct phy_device *phydev = bp->phy_dev;
        unsigned long flags;

        int status_change = 0;

        spin_lock_irqsave(&bp->lock, flags);

        if (phydev->link) {
                if ((bp->speed != phydev->speed) ||
                    (bp->duplex != phydev->duplex)) {
                        u32 reg;

                        reg = macb_readl(bp, NCFGR);
                        reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));

                        if (phydev->duplex)
                                reg |= MACB_BIT(FD);
                        if (phydev->speed == SPEED_100)
                                reg |= MACB_BIT(SPD);

                        macb_writel(bp, NCFGR, reg);

                        bp->speed = phydev->speed;
                        bp->duplex = phydev->duplex;
                        status_change = 1;
                }
        }

        if (phydev->link != bp->link) {
                if (!phydev->link) {
                        bp->speed = 0;
                        bp->duplex = -1;
                }
                bp->link = phydev->link;

                status_change = 1;
        }

        spin_unlock_irqrestore(&bp->lock, flags);

        if (status_change) {
                if (phydev->link)
                        netdev_info(dev, "link up (%d/%s)\n",
                                    phydev->speed,
                                    phydev->duplex == DUPLEX_FULL ?
                                    "Full" : "Half");
                else
                        netdev_info(dev, "link down\n");
        }
}

/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
        struct macb *bp = netdev_priv(dev);
        struct phy_device *phydev;
        int ret;

        phydev = phy_find_first(bp->mii_bus);
        if (!phydev) {
                netdev_err(dev, "no PHY found\n");
                return -1;
        }

        /* TODO : add pin_irq */

        /* attach the mac to the phy */
        ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0,
                                 bp->phy_interface);
        if (ret) {
                netdev_err(dev, "Could not attach to PHY\n");
                return ret;
        }

        /* mask with MAC supported features */
        phydev->supported &= PHY_BASIC_FEATURES;

        phydev->advertising = phydev->supported;

        bp->link = 0;
        bp->speed = 0;
        bp->duplex = -1;
        bp->phy_dev = phydev;

        return 0;
}

static int macb_mii_init(struct macb *bp)
{
        struct macb_platform_data *pdata;
        int err = -ENXIO, i;

        /* Enable management port */
        macb_writel(bp, NCR, MACB_BIT(MPE));

        bp->mii_bus = mdiobus_alloc();
        if (bp->mii_bus == NULL) {
                err = -ENOMEM;
                goto err_out;
        }

        bp->mii_bus->name = "MACB_mii_bus";
        bp->mii_bus->read = &macb_mdio_read;
        bp->mii_bus->write = &macb_mdio_write;
        bp->mii_bus->reset = &macb_mdio_reset;
        snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
                 bp->pdev->name, bp->pdev->id);
        bp->mii_bus->priv = bp;
        bp->mii_bus->parent = &bp->dev->dev;
        pdata = bp->pdev->dev.platform_data;

        if (pdata)
                bp->mii_bus->phy_mask = pdata->phy_mask;

        bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
        if (!bp->mii_bus->irq) {
                err = -ENOMEM;
                goto err_out_free_mdiobus;
        }

        for (i = 0; i < PHY_MAX_ADDR; i++)
                bp->mii_bus->irq[i] = PHY_POLL;

        dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

        if (mdiobus_register(bp->mii_bus))
                goto err_out_free_mdio_irq;

        if (macb_mii_probe(bp->dev) != 0) {
                goto err_out_unregister_bus;
        }

        return 0;

err_out_unregister_bus:
        mdiobus_unregister(bp->mii_bus);
err_out_free_mdio_irq:
        kfree(bp->mii_bus->irq);
err_out_free_mdiobus:
        mdiobus_free(bp->mii_bus);
err_out:
        return err;
}

static void macb_update_stats(struct macb *bp)
{
        u32 __iomem *reg = bp->regs + MACB_PFR;
        u32 *p = &bp->hw_stats.macb.rx_pause_frames;
        u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;

        WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

        for(; p < end; p++, reg++)
                *p += __raw_readl(reg);
}

static void macb_tx(struct macb *bp)
{
        unsigned int tail;
        unsigned int head;
        u32 status;

        status = macb_readl(bp, TSR);
        macb_writel(bp, TSR, status);

        netdev_dbg(bp->dev, "macb_tx status = %02lx\n", (unsigned long)status);

        if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) {
                int i;
                netdev_err(bp->dev, "TX %s, resetting buffers\n",
                           status & MACB_BIT(UND) ?
                           "underrun" : "retry limit exceeded");

                /* Transfer ongoing, disable transmitter, to avoid confusion */
                if (status & MACB_BIT(TGO))
                        macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE));

                head = bp->tx_head;

                /*Mark all the buffer as used to avoid sending a lost buffer*/
                for (i = 0; i < TX_RING_SIZE; i++)
                        bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);

                /* Add wrap bit */
                bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);

                /* free transmit buffer in upper layer*/
                for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
                        struct ring_info *rp = &bp->tx_skb[tail];
                        struct sk_buff *skb = rp->skb;

                        BUG_ON(skb == NULL);

                        rmb();

                        dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
                                         DMA_TO_DEVICE);
                        rp->skb = NULL;
                        dev_kfree_skb_irq(skb);
                }

                bp->tx_head = bp->tx_tail = 0;

                /* Enable the transmitter again */
                if (status & MACB_BIT(TGO))
                        macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
        }

        if (!(status & MACB_BIT(COMP)))
                /*
                 * This may happen when a buffer becomes complete
                 * between reading the ISR and scanning the
                 * descriptors. Nothing to worry about.
                 */
                return;

        head = bp->tx_head;
        for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
                struct ring_info *rp = &bp->tx_skb[tail];
                struct sk_buff *skb = rp->skb;
                u32 bufstat;

                BUG_ON(skb == NULL);

                rmb();
                bufstat = bp->tx_ring[tail].ctrl;

                if (!(bufstat & MACB_BIT(TX_USED)))
                        break;

                netdev_dbg(bp->dev, "skb %u (data %p) TX complete\n",
                           tail, skb->data);
                dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
                                 DMA_TO_DEVICE);
                bp->stats.tx_packets++;
                bp->stats.tx_bytes += skb->len;
                rp->skb = NULL;
                dev_kfree_skb_irq(skb);
        }

        bp->tx_tail = tail;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH)
                netif_wake_queue(bp->dev);
}

static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
                         unsigned int last_frag)
{
        unsigned int len;
        unsigned int frag;
        unsigned int offset = 0;
        struct sk_buff *skb;

        len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl);

        netdev_dbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
                   first_frag, last_frag, len);

        skb = dev_alloc_skb(len + RX_OFFSET);
        if (!skb) {
                bp->stats.rx_dropped++;
                for (frag = first_frag; ; frag = NEXT_RX(frag)) {
                        bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
                        if (frag == last_frag)
                                break;
                }
                wmb();
                return 1;
        }

        skb_reserve(skb, RX_OFFSET);
        skb_checksum_none_assert(skb);
        skb_put(skb, len);

        for (frag = first_frag; ; frag = NEXT_RX(frag)) {
                unsigned int frag_len = RX_BUFFER_SIZE;

                if (offset + frag_len > len) {
                        BUG_ON(frag != last_frag);
                        frag_len = len - offset;
                }
                skb_copy_to_linear_data_offset(skb, offset,
                                               (bp->rx_buffers +
                                                (RX_BUFFER_SIZE * frag)),
                                               frag_len);
                offset += RX_BUFFER_SIZE;
                bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
                wmb();

                if (frag == last_frag)
                        break;
        }

        skb->protocol = eth_type_trans(skb, bp->dev);

        bp->stats.rx_packets++;
        bp->stats.rx_bytes += len;
        netdev_dbg(bp->dev, "received skb of length %u, csum: %08x\n",
                   skb->len, skb->csum);
        netif_receive_skb(skb);

        return 0;
}

/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb *bp, unsigned int begin,
                                  unsigned int end)
{
        unsigned int frag;

        for (frag = begin; frag != end; frag = NEXT_RX(frag))
                bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
        wmb();

        /*
         * When this happens, the hardware stats registers for
         * whatever caused this is updated, so we don't have to record
         * anything.
         */
}

static int macb_rx(struct macb *bp, int budget)
{
        int received = 0;
        unsigned int tail = bp->rx_tail;
        int first_frag = -1;

        for (; budget > 0; tail = NEXT_RX(tail)) {
                u32 addr, ctrl;

                rmb();
                addr = bp->rx_ring[tail].addr;
                ctrl = bp->rx_ring[tail].ctrl;

                if (!(addr & MACB_BIT(RX_USED)))
                        break;

                if (ctrl & MACB_BIT(RX_SOF)) {
                        if (first_frag != -1)
                                discard_partial_frame(bp, first_frag, tail);
                        first_frag = tail;
                }

                if (ctrl & MACB_BIT(RX_EOF)) {
                        int dropped;
                        BUG_ON(first_frag == -1);

                        dropped = macb_rx_frame(bp, first_frag, tail);
                        first_frag = -1;
                        if (!dropped) {
                                received++;
                                budget--;
                        }
                }
        }

        if (first_frag != -1)
                bp->rx_tail = first_frag;
        else
                bp->rx_tail = tail;

        return received;
}

static int macb_poll(struct napi_struct *napi, int budget)
{
        struct macb *bp = container_of(napi, struct macb, napi);
        int work_done;
        u32 status;

        status = macb_readl(bp, RSR);
        macb_writel(bp, RSR, status);

        work_done = 0;

        netdev_dbg(bp->dev, "poll: status = %08lx, budget = %d\n",
                   (unsigned long)status, budget);

        work_done = macb_rx(bp, budget);
        if (work_done < budget) {
                napi_complete(napi);

                /*
                 * We've done what we can to clean the buffers. Make sure we
                 * get notified when new packets arrive.
                 */
                macb_writel(bp, IER, MACB_RX_INT_FLAGS);
        }

        /* TODO: Handle errors */

        return work_done;
}

static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct macb *bp = netdev_priv(dev);
        u32 status;

        status = macb_readl(bp, ISR);

        if (unlikely(!status))
                return IRQ_NONE;

        spin_lock(&bp->lock);

        while (status) {
                /* close possible race with dev_close */
                if (unlikely(!netif_running(dev))) {
                        macb_writel(bp, IDR, ~0UL);
                        break;
                }

                if (status & MACB_RX_INT_FLAGS) {
                        /*
                         * There's no point taking any more interrupts
                         * until we have processed the buffers. The
                         * scheduling call may fail if the poll routine
                         * is already scheduled, so disable interrupts
                         * now.
                         */
                        macb_writel(bp, IDR, MACB_RX_INT_FLAGS);

                        if (napi_schedule_prep(&bp->napi)) {
                                netdev_dbg(bp->dev, "scheduling RX softirq\n");
                                __napi_schedule(&bp->napi);
                        }
                }

                if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND) |
                              MACB_BIT(ISR_RLE)))
                        macb_tx(bp);

                /*
                 * Link change detection isn't possible with RMII, so we'll
                 * add that if/when we get our hands on a full-blown MII PHY.
                 */

                if (status & MACB_BIT(ISR_ROVR)) {
                        /* We missed at least one packet */
                        if (macb_is_gem(bp))
                                bp->hw_stats.gem.rx_overruns++;
                        else
                                bp->hw_stats.macb.rx_overruns++;
                }

                if (status & MACB_BIT(HRESP)) {
                        /*
                         * TODO: Reset the hardware, and maybe move the
                         * netdev_err to a lower-priority context as well
                         * (work queue?)
                         */
                        netdev_err(dev, "DMA bus error: HRESP not OK\n");
                }

                status = macb_readl(bp, ISR);
        }

        spin_unlock(&bp->lock);

        return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void macb_poll_controller(struct net_device *dev)
{
        unsigned long flags;

        local_irq_save(flags);
        macb_interrupt(dev->irq, dev);
        local_irq_restore(flags);
}
#endif

static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct macb *bp = netdev_priv(dev);
        dma_addr_t mapping;
        unsigned int len, entry;
        u32 ctrl;
        unsigned long flags;

#ifdef DEBUG
        netdev_dbg(bp->dev,
                   "start_xmit: len %u head %p data %p tail %p end %p\n",
                   skb->len, skb->head, skb->data,
                   skb_tail_pointer(skb), skb_end_pointer(skb));
        print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
                       skb->data, 16, true);
#endif

        len = skb->len;
        spin_lock_irqsave(&bp->lock, flags);

        /* This is a hard error, log it. */
        if (TX_BUFFS_AVAIL(bp) < 1) {
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&bp->lock, flags);
                netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
                netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
                           bp->tx_head, bp->tx_tail);
                return NETDEV_TX_BUSY;
        }

        entry = bp->tx_head;
        netdev_dbg(bp->dev, "Allocated ring entry %u\n", entry);
        mapping = dma_map_single(&bp->pdev->dev, skb->data,
                                 len, DMA_TO_DEVICE);
        bp->tx_skb[entry].skb = skb;
        bp->tx_skb[entry].mapping = mapping;
        netdev_dbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
                   skb->data, (unsigned long)mapping);

        ctrl = MACB_BF(TX_FRMLEN, len);
        ctrl |= MACB_BIT(TX_LAST);
        if (entry == (TX_RING_SIZE - 1))
                ctrl |= MACB_BIT(TX_WRAP);

        bp->tx_ring[entry].addr = mapping;
        bp->tx_ring[entry].ctrl = ctrl;
        wmb();

        entry = NEXT_TX(entry);
        bp->tx_head = entry;

        skb_tx_timestamp(skb);

        macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

        spin_unlock_irqrestore(&bp->lock, flags);

        return NETDEV_TX_OK;
}

static void macb_free_consistent(struct macb *bp)
{
        if (bp->tx_skb) {
                kfree(bp->tx_skb);
                bp->tx_skb = NULL;
        }
        if (bp->rx_ring) {
                dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
                                  bp->rx_ring, bp->rx_ring_dma);
                bp->rx_ring = NULL;
        }
        if (bp->tx_ring) {
                dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
                                  bp->tx_ring, bp->tx_ring_dma);
                bp->tx_ring = NULL;
        }
        if (bp->rx_buffers) {
                dma_free_coherent(&bp->pdev->dev,
                                  RX_RING_SIZE * RX_BUFFER_SIZE,
                                  bp->rx_buffers, bp->rx_buffers_dma);
                bp->rx_buffers = NULL;
        }
}

static int macb_alloc_consistent(struct macb *bp)
{
        int size;

        size = TX_RING_SIZE * sizeof(struct ring_info);
        bp->tx_skb = kmalloc(size, GFP_KERNEL);
        if (!bp->tx_skb)
                goto out_err;

        size = RX_RING_BYTES;
        bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
                                         &bp->rx_ring_dma, GFP_KERNEL);
        if (!bp->rx_ring)
                goto out_err;
        netdev_dbg(bp->dev,
                   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
                   size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);

        size = TX_RING_BYTES;
        bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
                                         &bp->tx_ring_dma, GFP_KERNEL);
        if (!bp->tx_ring)
                goto out_err;
        netdev_dbg(bp->dev,
                   "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
                   size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);

        size = RX_RING_SIZE * RX_BUFFER_SIZE;
        bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
                                            &bp->rx_buffers_dma, GFP_KERNEL);
        if (!bp->rx_buffers)
                goto out_err;
        netdev_dbg(bp->dev,
                   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
                   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);

        return 0;

out_err:
        macb_free_consistent(bp);
        return -ENOMEM;
}

static void macb_init_rings(struct macb *bp)
{
        int i;
        dma_addr_t addr;

        addr = bp->rx_buffers_dma;
        for (i = 0; i < RX_RING_SIZE; i++) {
                bp->rx_ring[i].addr = addr;
                bp->rx_ring[i].ctrl = 0;
                addr += RX_BUFFER_SIZE;
        }
        bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);

        for (i = 0; i < TX_RING_SIZE; i++) {
                bp->tx_ring[i].addr = 0;
                bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
        }
        bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);

        bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
}

static void macb_reset_hw(struct macb *bp)
{
        /* Make sure we have the write buffer for ourselves */
        wmb();

        /*
         * Disable RX and TX (XXX: Should we halt the transmission
         * more gracefully?)
         */
        macb_writel(bp, NCR, 0);

        /* Clear the stats registers (XXX: Update stats first?) */
        macb_writel(bp, NCR, MACB_BIT(CLRSTAT));

        /* Clear all status flags */
        macb_writel(bp, TSR, ~0UL);
        macb_writel(bp, RSR, ~0UL);

        /* Disable all interrupts */
        macb_writel(bp, IDR, ~0UL);
        macb_readl(bp, ISR);
}

static u32 gem_mdc_clk_div(struct macb *bp)
{
        u32 config;
        unsigned long pclk_hz = clk_get_rate(bp->pclk);

        if (pclk_hz <= 20000000)
                config = GEM_BF(CLK, GEM_CLK_DIV8);
        else if (pclk_hz <= 40000000)
                config = GEM_BF(CLK, GEM_CLK_DIV16);
        else if (pclk_hz <= 80000000)
                config = GEM_BF(CLK, GEM_CLK_DIV32);
        else if (pclk_hz <= 120000000)
                config = GEM_BF(CLK, GEM_CLK_DIV48);
        else if (pclk_hz <= 160000000)
                config = GEM_BF(CLK, GEM_CLK_DIV64);
        else
                config = GEM_BF(CLK, GEM_CLK_DIV96);

        return config;
}

static u32 macb_mdc_clk_div(struct macb *bp)
{
        u32 config;
        unsigned long pclk_hz;

        if (macb_is_gem(bp))
                return gem_mdc_clk_div(bp);

        pclk_hz = clk_get_rate(bp->pclk);
        if (pclk_hz <= 20000000)
                config = MACB_BF(CLK, MACB_CLK_DIV8);
        else if (pclk_hz <= 40000000)
                config = MACB_BF(CLK, MACB_CLK_DIV16);
        else if (pclk_hz <= 80000000)
                config = MACB_BF(CLK, MACB_CLK_DIV32);
        else
                config = MACB_BF(CLK, MACB_CLK_DIV64);

        return config;
}

/*
 * Get the DMA bus width field of the network configuration register that we
 * should program. We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb *bp)
{
        if (!macb_is_gem(bp))
                return 0;

        switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
        case 4:
                return GEM_BF(DBW, GEM_DBW128);
        case 2:
                return GEM_BF(DBW, GEM_DBW64);
        case 1:
        default:
                return GEM_BF(DBW, GEM_DBW32);
        }
}

/*
 * Configure the receive DMA engine to use the correct receive buffer size.
 * This is a configurable parameter for GEM.
 */
static void macb_configure_dma(struct macb *bp)
{
        u32 dmacfg;

        if (macb_is_gem(bp)) {
                dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
                dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64);
                gem_writel(bp, DMACFG, dmacfg);
        }
}

static void macb_init_hw(struct macb *bp)
{
        u32 config;

        macb_reset_hw(bp);
        __macb_set_hwaddr(bp);

        config = macb_mdc_clk_div(bp);
        config |= MACB_BIT(PAE);		/* PAuse Enable */
        config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
        config |= MACB_BIT(BIG);		/* Receive oversized frames */
        if (bp->dev->flags & IFF_PROMISC)
                config |= MACB_BIT(CAF);	/* Copy All Frames */
        if (!(bp->dev->flags & IFF_BROADCAST))
                config |= MACB_BIT(NBC);	/* No BroadCast */
        config |= macb_dbw(bp);
        macb_writel(bp, NCFGR, config);

        macb_configure_dma(bp);

        /* Initialize TX and RX buffers */
        macb_writel(bp, RBQP, bp->rx_ring_dma);
        macb_writel(bp, TBQP, bp->tx_ring_dma);

        /* Enable TX and RX */
        macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));

        /* Enable interrupts */
        macb_writel(bp, IER, (MACB_BIT(RCOMP)
                              | MACB_BIT(RXUBR)
                              | MACB_BIT(ISR_TUND)
                              | MACB_BIT(ISR_RLE)
                              | MACB_BIT(TXERR)
                              | MACB_BIT(TCOMP)
                              | MACB_BIT(ISR_ROVR)
                              | MACB_BIT(HRESP)));

}

/*
 * The hash address register is 64 bits long and takes up two
 * locations in the memory map. The least significant bits are stored
 * in EMAC_HSL and the most significant bits in EMAC_HSH.
 *
 * The unicast hash enable and the multicast hash enable bits in the
 * network configuration register enable the reception of hash matched
 * frames. The destination address is reduced to a 6 bit index into
 * the 64 bit hash register using the following hash function. The
 * hash function is an exclusive or of every sixth bit of the
 * destination address.
 *
 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
 *
 * da[0] represents the least significant bit of the first byte
 * received, that is, the multicast/unicast indicator, and da[47]
 * represents the most significant bit of the last byte received. If
 * the hash index, hi[n], points to a bit that is set in the hash
 * register then the frame will be matched according to whether the
 * frame is multicast or unicast. A multicast match will be signalled
 * if the multicast hash enable bit is set, da[0] is 1 and the hash
 * index points to a bit set in the hash register. A unicast match
 * will be signalled if the unicast hash enable bit is set, da[0] is 0
 * and the hash index points to a bit set in the hash register. To
 * receive all multicast frames, the hash register should be set with
 * all ones and the multicast hash enable bit should be set in the
 * network configuration register.
 */

static inline int hash_bit_value(int bitnr, __u8 *addr)
{
        if (addr[bitnr / 8] & (1 << (bitnr % 8)))
                return 1;
        return 0;
}

/*
 * Return the hash index value for the specified address.
 */
static int hash_get_index(__u8 *addr)
{
        int i, j, bitval;
        int hash_index = 0;

        for (j = 0; j < 6; j++) {
                for (i = 0, bitval = 0; i < 8; i++)
                        bitval ^= hash_bit_value(i*6 + j, addr);

                hash_index |= (bitval << j);
        }

        return hash_index;
}

/*
 * Add multicast addresses to the internal multicast-hash table.
 */
static void macb_sethashtable(struct net_device *dev)
{
        struct netdev_hw_addr *ha;
        unsigned long mc_filter[2];
        unsigned int bitnr;
        struct macb *bp = netdev_priv(dev);

        mc_filter[0] = mc_filter[1] = 0;

        netdev_for_each_mc_addr(ha, dev) {
                bitnr = hash_get_index(ha->addr);
                mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
        }

        macb_or_gem_writel(bp, HRB, mc_filter[0]);
        macb_or_gem_writel(bp, HRT, mc_filter[1]);
}

/*
 * Enable/Disable promiscuous and multicast modes.
 */
static void macb_set_rx_mode(struct net_device *dev)
{
        unsigned long cfg;
        struct macb *bp = netdev_priv(dev);

        cfg = macb_readl(bp, NCFGR);

        if (dev->flags & IFF_PROMISC)
                /* Enable promiscuous mode */
                cfg |= MACB_BIT(CAF);
        else if (dev->flags & (~IFF_PROMISC))
                /* Disable promiscuous mode */
                cfg &= ~MACB_BIT(CAF);

        if (dev->flags & IFF_ALLMULTI) {
                /* Enable all multicast mode */
                macb_or_gem_writel(bp, HRB, -1);
                macb_or_gem_writel(bp, HRT, -1);
                cfg |= MACB_BIT(NCFGR_MTI);
        } else if (!netdev_mc_empty(dev)) {
                /* Enable specific multicasts */
                macb_sethashtable(dev);
                cfg |= MACB_BIT(NCFGR_MTI);
        } else if (dev->flags & (~IFF_ALLMULTI)) {
                /* Disable all multicast mode */
                macb_or_gem_writel(bp, HRB, 0);
                macb_or_gem_writel(bp, HRT, 0);
                cfg &= ~MACB_BIT(NCFGR_MTI);
        }

        macb_writel(bp, NCFGR, cfg);
}

static int macb_open(struct net_device *dev)
{
        struct macb *bp = netdev_priv(dev);
        int err;

        netdev_dbg(bp->dev, "open\n");

        /* if the phy is not yet registered, retry later */
        if (!bp->phy_dev)
                return -EAGAIN;

        if (!is_valid_ether_addr(dev->dev_addr))
                return -EADDRNOTAVAIL;

        err = macb_alloc_consistent(bp);
        if (err) {
                netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
                           err);
                return err;
        }

        napi_enable(&bp->napi);

        macb_init_rings(bp);
        macb_init_hw(bp);

        /* schedule a link state check */
        phy_start(bp->phy_dev);

        netif_start_queue(dev);

        return 0;
}

static int macb_close(struct net_device *dev)
{
        struct macb *bp = netdev_priv(dev);
        unsigned long flags;

        netif_stop_queue(dev);
        napi_disable(&bp->napi);

        if (bp->phy_dev)
                phy_stop(bp->phy_dev);

        spin_lock_irqsave(&bp->lock, flags);
        macb_reset_hw(bp);
        netif_carrier_off(dev);
        spin_unlock_irqrestore(&bp->lock, flags);

        macb_free_consistent(bp);

        return 0;
}

static void gem_update_stats(struct macb *bp)
{
        u32 __iomem *reg = bp->regs + GEM_OTX;
        u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
        u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1;

        for (; p < end; p++, reg++)
                *p += __raw_readl(reg);
}

static struct net_device_stats *gem_get_stats(struct macb *bp)
{
        struct gem_stats *hwstat = &bp->hw_stats.gem;
        struct net_device_stats *nstat = &bp->stats;

        gem_update_stats(bp);

        nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
                            hwstat->rx_alignment_errors +
                            hwstat->rx_resource_errors +
                            hwstat->rx_overruns +
                            hwstat->rx_oversize_frames +
                            hwstat->rx_jabbers +
                            hwstat->rx_undersized_frames +
                            hwstat->rx_length_field_frame_errors);
        nstat->tx_errors = (hwstat->tx_late_collisions +
                            hwstat->tx_excessive_collisions +
                            hwstat->tx_underrun +
                            hwstat->tx_carrier_sense_errors);
        nstat->multicast = hwstat->rx_multicast_frames;
        nstat->collisions = (hwstat->tx_single_collision_frames +
                             hwstat->tx_multiple_collision_frames +
                             hwstat->tx_excessive_collisions);
        nstat->rx_length_errors = (hwstat->rx_oversize_frames +
                                   hwstat->rx_jabbers +
                                   hwstat->rx_undersized_frames +
                                   hwstat->rx_length_field_frame_errors);
        nstat->rx_over_errors = hwstat->rx_resource_errors;
        nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
        nstat->rx_frame_errors = hwstat->rx_alignment_errors;
        nstat->rx_fifo_errors = hwstat->rx_overruns;
        nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
        nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
        nstat->tx_fifo_errors = hwstat->tx_underrun;

        return nstat;
}

static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
        struct macb *bp = netdev_priv(dev);
        struct net_device_stats *nstat = &bp->stats;
        struct macb_stats *hwstat = &bp->hw_stats.macb;

        if (macb_is_gem(bp))
                return gem_get_stats(bp);

        /* read stats from hardware */
        macb_update_stats(bp);

        /* Convert HW stats into netdevice stats */
        nstat->rx_errors = (hwstat->rx_fcs_errors +
                            hwstat->rx_align_errors +
                            hwstat->rx_resource_errors +
                            hwstat->rx_overruns +
                            hwstat->rx_oversize_pkts +
                            hwstat->rx_jabbers +
                            hwstat->rx_undersize_pkts +
                            hwstat->sqe_test_errors +
                            hwstat->rx_length_mismatch);
        nstat->tx_errors = (hwstat->tx_late_cols +
                            hwstat->tx_excessive_cols +
                            hwstat->tx_underruns +
                            hwstat->tx_carrier_errors);
        nstat->collisions = (hwstat->tx_single_cols +
                             hwstat->tx_multiple_cols +
                             hwstat->tx_excessive_cols);
        nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
                                   hwstat->rx_jabbers +
                                   hwstat->rx_undersize_pkts +
                                   hwstat->rx_length_mismatch);
        nstat->rx_over_errors = hwstat->rx_resource_errors +
                                hwstat->rx_overruns;
        nstat->rx_crc_errors = hwstat->rx_fcs_errors;
        nstat->rx_frame_errors = hwstat->rx_align_errors;
        nstat->rx_fifo_errors = hwstat->rx_overruns;
        /* XXX: What does "missed" mean? */
        nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
        nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
        nstat->tx_fifo_errors = hwstat->tx_underruns;
        /* Don't know about heartbeat or window errors... */

        return nstat;
}

static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct macb *bp = netdev_priv(dev);
        struct phy_device *phydev = bp->phy_dev;

        if (!phydev)
                return -ENODEV;

        return phy_ethtool_gset(phydev, cmd);
}

static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct macb *bp = netdev_priv(dev);
        struct phy_device *phydev = bp->phy_dev;

        if (!phydev)
                return -ENODEV;

        return phy_ethtool_sset(phydev, cmd);
}

static void macb_get_drvinfo(struct net_device *dev,
                             struct ethtool_drvinfo *info)
{
        struct macb *bp = netdev_priv(dev);

        strcpy(info->driver, bp->pdev->dev.driver->name);
        strcpy(info->version, "$Revision: 1.14 $");
        strcpy(info->bus_info, dev_name(&bp->pdev->dev));
}

static const struct ethtool_ops macb_ethtool_ops = {
        .get_settings		= macb_get_settings,
        .set_settings		= macb_set_settings,
        .get_drvinfo		= macb_get_drvinfo,
        .get_link		= ethtool_op_get_link,
};

static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct macb *bp = netdev_priv(dev);
        struct phy_device *phydev = bp->phy_dev;

        if (!netif_running(dev))
                return -EINVAL;

        if (!phydev)
                return -ENODEV;

        return phy_mii_ioctl(phydev, rq, cmd);
}

static const struct net_device_ops macb_netdev_ops = {
        .ndo_open		= macb_open,
        .ndo_stop		= macb_close,
        .ndo_start_xmit		= macb_start_xmit,
        .ndo_set_rx_mode	= macb_set_rx_mode,
        .ndo_get_stats		= macb_get_stats,
        .ndo_do_ioctl		= macb_ioctl,
        .ndo_validate_addr	= eth_validate_addr,
        .ndo_change_mtu		= eth_change_mtu,
        .ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller	= macb_poll_controller,
#endif
};

#if defined(CONFIG_OF)
static const struct of_device_id macb_dt_ids[] = {
        { .compatible = "cdns,at32ap7000-macb" },
        { .compatible = "cdns,at91sam9260-macb" },
        { .compatible = "cdns,macb" },
        { .compatible = "cdns,pc302-gem" },
        { .compatible = "cdns,gem" },
        { /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, macb_dt_ids);

static int __devinit macb_get_phy_mode_dt(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;

        if (np)
                return of_get_phy_mode(np);

        return -ENODEV;
}

static int __devinit macb_get_hwaddr_dt(struct macb *bp)
{
        struct device_node *np = bp->pdev->dev.of_node;
        if (np) {
                const char *mac = of_get_mac_address(np);
                if (mac) {
                        memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
                        return 0;
                }
        }

        return -ENODEV;
}
#else
static int __devinit macb_get_phy_mode_dt(struct platform_device *pdev)
{
        return -ENODEV;
}
static int __devinit macb_get_hwaddr_dt(struct macb *bp)
{
        return -ENODEV;
}
#endif

static int __init macb_probe(struct platform_device *pdev)
{
        struct macb_platform_data *pdata;
        struct resource *regs;
        struct net_device *dev;
        struct macb *bp;
        struct phy_device *phydev;
        u32 config;
        int err = -ENXIO;

        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!regs) {
                dev_err(&pdev->dev, "no mmio resource defined\n");
                goto err_out;
        }

        err = -ENOMEM;
        dev = alloc_etherdev(sizeof(*bp));
        if (!dev)
                goto err_out;

        SET_NETDEV_DEV(dev, &pdev->dev);

        /* TODO: Actually, we have some interesting features... */
        dev->features |= 0;

        bp = netdev_priv(dev);
        bp->pdev = pdev;
        bp->dev = dev;

        spin_lock_init(&bp->lock);

        bp->pclk = clk_get(&pdev->dev, "pclk");
        if (IS_ERR(bp->pclk)) {
                dev_err(&pdev->dev, "failed to get macb_clk\n");
                goto err_out_free_dev;
        }
        clk_enable(bp->pclk);

        bp->hclk = clk_get(&pdev->dev, "hclk");
        if (IS_ERR(bp->hclk)) {
                dev_err(&pdev->dev, "failed to get hclk\n");
                goto err_out_put_pclk;
        }
        clk_enable(bp->hclk);

        bp->regs = ioremap(regs->start, resource_size(regs));
        if (!bp->regs) {
                dev_err(&pdev->dev, "failed to map registers, aborting.\n");
                err = -ENOMEM;
                goto err_out_disable_clocks;
        }

        dev->irq = platform_get_irq(pdev, 0);
        err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev);
        if (err) {
                dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
                        dev->irq, err);
                goto err_out_iounmap;
        }

        dev->netdev_ops = &macb_netdev_ops;
        netif_napi_add(dev, &bp->napi, macb_poll, 64);
        dev->ethtool_ops = &macb_ethtool_ops;

        dev->base_addr = regs->start;

        /* Set MII management clock divider */
        config = macb_mdc_clk_div(bp);
        config |= macb_dbw(bp);
        macb_writel(bp, NCFGR, config);

        err = macb_get_hwaddr_dt(bp);
        if (err < 0)
                macb_get_hwaddr(bp);

        err = macb_get_phy_mode_dt(pdev);
        if (err < 0) {
                pdata = pdev->dev.platform_data;
                if (pdata && pdata->is_rmii)
                        bp->phy_interface = PHY_INTERFACE_MODE_RMII;
                else
                        bp->phy_interface = PHY_INTERFACE_MODE_MII;
        } else {
                bp->phy_interface = err;
        }

        if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
#if defined(CONFIG_ARCH_AT91)
                macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
                                               MACB_BIT(CLKEN)));
#else
                macb_or_gem_writel(bp, USRIO, 0);
#endif
        else
#if defined(CONFIG_ARCH_AT91)
                macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN));
#else
                macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
#endif

        bp->tx_pending = DEF_TX_RING_PENDING;

        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
                goto err_out_free_irq;
        }

        if (macb_mii_init(bp) != 0) {
                goto err_out_unregister_netdev;
        }

        platform_set_drvdata(pdev, dev);

        netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
                    macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
                    dev->irq, dev->dev_addr);

        phydev = bp->phy_dev;
        netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
                    phydev->drv->name, dev_name(&phydev->dev), phydev->irq);

        return 0;

err_out_unregister_netdev:
        unregister_netdev(dev);
err_out_free_irq:
        free_irq(dev->irq, dev);
err_out_iounmap:
        iounmap(bp->regs);
err_out_disable_clocks:
        clk_disable(bp->hclk);
        clk_put(bp->hclk);
        clk_disable(bp->pclk);
err_out_put_pclk:
        clk_put(bp->pclk);
err_out_free_dev:
        free_netdev(dev);
err_out:
        platform_set_drvdata(pdev, NULL);
        return err;
}

static int __exit macb_remove(struct platform_device *pdev)
{
        struct net_device *dev;
        struct macb *bp;

        dev = platform_get_drvdata(pdev);

        if (dev) {
                bp = netdev_priv(dev);
                if (bp->phy_dev)
                        phy_disconnect(bp->phy_dev);
                mdiobus_unregister(bp->mii_bus);
                kfree(bp->mii_bus->irq);
                mdiobus_free(bp->mii_bus);
                unregister_netdev(dev);
                free_irq(dev->irq, dev);
                iounmap(bp->regs);
                clk_disable(bp->hclk);
                clk_put(bp->hclk);
                clk_disable(bp->pclk);
                clk_put(bp->pclk);
                free_netdev(dev);
                platform_set_drvdata(pdev, NULL);
        }

        return 0;
}

#ifdef CONFIG_PM
static int macb_suspend(struct platform_device *pdev, pm_message_t state)
{
        struct net_device *netdev = platform_get_drvdata(pdev);
        struct macb *bp = netdev_priv(netdev);

        netif_device_detach(netdev);

        clk_disable(bp->hclk);
        clk_disable(bp->pclk);

        return 0;
}

static int macb_resume(struct platform_device *pdev)
{
        struct net_device *netdev = platform_get_drvdata(pdev);
        struct macb *bp = netdev_priv(netdev);

        clk_enable(bp->pclk);
        clk_enable(bp->hclk);

        netif_device_attach(netdev);

        return 0;
}
#else
#define macb_suspend	NULL
#define macb_resume	NULL
#endif

static struct platform_driver macb_driver = {
        .remove		= __exit_p(macb_remove),
        .suspend	= macb_suspend,
        .resume		= macb_resume,
        .driver		= {
                .name		= "macb",
                .owner	= THIS_MODULE,
                .of_match_table	= of_match_ptr(macb_dt_ids),
        },
};

static int __init macb_init(void)
{
        return platform_driver_probe(&macb_driver, macb_probe);
}

static void __exit macb_exit(void)
{
        platform_driver_unregister(&macb_driver);
}

module_init(macb_init);
module_exit(macb_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");