/*
 * Copyright (c) 2008-2009 Nuvoton technology corporation.
 *
 * Wan ZongShun <mcuos.com@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/gfp.h>

#define DRV_MODULE_NAME "w90p910-emc"
#define DRV_MODULE_VERSION "0.1"

/* Ethernet MAC Registers */
#define REG_CAMCMR 0x00
#define REG_CAMEN 0x04
#define REG_CAMM_BASE 0x08
#define REG_CAML_BASE 0x0c
#define REG_TXDLSA 0x88
#define REG_RXDLSA 0x8C
#define REG_MCMDR 0x90
#define REG_MIID 0x94
#define REG_MIIDA 0x98
#define REG_FFTCR 0x9C
#define REG_TSDR 0xa0
#define REG_RSDR 0xa4
#define REG_DMARFC 0xa8
#define REG_MIEN 0xac
#define REG_MISTA 0xb0
#define REG_CTXDSA 0xcc
#define REG_CTXBSA 0xd0
#define REG_CRXDSA 0xd4
#define REG_CRXBSA 0xd8

/* mac controller bit */
#define MCMDR_RXON 0x01
#define MCMDR_ACP (0x01 << 3)
#define MCMDR_SPCRC (0x01 << 5)
#define MCMDR_TXON (0x01 << 8)
#define MCMDR_FDUP (0x01 << 18)
#define MCMDR_ENMDC (0x01 << 19)
#define MCMDR_OPMOD (0x01 << 20)
#define SWR (0x01 << 24)

/* cam command register */
#define CAMCMR_AUP 0x01
#define CAMCMR_AMP (0x01 << 1)
#define CAMCMR_ABP (0x01 << 2)
#define CAMCMR_CCAM (0x01 << 3)
#define CAMCMR_ECMP (0x01 << 4)
#define CAM0EN 0x01

/* mac mii controller bit */
#define MDCCR (0x0a << 20)
#define PHYAD (0x01 << 8)
#define PHYWR (0x01 << 16)
#define PHYBUSY (0x01 << 17)
#define PHYPRESP (0x01 << 18)
#define CAM_ENTRY_SIZE 0x08

/* rx and tx status */
#define TXDS_TXCP (0x01 << 19)
#define RXDS_CRCE (0x01 << 17)
#define RXDS_PTLE (0x01 << 19)
#define RXDS_RXGD (0x01 << 20)
#define RXDS_ALIE (0x01 << 21)
#define RXDS_RP (0x01 << 22)

/* mac interrupt status */
#define MISTA_EXDEF (0x01 << 19)
#define MISTA_TXBERR (0x01 << 24)
#define MISTA_TDU (0x01 << 23)
#define MISTA_RDU (0x01 << 10)
#define MISTA_RXBERR (0x01 << 11)

#define ENSTART 0x01
#define ENRXINTR 0x01
#define ENRXGD (0x01 << 4)
#define ENRXBERR (0x01 << 11)
#define ENTXINTR (0x01 << 16)
#define ENTXCP (0x01 << 18)
#define ENTXABT (0x01 << 21)
#define ENTXBERR (0x01 << 24)
#define ENMDC (0x01 << 19)
#define PHYBUSY (0x01 << 17)
#define MDCCR_VAL 0xa00000

/* rx and tx owner bit */
#define RX_OWEN_DMA (0x01 << 31)
#define RX_OWEN_CPU (~(0x03 << 30))
#define TX_OWEN_DMA (0x01 << 31)
#define TX_OWEN_CPU (~(0x01 << 31))

/* tx frame desc controller bit */
#define MACTXINTEN 0x04
#define CRCMODE 0x02
#define PADDINGMODE 0x01

/* fftcr controller bit */
#define TXTHD (0x03 << 8)
#define BLENGTH (0x01 << 20)

/* global setting for driver */
#define RX_DESC_SIZE 50
#define TX_DESC_SIZE 10
#define MAX_RBUFF_SZ 0x600
#define MAX_TBUFF_SZ 0x600
#define TX_TIMEOUT (HZ/2)
#define DELAY 1000
#define CAM0 0x0

static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg);

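/*
 * Hardware buffer descriptors: each RX/TX descriptor carries a
 * status/length word, the physical address of its data buffer and the
 * physical address of the next descriptor, so the descriptors form a
 * ring that the DMA engine walks on its own.
 */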
struct w90p910_rxbd {
	unsigned int sl;
	unsigned int buffer;
	unsigned int reserved;
	unsigned int next;
};

struct w90p910_txbd {
	unsigned int mode;
	unsigned int buffer;
	unsigned int sl;
	unsigned int next;
};

struct recv_pdesc {
	struct w90p910_rxbd desclist[RX_DESC_SIZE];
	char recv_buf[RX_DESC_SIZE][MAX_RBUFF_SZ];
};

struct tran_pdesc {
	struct w90p910_txbd desclist[TX_DESC_SIZE];
	char tran_buf[TX_DESC_SIZE][MAX_TBUFF_SZ];
};

struct w90p910_ether {
	struct recv_pdesc *rdesc;
	struct tran_pdesc *tdesc;
	dma_addr_t rdesc_phys;
	dma_addr_t tdesc_phys;
	struct net_device_stats stats;
	struct platform_device *pdev;
	struct resource *res;
	struct sk_buff *skb;
	struct clk *clk;
	struct clk *rmiiclk;
	struct mii_if_info mii;
	struct timer_list check_timer;
	void __iomem *reg;
	int rxirq;
	int txirq;
	unsigned int cur_tx;
	unsigned int cur_rx;
	unsigned int finish_tx;
	unsigned int rx_packets;
	unsigned int rx_bytes;
	unsigned int start_tx_ptr;
	unsigned int start_rx_ptr;
	unsigned int linkflag;
};

static void update_linkspeed_register(struct net_device *dev,
				unsigned int speed, unsigned int duplex)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int val;

	val = __raw_readl(ether->reg + REG_MCMDR);

	if (speed == SPEED_100) {
		/* 100 full/half duplex */
		if (duplex == DUPLEX_FULL) {
			val |= (MCMDR_OPMOD | MCMDR_FDUP);
		} else {
			val |= MCMDR_OPMOD;
			val &= ~MCMDR_FDUP;
		}
	} else {
		/* 10 full/half duplex */
		if (duplex == DUPLEX_FULL) {
			val |= MCMDR_FDUP;
			val &= ~MCMDR_OPMOD;
		} else {
			val &= ~(MCMDR_FDUP | MCMDR_OPMOD);
		}
	}

	__raw_writel(val, ether->reg + REG_MCMDR);
}

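/*
 * Poll the PHY through the MII interface: when the link is up, derive
 * speed and duplex from the autonegotiation result (or from BMCR when
 * autonegotiation is disabled) and program the MAC accordingly.
 */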
static void update_linkspeed(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	struct platform_device *pdev;
	unsigned int bmsr, bmcr, lpa, speed, duplex;

	pdev = ether->pdev;

	if (!mii_link_ok(&ether->mii)) {
		ether->linkflag = 0x0;
		netif_carrier_off(dev);
		dev_warn(&pdev->dev, "%s: Link down.\n", dev->name);
		return;
	}

	if (ether->linkflag == 1)
		return;

	bmsr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMSR);
	bmcr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMCR);

	if (bmcr & BMCR_ANENABLE) {
		if (!(bmsr & BMSR_ANEGCOMPLETE))
			return;

		lpa = w90p910_mdio_read(dev, ether->mii.phy_id, MII_LPA);

		if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF))
			speed = SPEED_100;
		else
			speed = SPEED_10;

		if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL))
			duplex = DUPLEX_FULL;
		else
			duplex = DUPLEX_HALF;

	} else {
		speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
		duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
	}

	update_linkspeed_register(dev, speed, duplex);

	dev_info(&pdev->dev, "%s: Link now %i-%s\n", dev->name, speed,
		 (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
	ether->linkflag = 0x01;

	netif_carrier_on(dev);
}

static void w90p910_check_link(unsigned long dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct w90p910_ether *ether = netdev_priv(dev);

	update_linkspeed(dev);
	mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000));
}

static void w90p910_write_cam(struct net_device *dev,
				unsigned int x, unsigned char *pval)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int msw, lsw;

	msw = (pval[0] << 24) | (pval[1] << 16) | (pval[2] << 8) | pval[3];

	lsw = (pval[4] << 24) | (pval[5] << 16);

	__raw_writel(lsw, ether->reg + REG_CAML_BASE + x * CAM_ENTRY_SIZE);
	__raw_writel(msw, ether->reg + REG_CAMM_BASE + x * CAM_ENTRY_SIZE);
}

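/*
 * Allocate one coherent DMA block each for the TX and RX descriptor
 * rings (descriptors plus their data buffers) and link the descriptors
 * into circular lists, with every buffer pointer expressed as a
 * physical address for the DMA engine.
 */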
static int w90p910_init_desc(struct net_device *dev)
{
	struct w90p910_ether *ether;
	struct w90p910_txbd *tdesc;
	struct w90p910_rxbd *rdesc;
	struct platform_device *pdev;
	unsigned int i;

	ether = netdev_priv(dev);
	pdev = ether->pdev;

	ether->tdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc),
					  &ether->tdesc_phys, GFP_KERNEL);
	if (!ether->tdesc)
		return -ENOMEM;

	ether->rdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc),
					  &ether->rdesc_phys, GFP_KERNEL);
	if (!ether->rdesc) {
		dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc),
				  ether->tdesc, ether->tdesc_phys);
		return -ENOMEM;
	}

	for (i = 0; i < TX_DESC_SIZE; i++) {
		unsigned int offset;

		tdesc = &(ether->tdesc->desclist[i]);

		if (i == TX_DESC_SIZE - 1)
			offset = offsetof(struct tran_pdesc, desclist[0]);
		else
			offset = offsetof(struct tran_pdesc, desclist[i + 1]);

		tdesc->next = ether->tdesc_phys + offset;
		tdesc->buffer = ether->tdesc_phys +
			offsetof(struct tran_pdesc, tran_buf[i]);
		tdesc->sl = 0;
		tdesc->mode = 0;
	}

	ether->start_tx_ptr = ether->tdesc_phys;

	for (i = 0; i < RX_DESC_SIZE; i++) {
		unsigned int offset;

		rdesc = &(ether->rdesc->desclist[i]);

		if (i == RX_DESC_SIZE - 1)
			offset = offsetof(struct recv_pdesc, desclist[0]);
		else
			offset = offsetof(struct recv_pdesc, desclist[i + 1]);

		rdesc->next = ether->rdesc_phys + offset;
		rdesc->sl = RX_OWEN_DMA;
		rdesc->buffer = ether->rdesc_phys +
			offsetof(struct recv_pdesc, recv_buf[i]);
	}

	ether->start_rx_ptr = ether->rdesc_phys;

	return 0;
}

static void w90p910_set_fifo_threshold(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int val;

	val = TXTHD | BLENGTH;
	__raw_writel(val, ether->reg + REG_FFTCR);
}

static void w90p910_return_default_idle(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int val;

	val = __raw_readl(ether->reg + REG_MCMDR);
	val |= SWR;
	__raw_writel(val, ether->reg + REG_MCMDR);
}

static void w90p910_trigger_rx(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);

	__raw_writel(ENSTART, ether->reg + REG_RSDR);
}

static void w90p910_trigger_tx(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);

	__raw_writel(ENSTART, ether->reg + REG_TSDR);
}

static void w90p910_enable_mac_interrupt(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int val;

	val = ENTXINTR | ENRXINTR | ENRXGD | ENTXCP;
	val |= ENTXBERR | ENRXBERR | ENTXABT;

	__raw_writel(val, ether->reg + REG_MIEN);
}

static void w90p910_get_and_clear_int(struct net_device *dev,
					unsigned int *val)
{
	struct w90p910_ether *ether = netdev_priv(dev);

	*val = __raw_readl(ether->reg + REG_MISTA);
	__raw_writel(*val, ether->reg + REG_MISTA);
}

static void w90p910_set_global_maccmd(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int val;

	val = __raw_readl(ether->reg + REG_MCMDR);
	val |= MCMDR_SPCRC | MCMDR_ENMDC | MCMDR_ACP | ENMDC;
	__raw_writel(val, ether->reg + REG_MCMDR);
}

static void w90p910_enable_cam(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int val;

	w90p910_write_cam(dev, CAM0, dev->dev_addr);

	val = __raw_readl(ether->reg + REG_CAMEN);
	val |= CAM0EN;
	__raw_writel(val, ether->reg + REG_CAMEN);
}

static void w90p910_enable_cam_command(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int val;

	val = CAMCMR_ECMP | CAMCMR_ABP | CAMCMR_AMP;
	__raw_writel(val, ether->reg + REG_CAMCMR);
}

static void w90p910_enable_tx(struct net_device *dev, unsigned int enable)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int val;

	val = __raw_readl(ether->reg + REG_MCMDR);

	if (enable)
		val |= MCMDR_TXON;
	else
		val &= ~MCMDR_TXON;

	__raw_writel(val, ether->reg + REG_MCMDR);
}

static void w90p910_enable_rx(struct net_device *dev, unsigned int enable)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int val;

	val = __raw_readl(ether->reg + REG_MCMDR);

	if (enable)
		val |= MCMDR_RXON;
	else
		val &= ~MCMDR_RXON;

	__raw_writel(val, ether->reg + REG_MCMDR);
}

static void w90p910_set_curdest(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);

	__raw_writel(ether->start_rx_ptr, ether->reg + REG_RXDLSA);
	__raw_writel(ether->start_tx_ptr, ether->reg + REG_TXDLSA);
}

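/*
 * Full MAC reinitialisation: stop TX/RX, issue a software reset,
 * rebuild the descriptor rings, reprogram the CAM, filter and
 * interrupt mask, then restart both directions.
 */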
static void w90p910_reset_mac(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);

	w90p910_enable_tx(dev, 0);
	w90p910_enable_rx(dev, 0);
	w90p910_set_fifo_threshold(dev);
	w90p910_return_default_idle(dev);

	if (!netif_queue_stopped(dev))
		netif_stop_queue(dev);

	w90p910_init_desc(dev);

	netif_trans_update(dev); /* prevent tx timeout */
	ether->cur_tx = 0x0;
	ether->finish_tx = 0x0;
	ether->cur_rx = 0x0;

	w90p910_set_curdest(dev);
	w90p910_enable_cam(dev);
	w90p910_enable_cam_command(dev);
	w90p910_enable_mac_interrupt(dev);
	w90p910_enable_tx(dev, 1);
	w90p910_enable_rx(dev, 1);
	w90p910_trigger_tx(dev);
	w90p910_trigger_rx(dev);

	netif_trans_update(dev); /* prevent tx timeout */

	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
}

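/*
 * MII management access: kick off a PHY register transfer via MIIDA and
 * busy-wait for the PHYBUSY bit to clear, giving up after DELAY polls.
 */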
static void w90p910_mdio_write(struct net_device *dev,
				int phy_id, int reg, int data)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	struct platform_device *pdev;
	unsigned int val, i;

	pdev = ether->pdev;

	__raw_writel(data, ether->reg + REG_MIID);

	val = (phy_id << 0x08) | reg;
	val |= PHYBUSY | PHYWR | MDCCR_VAL;
	__raw_writel(val, ether->reg + REG_MIIDA);

	for (i = 0; i < DELAY; i++) {
		if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0)
			break;
	}

	if (i == DELAY)
		dev_warn(&pdev->dev, "mdio write timed out\n");
}

static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	struct platform_device *pdev;
	unsigned int val, i, data;

	pdev = ether->pdev;

	val = (phy_id << 0x08) | reg;
	val |= PHYBUSY | MDCCR_VAL;
	__raw_writel(val, ether->reg + REG_MIIDA);

	for (i = 0; i < DELAY; i++) {
		if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0)
			break;
	}

	if (i == DELAY) {
		dev_warn(&pdev->dev, "mdio read timed out\n");
		data = 0xffff;
	} else {
		data = __raw_readl(ether->reg + REG_MIID);
	}

	return data;
}

static int w90p910_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *address = addr;

	if (!is_valid_ether_addr(address->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
	w90p910_write_cam(dev, CAM0, dev->dev_addr);

	return 0;
}

static int w90p910_ether_close(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	struct platform_device *pdev;

	pdev = ether->pdev;

	dma_free_coherent(&pdev->dev, sizeof(struct recv_pdesc),
			  ether->rdesc, ether->rdesc_phys);
	dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc),
			  ether->tdesc, ether->tdesc_phys);

	netif_stop_queue(dev);

	del_timer_sync(&ether->check_timer);
	clk_disable(ether->rmiiclk);
	clk_disable(ether->clk);

	free_irq(ether->txirq, dev);
	free_irq(ether->rxirq, dev);

	return 0;
}

static struct net_device_stats *w90p910_ether_stats(struct net_device *dev)
{
	struct w90p910_ether *ether;

	ether = netdev_priv(dev);

	return &ether->stats;
}

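/*
 * Copy the frame into the current descriptor's bounce buffer, hand the
 * descriptor to the DMA engine and kick the transmitter; stop the queue
 * if the next descriptor is still owned by the DMA engine.
 */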
static int w90p910_send_frame(struct net_device *dev,
				unsigned char *data, int length)
{
	struct w90p910_ether *ether;
	struct w90p910_txbd *txbd;
	struct platform_device *pdev;
	unsigned char *buffer;

	ether = netdev_priv(dev);
	pdev = ether->pdev;

	txbd = &ether->tdesc->desclist[ether->cur_tx];
	buffer = ether->tdesc->tran_buf[ether->cur_tx];

	if (length > 1514) {
		dev_err(&pdev->dev, "send data %d bytes, check it\n", length);
		length = 1514;
	}

	txbd->sl = length & 0xFFFF;

	memcpy(buffer, data, length);

	txbd->mode = TX_OWEN_DMA | PADDINGMODE | CRCMODE | MACTXINTEN;

	w90p910_enable_tx(dev, 1);

	w90p910_trigger_tx(dev);

	if (++ether->cur_tx >= TX_DESC_SIZE)
		ether->cur_tx = 0;

	txbd = &ether->tdesc->desclist[ether->cur_tx];

	if (txbd->mode & TX_OWEN_DMA)
		netif_stop_queue(dev);

	return 0;
}

static int w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);

	if (!(w90p910_send_frame(dev, skb->data, skb->len))) {
		ether->skb = skb;
		dev_kfree_skb_irq(skb);
		return 0;
	}
	return -EAGAIN;
}

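/*
 * TX completion interrupt: reclaim descriptors from finish_tx up to the
 * descriptor the controller is currently working on (REG_CTXDSA),
 * updating the statistics and waking the queue as space frees up.
 */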
static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id)
{
	struct w90p910_ether *ether;
	struct w90p910_txbd *txbd;
	struct platform_device *pdev;
	struct net_device *dev;
	unsigned int cur_entry, entry, status;

	dev = dev_id;
	ether = netdev_priv(dev);
	pdev = ether->pdev;

	w90p910_get_and_clear_int(dev, &status);

	cur_entry = __raw_readl(ether->reg + REG_CTXDSA);

	entry = ether->tdesc_phys +
		offsetof(struct tran_pdesc, desclist[ether->finish_tx]);

	while (entry != cur_entry) {
		txbd = &ether->tdesc->desclist[ether->finish_tx];

		if (++ether->finish_tx >= TX_DESC_SIZE)
			ether->finish_tx = 0;

		if (txbd->sl & TXDS_TXCP) {
			ether->stats.tx_packets++;
			ether->stats.tx_bytes += txbd->sl & 0xFFFF;
		} else {
			ether->stats.tx_errors++;
		}

		txbd->sl = 0x0;
		txbd->mode = 0x0;

		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		entry = ether->tdesc_phys +
			offsetof(struct tran_pdesc, desclist[ether->finish_tx]);
	}

	if (status & MISTA_EXDEF) {
		dev_err(&pdev->dev, "emc defer exceed interrupt\n");
	} else if (status & MISTA_TXBERR) {
		dev_err(&pdev->dev, "emc bus error interrupt\n");
		w90p910_reset_mac(dev);
	} else if (status & MISTA_TDU) {
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	}

	return IRQ_HANDLED;
}

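/*
 * Drain the RX ring: process descriptors until we catch up with the one
 * the controller is currently filling (REG_CRXDSA), copying each good
 * frame into a freshly allocated skb and returning the descriptor to
 * the DMA engine.
 */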
static void netdev_rx(struct net_device *dev)
{
	struct w90p910_ether *ether;
	struct w90p910_rxbd *rxbd;
	struct platform_device *pdev;
	struct sk_buff *skb;
	unsigned char *data;
	unsigned int length, status, val, entry;

	ether = netdev_priv(dev);
	pdev = ether->pdev;

	rxbd = &ether->rdesc->desclist[ether->cur_rx];

	do {
		val = __raw_readl(ether->reg + REG_CRXDSA);

		entry = ether->rdesc_phys +
			offsetof(struct recv_pdesc, desclist[ether->cur_rx]);

		if (val == entry)
			break;

		status = rxbd->sl;
		length = status & 0xFFFF;

		if (status & RXDS_RXGD) {
			data = ether->rdesc->recv_buf[ether->cur_rx];
			skb = netdev_alloc_skb(dev, length + 2);
			if (!skb) {
				ether->stats.rx_dropped++;
				return;
			}

			skb_reserve(skb, 2);
			skb_put(skb, length);
			skb_copy_to_linear_data(skb, data, length);
			skb->protocol = eth_type_trans(skb, dev);
			ether->stats.rx_packets++;
			ether->stats.rx_bytes += length;
			netif_rx(skb);
		} else {
			ether->stats.rx_errors++;

			if (status & RXDS_RP) {
				dev_err(&pdev->dev, "rx runt err\n");
				ether->stats.rx_length_errors++;
			} else if (status & RXDS_CRCE) {
				dev_err(&pdev->dev, "rx crc err\n");
				ether->stats.rx_crc_errors++;
			} else if (status & RXDS_ALIE) {
				dev_err(&pdev->dev, "rx alignment err\n");
				ether->stats.rx_frame_errors++;
			} else if (status & RXDS_PTLE) {
				dev_err(&pdev->dev, "rx longer err\n");
				ether->stats.rx_over_errors++;
			}
		}

		rxbd->sl = RX_OWEN_DMA;
		rxbd->reserved = 0x0;

		if (++ether->cur_rx >= RX_DESC_SIZE)
			ether->cur_rx = 0;

		rxbd = &ether->rdesc->desclist[ether->cur_rx];

	} while (1);
}

static irqreturn_t w90p910_rx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev;
	struct w90p910_ether *ether;
	struct platform_device *pdev;
	unsigned int status;

	dev = dev_id;
	ether = netdev_priv(dev);
	pdev = ether->pdev;

	w90p910_get_and_clear_int(dev, &status);

	if (status & MISTA_RDU) {
		netdev_rx(dev);
		w90p910_trigger_rx(dev);

		return IRQ_HANDLED;
	} else if (status & MISTA_RXBERR) {
		dev_err(&pdev->dev, "emc rx bus error\n");
		w90p910_reset_mac(dev);
	}

	netdev_rx(dev);
	return IRQ_HANDLED;
}

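/*
 * Bring the interface up: reset and reprogram the MAC, enable the
 * clocks, install the separate TX and RX interrupt handlers and start
 * the periodic link-check timer before opening the transmit queue.
 */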
static int w90p910_ether_open(struct net_device *dev)
{
	struct w90p910_ether *ether;
	struct platform_device *pdev;

	ether = netdev_priv(dev);
	pdev = ether->pdev;

	w90p910_reset_mac(dev);
	w90p910_set_fifo_threshold(dev);
	w90p910_set_curdest(dev);
	w90p910_enable_cam(dev);
	w90p910_enable_cam_command(dev);
	w90p910_enable_mac_interrupt(dev);
	w90p910_set_global_maccmd(dev);
	w90p910_enable_rx(dev, 1);

	clk_enable(ether->rmiiclk);
	clk_enable(ether->clk);

	ether->rx_packets = 0x0;
	ether->rx_bytes = 0x0;

	if (request_irq(ether->txirq, w90p910_tx_interrupt,
			0x0, pdev->name, dev)) {
		dev_err(&pdev->dev, "register irq tx failed\n");
		return -EAGAIN;
	}

	if (request_irq(ether->rxirq, w90p910_rx_interrupt,
			0x0, pdev->name, dev)) {
		dev_err(&pdev->dev, "register irq rx failed\n");
		free_irq(ether->txirq, dev);
		return -EAGAIN;
	}

	mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000));
	netif_start_queue(dev);
	w90p910_trigger_rx(dev);

	dev_info(&pdev->dev, "%s is OPENED\n", dev->name);

	return 0;
}

static void w90p910_ether_set_multicast_list(struct net_device *dev)
{
	struct w90p910_ether *ether;
	unsigned int rx_mode;

	ether = netdev_priv(dev);

	if (dev->flags & IFF_PROMISC)
		rx_mode = CAMCMR_AUP | CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
	else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
		rx_mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
	else
		rx_mode = CAMCMR_ECMP | CAMCMR_ABP;
	__raw_writel(rx_mode, ether->reg + REG_CAMCMR);
}

static int w90p910_ether_ioctl(struct net_device *dev,
				struct ifreq *ifr, int cmd)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	return generic_mii_ioctl(&ether->mii, data, cmd, NULL);
}

static void w90p910_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}

static int w90p910_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	return mii_ethtool_gset(&ether->mii, cmd);
}

static int w90p910_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	return mii_ethtool_sset(&ether->mii, cmd);
}

static int w90p910_nway_reset(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	return mii_nway_restart(&ether->mii);
}

static u32 w90p910_get_link(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	return mii_link_ok(&ether->mii);
}

static const struct ethtool_ops w90p910_ether_ethtool_ops = {
	.get_settings	= w90p910_get_settings,
	.set_settings	= w90p910_set_settings,
	.get_drvinfo	= w90p910_get_drvinfo,
	.nway_reset	= w90p910_nway_reset,
	.get_link	= w90p910_get_link,
};

static const struct net_device_ops w90p910_ether_netdev_ops = {
	.ndo_open		= w90p910_ether_open,
	.ndo_stop		= w90p910_ether_close,
	.ndo_start_xmit		= w90p910_ether_start_xmit,
	.ndo_get_stats		= w90p910_ether_stats,
	.ndo_set_rx_mode	= w90p910_ether_set_multicast_list,
	.ndo_set_mac_address	= w90p910_set_mac_address,
	.ndo_do_ioctl		= w90p910_ether_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

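/*
 * No MAC address is read from hardware here; the driver simply
 * programs a fixed default address.
 */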
static void __init get_mac_address(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	struct platform_device *pdev;
	char addr[ETH_ALEN];

	pdev = ether->pdev;

	addr[0] = 0x00;
	addr[1] = 0x02;
	addr[2] = 0xac;
	addr[3] = 0x55;
	addr[4] = 0x88;
	addr[5] = 0xa8;

	if (is_valid_ether_addr(addr))
		memcpy(dev->dev_addr, &addr, ETH_ALEN);
	else
		dev_err(&pdev->dev, "invalid mac address\n");
}

static int w90p910_ether_setup(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);

	dev->netdev_ops = &w90p910_ether_netdev_ops;
	dev->ethtool_ops = &w90p910_ether_ethtool_ops;

	dev->tx_queue_len = 16;
	dev->dma = 0x0;
	dev->watchdog_timeo = TX_TIMEOUT;

	get_mac_address(dev);

	ether->cur_tx = 0x0;
	ether->cur_rx = 0x0;
	ether->finish_tx = 0x0;
	ether->linkflag = 0x0;
	ether->mii.phy_id = 0x01;
	ether->mii.phy_id_mask = 0x1f;
	ether->mii.reg_num_mask = 0x1f;
	ether->mii.dev = dev;
	ether->mii.mdio_read = w90p910_mdio_read;
	ether->mii.mdio_write = w90p910_mdio_write;

	setup_timer(&ether->check_timer, w90p910_check_link,
		    (unsigned long)dev);

	return 0;
}

static int w90p910_ether_probe(struct platform_device *pdev)
{
	struct w90p910_ether *ether;
	struct net_device *dev;
	int error;

	dev = alloc_etherdev(sizeof(struct w90p910_ether));
	if (!dev)
		return -ENOMEM;

	ether = netdev_priv(dev);

	ether->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (ether->res == NULL) {
		dev_err(&pdev->dev, "failed to get I/O memory\n");
		error = -ENXIO;
		goto failed_free;
	}

	if (!request_mem_region(ether->res->start,
				resource_size(ether->res), pdev->name)) {
		dev_err(&pdev->dev, "failed to request I/O memory\n");
		error = -EBUSY;
		goto failed_free;
	}

	ether->reg = ioremap(ether->res->start, resource_size(ether->res));
	if (ether->reg == NULL) {
		dev_err(&pdev->dev, "failed to remap I/O memory\n");
		error = -ENXIO;
		goto failed_free_mem;
	}

	ether->txirq = platform_get_irq(pdev, 0);
	if (ether->txirq < 0) {
		dev_err(&pdev->dev, "failed to get ether tx irq\n");
		error = -ENXIO;
		goto failed_free_io;
	}

	ether->rxirq = platform_get_irq(pdev, 1);
	if (ether->rxirq < 0) {
		dev_err(&pdev->dev, "failed to get ether rx irq\n");
		error = -ENXIO;
		goto failed_free_io;
	}

	platform_set_drvdata(pdev, dev);

	ether->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(ether->clk)) {
		dev_err(&pdev->dev, "failed to get ether clock\n");
		error = PTR_ERR(ether->clk);
		goto failed_free_io;
	}

	ether->rmiiclk = clk_get(&pdev->dev, "RMII");
	if (IS_ERR(ether->rmiiclk)) {
		dev_err(&pdev->dev, "failed to get ether clock\n");
		error = PTR_ERR(ether->rmiiclk);
		goto failed_put_clk;
	}

	ether->pdev = pdev;

	w90p910_ether_setup(dev);

	error = register_netdev(dev);
	if (error != 0) {
		dev_err(&pdev->dev, "Register EMC w90p910 FAILED\n");
		error = -ENODEV;
		goto failed_put_rmiiclk;
	}

	return 0;
failed_put_rmiiclk:
	clk_put(ether->rmiiclk);
failed_put_clk:
	clk_put(ether->clk);
failed_free_io:
	iounmap(ether->reg);
failed_free_mem:
	release_mem_region(ether->res->start, resource_size(ether->res));
failed_free:
	free_netdev(dev);
	return error;
}

static int w90p910_ether_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct w90p910_ether *ether = netdev_priv(dev);

	unregister_netdev(dev);

	clk_put(ether->rmiiclk);
	clk_put(ether->clk);

	iounmap(ether->reg);
	release_mem_region(ether->res->start, resource_size(ether->res));

	del_timer_sync(&ether->check_timer);

	free_netdev(dev);
	return 0;
}

static struct platform_driver w90p910_ether_driver = {
	.probe		= w90p910_ether_probe,
	.remove		= w90p910_ether_remove,
	.driver		= {
		.name	= "nuc900-emc",
	},
};

module_platform_driver(w90p910_ether_driver);

MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910 MAC driver!");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:nuc900-emc");