[PATCH] New PowerPC 4xx on-chip ethernet controller driver

/*
 * drivers/net/ibm_emac/ibm_emac_core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Matt Porter <mporter@kernel.crashing.org>
 *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 *      Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/ocp.h>

#include "ibm_emac_core.h"
#include "ibm_emac_debug.h"

/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make the code API-correct and efficient
 * at the same time and didn't come up with code I liked :(. --ebs
 */

#define DRV_NAME        "emac"
#define DRV_VERSION     "3.53"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");

/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH   (NUM_TX_BUFF / 4)

/* If packet size is less than this number, we allocate a small skb and copy
 * the packet contents into it instead of sending the original big skb up
 */
#define EMAC_RX_COPY_THRESH     CONFIG_IBM_EMAC_RX_COPY_THRESHOLD

/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * set up precise phy_map entries
 */
static u32 busy_phy_map;

#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && (defined(CONFIG_405EP) || defined(CONFIG_440EP))
/* 405EP has an "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
 * with the PHY RX clock problem.
 * 440EP has a more sane SDR0_MFR register implementation than 440GX, which
 * also allows controlling each EMAC clock
 */
static inline void EMAC_RX_CLK_TX(int idx)
{
        unsigned long flags;
        local_irq_save(flags);

#if defined(CONFIG_405EP)
        mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
#else /* CONFIG_440EP */
        SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
#endif

        local_irq_restore(flags);
}

static inline void EMAC_RX_CLK_DEFAULT(int idx)
{
        unsigned long flags;
        local_irq_save(flags);

#if defined(CONFIG_405EP)
        mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
#else /* CONFIG_440EP */
        SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
#endif

        local_irq_restore(flags);
}
#else
#define EMAC_RX_CLK_TX(idx)             ((void)0)
#define EMAC_RX_CLK_DEFAULT(idx)        ((void)0)
#endif

#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
/* We can switch the Ethernet clock to the internal source through
 * SDR0_MFR[ECS]; unfortunately this is less flexible than the 440EP case,
 * because it's a global setting for all EMACs, therefore we do this clock
 * trick only during probe.
 */
#define EMAC_CLK_INTERNAL       SDR_WRITE(DCRN_SDR_MFR, \
                                          SDR_READ(DCRN_SDR_MFR) | 0x08000000)
#define EMAC_CLK_EXTERNAL       SDR_WRITE(DCRN_SDR_MFR, \
                                          SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
#else
#define EMAC_CLK_INTERNAL       ((void)0)
#define EMAC_CLK_EXTERNAL       ((void)0)
#endif

/* I don't want to litter the system log with timeout errors
 * when we have a brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
                                             const char *error)
{
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
        DBG("%d: %s" NL, dev->def->index, error);
#else
        if (net_ratelimit())
                printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
#endif
}

/* PHY polling intervals */
#define PHY_POLL_LINK_ON        HZ
#define PHY_POLL_LINK_OFF       (HZ / 5)

/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
        "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
        "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
        "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
        "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
        "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
        "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
        "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
        "rx_bad_packet", "rx_runt_packet", "rx_short_event",
        "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
        "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
        "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
        "tx_bd_excessive_collisions", "tx_bd_late_collision",
        "tx_bd_multple_collisions", "tx_bd_single_collision",
        "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
        "tx_errors"
};

static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs);
static void emac_clean_tx_ring(struct ocp_enet_private *dev);

static inline int emac_phy_supports_gige(int phy_mode)
{
        return phy_mode == PHY_MODE_GMII ||
            phy_mode == PHY_MODE_RGMII ||
            phy_mode == PHY_MODE_TBI ||
            phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
        return phy_mode == PHY_MODE_TBI ||
            phy_mode == PHY_MODE_RTBI;
}

static inline void emac_tx_enable(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: tx_enable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (!(r & EMAC_MR0_TXE))
                out_be32(&p->mr0, r | EMAC_MR0_TXE);
        local_irq_restore(flags);
}

static void emac_tx_disable(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: tx_disable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_TXE) {
                int n = 300;
                out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
                while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n)
                        --n;
                if (unlikely(!n))
                        emac_report_timeout_error(dev, "TX disable timeout");
        }
        local_irq_restore(flags);
}

static void emac_rx_enable(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);
        if (unlikely(dev->commac.rx_stopped))
                goto out;

        DBG("%d: rx_enable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (!(r & EMAC_MR0_RXE)) {
                if (unlikely(!(r & EMAC_MR0_RXI))) {
                        /* Wait if previous async disable is still in progress.
                         * Note the parentheses around the assignment: r must
                         * keep the full MR0 value for the RXE write below.
                         */
                        int n = 100;
                        while (!((r = in_be32(&p->mr0)) & EMAC_MR0_RXI) && n)
                                --n;
                        if (unlikely(!n))
                                emac_report_timeout_error(dev,
                                                          "RX disable timeout");
                }
                out_be32(&p->mr0, r | EMAC_MR0_RXE);
        }
out:
        local_irq_restore(flags);
}

static void emac_rx_disable(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: rx_disable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_RXE) {
                int n = 300;
                out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
                while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n)
                        --n;
                if (unlikely(!n))
                        emac_report_timeout_error(dev, "RX disable timeout");
        }
        local_irq_restore(flags);
}

static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: rx_disable_async" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_RXE)
                out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
        local_irq_restore(flags);
}

static int emac_reset(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        unsigned long flags;
        int n = 20;

        DBG("%d: reset" NL, dev->def->index);

        local_irq_save(flags);

        if (!dev->reset_failed) {
                /* A 40x erratum suggests stopping the RX channel before
                 * reset; we stop TX as well
                 */
                emac_rx_disable(dev);
                emac_tx_disable(dev);
        }

        out_be32(&p->mr0, EMAC_MR0_SRST);
        while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
                --n;
        local_irq_restore(flags);

        if (n) {
                dev->reset_failed = 0;
                return 0;
        } else {
                emac_report_timeout_error(dev, "reset timeout");
                dev->reset_failed = 1;
                return -ETIMEDOUT;
        }
}

static void emac_hash_mc(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        u16 gaht[4] = { 0 };
        struct dev_mc_list *dmi;

        DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);

        for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
                int bit;
                DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
                     dev->def->index,
                     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
                     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

                bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
                gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
        }
        out_be32(&p->gaht1, gaht[0]);
        out_be32(&p->gaht2, gaht[1]);
        out_be32(&p->gaht3, gaht[2]);
        out_be32(&p->gaht4, gaht[3]);
}
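
/* Illustration of the hash above (explanatory note, not original text): the
 * top 6 bits of the Ethernet CRC are mirrored into a bit index 0..63 and the
 * matching bit is set in one of the four 16-bit GAHT registers:
 *
 *      bit = 63 - (crc >> 26);
 *      gaht[bit / 16] |= 0x8000 >> (bit % 16);
 *
 * e.g. bit 17 sets 0x4000 in GAHT2. A multicast frame whose CRC hashes to a
 * set bit passes the MAE filter; being a hash, exact filtering of false
 * positives is left to the stack.
 */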

static inline u32 emac_iff2rmr(struct net_device *ndev)
{
        u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
            EMAC_RMR_BASE;

        if (ndev->flags & IFF_PROMISC)
                r |= EMAC_RMR_PME;
        else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
                r |= EMAC_RMR_PMME;
        else if (ndev->mc_count > 0)
                r |= EMAC_RMR_MAE;

        return r;
}
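
/* The resulting RX filter policy, spelled out (descriptive comment only):
 *
 *      IFF_PROMISC                     -> PME  (accept every frame)
 *      IFF_ALLMULTI or >32 mc addrs    -> PMME (accept all multicast)
 *      1..32 mc addrs                  -> MAE  (GAHT hash-filtered multicast)
 *
 * on top of the always-on bits set in r above (individual/broadcast address
 * matching, etc.).
 */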

static inline int emac_opb_mhz(void)
{
        return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
}
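
/* This rounds the OPB frequency to the nearest MHz; e.g. an OPB bus running
 * at 66,666,667 Hz gives (66666667 + 500000) / 1000000 == 67.
 */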

/* BHs disabled */
static int emac_configure(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        struct net_device *ndev = dev->ndev;
        int gige;
        u32 r;

        DBG("%d: configure" NL, dev->def->index);

        if (emac_reset(dev) < 0)
                return -ETIMEDOUT;

        tah_reset(dev->tah_dev);

        /* Mode register */
        r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
        if (dev->phy.duplex == DUPLEX_FULL)
                r |= EMAC_MR1_FDE;
        switch (dev->phy.speed) {
        case SPEED_1000:
                if (emac_phy_gpcs(dev->phy.mode)) {
                        r |= EMAC_MR1_MF_1000GPCS |
                            EMAC_MR1_MF_IPPA(dev->phy.address);

                        /* Put some arbitrary OUI, Manuf & Rev IDs so we can
                         * identify this GPCS PHY later.
                         */
                        out_be32(&p->ipcr, 0xdeadbeef);
                } else
                        r |= EMAC_MR1_MF_1000;
                r |= EMAC_MR1_RFS_16K;
                gige = 1;

                if (dev->ndev->mtu > ETH_DATA_LEN)
                        r |= EMAC_MR1_JPSM;
                break;
        case SPEED_100:
                r |= EMAC_MR1_MF_100;
                /* Fall through */
        default:
                r |= EMAC_MR1_RFS_4K;
                gige = 0;
                break;
        }

        if (dev->rgmii_dev)
                rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
                                dev->phy.speed);
        else
                zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);

#if !defined(CONFIG_40x)
        /* An erratum forces us NOT to use integrated flow control on 40x;
         * let's hope it works on 44x ;)
         */
        if (dev->phy.duplex == DUPLEX_FULL) {
                if (dev->phy.pause)
                        r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
                else if (dev->phy.asym_pause)
                        r |= EMAC_MR1_APP;
        }
#endif
        out_be32(&p->mr1, r);

        /* Set individual MAC address */
        out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
        out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
                 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
                 ndev->dev_addr[5]);

        /* VLAN Tag Protocol ID */
        out_be32(&p->vtpid, 0x8100);

        /* Receive mode register */
        r = emac_iff2rmr(ndev);
        if (r & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, r);

        /* FIFOs thresholds */
        r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
                      EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
        out_be32(&p->tmr1, r);
        out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));

        /* A PAUSE frame is sent when the RX FIFO reaches its high-water
         * mark; there should still be enough space in the FIFO to allow
         * our link partner time to process this frame and also time to
         * send a PAUSE frame itself.
         *
         * Here is the worst case scenario for the RX FIFO "headroom"
         * (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
         *
         * 1) One maximum-length frame on TX                    1522 bytes
         * 2) One PAUSE frame time                                64 bytes
         * 3) PAUSE frame decode time allowance                   64 bytes
         * 4) One maximum-length frame on RX                    1522 bytes
         * 5) Round-trip propagation delay of the link (100Mb)    15 bytes
         *                                                  ----------
         *                                                    3187 bytes
         *
         * I chose to set the high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
         * and the low-water mark to RX_FIFO_SIZE / 8 (512 bytes).
         */
        r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
                      EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
        out_be32(&p->rwmr, r);

        /* Set PAUSE timer to the maximum */
        out_be32(&p->ptr, 0xffff);

        /* IRQ sources */
        out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
                 EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
                 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
                 EMAC_ISR_IRE | EMAC_ISR_TE);

        /* We need to take GPCS PHY out of isolate mode after EMAC reset */
        if (emac_phy_gpcs(dev->phy.mode))
                mii_reset_phy(&dev->phy);

        return 0;
}

/* BHs disabled */
static void emac_reinitialize(struct ocp_enet_private *dev)
{
        DBG("%d: reinitialize" NL, dev->def->index);

        if (!emac_configure(dev)) {
                emac_tx_enable(dev);
                emac_rx_enable(dev);
        }
}

/* BHs disabled */
static void emac_full_tx_reset(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ocp_func_emac_data *emacdata = dev->def->additions;

        DBG("%d: full_tx_reset" NL, dev->def->index);

        emac_tx_disable(dev);
        mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        emac_clean_tx_ring(dev);
        dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

        emac_configure(dev);

        mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        emac_tx_enable(dev);
        emac_rx_enable(dev);

        netif_wake_queue(ndev);
}

static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
{
        struct emac_regs *p = dev->emacp;
        u32 r;
        int n;

        DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);

        /* Enable proper MDIO port */
        zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

        /* Wait for management interface to become idle */
        n = 10;
        while (!(in_be32(&p->stacr) & EMAC_STACR_OC)) {
                udelay(1);
                if (!--n)
                        goto to;
        }

        /* Issue read command */
        out_be32(&p->stacr,
                 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
                 (reg & EMAC_STACR_PRA_MASK)
                 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT));

        /* Wait for read to complete */
        n = 100;
        while (!((r = in_be32(&p->stacr)) & EMAC_STACR_OC)) {
                udelay(1);
                if (!--n)
                        goto to;
        }

        if (unlikely(r & EMAC_STACR_PHYE)) {
                DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
                    id, reg);
                return -EREMOTEIO;
        }

        r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
        DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
        return r;
to:
        DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
        return -ETIMEDOUT;
}
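
/* Sketch of the STA handshake used above and in __emac_mdio_write() (as the
 * code implies, not quoted from the datasheet): EMAC_STACR_OC reads 1 while
 * the management interface is idle, a write of the opcode/PHY/register to
 * STACR starts the cycle, and the driver then polls until the EMAC raises OC
 * again, at which point PHYD holds the read data and PHYE flags a PHY error.
 */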

static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
                              u16 val)
{
        struct emac_regs *p = dev->emacp;
        int n;

        DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
             val);

        /* Enable proper MDIO port */
        zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

        /* Wait for management interface to be idle */
        n = 10;
        while (!(in_be32(&p->stacr) & EMAC_STACR_OC)) {
                udelay(1);
                if (!--n)
                        goto to;
        }

        /* Issue write command */
        out_be32(&p->stacr,
                 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
                 (reg & EMAC_STACR_PRA_MASK) |
                 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
                 (val << EMAC_STACR_PHYD_SHIFT));

        /* Wait for write to complete */
        n = 100;
        while (!(in_be32(&p->stacr) & EMAC_STACR_OC)) {
                udelay(1);
                if (!--n)
                        goto to;
        }
        return;
to:
        DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
}

static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
        struct ocp_enet_private *dev = ndev->priv;
        int res;

        local_bh_disable();
        res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
                               (u8) reg);
        local_bh_enable();
        return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
        struct ocp_enet_private *dev = ndev->priv;

        local_bh_disable();
        __emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
                          (u8) reg, (u16) val);
        local_bh_enable();
}

/* BHs disabled */
static void emac_set_multicast_list(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct emac_regs *p = dev->emacp;
        u32 rmr = emac_iff2rmr(ndev);

        DBG("%d: multicast %08x" NL, dev->def->index, rmr);
        BUG_ON(!netif_running(dev->ndev));

        /* I decided to relax register access rules here to avoid
         * full EMAC reset.
         *
         * There is a real problem with the EMAC4 core if we use the
         * MWSW_001 bit in the MR1 register and do a full EMAC reset.
         * One TX BD status update is delayed and, after EMAC reset, it
         * never happens, resulting in a TX hang (it'll be recovered by the
         * TX timeout handler eventually, but this is just gross).
         * So we either have to do a full TX reset or try to cheat here :)
         *
         * The only required change is to the RX mode register, so I *think*
         * all we need is just to stop the RX channel. This seems to work on
         * all tested SoCs. --ebs
         */
        emac_rx_disable(dev);
        if (rmr & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, rmr);
        emac_rx_enable(dev);
}

/* BHs disabled */
static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
{
        struct ocp_func_emac_data *emacdata = dev->def->additions;
        int rx_sync_size = emac_rx_sync_size(new_mtu);
        int rx_skb_size = emac_rx_skb_size(new_mtu);
        int i, ret = 0;

        emac_rx_disable(dev);
        mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);

        if (dev->rx_sg_skb) {
                ++dev->estats.rx_dropped_resize;
                dev_kfree_skb(dev->rx_sg_skb);
                dev->rx_sg_skb = NULL;
        }

        /* Make a first pass over the RX ring and mark BDs ready, dropping
         * non-processed packets on the way. We need this as a separate pass
         * to simplify error recovery in the case of allocation failure later.
         */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
                if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
                        ++dev->estats.rx_dropped_resize;

                dev->rx_desc[i].data_len = 0;
                dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
                    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
        }

        /* Reallocate RX ring only if bigger skb buffers are required */
        if (rx_skb_size <= dev->rx_skb_size)
                goto skip;

        /* Second pass, allocate new skbs */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
                struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
                if (!skb) {
                        ret = -ENOMEM;
                        goto oom;
                }

                BUG_ON(!dev->rx_skb[i]);
                dev_kfree_skb(dev->rx_skb[i]);

                skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
                dev->rx_desc[i].data_ptr =
                    dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
                                   DMA_FROM_DEVICE) + 2;
                dev->rx_skb[i] = skb;
        }
skip:
        /* Check if we need to change the "Jumbo" bit in MR1 */
        if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
                /* This is to prevent starting RX channel in emac_rx_enable() */
                dev->commac.rx_stopped = 1;

                dev->ndev->mtu = new_mtu;
                emac_full_tx_reset(dev->ndev);
        }

        mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
oom:
        /* Restart RX */
        dev->commac.rx_stopped = dev->rx_slot = 0;
        mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
        emac_rx_enable(dev);

        return ret;
}

/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
        struct ocp_enet_private *dev = ndev->priv;
        int ret = 0;

        if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
                return -EINVAL;

        DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);

        local_bh_disable();
        if (netif_running(ndev)) {
                /* Check if we really need to reinitialize the RX ring */
                if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
                        ret = emac_resize_rx_ring(dev, new_mtu);
        }

        if (!ret) {
                ndev->mtu = new_mtu;
                dev->rx_skb_size = emac_rx_skb_size(new_mtu);
                dev->rx_sync_size = emac_rx_sync_size(new_mtu);
        }
        local_bh_enable();

        return ret;
}

static void emac_clean_tx_ring(struct ocp_enet_private *dev)
{
        int i;
        for (i = 0; i < NUM_TX_BUFF; ++i) {
                if (dev->tx_skb[i]) {
                        dev_kfree_skb(dev->tx_skb[i]);
                        dev->tx_skb[i] = NULL;
                        if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
                                ++dev->estats.tx_dropped;
                }
                dev->tx_desc[i].ctrl = 0;
                dev->tx_desc[i].data_ptr = 0;
        }
}

static void emac_clean_rx_ring(struct ocp_enet_private *dev)
{
        int i;
        for (i = 0; i < NUM_RX_BUFF; ++i)
                if (dev->rx_skb[i]) {
                        dev->rx_desc[i].ctrl = 0;
                        dev_kfree_skb(dev->rx_skb[i]);
                        dev->rx_skb[i] = NULL;
                        dev->rx_desc[i].data_ptr = 0;
                }

        if (dev->rx_sg_skb) {
                dev_kfree_skb(dev->rx_sg_skb);
                dev->rx_sg_skb = NULL;
        }
}

static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
                                    int flags)
{
        struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
        if (unlikely(!skb))
                return -ENOMEM;

        dev->rx_skb[slot] = skb;
        dev->rx_desc[slot].data_len = 0;

        skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
        dev->rx_desc[slot].data_ptr =
            dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size,
                           DMA_FROM_DEVICE) + 2;
        barrier();
        dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
            (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

        return 0;
}
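
/* Why the "- 2 ... + 2" dance above (explanatory note): the buffer is mapped
 * starting 2 bytes before skb->data while the pointer handed to the hardware
 * is skb->data itself, so the 14-byte Ethernet header lands 2 bytes into a
 * word and the IP header behind it ends up 4-byte aligned -- the usual
 * NET_IP_ALIGN trick done by hand. The barrier() keeps the BD from being
 * marked EMPTY (hardware-owned) before data_ptr is visibly written.
 */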

static void emac_print_link_status(struct ocp_enet_private *dev)
{
        if (netif_carrier_ok(dev->ndev))
                printk(KERN_INFO "%s: link is up, %d %s%s\n",
                       dev->ndev->name, dev->phy.speed,
                       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
                       dev->phy.pause ? ", pause enabled" :
                       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
        else
                printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}

/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ocp_func_emac_data *emacdata = dev->def->additions;
        int err, i;

        DBG("%d: open" NL, dev->def->index);

        /* Setup error IRQ handler */
        err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
        if (err) {
                printk(KERN_ERR "%s: failed to request IRQ %d\n",
                       ndev->name, dev->def->irq);
                return err;
        }

        /* Allocate RX ring */
        for (i = 0; i < NUM_RX_BUFF; ++i)
                if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
                        printk(KERN_ERR "%s: failed to allocate RX ring\n",
                               ndev->name);
                        goto oom;
                }

        local_bh_disable();
        dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
            dev->commac.rx_stopped = 0;
        dev->rx_sg_skb = NULL;

        if (dev->phy.address >= 0) {
                int link_poll_interval;
                if (dev->phy.def->ops->poll_link(&dev->phy)) {
                        dev->phy.def->ops->read_link(&dev->phy);
                        EMAC_RX_CLK_DEFAULT(dev->def->index);
                        netif_carrier_on(dev->ndev);
                        link_poll_interval = PHY_POLL_LINK_ON;
                } else {
                        EMAC_RX_CLK_TX(dev->def->index);
                        netif_carrier_off(dev->ndev);
                        link_poll_interval = PHY_POLL_LINK_OFF;
                }
                mod_timer(&dev->link_timer, jiffies + link_poll_interval);
                emac_print_link_status(dev);
        } else
                netif_carrier_on(dev->ndev);

        emac_configure(dev);
        mal_poll_add(dev->mal, &dev->commac);
        mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
        mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
        emac_tx_enable(dev);
        emac_rx_enable(dev);
        netif_start_queue(ndev);
        local_bh_enable();

        return 0;
oom:
        emac_clean_rx_ring(dev);
        free_irq(dev->def->irq, dev);
        return -ENOMEM;
}

/* BHs disabled */
static int emac_link_differs(struct ocp_enet_private *dev)
{
        u32 r = in_be32(&dev->emacp->mr1);

        int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
        int speed, pause, asym_pause;

        if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
                speed = SPEED_1000;
        else if (r & EMAC_MR1_MF_100)
                speed = SPEED_100;
        else
                speed = SPEED_10;

        switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
        case (EMAC_MR1_EIFC | EMAC_MR1_APP):
                pause = 1;
                asym_pause = 0;
                break;
        case EMAC_MR1_APP:
                pause = 0;
                asym_pause = 1;
                break;
        default:
                pause = asym_pause = 0;
        }
        return speed != dev->phy.speed || duplex != dev->phy.duplex ||
            pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}

/* BHs disabled */
static void emac_link_timer(unsigned long data)
{
        struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
        int link_poll_interval;

        DBG2("%d: link timer" NL, dev->def->index);

        if (dev->phy.def->ops->poll_link(&dev->phy)) {
                if (!netif_carrier_ok(dev->ndev)) {
                        EMAC_RX_CLK_DEFAULT(dev->def->index);

                        /* Get new link parameters */
                        dev->phy.def->ops->read_link(&dev->phy);

                        if (dev->tah_dev || emac_link_differs(dev))
                                emac_full_tx_reset(dev->ndev);

                        netif_carrier_on(dev->ndev);
                        emac_print_link_status(dev);
                }
                link_poll_interval = PHY_POLL_LINK_ON;
        } else {
                if (netif_carrier_ok(dev->ndev)) {
                        EMAC_RX_CLK_TX(dev->def->index);
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
                        emac_reinitialize(dev);
#endif
                        netif_carrier_off(dev->ndev);
                        emac_print_link_status(dev);
                }

                /* Retry reset if the previous attempt failed.
                 * This is needed mostly for the CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
                 * case, but I left it here because it shouldn't trigger for
                 * sane PHYs anyway.
                 */
                if (unlikely(dev->reset_failed))
                        emac_reinitialize(dev);

                link_poll_interval = PHY_POLL_LINK_OFF;
        }
        mod_timer(&dev->link_timer, jiffies + link_poll_interval);
}

/* BHs disabled */
static void emac_force_link_update(struct ocp_enet_private *dev)
{
        netif_carrier_off(dev->ndev);
        if (timer_pending(&dev->link_timer))
                mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
}

/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ocp_func_emac_data *emacdata = dev->def->additions;

        DBG("%d: close" NL, dev->def->index);

        local_bh_disable();

        if (dev->phy.address >= 0)
                del_timer_sync(&dev->link_timer);

        netif_stop_queue(ndev);
        emac_rx_disable(dev);
        emac_tx_disable(dev);
        mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
        mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        mal_poll_del(dev->mal, &dev->commac);
        local_bh_enable();

        emac_clean_tx_ring(dev);
        emac_clean_rx_ring(dev);
        free_irq(dev->def->irq, dev);

        return 0;
}

static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
                               struct sk_buff *skb)
{
#if defined(CONFIG_IBM_EMAC_TAH)
        if (skb->ip_summed == CHECKSUM_HW) {
                ++dev->stats.tx_packets_csum;
                return EMAC_TX_CTRL_TAH_CSUM;
        }
#endif
        return 0;
}

static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
{
        struct emac_regs *p = dev->emacp;
        struct net_device *ndev = dev->ndev;

        /* Send the packet out */
        out_be32(&p->tmr0, EMAC_TMR0_XMIT);

        if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
                netif_stop_queue(ndev);
                DBG2("%d: stopped TX queue" NL, dev->def->index);
        }

        ndev->trans_start = jiffies;
        ++dev->stats.tx_packets;
        dev->stats.tx_bytes += len;

        return 0;
}
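
/* Flow-control note for the queue logic above and in emac_poll_tx() below:
 * the queue is stopped as soon as the ring is completely full and is only
 * woken again once completions have drained it below EMAC_TX_WAKEUP_THRESH
 * (NUM_TX_BUFF / 4), giving some hysteresis so we don't bounce the queue on
 * every reclaimed descriptor.
 */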

/* BHs disabled */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        unsigned int len = skb->len;
        int slot;

        u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
            MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

        slot = dev->tx_slot++;
        if (dev->tx_slot == NUM_TX_BUFF) {
                dev->tx_slot = 0;
                ctrl |= MAL_TX_CTRL_WRAP;
        }

        DBG2("%d: xmit(%u) %d" NL, dev->def->index, len, slot);

        dev->tx_skb[slot] = skb;
        dev->tx_desc[slot].data_ptr = dma_map_single(dev->ldev, skb->data, len,
                                                     DMA_TO_DEVICE);
        dev->tx_desc[slot].data_len = (u16) len;
        barrier();
        dev->tx_desc[slot].ctrl = ctrl;

        return emac_xmit_finish(dev, len);
}

#if defined(CONFIG_IBM_EMAC_TAH)
static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
                                  u32 pd, int len, int last, u16 base_ctrl)
{
        while (1) {
                u16 ctrl = base_ctrl;
                int chunk = min(len, MAL_MAX_TX_SIZE);
                len -= chunk;

                slot = (slot + 1) % NUM_TX_BUFF;

                if (last && !len)
                        ctrl |= MAL_TX_CTRL_LAST;
                if (slot == NUM_TX_BUFF - 1)
                        ctrl |= MAL_TX_CTRL_WRAP;

                dev->tx_skb[slot] = NULL;
                dev->tx_desc[slot].data_ptr = pd;
                dev->tx_desc[slot].data_len = (u16) chunk;
                dev->tx_desc[slot].ctrl = ctrl;
                ++dev->tx_cnt;

                if (!len)
                        break;

                pd += chunk;
        }
        return slot;
}
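
/* Worked example for emac_xmit_split() (illustrative only; the real value of
 * MAL_MAX_TX_SIZE comes from the MAL header): with a 4096-byte limit, a
 * 5000-byte contiguous region would be queued as two descriptors of 4096 and
 * 904 bytes, with MAL_TX_CTRL_LAST set only on the final chunk of the final
 * fragment.
 */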

/* BHs disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        int nr_frags = skb_shinfo(skb)->nr_frags;
        int len = skb->len, chunk;
        int slot, i;
        u16 ctrl;
        u32 pd;

        /* This is common "fast" path */
        if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
                return emac_start_xmit(skb, ndev);

        len -= skb->data_len;

        /* Note, this is only an *estimate*; we can still run out of empty
         * slots because of the additional fragmentation into
         * MAL_MAX_TX_SIZE-sized chunks
         */
        if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
                goto stop_queue;

        ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
            emac_tx_csum(dev, skb);
        slot = dev->tx_slot;

        /* skb data */
        dev->tx_skb[slot] = NULL;
        chunk = min(len, MAL_MAX_TX_SIZE);
        dev->tx_desc[slot].data_ptr = pd =
            dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);
        dev->tx_desc[slot].data_len = (u16) chunk;
        len -= chunk;
        if (unlikely(len))
                slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
                                       ctrl);
        /* skb fragments */
        for (i = 0; i < nr_frags; ++i) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                len = frag->size;

                if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
                        goto undo_frame;

                pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,
                                  DMA_TO_DEVICE);

                slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
                                       ctrl);
        }

        DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,
             dev->tx_slot, slot);

        /* Attach skb to the last slot so we don't release it too early */
        dev->tx_skb[slot] = skb;

        /* Send the packet out */
        if (dev->tx_slot == NUM_TX_BUFF - 1)
                ctrl |= MAL_TX_CTRL_WRAP;
        barrier();
        dev->tx_desc[dev->tx_slot].ctrl = ctrl;
        dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

        return emac_xmit_finish(dev, skb->len);

undo_frame:
        /* Well, too bad. Our previous estimate was overly optimistic.
         * Undo everything.
         */
        while (slot != dev->tx_slot) {
                dev->tx_desc[slot].ctrl = 0;
                --dev->tx_cnt;
                if (--slot < 0)
                        slot = NUM_TX_BUFF - 1;
        }
        ++dev->estats.tx_undo;

stop_queue:
        netif_stop_queue(ndev);
        DBG2("%d: stopped TX queue" NL, dev->def->index);
        return 1;
}
#else
# define emac_start_xmit_sg     emac_start_xmit
#endif  /* !defined(CONFIG_IBM_EMAC_TAH) */

/* BHs disabled */
static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
{
        struct ibm_emac_error_stats *st = &dev->estats;
        DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);

        ++st->tx_bd_errors;
        if (ctrl & EMAC_TX_ST_BFCS)
                ++st->tx_bd_bad_fcs;
        if (ctrl & EMAC_TX_ST_LCS)
                ++st->tx_bd_carrier_loss;
        if (ctrl & EMAC_TX_ST_ED)
                ++st->tx_bd_excessive_deferral;
        if (ctrl & EMAC_TX_ST_EC)
                ++st->tx_bd_excessive_collisions;
        if (ctrl & EMAC_TX_ST_LC)
                ++st->tx_bd_late_collision;
        if (ctrl & EMAC_TX_ST_MC)
                ++st->tx_bd_multple_collisions;
        if (ctrl & EMAC_TX_ST_SC)
                ++st->tx_bd_single_collision;
        if (ctrl & EMAC_TX_ST_UR)
                ++st->tx_bd_underrun;
        if (ctrl & EMAC_TX_ST_SQE)
                ++st->tx_bd_sqe;
}

static void emac_poll_tx(void *param)
{
        struct ocp_enet_private *dev = param;
        DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
             dev->ack_slot);

        if (dev->tx_cnt) {
                u16 ctrl;
                int slot = dev->ack_slot, n = 0;
again:
                ctrl = dev->tx_desc[slot].ctrl;
                if (!(ctrl & MAL_TX_CTRL_READY)) {
                        struct sk_buff *skb = dev->tx_skb[slot];
                        ++n;

                        if (skb) {
                                dev_kfree_skb(skb);
                                dev->tx_skb[slot] = NULL;
                        }
                        slot = (slot + 1) % NUM_TX_BUFF;

                        if (unlikely(EMAC_IS_BAD_TX(ctrl)))
                                emac_parse_tx_error(dev, ctrl);

                        if (--dev->tx_cnt)
                                goto again;
                }
                if (n) {
                        dev->ack_slot = slot;
                        if (netif_queue_stopped(dev->ndev) &&
                            dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
                                netif_wake_queue(dev->ndev);

                        DBG2("%d: tx %d pkts" NL, dev->def->index, n);
                }
        }
}

static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
                                       int len)
{
        struct sk_buff *skb = dev->rx_skb[slot];
        DBG2("%d: recycle %d %d" NL, dev->def->index, slot, len);

        if (len)
                dma_map_single(dev->ldev, skb->data - 2,
                               EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

        dev->rx_desc[slot].data_len = 0;
        barrier();
        dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
            (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
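
/* Descriptor ownership protocol used throughout the RX/TX paths (note for
 * readers): a BD with MAL_RX_CTRL_EMPTY (or MAL_TX_CTRL_READY) set belongs
 * to the MAL hardware. Everything else in the descriptor is written first,
 * then barrier(), then the ctrl word, so the hardware never sees a
 * half-initialized descriptor.
 */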

static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
{
        struct ibm_emac_error_stats *st = &dev->estats;
        DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);

        ++st->rx_bd_errors;
        if (ctrl & EMAC_RX_ST_OE)
                ++st->rx_bd_overrun;
        if (ctrl & EMAC_RX_ST_BP)
                ++st->rx_bd_bad_packet;
        if (ctrl & EMAC_RX_ST_RP)
                ++st->rx_bd_runt_packet;
        if (ctrl & EMAC_RX_ST_SE)
                ++st->rx_bd_short_event;
        if (ctrl & EMAC_RX_ST_AE)
                ++st->rx_bd_alignment_error;
        if (ctrl & EMAC_RX_ST_BFCS)
                ++st->rx_bd_bad_fcs;
        if (ctrl & EMAC_RX_ST_PTL)
                ++st->rx_bd_packet_too_long;
        if (ctrl & EMAC_RX_ST_ORE)
                ++st->rx_bd_out_of_range;
        if (ctrl & EMAC_RX_ST_IRE)
                ++st->rx_bd_in_range;
}

static inline void emac_rx_csum(struct ocp_enet_private *dev,
                                struct sk_buff *skb, u16 ctrl)
{
#if defined(CONFIG_IBM_EMAC_TAH)
        if (!ctrl && dev->tah_dev) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                ++dev->stats.rx_packets_csum;
        }
#endif
}

static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
{
        if (likely(dev->rx_sg_skb != NULL)) {
                int len = dev->rx_desc[slot].data_len;
                int tot_len = dev->rx_sg_skb->len + len;

                if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
                        ++dev->estats.rx_dropped_mtu;
                        dev_kfree_skb(dev->rx_sg_skb);
                        dev->rx_sg_skb = NULL;
                } else {
                        cacheable_memcpy(dev->rx_sg_skb->tail,
                                         dev->rx_skb[slot]->data, len);
                        skb_put(dev->rx_sg_skb, len);
                        emac_recycle_rx_skb(dev, slot, len);
                        return 0;
                }
        }
        emac_recycle_rx_skb(dev, slot, 0);
        return -1;
}

/* BHs disabled */
static int emac_poll_rx(void *param, int budget)
{
        struct ocp_enet_private *dev = param;
        int slot = dev->rx_slot, received = 0;

        DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);

again:
        while (budget > 0) {
                int len;
                struct sk_buff *skb;
                u16 ctrl = dev->rx_desc[slot].ctrl;

                if (ctrl & MAL_RX_CTRL_EMPTY)
                        break;

                skb = dev->rx_skb[slot];
                barrier();
                len = dev->rx_desc[slot].data_len;

                if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
                        goto sg;

                ctrl &= EMAC_BAD_RX_MASK;
                if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
                        emac_parse_rx_error(dev, ctrl);
                        ++dev->estats.rx_dropped_error;
                        emac_recycle_rx_skb(dev, slot, 0);
                        len = 0;
                        goto next;
                }

                if (len && len < EMAC_RX_COPY_THRESH) {
                        struct sk_buff *copy_skb =
                            alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2,
                                      GFP_ATOMIC);
                        if (unlikely(!copy_skb))
                                goto oom;

                        skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
                        cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
                                         len + 2);
                        emac_recycle_rx_skb(dev, slot, len);
                        skb = copy_skb;
                } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
                        goto oom;

                skb_put(skb, len);
push_packet:
                skb->dev = dev->ndev;
                skb->protocol = eth_type_trans(skb, dev->ndev);
                emac_rx_csum(dev, skb, ctrl);

                if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
                        ++dev->estats.rx_dropped_stack;
next:
                ++dev->stats.rx_packets;
skip:
                dev->stats.rx_bytes += len;
                slot = (slot + 1) % NUM_RX_BUFF;
                --budget;
                ++received;
                continue;
sg:
                if (ctrl & MAL_RX_CTRL_FIRST) {
                        BUG_ON(dev->rx_sg_skb);
                        if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
                                DBG("%d: rx OOM %d" NL, dev->def->index, slot);
                                ++dev->estats.rx_dropped_oom;
                                emac_recycle_rx_skb(dev, slot, 0);
                        } else {
                                dev->rx_sg_skb = skb;
                                skb_put(skb, len);
                        }
                } else if (!emac_rx_sg_append(dev, slot) &&
                           (ctrl & MAL_RX_CTRL_LAST)) {

                        skb = dev->rx_sg_skb;
                        dev->rx_sg_skb = NULL;

                        ctrl &= EMAC_BAD_RX_MASK;
                        if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
                                emac_parse_rx_error(dev, ctrl);
                                ++dev->estats.rx_dropped_error;
                                dev_kfree_skb(skb);
                                len = 0;
                        } else
                                goto push_packet;
                }
                goto skip;
oom:
                DBG("%d: rx OOM %d" NL, dev->def->index, slot);
                /* Drop the packet and recycle skb */
                ++dev->estats.rx_dropped_oom;
                emac_recycle_rx_skb(dev, slot, 0);
                goto next;
        }

        if (received) {
                DBG2("%d: rx %d BDs" NL, dev->def->index, received);
                dev->rx_slot = slot;
        }

        if (unlikely(budget && dev->commac.rx_stopped)) {
                struct ocp_func_emac_data *emacdata = dev->def->additions;

                barrier();
                if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
                        DBG2("%d: rx restart" NL, dev->def->index);
                        received = 0;
                        goto again;
                }

                if (dev->rx_sg_skb) {
                        DBG2("%d: dropping partial rx packet" NL,
                             dev->def->index);
                        ++dev->estats.rx_dropped_error;
                        dev_kfree_skb(dev->rx_sg_skb);
                        dev->rx_sg_skb = NULL;
                }

                dev->commac.rx_stopped = 0;
                mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
                emac_rx_enable(dev);
                dev->rx_slot = 0;
        }
        return received;
}

/* BHs disabled */
static int emac_peek_rx(void *param)
{
        struct ocp_enet_private *dev = param;
        return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}

/* BHs disabled */
static int emac_peek_rx_sg(void *param)
{
        struct ocp_enet_private *dev = param;
        int slot = dev->rx_slot;
        while (1) {
                u16 ctrl = dev->rx_desc[slot].ctrl;
                if (ctrl & MAL_RX_CTRL_EMPTY)
                        return 0;
                else if (ctrl & MAL_RX_CTRL_LAST)
                        return 1;

                slot = (slot + 1) % NUM_RX_BUFF;

                /* I'm just being paranoid here :) */
                if (unlikely(slot == dev->rx_slot))
                        return 0;
        }
}

/* Hard IRQ */
static void emac_rxde(void *param)
{
        struct ocp_enet_private *dev = param;
        ++dev->estats.rx_stopped;
        emac_rx_disable_async(dev);
}

/* Hard IRQ */
static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs)
{
        struct ocp_enet_private *dev = dev_instance;
        struct emac_regs *p = dev->emacp;
        struct ibm_emac_error_stats *st = &dev->estats;

        u32 isr = in_be32(&p->isr);
        out_be32(&p->isr, isr);

        DBG("%d: isr = %08x" NL, dev->def->index, isr);

        if (isr & EMAC_ISR_TXPE)
                ++st->tx_parity;
        if (isr & EMAC_ISR_RXPE)
                ++st->rx_parity;
        if (isr & EMAC_ISR_TXUE)
                ++st->tx_underrun;
        if (isr & EMAC_ISR_RXOE)
                ++st->rx_fifo_overrun;
        if (isr & EMAC_ISR_OVR)
                ++st->rx_overrun;
        if (isr & EMAC_ISR_BP)
                ++st->rx_bad_packet;
        if (isr & EMAC_ISR_RP)
                ++st->rx_runt_packet;
        if (isr & EMAC_ISR_SE)
                ++st->rx_short_event;
        if (isr & EMAC_ISR_ALE)
                ++st->rx_alignment_error;
        if (isr & EMAC_ISR_BFCS)
                ++st->rx_bad_fcs;
        if (isr & EMAC_ISR_PTLE)
                ++st->rx_packet_too_long;
        if (isr & EMAC_ISR_ORE)
                ++st->rx_out_of_range;
        if (isr & EMAC_ISR_IRE)
                ++st->rx_in_range;
        if (isr & EMAC_ISR_SQE)
                ++st->tx_sqe;
        if (isr & EMAC_ISR_TE)
                ++st->tx_errors;

        return IRQ_HANDLED;
}

static struct net_device_stats *emac_stats(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ibm_emac_stats *st = &dev->stats;
        struct ibm_emac_error_stats *est = &dev->estats;
        struct net_device_stats *nst = &dev->nstats;

        DBG2("%d: stats" NL, dev->def->index);

        /* Compute "legacy" statistics */
        local_irq_disable();
        nst->rx_packets = (unsigned long)st->rx_packets;
        nst->rx_bytes = (unsigned long)st->rx_bytes;
        nst->tx_packets = (unsigned long)st->tx_packets;
        nst->tx_bytes = (unsigned long)st->tx_bytes;
        nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
                                          est->rx_dropped_error +
                                          est->rx_dropped_resize +
                                          est->rx_dropped_mtu);
        nst->tx_dropped = (unsigned long)est->tx_dropped;

        nst->rx_errors = (unsigned long)est->rx_bd_errors;
        nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
                                              est->rx_fifo_overrun +
                                              est->rx_overrun);
        nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
                                               est->rx_alignment_error);
        nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
                                             est->rx_bad_fcs);
        nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
                                                est->rx_bd_short_event +
                                                est->rx_bd_packet_too_long +
                                                est->rx_bd_out_of_range +
                                                est->rx_bd_in_range +
                                                est->rx_runt_packet +
                                                est->rx_short_event +
                                                est->rx_packet_too_long +
                                                est->rx_out_of_range +
                                                est->rx_in_range);

        nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
        nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
                                              est->tx_underrun);
        nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
        nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
                                          est->tx_bd_excessive_collisions +
                                          est->tx_bd_late_collision +
                                          est->tx_bd_multple_collisions);
        local_irq_enable();
        return nst;
}

static void emac_remove(struct ocp_device *ocpdev)
{
        struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);

        DBG("%d: remove" NL, dev->def->index);

        ocp_set_drvdata(ocpdev, NULL);
        unregister_netdev(dev->ndev);

        tah_fini(dev->tah_dev);
        rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
        zmii_fini(dev->zmii_dev, dev->zmii_input);

        emac_dbg_register(dev->def->index, 0);

        mal_unregister_commac(dev->mal, &dev->commac);
        iounmap((void *)dev->emacp);
        kfree(dev->ndev);
}

static struct mal_commac_ops emac_commac_ops = {
        .poll_tx = &emac_poll_tx,
        .poll_rx = &emac_poll_rx,
        .peek_rx = &emac_peek_rx,
        .rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
        .poll_tx = &emac_poll_tx,
        .poll_rx = &emac_poll_rx,
        .peek_rx = &emac_peek_rx_sg,
        .rxde = &emac_rxde,
};
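
/* The only difference between the two ops tables is peek_rx: TAH-equipped
 * EMACs can receive one frame split across several BDs, so the SG variant
 * must look ahead for a BD with MAL_RX_CTRL_LAST before declaring that a
 * complete frame is ready, while the plain variant only needs a non-empty
 * slot.
 */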

/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
                                     struct ethtool_cmd *cmd)
{
        struct ocp_enet_private *dev = ndev->priv;

        cmd->supported = dev->phy.features;
        cmd->port = PORT_MII;
        cmd->phy_address = dev->phy.address;
        cmd->transceiver =
            dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

        local_bh_disable();
        cmd->advertising = dev->phy.advertising;
        cmd->autoneg = dev->phy.autoneg;
        cmd->speed = dev->phy.speed;
        cmd->duplex = dev->phy.duplex;
        local_bh_enable();

        return 0;
}

static int emac_ethtool_set_settings(struct net_device *ndev,
                                     struct ethtool_cmd *cmd)
{
        struct ocp_enet_private *dev = ndev->priv;
        u32 f = dev->phy.features;

        DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
            cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

        /* Basic sanity checks */
        if (dev->phy.address < 0)
                return -EOPNOTSUPP;
        if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
                return -EINVAL;
        if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
                return -EINVAL;
        if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
                return -EINVAL;

        if (cmd->autoneg == AUTONEG_DISABLE) {
                switch (cmd->speed) {
                case SPEED_10:
                        if (cmd->duplex == DUPLEX_HALF
                            && !(f & SUPPORTED_10baseT_Half))
                                return -EINVAL;
                        if (cmd->duplex == DUPLEX_FULL
                            && !(f & SUPPORTED_10baseT_Full))
                                return -EINVAL;
                        break;
                case SPEED_100:
                        if (cmd->duplex == DUPLEX_HALF
                            && !(f & SUPPORTED_100baseT_Half))
                                return -EINVAL;
                        if (cmd->duplex == DUPLEX_FULL
                            && !(f & SUPPORTED_100baseT_Full))
                                return -EINVAL;
                        break;
                case SPEED_1000:
                        if (cmd->duplex == DUPLEX_HALF
                            && !(f & SUPPORTED_1000baseT_Half))
                                return -EINVAL;
                        if (cmd->duplex == DUPLEX_FULL
                            && !(f & SUPPORTED_1000baseT_Full))
                                return -EINVAL;
                        break;
                default:
                        return -EINVAL;
                }

                local_bh_disable();
                dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
                                                cmd->duplex);

        } else {
                if (!(f & SUPPORTED_Autoneg))
                        return -EINVAL;

                local_bh_disable();
                dev->phy.def->ops->setup_aneg(&dev->phy,
                                              (cmd->advertising & f) |
                                              (dev->phy.advertising &
                                               (ADVERTISED_Pause |
                                                ADVERTISED_Asym_Pause)));
        }
        emac_force_link_update(dev);
        local_bh_enable();

        return 0;
}

static void emac_ethtool_get_ringparam(struct net_device *ndev,
                                       struct ethtool_ringparam *rp)
{
        rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
        rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}

static void emac_ethtool_get_pauseparam(struct net_device *ndev,
                                        struct ethtool_pauseparam *pp)
{
        struct ocp_enet_private *dev = ndev->priv;

        local_bh_disable();
        if ((dev->phy.features & SUPPORTED_Autoneg) &&
            (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
                pp->autoneg = 1;

        if (dev->phy.duplex == DUPLEX_FULL) {
                if (dev->phy.pause)
                        pp->rx_pause = pp->tx_pause = 1;
                else if (dev->phy.asym_pause)
                        pp->tx_pause = 1;
        }
        local_bh_enable();
}

static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        return dev->tah_dev != 0;
}

static int emac_get_regs_len(struct ocp_enet_private *dev)
{
        return sizeof(struct emac_ethtool_regs_subhdr) + EMAC_ETHTOOL_REGS_SIZE;
}

static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        return sizeof(struct emac_ethtool_regs_hdr) +
            emac_get_regs_len(dev) + mal_get_regs_len(dev->mal) +
            zmii_get_regs_len(dev->zmii_dev) +
            rgmii_get_regs_len(dev->rgmii_dev) +
            tah_get_regs_len(dev->tah_dev);
}

static void *emac_dump_regs(struct ocp_enet_private *dev, void *buf)
{
        struct emac_ethtool_regs_subhdr *hdr = buf;

        hdr->version = EMAC_ETHTOOL_REGS_VER;
        hdr->index = dev->def->index;
        memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
        return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
}

static void emac_ethtool_get_regs(struct net_device *ndev,
                                  struct ethtool_regs *regs, void *buf)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct emac_ethtool_regs_hdr *hdr = buf;

        hdr->components = 0;
        buf = hdr + 1;

        local_irq_disable();
        buf = mal_dump_regs(dev->mal, buf);
        buf = emac_dump_regs(dev, buf);
        if (dev->zmii_dev) {
                hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
                buf = zmii_dump_regs(dev->zmii_dev, buf);
        }
        if (dev->rgmii_dev) {
                hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
                buf = rgmii_dump_regs(dev->rgmii_dev, buf);
        }
        if (dev->tah_dev) {
                hdr->components |= EMAC_ETHTOOL_REGS_TAH;
                buf = tah_dump_regs(dev->tah_dev, buf);
        }
        local_irq_enable();
}

static int emac_ethtool_nway_reset(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        int res = 0;

        DBG("%d: nway_reset" NL, dev->def->index);

        if (dev->phy.address < 0)
                return -EOPNOTSUPP;

        local_bh_disable();
        if (!dev->phy.autoneg) {
                res = -EINVAL;
                goto out;
        }

        dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
        emac_force_link_update(dev);

out:
        local_bh_enable();
        return res;
}

static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
        return EMAC_ETHTOOL_STATS_COUNT;
}

static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
                                     u8 * buf)
{
        if (stringset == ETH_SS_STATS)
                memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
}

static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
                                           struct ethtool_stats *estats,
                                           u64 * tmp_stats)
{
        struct ocp_enet_private *dev = ndev->priv;
        local_irq_disable();
        memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
        tmp_stats += sizeof(dev->stats) / sizeof(u64);
        memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
        local_irq_enable();
}

static void emac_ethtool_get_drvinfo(struct net_device *ndev,
                                     struct ethtool_drvinfo *info)
{
        struct ocp_enet_private *dev = ndev->priv;

        strcpy(info->driver, "ibm_emac");
        strcpy(info->version, DRV_VERSION);
        info->fw_version[0] = '\0';
        sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index);
        info->n_stats = emac_ethtool_get_stats_count(ndev);
        info->regdump_len = emac_ethtool_get_regs_len(ndev);
}

static struct ethtool_ops emac_ethtool_ops = {
        .get_settings = emac_ethtool_get_settings,
        .set_settings = emac_ethtool_set_settings,
        .get_drvinfo = emac_ethtool_get_drvinfo,

        .get_regs_len = emac_ethtool_get_regs_len,
        .get_regs = emac_ethtool_get_regs,

        .nway_reset = emac_ethtool_nway_reset,

        .get_ringparam = emac_ethtool_get_ringparam,
        .get_pauseparam = emac_ethtool_get_pauseparam,

        .get_rx_csum = emac_ethtool_get_rx_csum,

        .get_strings = emac_ethtool_get_strings,
        .get_stats_count = emac_ethtool_get_stats_count,
        .get_ethtool_stats = emac_ethtool_get_ethtool_stats,

        .get_link = ethtool_op_get_link,
        .get_tx_csum = ethtool_op_get_tx_csum,
        .get_sg = ethtool_op_get_sg,
};

static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
        struct ocp_enet_private *dev = ndev->priv;
        uint16_t *data = (uint16_t *)&rq->ifr_ifru;

        DBG("%d: ioctl %08x" NL, dev->def->index, cmd);

        if (dev->phy.address < 0)
                return -EOPNOTSUPP;

        switch (cmd) {
        case SIOCGMIIPHY:
        case SIOCDEVPRIVATE:
                data[0] = dev->phy.address;
                /* Fall through */
        case SIOCGMIIREG:
        case SIOCDEVPRIVATE + 1:
                data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
                return 0;

        case SIOCSMIIREG:
        case SIOCDEVPRIVATE + 2:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}
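
/* Layout note for the ioctl above: the u16 array overlays the standard MII
 * ioctl data (cf. struct mii_ioctl_data) -- data[0] is the PHY id, data[1]
 * the register number, data[2] the value to write, data[3] the value read
 * back. The SIOCDEVPRIVATE variants exist for old mii-tool binaries that
 * predate the SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG numbers.
 */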

static int __init emac_probe(struct ocp_device *ocpdev)
{
	struct ocp_func_emac_data *emacdata = ocpdev->def->additions;
	struct net_device *ndev;
	struct ocp_device *maldev;
	struct ocp_enet_private *dev;
	int err, i;

	DBG("%d: probe" NL, ocpdev->def->index);

	if (!emacdata) {
		printk(KERN_ERR "emac%d: Missing additional data!\n",
		       ocpdev->def->index);
		return -ENODEV;
	}

	/* Allocate our net_device structure */
	ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
	if (!ndev) {
		printk(KERN_ERR "emac%d: could not allocate ethernet device!\n",
		       ocpdev->def->index);
		return -ENOMEM;
	}
	dev = ndev->priv;
	dev->ndev = ndev;
	dev->ldev = &ocpdev->dev;
	dev->def = ocpdev->def;
	SET_MODULE_OWNER(ndev);

	/* Find MAL device we are connected to */
	maldev = ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_MAL,
				 emacdata->mal_idx);
	if (!maldev) {
		printk(KERN_ERR "emac%d: unknown mal%d device!\n",
		       dev->def->index, emacdata->mal_idx);
		err = -ENODEV;
		goto out;
	}
	dev->mal = ocp_get_drvdata(maldev);
	if (!dev->mal) {
		printk(KERN_ERR "emac%d: mal%d hasn't been initialized yet!\n",
		       dev->def->index, emacdata->mal_idx);
		err = -ENODEV;
		goto out;
	}

	/* Register with MAL */
	dev->commac.ops = &emac_commac_ops;
	dev->commac.dev = dev;
	dev->commac.tx_chan_mask = MAL_CHAN_MASK(emacdata->mal_tx_chan);
	dev->commac.rx_chan_mask = MAL_CHAN_MASK(emacdata->mal_rx_chan);
	err = mal_register_commac(dev->mal, &dev->commac);
	if (err) {
		printk(KERN_ERR "emac%d: failed to register with mal%d!\n",
		       dev->def->index, emacdata->mal_idx);
		goto out;
	}
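	/* Pre-compute RX skb allocation and cache-sync sizes for the
	 * current MTU */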
	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);

	/* Get pointers to BD rings */
	dev->tx_desc = dev->mal->bd_virt +
	    mal_tx_bd_offset(dev->mal, emacdata->mal_tx_chan);
	dev->rx_desc = dev->mal->bd_virt +
	    mal_rx_bd_offset(dev->mal, emacdata->mal_rx_chan);

	DBG("%d: tx_desc %p" NL, ocpdev->def->index, dev->tx_desc);
	DBG("%d: rx_desc %p" NL, ocpdev->def->index, dev->rx_desc);

	/* Clean rings */
	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));

	/* If we depend on another EMAC for MDIO, check whether it was
	 * probed already */
	if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
		struct ocp_device *mdiodev =
		    ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC,
				    emacdata->mdio_idx);
		if (!mdiodev) {
			printk(KERN_ERR "emac%d: unknown emac%d device!\n",
			       dev->def->index, emacdata->mdio_idx);
			err = -ENODEV;
			goto out2;
		}
		dev->mdio_dev = ocp_get_drvdata(mdiodev);
		if (!dev->mdio_dev) {
			printk(KERN_ERR
			       "emac%d: emac%d hasn't been initialized yet!\n",
			       dev->def->index, emacdata->mdio_idx);
			err = -ENODEV;
			goto out2;
		}
	}

	/* Attach to ZMII, if needed */
	if ((err = zmii_attach(dev)) != 0)
		goto out2;

	/* Attach to RGMII, if needed */
	if ((err = rgmii_attach(dev)) != 0)
		goto out3;

	/* Attach to TAH, if needed */
	if ((err = tah_attach(dev)) != 0)
		goto out4;

	/* Map EMAC regs */
	dev->emacp = (struct emac_regs *)ioremap(dev->def->paddr,
						 sizeof(struct emac_regs));
	if (!dev->emacp) {
		printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
		       dev->def->index);
		err = -ENOMEM;
		goto out5;
	}

	/* Fill in MAC address */
	for (i = 0; i < 6; ++i)
		ndev->dev_addr[i] = emacdata->mac_addr[i];

	/* Set some link defaults before we can find out real parameters */
	dev->phy.speed = SPEED_100;
	dev->phy.duplex = DUPLEX_FULL;
	dev->phy.autoneg = AUTONEG_DISABLE;
	dev->phy.pause = dev->phy.asym_pause = 0;
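	/* Timer used to periodically poll PHY link state
	 * (see emac_link_timer()) */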
	init_timer(&dev->link_timer);
	dev->link_timer.function = emac_link_timer;
	dev->link_timer.data = (unsigned long)dev;

	/* Find PHY if any */
	dev->phy.dev = ndev;
	dev->phy.mode = emacdata->phy_mode;
	if (emacdata->phy_map != 0xffffffff) {
		u32 phy_map = emacdata->phy_map | busy_phy_map;
		u32 adv;

		DBG("%d: PHY maps %08x %08x" NL, dev->def->index,
		    emacdata->phy_map, busy_phy_map);

		EMAC_RX_CLK_TX(dev->def->index);

		dev->phy.mdio_read = emac_mdio_read;
		dev->phy.mdio_write = emac_mdio_write;

		/* Configure EMAC with defaults so we can at least use MDIO
		 * This is needed mostly for 440GX
		 */
		if (emac_phy_gpcs(dev->phy.mode)) {
			/* XXX
			 * Make GPCS PHY address equal to EMAC index.
			 * We probably should take into account busy_phy_map
			 * and/or phy_map here.
			 */
			dev->phy.address = dev->def->index;
		}

		emac_configure(dev);

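		/* Scan the 32 MDIO addresses whose phy_map bit is clear
		 * (i.e. not masked out by the platform and not already
		 * claimed); claim each probed address in busy_phy_map so
		 * other EMACs sharing these MDIO lines skip it.
		 */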
		for (i = 0; i < 0x20; phy_map >>= 1, ++i)
			if (!(phy_map & 1)) {
				int r;
				busy_phy_map |= 1 << i;

				/* Quick check if there is a PHY at the address */
				r = emac_mdio_read(dev->ndev, i, MII_BMCR);
				if (r == 0xffff || r < 0)
					continue;
				if (!mii_phy_probe(&dev->phy, i))
					break;
			}
		if (i == 0x20) {
			printk(KERN_WARNING "emac%d: can't find PHY!\n",
			       dev->def->index);
			err = -ENXIO;
			goto out6;
		}

		/* Init PHY */
		if (dev->phy.def->ops->init)
			dev->phy.def->ops->init(&dev->phy);

		/* Disable any PHY features not supported by the platform */
		dev->phy.def->features &= ~emacdata->phy_feat_exc;

		/* Setup initial link parameters */
		if (dev->phy.features & SUPPORTED_Autoneg) {
			adv = dev->phy.features;
#if !defined(CONFIG_40x)
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
#endif
			/* Restart autonegotiation */
			dev->phy.def->ops->setup_aneg(&dev->phy, adv);
		} else {
			u32 f = dev->phy.def->features;
			int speed = SPEED_10, fd = DUPLEX_HALF;

			/* Select highest supported speed/duplex */
			if (f & SUPPORTED_1000baseT_Full) {
				speed = SPEED_1000;
				fd = DUPLEX_FULL;
			} else if (f & SUPPORTED_1000baseT_Half) {
				speed = SPEED_1000;
			} else if (f & SUPPORTED_100baseT_Full) {
				speed = SPEED_100;
				fd = DUPLEX_FULL;
			} else if (f & SUPPORTED_100baseT_Half) {
				speed = SPEED_100;
			} else if (f & SUPPORTED_10baseT_Full) {
				fd = DUPLEX_FULL;
			}

			/* Force link parameters */
			dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
		}
	} else {
		emac_reset(dev);

		/* PHY-less configuration.
		 * XXX I probably should move these settings to emacdata
		 */
		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
		dev->phy.pause = 1;
	}

	/* Fill in the driver function table */
	ndev->open = &emac_open;
	if (dev->tah_dev) {
		ndev->hard_start_xmit = &emac_start_xmit_sg;
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	} else
		ndev->hard_start_xmit = &emac_start_xmit;
	ndev->tx_timeout = &emac_full_tx_reset;
	ndev->watchdog_timeo = 5 * HZ;
	ndev->stop = &emac_close;
	ndev->get_stats = &emac_stats;
	ndev->set_multicast_list = &emac_set_multicast_list;
	ndev->do_ioctl = &emac_ioctl;
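	/* change_mtu is only installed on GiGE-capable cells, so jumbo
	 * MTUs are rejected elsewhere (the stock ethernet change_mtu
	 * caps the MTU at 1500); those cells also need the SG-aware MAL
	 * callbacks */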
	if (emac_phy_supports_gige(emacdata->phy_mode)) {
		ndev->change_mtu = &emac_change_mtu;
		dev->commac.ops = &emac_commac_sg_ops;
	}
	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);

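	/* Link state is unknown until emac_open(); start with carrier
	 * off and the TX queue stopped */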
	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR "emac%d: failed to register net device (%d)!\n",
		       dev->def->index, err);
		goto out6;
	}

	ocp_set_drvdata(ocpdev, dev);

	printk(KERN_INFO "%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
	       ndev->name, dev->def->index,
	       ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
	       ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);

	if (dev->phy.address >= 0)
		printk(KERN_INFO "%s: found %s PHY (0x%02x)\n", ndev->name,
		       dev->phy.def->name, dev->phy.address);

	emac_dbg_register(dev->def->index, dev);

	return 0;
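	/* Error unwind: undo initialization in the reverse order of setup */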
 out6:
	iounmap((void *)dev->emacp);
 out5:
	tah_fini(dev->tah_dev);
 out4:
	rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
 out3:
	zmii_fini(dev->zmii_dev, dev->zmii_input);
 out2:
	mal_unregister_commac(dev->mal, &dev->commac);
 out:
	/* Devices allocated with alloc_etherdev() must be released with
	 * free_netdev(), not kfree() */
	free_netdev(ndev);
	return err;
}

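/* Match every IBM EMAC cell; the table is terminated by the
 * OCP_VENDOR_INVALID sentinel */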
static struct ocp_device_id emac_ids[] = {
	{ .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_EMAC },
	{ .vendor = OCP_VENDOR_INVALID }
};

static struct ocp_driver emac_driver = {
	.name = "emac",
	.id_table = emac_ids,
	.probe = emac_probe,
	.remove = emac_remove,
};

static int __init emac_init(void)
{
	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	DBG(": init" NL);

	if (mal_init())
		return -ENODEV;

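	/* With CONFIG_IBM_EMAC_PHY_RX_CLK_FIX, run the EMACs from the
	 * internal clock while probing so MDIO works even if the PHY
	 * isn't driving RX_CLK yet, then switch back to the external
	 * clock; presumably these macros are no-ops when the workaround
	 * isn't configured.
	 */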
	EMAC_CLK_INTERNAL;
	if (ocp_register_driver(&emac_driver)) {
		EMAC_CLK_EXTERNAL;
		ocp_unregister_driver(&emac_driver);
		mal_exit();
		return -ENODEV;
	}
	EMAC_CLK_EXTERNAL;

	emac_init_debug();
	return 0;
}

static void __exit emac_exit(void)
{
	DBG(": exit" NL);
	ocp_unregister_driver(&emac_driver);
	mal_exit();
	emac_fini_debug();
}

module_init(emac_init);
module_exit(emac_exit);