/*
 * drivers/net/ibm_emac/ibm_emac_core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 * 	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 * 	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/ocp.h>

#include "ibm_emac_core.h"
#include "ibm_emac_debug.h"
/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make code API-correct and efficient
 * at the same time and didn't come up with code I liked :(. --ebs
 */
#define DRV_NAME        "emac"
#define DRV_VERSION     "3.53"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");
/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)

/* If packet size is less than this number, we allocate small skb and copy packet
 * contents into it instead of just sending original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_EMAC_RX_COPY_THRESHOLD

/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * setup precise phy_map entries
 */
static u32 busy_phy_map;
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && (defined(CONFIG_405EP) || defined(CONFIG_440EP))
/* 405EP has "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
 * with PHY RX clock problem.
 * 440EP has more sane SDR0_MFR register implementation than 440GX, which
 * also allows controlling each EMAC clock
 */
static inline void EMAC_RX_CLK_TX(int idx)
{
	unsigned long flags;
	local_irq_save(flags);

#if defined(CONFIG_405EP)
	mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
#else /* CONFIG_440EP */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
#endif

	local_irq_restore(flags);
}

static inline void EMAC_RX_CLK_DEFAULT(int idx)
{
	unsigned long flags;
	local_irq_save(flags);

#if defined(CONFIG_405EP)
	mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
#else /* CONFIG_440EP */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
#endif

	local_irq_restore(flags);
}
#else
#define EMAC_RX_CLK_TX(idx)		((void)0)
#define EMAC_RX_CLK_DEFAULT(idx)	((void)0)
#endif
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
/* We can switch Ethernet clock to the internal source through SDR0_MFR[ECS],
 * unfortunately this is less flexible than 440EP case, because it's a global
 * setting for all EMACs, therefore we do this clock trick only during probe.
 */
#define EMAC_CLK_INTERNAL		SDR_WRITE(DCRN_SDR_MFR, \
					    SDR_READ(DCRN_SDR_MFR) | 0x08000000)
#define EMAC_CLK_EXTERNAL		SDR_WRITE(DCRN_SDR_MFR, \
					    SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
#else
#define EMAC_CLK_INTERNAL		((void)0)
#define EMAC_CLK_EXTERNAL		((void)0)
#endif
/* I don't want to litter system log with timeout errors
 * when we have brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
					     const char *error)
{
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
	DBG("%d: %s" NL, dev->def->index, error);
#else
	if (net_ratelimit())
		printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
#endif
}
/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)
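/* Added note: these are jiffies values, so with the defaults above the link
 * state is polled roughly once a second while the link is up and five times a
 * second while it is down (emac_link_timer() re-arms dev->link_timer with
 * whichever interval applies).
 */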
/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};
static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs);
static void emac_clean_tx_ring(struct ocp_enet_private *dev);

static inline int emac_phy_supports_gige(int phy_mode)
{
	return phy_mode == PHY_MODE_GMII ||
	    phy_mode == PHY_MODE_RGMII ||
	    phy_mode == PHY_MODE_TBI ||
	    phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return phy_mode == PHY_MODE_TBI ||
	    phy_mode == PHY_MODE_RTBI;
}
static inline void emac_tx_enable(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: tx_enable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
	local_irq_restore(flags);
}
static void emac_tx_disable(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: tx_disable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = 300;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n)
			--n;
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
	local_irq_restore(flags);
}
static void emac_rx_enable(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);
	if (unlikely(dev->commac.rx_stopped))
		goto out;

	DBG("%d: rx_enable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = 300;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n)
				--n;
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
 out:
	local_irq_restore(flags);
}
static void emac_rx_disable(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: rx_disable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = 300;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n)
			--n;
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
	local_irq_restore(flags);
}
static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: rx_disable_async" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
	local_irq_restore(flags);
}
static int emac_reset(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	int n = 20;

	DBG("%d: reset" NL, dev->def->index);

	local_irq_save(flags);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;
	local_irq_restore(flags);

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
static void emac_hash_mc(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dev->def->index,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}
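/* Added note on the hash above: ether_crc() returns the 32-bit Ethernet CRC of
 * the multicast address; the top 6 bits select one of 64 hash bits
 * ("63 - (crc >> 26)"), and that bit is spread across the four 16-bit
 * GAHT1..GAHT4 registers: gaht[bit >> 4] picks the register and
 * 0x8000 >> (bit & 0x0f) picks the bit within it. For example, crc >> 26 == 63
 * gives bit 0, i.e. the most significant bit of GAHT1.
 */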
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
	    EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}

static inline int emac_opb_mhz(void)
{
	return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
}
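/* Added note: opb_bus_freq is given in Hz, so the "+ 500000" above rounds to
 * the nearest MHz rather than truncating (e.g. 66,666,666 Hz -> 67 MHz); the
 * result feeds the EMAC_MR1_BASE()/EMAC_STACR_BASE() timing calculations below.
 */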
static int emac_configure(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int gige;
	u32 r;

	DBG("%d: configure" NL, dev->def->index);

	if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	tah_reset(dev->tah_dev);

	/* Mode register */
	r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
	if (dev->phy.duplex == DUPLEX_FULL)
		r |= EMAC_MR1_FDE;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			r |= EMAC_MR1_MF_1000GPCS |
			    EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			r |= EMAC_MR1_MF_1000;
		r |= EMAC_MR1_RFS_16K;
		gige = 1;

		if (dev->ndev->mtu > ETH_DATA_LEN)
			r |= EMAC_MR1_JPSM;	/* jumbo frames */
		break;
	case SPEED_100:
		r |= EMAC_MR1_MF_100;
		/* Fall through */
	default:
		r |= EMAC_MR1_RFS_4K;
		gige = 0;
		break;
	}

	if (dev->rgmii_dev)
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
				dev->phy.speed);
	else
		zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);

#if !defined(CONFIG_40x)
	/* on 40x an erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			r |= EMAC_MR1_APP;
	}
#endif
	out_be32(&p->mr1, r);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
		      EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark;
	   there should still be enough space in the FIFO to allow our link
	   partner time to process this frame and also time to send a PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
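	/* Added note: EMAC_RWMR() below takes the low- and high-water marks in
	 * RX FIFO entries rather than bytes, which is why the byte figures
	 * quoted above are divided by EMAC_FIFO_ENTRY_SIZE before being
	 * programmed.
	 */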
	r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
		      EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* Enable interrupt sources */
	out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
		 EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		 EMAC_ISR_IRE | EMAC_ISR_TE);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		mii_reset_phy(&dev->phy);

	return 0;
}
static void emac_reinitialize(struct ocp_enet_private *dev)
{
	DBG("%d: reinitialize" NL, dev->def->index);

	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
}
static void emac_full_tx_reset(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;

	DBG("%d: full_tx_reset" NL, dev->def->index);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);

	netif_wake_queue(ndev);
}
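/* Added note: emac_full_tx_reset() is also wired up as the netdev tx_timeout
 * handler (see emac_probe()), so a TX stall is recovered by disabling the EMAC
 * transmitter and its MAL channel, dropping whatever is left in the TX ring,
 * reconfiguring the EMAC and re-enabling everything before waking the queue.
 */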
536 static int __emac_mdio_read(struct ocp_enet_private
*dev
, u8 id
, u8 reg
)
538 struct emac_regs
*p
= dev
->emacp
;
542 DBG2("%d: mdio_read(%02x,%02x)" NL
, dev
->def
->index
, id
, reg
);
544 /* Enable proper MDIO port */
545 zmii_enable_mdio(dev
->zmii_dev
, dev
->zmii_input
);
547 /* Wait for management interface to become idle */
549 while (!(in_be32(&p
->stacr
) & EMAC_STACR_OC
)) {
555 /* Issue read command */
557 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ
|
558 (reg
& EMAC_STACR_PRA_MASK
)
559 | ((id
& EMAC_STACR_PCDA_MASK
) << EMAC_STACR_PCDA_SHIFT
));
561 /* Wait for read to complete */
563 while (!((r
= in_be32(&p
->stacr
)) & EMAC_STACR_OC
)) {
569 if (unlikely(r
& EMAC_STACR_PHYE
)) {
570 DBG("%d: mdio_read(%02x, %02x) failed" NL
, dev
->def
->index
,
575 r
= ((r
>> EMAC_STACR_PHYD_SHIFT
) & EMAC_STACR_PHYD_MASK
);
576 DBG2("%d: mdio_read -> %04x" NL
, dev
->def
->index
, r
);
579 DBG("%d: MII management interface timeout (read)" NL
, dev
->def
->index
);
583 static void __emac_mdio_write(struct ocp_enet_private
*dev
, u8 id
, u8 reg
,
586 struct emac_regs
*p
= dev
->emacp
;
589 DBG2("%d: mdio_write(%02x,%02x,%04x)" NL
, dev
->def
->index
, id
, reg
,
592 /* Enable proper MDIO port */
593 zmii_enable_mdio(dev
->zmii_dev
, dev
->zmii_input
);
595 /* Wait for management interface to be idle */
597 while (!(in_be32(&p
->stacr
) & EMAC_STACR_OC
)) {
603 /* Issue write command */
605 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE
|
606 (reg
& EMAC_STACR_PRA_MASK
) |
607 ((id
& EMAC_STACR_PCDA_MASK
) << EMAC_STACR_PCDA_SHIFT
) |
608 (val
<< EMAC_STACR_PHYD_SHIFT
));
610 /* Wait for write to complete */
612 while (!(in_be32(&p
->stacr
) & EMAC_STACR_OC
)) {
619 DBG("%d: MII management interface timeout (write)" NL
, dev
->def
->index
);
622 static int emac_mdio_read(struct net_device
*ndev
, int id
, int reg
)
624 struct ocp_enet_private
*dev
= ndev
->priv
;
628 res
= __emac_mdio_read(dev
->mdio_dev
? dev
->mdio_dev
: dev
, (u8
) id
,
634 static void emac_mdio_write(struct net_device
*ndev
, int id
, int reg
, int val
)
636 struct ocp_enet_private
*dev
= ndev
->priv
;
639 __emac_mdio_write(dev
->mdio_dev
? dev
->mdio_dev
: dev
, (u8
) id
,
640 (u8
) reg
, (u16
) val
);
645 static void emac_set_multicast_list(struct net_device
*ndev
)
647 struct ocp_enet_private
*dev
= ndev
->priv
;
648 struct emac_regs
*p
= dev
->emacp
;
649 u32 rmr
= emac_iff2rmr(ndev
);
651 DBG("%d: multicast %08x" NL
, dev
->def
->index
, rmr
);
652 BUG_ON(!netif_running(dev
->ndev
));
654 /* I decided to relax register access rules here to avoid
657 * There is a real problem with EMAC4 core if we use MWSW_001 bit
658 * in MR1 register and do a full EMAC reset.
659 * One TX BD status update is delayed and, after EMAC reset, it
660 * never happens, resulting in TX hung (it'll be recovered by TX
661 * timeout handler eventually, but this is just gross).
662 * So we either have to do full TX reset or try to cheat here :)
664 * The only required change is to RX mode register, so I *think* all
665 * we need is just to stop RX channel. This seems to work on all
668 emac_rx_disable(dev
);
669 if (rmr
& EMAC_RMR_MAE
)
671 out_be32(&p
->rmr
, rmr
);
676 static int emac_resize_rx_ring(struct ocp_enet_private
*dev
, int new_mtu
)
678 struct ocp_func_emac_data
*emacdata
= dev
->def
->additions
;
679 int rx_sync_size
= emac_rx_sync_size(new_mtu
);
680 int rx_skb_size
= emac_rx_skb_size(new_mtu
);
683 emac_rx_disable(dev
);
684 mal_disable_rx_channel(dev
->mal
, emacdata
->mal_rx_chan
);
686 if (dev
->rx_sg_skb
) {
687 ++dev
->estats
.rx_dropped_resize
;
688 dev_kfree_skb(dev
->rx_sg_skb
);
689 dev
->rx_sg_skb
= NULL
;
692 /* Make a first pass over RX ring and mark BDs ready, dropping
693 * non-processed packets on the way. We need this as a separate pass
694 * to simplify error recovery in the case of allocation failure later.
696 for (i
= 0; i
< NUM_RX_BUFF
; ++i
) {
697 if (dev
->rx_desc
[i
].ctrl
& MAL_RX_CTRL_FIRST
)
698 ++dev
->estats
.rx_dropped_resize
;
700 dev
->rx_desc
[i
].data_len
= 0;
701 dev
->rx_desc
[i
].ctrl
= MAL_RX_CTRL_EMPTY
|
702 (i
== (NUM_RX_BUFF
- 1) ? MAL_RX_CTRL_WRAP
: 0);
705 /* Reallocate RX ring only if bigger skb buffers are required */
706 if (rx_skb_size
<= dev
->rx_skb_size
)
709 /* Second pass, allocate new skbs */
710 for (i
= 0; i
< NUM_RX_BUFF
; ++i
) {
711 struct sk_buff
*skb
= alloc_skb(rx_skb_size
, GFP_ATOMIC
);
717 BUG_ON(!dev
->rx_skb
[i
]);
718 dev_kfree_skb(dev
->rx_skb
[i
]);
720 skb_reserve(skb
, EMAC_RX_SKB_HEADROOM
+ 2);
721 dev
->rx_desc
[i
].data_ptr
=
722 dma_map_single(dev
->ldev
, skb
->data
- 2, rx_sync_size
,
723 DMA_FROM_DEVICE
) + 2;
724 dev
->rx_skb
[i
] = skb
;
727 /* Check if we need to change "Jumbo" bit in MR1 */
728 if ((new_mtu
> ETH_DATA_LEN
) ^ (dev
->ndev
->mtu
> ETH_DATA_LEN
)) {
729 /* This is to prevent starting RX channel in emac_rx_enable() */
730 dev
->commac
.rx_stopped
= 1;
732 dev
->ndev
->mtu
= new_mtu
;
733 emac_full_tx_reset(dev
->ndev
);
736 mal_set_rcbs(dev
->mal
, emacdata
->mal_rx_chan
, emac_rx_size(new_mtu
));
739 dev
->commac
.rx_stopped
= dev
->rx_slot
= 0;
740 mal_enable_rx_channel(dev
->mal
, emacdata
->mal_rx_chan
);
746 /* Process ctx, rtnl_lock semaphore */
747 static int emac_change_mtu(struct net_device
*ndev
, int new_mtu
)
749 struct ocp_enet_private
*dev
= ndev
->priv
;
752 if (new_mtu
< EMAC_MIN_MTU
|| new_mtu
> EMAC_MAX_MTU
)
755 DBG("%d: change_mtu(%d)" NL
, dev
->def
->index
, new_mtu
);
758 if (netif_running(ndev
)) {
		/* Check if we really need to reinitialize RX ring */
760 if (emac_rx_skb_size(ndev
->mtu
) != emac_rx_skb_size(new_mtu
))
761 ret
= emac_resize_rx_ring(dev
, new_mtu
);
766 dev
->rx_skb_size
= emac_rx_skb_size(new_mtu
);
767 dev
->rx_sync_size
= emac_rx_sync_size(new_mtu
);
774 static void emac_clean_tx_ring(struct ocp_enet_private
*dev
)
777 for (i
= 0; i
< NUM_TX_BUFF
; ++i
) {
778 if (dev
->tx_skb
[i
]) {
779 dev_kfree_skb(dev
->tx_skb
[i
]);
780 dev
->tx_skb
[i
] = NULL
;
781 if (dev
->tx_desc
[i
].ctrl
& MAL_TX_CTRL_READY
)
782 ++dev
->estats
.tx_dropped
;
784 dev
->tx_desc
[i
].ctrl
= 0;
785 dev
->tx_desc
[i
].data_ptr
= 0;
789 static void emac_clean_rx_ring(struct ocp_enet_private
*dev
)
792 for (i
= 0; i
< NUM_RX_BUFF
; ++i
)
793 if (dev
->rx_skb
[i
]) {
794 dev
->rx_desc
[i
].ctrl
= 0;
795 dev_kfree_skb(dev
->rx_skb
[i
]);
796 dev
->rx_skb
[i
] = NULL
;
797 dev
->rx_desc
[i
].data_ptr
= 0;
800 if (dev
->rx_sg_skb
) {
801 dev_kfree_skb(dev
->rx_sg_skb
);
802 dev
->rx_sg_skb
= NULL
;
806 static inline int emac_alloc_rx_skb(struct ocp_enet_private
*dev
, int slot
,
809 struct sk_buff
*skb
= alloc_skb(dev
->rx_skb_size
, flags
);
813 dev
->rx_skb
[slot
] = skb
;
814 dev
->rx_desc
[slot
].data_len
= 0;
816 skb_reserve(skb
, EMAC_RX_SKB_HEADROOM
+ 2);
817 dev
->rx_desc
[slot
].data_ptr
=
818 dma_map_single(dev
->ldev
, skb
->data
- 2, dev
->rx_sync_size
,
819 DMA_FROM_DEVICE
) + 2;
821 dev
->rx_desc
[slot
].ctrl
= MAL_RX_CTRL_EMPTY
|
822 (slot
== (NUM_RX_BUFF
- 1) ? MAL_RX_CTRL_WRAP
: 0);
static void emac_print_link_status(struct ocp_enet_private *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}
839 /* Process ctx, rtnl_lock semaphore */
840 static int emac_open(struct net_device
*ndev
)
842 struct ocp_enet_private
*dev
= ndev
->priv
;
843 struct ocp_func_emac_data
*emacdata
= dev
->def
->additions
;
846 DBG("%d: open" NL
, dev
->def
->index
);
848 /* Setup error IRQ handler */
849 err
= request_irq(dev
->def
->irq
, emac_irq
, 0, "EMAC", dev
);
851 printk(KERN_ERR
"%s: failed to request IRQ %d\n",
852 ndev
->name
, dev
->def
->irq
);
856 /* Allocate RX ring */
857 for (i
= 0; i
< NUM_RX_BUFF
; ++i
)
858 if (emac_alloc_rx_skb(dev
, i
, GFP_KERNEL
)) {
859 printk(KERN_ERR
"%s: failed to allocate RX ring\n",
865 dev
->tx_cnt
= dev
->tx_slot
= dev
->ack_slot
= dev
->rx_slot
=
866 dev
->commac
.rx_stopped
= 0;
867 dev
->rx_sg_skb
= NULL
;
869 if (dev
->phy
.address
>= 0) {
870 int link_poll_interval
;
871 if (dev
->phy
.def
->ops
->poll_link(&dev
->phy
)) {
872 dev
->phy
.def
->ops
->read_link(&dev
->phy
);
873 EMAC_RX_CLK_DEFAULT(dev
->def
->index
);
874 netif_carrier_on(dev
->ndev
);
875 link_poll_interval
= PHY_POLL_LINK_ON
;
877 EMAC_RX_CLK_TX(dev
->def
->index
);
878 netif_carrier_off(dev
->ndev
);
879 link_poll_interval
= PHY_POLL_LINK_OFF
;
881 mod_timer(&dev
->link_timer
, jiffies
+ link_poll_interval
);
882 emac_print_link_status(dev
);
884 netif_carrier_on(dev
->ndev
);
887 mal_poll_add(dev
->mal
, &dev
->commac
);
888 mal_enable_tx_channel(dev
->mal
, emacdata
->mal_tx_chan
);
889 mal_set_rcbs(dev
->mal
, emacdata
->mal_rx_chan
, emac_rx_size(ndev
->mtu
));
890 mal_enable_rx_channel(dev
->mal
, emacdata
->mal_rx_chan
);
893 netif_start_queue(ndev
);
898 emac_clean_rx_ring(dev
);
899 free_irq(dev
->def
->irq
, dev
);
904 static int emac_link_differs(struct ocp_enet_private
*dev
)
906 u32 r
= in_be32(&dev
->emacp
->mr1
);
908 int duplex
= r
& EMAC_MR1_FDE
? DUPLEX_FULL
: DUPLEX_HALF
;
909 int speed
, pause
, asym_pause
;
911 if (r
& (EMAC_MR1_MF_1000
| EMAC_MR1_MF_1000GPCS
))
913 else if (r
& EMAC_MR1_MF_100
)
918 switch (r
& (EMAC_MR1_EIFC
| EMAC_MR1_APP
)) {
919 case (EMAC_MR1_EIFC
| EMAC_MR1_APP
):
928 pause
= asym_pause
= 0;
930 return speed
!= dev
->phy
.speed
|| duplex
!= dev
->phy
.duplex
||
931 pause
!= dev
->phy
.pause
|| asym_pause
!= dev
->phy
.asym_pause
;
935 static void emac_link_timer(unsigned long data
)
937 struct ocp_enet_private
*dev
= (struct ocp_enet_private
*)data
;
938 int link_poll_interval
;
940 DBG2("%d: link timer" NL
, dev
->def
->index
);
942 if (dev
->phy
.def
->ops
->poll_link(&dev
->phy
)) {
943 if (!netif_carrier_ok(dev
->ndev
)) {
944 EMAC_RX_CLK_DEFAULT(dev
->def
->index
);
946 /* Get new link parameters */
947 dev
->phy
.def
->ops
->read_link(&dev
->phy
);
949 if (dev
->tah_dev
|| emac_link_differs(dev
))
950 emac_full_tx_reset(dev
->ndev
);
952 netif_carrier_on(dev
->ndev
);
953 emac_print_link_status(dev
);
955 link_poll_interval
= PHY_POLL_LINK_ON
;
957 if (netif_carrier_ok(dev
->ndev
)) {
958 EMAC_RX_CLK_TX(dev
->def
->index
);
959 #if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
960 emac_reinitialize(dev
);
962 netif_carrier_off(dev
->ndev
);
963 emac_print_link_status(dev
);
966 /* Retry reset if the previous attempt failed.
967 * This is needed mostly for CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
968 * case, but I left it here because it shouldn't trigger for
971 if (unlikely(dev
->reset_failed
))
972 emac_reinitialize(dev
);
974 link_poll_interval
= PHY_POLL_LINK_OFF
;
976 mod_timer(&dev
->link_timer
, jiffies
+ link_poll_interval
);
980 static void emac_force_link_update(struct ocp_enet_private
*dev
)
982 netif_carrier_off(dev
->ndev
);
983 if (timer_pending(&dev
->link_timer
))
984 mod_timer(&dev
->link_timer
, jiffies
+ PHY_POLL_LINK_OFF
);
987 /* Process ctx, rtnl_lock semaphore */
988 static int emac_close(struct net_device
*ndev
)
990 struct ocp_enet_private
*dev
= ndev
->priv
;
991 struct ocp_func_emac_data
*emacdata
= dev
->def
->additions
;
993 DBG("%d: close" NL
, dev
->def
->index
);
997 if (dev
->phy
.address
>= 0)
998 del_timer_sync(&dev
->link_timer
);
1000 netif_stop_queue(ndev
);
1001 emac_rx_disable(dev
);
1002 emac_tx_disable(dev
);
1003 mal_disable_rx_channel(dev
->mal
, emacdata
->mal_rx_chan
);
1004 mal_disable_tx_channel(dev
->mal
, emacdata
->mal_tx_chan
);
1005 mal_poll_del(dev
->mal
, &dev
->commac
);
1008 emac_clean_tx_ring(dev
);
1009 emac_clean_rx_ring(dev
);
1010 free_irq(dev
->def
->irq
, dev
);
1015 static inline u16
emac_tx_csum(struct ocp_enet_private
*dev
,
1016 struct sk_buff
*skb
)
1018 #if defined(CONFIG_IBM_EMAC_TAH)
1019 if (skb
->ip_summed
== CHECKSUM_HW
) {
1020 ++dev
->stats
.tx_packets_csum
;
1021 return EMAC_TX_CTRL_TAH_CSUM
;
1027 static inline int emac_xmit_finish(struct ocp_enet_private
*dev
, int len
)
1029 struct emac_regs
*p
= dev
->emacp
;
1030 struct net_device
*ndev
= dev
->ndev
;
1032 /* Send the packet out */
1033 out_be32(&p
->tmr0
, EMAC_TMR0_XMIT
);
1035 if (unlikely(++dev
->tx_cnt
== NUM_TX_BUFF
)) {
1036 netif_stop_queue(ndev
);
1037 DBG2("%d: stopped TX queue" NL
, dev
->def
->index
);
1040 ndev
->trans_start
= jiffies
;
1041 ++dev
->stats
.tx_packets
;
1042 dev
->stats
.tx_bytes
+= len
;
1048 static int emac_start_xmit(struct sk_buff
*skb
, struct net_device
*ndev
)
1050 struct ocp_enet_private
*dev
= ndev
->priv
;
1051 unsigned int len
= skb
->len
;
1054 u16 ctrl
= EMAC_TX_CTRL_GFCS
| EMAC_TX_CTRL_GP
| MAL_TX_CTRL_READY
|
1055 MAL_TX_CTRL_LAST
| emac_tx_csum(dev
, skb
);
1057 slot
= dev
->tx_slot
++;
1058 if (dev
->tx_slot
== NUM_TX_BUFF
) {
1060 ctrl
|= MAL_TX_CTRL_WRAP
;
1063 DBG2("%d: xmit(%u) %d" NL
, dev
->def
->index
, len
, slot
);
1065 dev
->tx_skb
[slot
] = skb
;
1066 dev
->tx_desc
[slot
].data_ptr
= dma_map_single(dev
->ldev
, skb
->data
, len
,
1068 dev
->tx_desc
[slot
].data_len
= (u16
) len
;
1070 dev
->tx_desc
[slot
].ctrl
= ctrl
;
1072 return emac_xmit_finish(dev
, len
);
1075 #if defined(CONFIG_IBM_EMAC_TAH)
1076 static inline int emac_xmit_split(struct ocp_enet_private
*dev
, int slot
,
1077 u32 pd
, int len
, int last
, u16 base_ctrl
)
1080 u16 ctrl
= base_ctrl
;
1081 int chunk
= min(len
, MAL_MAX_TX_SIZE
);
1084 slot
= (slot
+ 1) % NUM_TX_BUFF
;
1087 ctrl
|= MAL_TX_CTRL_LAST
;
1088 if (slot
== NUM_TX_BUFF
- 1)
1089 ctrl
|= MAL_TX_CTRL_WRAP
;
1091 dev
->tx_skb
[slot
] = NULL
;
1092 dev
->tx_desc
[slot
].data_ptr
= pd
;
1093 dev
->tx_desc
[slot
].data_len
= (u16
) chunk
;
1094 dev
->tx_desc
[slot
].ctrl
= ctrl
;
1105 /* BHs disabled (SG version for TAH equipped EMACs) */
1106 static int emac_start_xmit_sg(struct sk_buff
*skb
, struct net_device
*ndev
)
1108 struct ocp_enet_private
*dev
= ndev
->priv
;
1109 int nr_frags
= skb_shinfo(skb
)->nr_frags
;
1110 int len
= skb
->len
, chunk
;
1115 /* This is common "fast" path */
1116 if (likely(!nr_frags
&& len
<= MAL_MAX_TX_SIZE
))
1117 return emac_start_xmit(skb
, ndev
);
1119 len
-= skb
->data_len
;
	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
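	/* Added note: the check below assumes one descriptor per fragment plus
	 * mal_tx_chunks(len) descriptors for the linear part; a fragment larger
	 * than MAL_MAX_TX_SIZE is later split again in emac_xmit_split(), which
	 * is what can make this estimate too optimistic.
	 */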
1125 if (unlikely(dev
->tx_cnt
+ nr_frags
+ mal_tx_chunks(len
) > NUM_TX_BUFF
))
1128 ctrl
= EMAC_TX_CTRL_GFCS
| EMAC_TX_CTRL_GP
| MAL_TX_CTRL_READY
|
1129 emac_tx_csum(dev
, skb
);
1130 slot
= dev
->tx_slot
;
1133 dev
->tx_skb
[slot
] = NULL
;
1134 chunk
= min(len
, MAL_MAX_TX_SIZE
);
1135 dev
->tx_desc
[slot
].data_ptr
= pd
=
1136 dma_map_single(dev
->ldev
, skb
->data
, len
, DMA_TO_DEVICE
);
1137 dev
->tx_desc
[slot
].data_len
= (u16
) chunk
;
1140 slot
= emac_xmit_split(dev
, slot
, pd
+ chunk
, len
, !nr_frags
,
1143 for (i
= 0; i
< nr_frags
; ++i
) {
1144 struct skb_frag_struct
*frag
= &skb_shinfo(skb
)->frags
[i
];
1147 if (unlikely(dev
->tx_cnt
+ mal_tx_chunks(len
) >= NUM_TX_BUFF
))
1150 pd
= dma_map_page(dev
->ldev
, frag
->page
, frag
->page_offset
, len
,
1153 slot
= emac_xmit_split(dev
, slot
, pd
, len
, i
== nr_frags
- 1,
1157 DBG2("%d: xmit_sg(%u) %d - %d" NL
, dev
->def
->index
, skb
->len
,
1158 dev
->tx_slot
, slot
);
1160 /* Attach skb to the last slot so we don't release it too early */
1161 dev
->tx_skb
[slot
] = skb
;
1163 /* Send the packet out */
1164 if (dev
->tx_slot
== NUM_TX_BUFF
- 1)
1165 ctrl
|= MAL_TX_CTRL_WRAP
;
1167 dev
->tx_desc
[dev
->tx_slot
].ctrl
= ctrl
;
1168 dev
->tx_slot
= (slot
+ 1) % NUM_TX_BUFF
;
1170 return emac_xmit_finish(dev
, skb
->len
);
1173 /* Well, too bad. Our previous estimation was overly optimistic.
1176 while (slot
!= dev
->tx_slot
) {
1177 dev
->tx_desc
[slot
].ctrl
= 0;
1180 slot
= NUM_TX_BUFF
- 1;
1182 ++dev
->estats
.tx_undo
;
1185 netif_stop_queue(ndev
);
1186 DBG2("%d: stopped TX queue" NL
, dev
->def
->index
);
1190 # define emac_start_xmit_sg emac_start_xmit
1191 #endif /* !defined(CONFIG_IBM_EMAC_TAH) */
1194 static void emac_parse_tx_error(struct ocp_enet_private
*dev
, u16 ctrl
)
1196 struct ibm_emac_error_stats
*st
= &dev
->estats
;
1197 DBG("%d: BD TX error %04x" NL
, dev
->def
->index
, ctrl
);
1200 if (ctrl
& EMAC_TX_ST_BFCS
)
1201 ++st
->tx_bd_bad_fcs
;
1202 if (ctrl
& EMAC_TX_ST_LCS
)
1203 ++st
->tx_bd_carrier_loss
;
1204 if (ctrl
& EMAC_TX_ST_ED
)
1205 ++st
->tx_bd_excessive_deferral
;
1206 if (ctrl
& EMAC_TX_ST_EC
)
1207 ++st
->tx_bd_excessive_collisions
;
1208 if (ctrl
& EMAC_TX_ST_LC
)
1209 ++st
->tx_bd_late_collision
;
1210 if (ctrl
& EMAC_TX_ST_MC
)
1211 ++st
->tx_bd_multple_collisions
;
1212 if (ctrl
& EMAC_TX_ST_SC
)
1213 ++st
->tx_bd_single_collision
;
1214 if (ctrl
& EMAC_TX_ST_UR
)
1215 ++st
->tx_bd_underrun
;
1216 if (ctrl
& EMAC_TX_ST_SQE
)
1220 static void emac_poll_tx(void *param
)
1222 struct ocp_enet_private
*dev
= param
;
1223 DBG2("%d: poll_tx, %d %d" NL
, dev
->def
->index
, dev
->tx_cnt
,
1228 int slot
= dev
->ack_slot
, n
= 0;
1230 ctrl
= dev
->tx_desc
[slot
].ctrl
;
1231 if (!(ctrl
& MAL_TX_CTRL_READY
)) {
1232 struct sk_buff
*skb
= dev
->tx_skb
[slot
];
1237 dev
->tx_skb
[slot
] = NULL
;
1239 slot
= (slot
+ 1) % NUM_TX_BUFF
;
1241 if (unlikely(EMAC_IS_BAD_TX(ctrl
)))
1242 emac_parse_tx_error(dev
, ctrl
);
1248 dev
->ack_slot
= slot
;
1249 if (netif_queue_stopped(dev
->ndev
) &&
1250 dev
->tx_cnt
< EMAC_TX_WAKEUP_THRESH
)
1251 netif_wake_queue(dev
->ndev
);
1253 DBG2("%d: tx %d pkts" NL
, dev
->def
->index
, n
);
1258 static inline void emac_recycle_rx_skb(struct ocp_enet_private
*dev
, int slot
,
1261 struct sk_buff
*skb
= dev
->rx_skb
[slot
];
1262 DBG2("%d: recycle %d %d" NL
, dev
->def
->index
, slot
, len
);
1265 dma_map_single(dev
->ldev
, skb
->data
- 2,
1266 EMAC_DMA_ALIGN(len
+ 2), DMA_FROM_DEVICE
);
1268 dev
->rx_desc
[slot
].data_len
= 0;
1270 dev
->rx_desc
[slot
].ctrl
= MAL_RX_CTRL_EMPTY
|
1271 (slot
== (NUM_RX_BUFF
- 1) ? MAL_RX_CTRL_WRAP
: 0);
1274 static void emac_parse_rx_error(struct ocp_enet_private
*dev
, u16 ctrl
)
1276 struct ibm_emac_error_stats
*st
= &dev
->estats
;
1277 DBG("%d: BD RX error %04x" NL
, dev
->def
->index
, ctrl
);
1280 if (ctrl
& EMAC_RX_ST_OE
)
1281 ++st
->rx_bd_overrun
;
1282 if (ctrl
& EMAC_RX_ST_BP
)
1283 ++st
->rx_bd_bad_packet
;
1284 if (ctrl
& EMAC_RX_ST_RP
)
1285 ++st
->rx_bd_runt_packet
;
1286 if (ctrl
& EMAC_RX_ST_SE
)
1287 ++st
->rx_bd_short_event
;
1288 if (ctrl
& EMAC_RX_ST_AE
)
1289 ++st
->rx_bd_alignment_error
;
1290 if (ctrl
& EMAC_RX_ST_BFCS
)
1291 ++st
->rx_bd_bad_fcs
;
1292 if (ctrl
& EMAC_RX_ST_PTL
)
1293 ++st
->rx_bd_packet_too_long
;
1294 if (ctrl
& EMAC_RX_ST_ORE
)
1295 ++st
->rx_bd_out_of_range
;
1296 if (ctrl
& EMAC_RX_ST_IRE
)
1297 ++st
->rx_bd_in_range
;
1300 static inline void emac_rx_csum(struct ocp_enet_private
*dev
,
1301 struct sk_buff
*skb
, u16 ctrl
)
1303 #if defined(CONFIG_IBM_EMAC_TAH)
1304 if (!ctrl
&& dev
->tah_dev
) {
1305 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1306 ++dev
->stats
.rx_packets_csum
;
1311 static inline int emac_rx_sg_append(struct ocp_enet_private
*dev
, int slot
)
1313 if (likely(dev
->rx_sg_skb
!= NULL
)) {
1314 int len
= dev
->rx_desc
[slot
].data_len
;
1315 int tot_len
= dev
->rx_sg_skb
->len
+ len
;
1317 if (unlikely(tot_len
+ 2 > dev
->rx_skb_size
)) {
1318 ++dev
->estats
.rx_dropped_mtu
;
1319 dev_kfree_skb(dev
->rx_sg_skb
);
1320 dev
->rx_sg_skb
= NULL
;
1322 cacheable_memcpy(dev
->rx_sg_skb
->tail
,
1323 dev
->rx_skb
[slot
]->data
, len
);
1324 skb_put(dev
->rx_sg_skb
, len
);
1325 emac_recycle_rx_skb(dev
, slot
, len
);
1329 emac_recycle_rx_skb(dev
, slot
, 0);
1334 static int emac_poll_rx(void *param
, int budget
)
1336 struct ocp_enet_private
*dev
= param
;
1337 int slot
= dev
->rx_slot
, received
= 0;
1339 DBG2("%d: poll_rx(%d)" NL
, dev
->def
->index
, budget
);
1342 while (budget
> 0) {
1344 struct sk_buff
*skb
;
1345 u16 ctrl
= dev
->rx_desc
[slot
].ctrl
;
1347 if (ctrl
& MAL_RX_CTRL_EMPTY
)
1350 skb
= dev
->rx_skb
[slot
];
1352 len
= dev
->rx_desc
[slot
].data_len
;
1354 if (unlikely(!MAL_IS_SINGLE_RX(ctrl
)))
1357 ctrl
&= EMAC_BAD_RX_MASK
;
1358 if (unlikely(ctrl
&& ctrl
!= EMAC_RX_TAH_BAD_CSUM
)) {
1359 emac_parse_rx_error(dev
, ctrl
);
1360 ++dev
->estats
.rx_dropped_error
;
1361 emac_recycle_rx_skb(dev
, slot
, 0);
1366 if (len
&& len
< EMAC_RX_COPY_THRESH
) {
1367 struct sk_buff
*copy_skb
=
1368 alloc_skb(len
+ EMAC_RX_SKB_HEADROOM
+ 2, GFP_ATOMIC
);
1369 if (unlikely(!copy_skb
))
1372 skb_reserve(copy_skb
, EMAC_RX_SKB_HEADROOM
+ 2);
1373 cacheable_memcpy(copy_skb
->data
- 2, skb
->data
- 2,
1375 emac_recycle_rx_skb(dev
, slot
, len
);
1377 } else if (unlikely(emac_alloc_rx_skb(dev
, slot
, GFP_ATOMIC
)))
1382 skb
->dev
= dev
->ndev
;
1383 skb
->protocol
= eth_type_trans(skb
, dev
->ndev
);
1384 emac_rx_csum(dev
, skb
, ctrl
);
1386 if (unlikely(netif_receive_skb(skb
) == NET_RX_DROP
))
1387 ++dev
->estats
.rx_dropped_stack
;
1389 ++dev
->stats
.rx_packets
;
1391 dev
->stats
.rx_bytes
+= len
;
1392 slot
= (slot
+ 1) % NUM_RX_BUFF
;
1397 if (ctrl
& MAL_RX_CTRL_FIRST
) {
1398 BUG_ON(dev
->rx_sg_skb
);
1399 if (unlikely(emac_alloc_rx_skb(dev
, slot
, GFP_ATOMIC
))) {
1400 DBG("%d: rx OOM %d" NL
, dev
->def
->index
, slot
);
1401 ++dev
->estats
.rx_dropped_oom
;
1402 emac_recycle_rx_skb(dev
, slot
, 0);
1404 dev
->rx_sg_skb
= skb
;
1407 } else if (!emac_rx_sg_append(dev
, slot
) &&
1408 (ctrl
& MAL_RX_CTRL_LAST
)) {
1410 skb
= dev
->rx_sg_skb
;
1411 dev
->rx_sg_skb
= NULL
;
1413 ctrl
&= EMAC_BAD_RX_MASK
;
1414 if (unlikely(ctrl
&& ctrl
!= EMAC_RX_TAH_BAD_CSUM
)) {
1415 emac_parse_rx_error(dev
, ctrl
);
1416 ++dev
->estats
.rx_dropped_error
;
1424 DBG("%d: rx OOM %d" NL
, dev
->def
->index
, slot
);
1425 /* Drop the packet and recycle skb */
1426 ++dev
->estats
.rx_dropped_oom
;
1427 emac_recycle_rx_skb(dev
, slot
, 0);
1432 DBG2("%d: rx %d BDs" NL
, dev
->def
->index
, received
);
1433 dev
->rx_slot
= slot
;
1436 if (unlikely(budget
&& dev
->commac
.rx_stopped
)) {
1437 struct ocp_func_emac_data
*emacdata
= dev
->def
->additions
;
1440 if (!(dev
->rx_desc
[slot
].ctrl
& MAL_RX_CTRL_EMPTY
)) {
1441 DBG2("%d: rx restart" NL
, dev
->def
->index
);
1446 if (dev
->rx_sg_skb
) {
1447 DBG2("%d: dropping partial rx packet" NL
,
1449 ++dev
->estats
.rx_dropped_error
;
1450 dev_kfree_skb(dev
->rx_sg_skb
);
1451 dev
->rx_sg_skb
= NULL
;
1454 dev
->commac
.rx_stopped
= 0;
1455 mal_enable_rx_channel(dev
->mal
, emacdata
->mal_rx_chan
);
1456 emac_rx_enable(dev
);
1463 static int emac_peek_rx(void *param
)
1465 struct ocp_enet_private
*dev
= param
;
1466 return !(dev
->rx_desc
[dev
->rx_slot
].ctrl
& MAL_RX_CTRL_EMPTY
);
1470 static int emac_peek_rx_sg(void *param
)
1472 struct ocp_enet_private
*dev
= param
;
1473 int slot
= dev
->rx_slot
;
1475 u16 ctrl
= dev
->rx_desc
[slot
].ctrl
;
1476 if (ctrl
& MAL_RX_CTRL_EMPTY
)
1478 else if (ctrl
& MAL_RX_CTRL_LAST
)
1481 slot
= (slot
+ 1) % NUM_RX_BUFF
;
1483 /* I'm just being paranoid here :) */
1484 if (unlikely(slot
== dev
->rx_slot
))
1490 static void emac_rxde(void *param
)
1492 struct ocp_enet_private
*dev
= param
;
1493 ++dev
->estats
.rx_stopped
;
1494 emac_rx_disable_async(dev
);
1498 static irqreturn_t
emac_irq(int irq
, void *dev_instance
, struct pt_regs
*regs
)
1500 struct ocp_enet_private
*dev
= dev_instance
;
1501 struct emac_regs
*p
= dev
->emacp
;
1502 struct ibm_emac_error_stats
*st
= &dev
->estats
;
1504 u32 isr
= in_be32(&p
->isr
);
1505 out_be32(&p
->isr
, isr
);
1507 DBG("%d: isr = %08x" NL
, dev
->def
->index
, isr
);
1509 if (isr
& EMAC_ISR_TXPE
)
1511 if (isr
& EMAC_ISR_RXPE
)
1513 if (isr
& EMAC_ISR_TXUE
)
1515 if (isr
& EMAC_ISR_RXOE
)
1516 ++st
->rx_fifo_overrun
;
1517 if (isr
& EMAC_ISR_OVR
)
1519 if (isr
& EMAC_ISR_BP
)
1520 ++st
->rx_bad_packet
;
1521 if (isr
& EMAC_ISR_RP
)
1522 ++st
->rx_runt_packet
;
1523 if (isr
& EMAC_ISR_SE
)
1524 ++st
->rx_short_event
;
1525 if (isr
& EMAC_ISR_ALE
)
1526 ++st
->rx_alignment_error
;
1527 if (isr
& EMAC_ISR_BFCS
)
1529 if (isr
& EMAC_ISR_PTLE
)
1530 ++st
->rx_packet_too_long
;
1531 if (isr
& EMAC_ISR_ORE
)
1532 ++st
->rx_out_of_range
;
1533 if (isr
& EMAC_ISR_IRE
)
1535 if (isr
& EMAC_ISR_SQE
)
1537 if (isr
& EMAC_ISR_TE
)
1543 static struct net_device_stats
*emac_stats(struct net_device
*ndev
)
1545 struct ocp_enet_private
*dev
= ndev
->priv
;
1546 struct ibm_emac_stats
*st
= &dev
->stats
;
1547 struct ibm_emac_error_stats
*est
= &dev
->estats
;
1548 struct net_device_stats
*nst
= &dev
->nstats
;
1550 DBG2("%d: stats" NL
, dev
->def
->index
);
1552 /* Compute "legacy" statistics */
1553 local_irq_disable();
1554 nst
->rx_packets
= (unsigned long)st
->rx_packets
;
1555 nst
->rx_bytes
= (unsigned long)st
->rx_bytes
;
1556 nst
->tx_packets
= (unsigned long)st
->tx_packets
;
1557 nst
->tx_bytes
= (unsigned long)st
->tx_bytes
;
1558 nst
->rx_dropped
= (unsigned long)(est
->rx_dropped_oom
+
1559 est
->rx_dropped_error
+
1560 est
->rx_dropped_resize
+
1561 est
->rx_dropped_mtu
);
1562 nst
->tx_dropped
= (unsigned long)est
->tx_dropped
;
1564 nst
->rx_errors
= (unsigned long)est
->rx_bd_errors
;
1565 nst
->rx_fifo_errors
= (unsigned long)(est
->rx_bd_overrun
+
1566 est
->rx_fifo_overrun
+
1568 nst
->rx_frame_errors
= (unsigned long)(est
->rx_bd_alignment_error
+
1569 est
->rx_alignment_error
);
1570 nst
->rx_crc_errors
= (unsigned long)(est
->rx_bd_bad_fcs
+
1572 nst
->rx_length_errors
= (unsigned long)(est
->rx_bd_runt_packet
+
1573 est
->rx_bd_short_event
+
1574 est
->rx_bd_packet_too_long
+
1575 est
->rx_bd_out_of_range
+
1576 est
->rx_bd_in_range
+
1577 est
->rx_runt_packet
+
1578 est
->rx_short_event
+
1579 est
->rx_packet_too_long
+
1580 est
->rx_out_of_range
+
1583 nst
->tx_errors
= (unsigned long)(est
->tx_bd_errors
+ est
->tx_errors
);
1584 nst
->tx_fifo_errors
= (unsigned long)(est
->tx_bd_underrun
+
1586 nst
->tx_carrier_errors
= (unsigned long)est
->tx_bd_carrier_loss
;
1587 nst
->collisions
= (unsigned long)(est
->tx_bd_excessive_deferral
+
1588 est
->tx_bd_excessive_collisions
+
1589 est
->tx_bd_late_collision
+
1590 est
->tx_bd_multple_collisions
);
1595 static void emac_remove(struct ocp_device
*ocpdev
)
1597 struct ocp_enet_private
*dev
= ocp_get_drvdata(ocpdev
);
1599 DBG("%d: remove" NL
, dev
->def
->index
);
1601 ocp_set_drvdata(ocpdev
, 0);
1602 unregister_netdev(dev
->ndev
);
1604 tah_fini(dev
->tah_dev
);
1605 rgmii_fini(dev
->rgmii_dev
, dev
->rgmii_input
);
1606 zmii_fini(dev
->zmii_dev
, dev
->zmii_input
);
1608 emac_dbg_register(dev
->def
->index
, 0);
1610 mal_unregister_commac(dev
->mal
, &dev
->commac
);
1611 iounmap((void *)dev
->emacp
);
1615 static struct mal_commac_ops emac_commac_ops
= {
1616 .poll_tx
= &emac_poll_tx
,
1617 .poll_rx
= &emac_poll_rx
,
1618 .peek_rx
= &emac_peek_rx
,
1622 static struct mal_commac_ops emac_commac_sg_ops
= {
1623 .poll_tx
= &emac_poll_tx
,
1624 .poll_rx
= &emac_poll_rx
,
1625 .peek_rx
= &emac_peek_rx_sg
,
1629 /* Ethtool support */
1630 static int emac_ethtool_get_settings(struct net_device
*ndev
,
1631 struct ethtool_cmd
*cmd
)
1633 struct ocp_enet_private
*dev
= ndev
->priv
;
1635 cmd
->supported
= dev
->phy
.features
;
1636 cmd
->port
= PORT_MII
;
1637 cmd
->phy_address
= dev
->phy
.address
;
1639 dev
->phy
.address
>= 0 ? XCVR_EXTERNAL
: XCVR_INTERNAL
;
1642 cmd
->advertising
= dev
->phy
.advertising
;
1643 cmd
->autoneg
= dev
->phy
.autoneg
;
1644 cmd
->speed
= dev
->phy
.speed
;
1645 cmd
->duplex
= dev
->phy
.duplex
;
1651 static int emac_ethtool_set_settings(struct net_device
*ndev
,
1652 struct ethtool_cmd
*cmd
)
1654 struct ocp_enet_private
*dev
= ndev
->priv
;
1655 u32 f
= dev
->phy
.features
;
1657 DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL
, dev
->def
->index
,
1658 cmd
->autoneg
, cmd
->speed
, cmd
->duplex
, cmd
->advertising
);
1660 /* Basic sanity checks */
1661 if (dev
->phy
.address
< 0)
1663 if (cmd
->autoneg
!= AUTONEG_ENABLE
&& cmd
->autoneg
!= AUTONEG_DISABLE
)
1665 if (cmd
->autoneg
== AUTONEG_ENABLE
&& cmd
->advertising
== 0)
1667 if (cmd
->duplex
!= DUPLEX_HALF
&& cmd
->duplex
!= DUPLEX_FULL
)
1670 if (cmd
->autoneg
== AUTONEG_DISABLE
) {
1671 switch (cmd
->speed
) {
1673 if (cmd
->duplex
== DUPLEX_HALF
1674 && !(f
& SUPPORTED_10baseT_Half
))
1676 if (cmd
->duplex
== DUPLEX_FULL
1677 && !(f
& SUPPORTED_10baseT_Full
))
1681 if (cmd
->duplex
== DUPLEX_HALF
1682 && !(f
& SUPPORTED_100baseT_Half
))
1684 if (cmd
->duplex
== DUPLEX_FULL
1685 && !(f
& SUPPORTED_100baseT_Full
))
1689 if (cmd
->duplex
== DUPLEX_HALF
1690 && !(f
& SUPPORTED_1000baseT_Half
))
1692 if (cmd
->duplex
== DUPLEX_FULL
1693 && !(f
& SUPPORTED_1000baseT_Full
))
1701 dev
->phy
.def
->ops
->setup_forced(&dev
->phy
, cmd
->speed
,
1705 if (!(f
& SUPPORTED_Autoneg
))
1709 dev
->phy
.def
->ops
->setup_aneg(&dev
->phy
,
1710 (cmd
->advertising
& f
) |
1711 (dev
->phy
.advertising
&
1713 ADVERTISED_Asym_Pause
)));
1715 emac_force_link_update(dev
);
1721 static void emac_ethtool_get_ringparam(struct net_device
*ndev
,
1722 struct ethtool_ringparam
*rp
)
1724 rp
->rx_max_pending
= rp
->rx_pending
= NUM_RX_BUFF
;
1725 rp
->tx_max_pending
= rp
->tx_pending
= NUM_TX_BUFF
;
1728 static void emac_ethtool_get_pauseparam(struct net_device
*ndev
,
1729 struct ethtool_pauseparam
*pp
)
1731 struct ocp_enet_private
*dev
= ndev
->priv
;
1734 if ((dev
->phy
.features
& SUPPORTED_Autoneg
) &&
1735 (dev
->phy
.advertising
& (ADVERTISED_Pause
| ADVERTISED_Asym_Pause
)))
1738 if (dev
->phy
.duplex
== DUPLEX_FULL
) {
1740 pp
->rx_pause
= pp
->tx_pause
= 1;
1741 else if (dev
->phy
.asym_pause
)
1747 static u32
emac_ethtool_get_rx_csum(struct net_device
*ndev
)
1749 struct ocp_enet_private
*dev
= ndev
->priv
;
1750 return dev
->tah_dev
!= 0;
1753 static int emac_get_regs_len(struct ocp_enet_private
*dev
)
1755 return sizeof(struct emac_ethtool_regs_subhdr
) + EMAC_ETHTOOL_REGS_SIZE
;
1758 static int emac_ethtool_get_regs_len(struct net_device
*ndev
)
1760 struct ocp_enet_private
*dev
= ndev
->priv
;
1761 return sizeof(struct emac_ethtool_regs_hdr
) +
1762 emac_get_regs_len(dev
) + mal_get_regs_len(dev
->mal
) +
1763 zmii_get_regs_len(dev
->zmii_dev
) +
1764 rgmii_get_regs_len(dev
->rgmii_dev
) +
1765 tah_get_regs_len(dev
->tah_dev
);
1768 static void *emac_dump_regs(struct ocp_enet_private
*dev
, void *buf
)
1770 struct emac_ethtool_regs_subhdr
*hdr
= buf
;
1772 hdr
->version
= EMAC_ETHTOOL_REGS_VER
;
1773 hdr
->index
= dev
->def
->index
;
1774 memcpy_fromio(hdr
+ 1, dev
->emacp
, EMAC_ETHTOOL_REGS_SIZE
);
1775 return ((void *)(hdr
+ 1) + EMAC_ETHTOOL_REGS_SIZE
);
1778 static void emac_ethtool_get_regs(struct net_device
*ndev
,
1779 struct ethtool_regs
*regs
, void *buf
)
1781 struct ocp_enet_private
*dev
= ndev
->priv
;
1782 struct emac_ethtool_regs_hdr
*hdr
= buf
;
1784 hdr
->components
= 0;
1787 local_irq_disable();
1788 buf
= mal_dump_regs(dev
->mal
, buf
);
1789 buf
= emac_dump_regs(dev
, buf
);
1790 if (dev
->zmii_dev
) {
1791 hdr
->components
|= EMAC_ETHTOOL_REGS_ZMII
;
1792 buf
= zmii_dump_regs(dev
->zmii_dev
, buf
);
1794 if (dev
->rgmii_dev
) {
1795 hdr
->components
|= EMAC_ETHTOOL_REGS_RGMII
;
1796 buf
= rgmii_dump_regs(dev
->rgmii_dev
, buf
);
1799 hdr
->components
|= EMAC_ETHTOOL_REGS_TAH
;
1800 buf
= tah_dump_regs(dev
->tah_dev
, buf
);
1805 static int emac_ethtool_nway_reset(struct net_device
*ndev
)
1807 struct ocp_enet_private
*dev
= ndev
->priv
;
1810 DBG("%d: nway_reset" NL
, dev
->def
->index
);
1812 if (dev
->phy
.address
< 0)
1816 if (!dev
->phy
.autoneg
) {
1821 dev
->phy
.def
->ops
->setup_aneg(&dev
->phy
, dev
->phy
.advertising
);
1822 emac_force_link_update(dev
);
1829 static int emac_ethtool_get_stats_count(struct net_device
*ndev
)
1831 return EMAC_ETHTOOL_STATS_COUNT
;
1834 static void emac_ethtool_get_strings(struct net_device
*ndev
, u32 stringset
,
1837 if (stringset
== ETH_SS_STATS
)
1838 memcpy(buf
, &emac_stats_keys
, sizeof(emac_stats_keys
));
1841 static void emac_ethtool_get_ethtool_stats(struct net_device
*ndev
,
1842 struct ethtool_stats
*estats
,
1845 struct ocp_enet_private
*dev
= ndev
->priv
;
1846 local_irq_disable();
1847 memcpy(tmp_stats
, &dev
->stats
, sizeof(dev
->stats
));
1848 tmp_stats
+= sizeof(dev
->stats
) / sizeof(u64
);
1849 memcpy(tmp_stats
, &dev
->estats
, sizeof(dev
->estats
));
1853 static void emac_ethtool_get_drvinfo(struct net_device
*ndev
,
1854 struct ethtool_drvinfo
*info
)
1856 struct ocp_enet_private
*dev
= ndev
->priv
;
1858 strcpy(info
->driver
, "ibm_emac");
1859 strcpy(info
->version
, DRV_VERSION
);
1860 info
->fw_version
[0] = '\0';
1861 sprintf(info
->bus_info
, "PPC 4xx EMAC %d", dev
->def
->index
);
1862 info
->n_stats
= emac_ethtool_get_stats_count(ndev
);
1863 info
->regdump_len
= emac_ethtool_get_regs_len(ndev
);
1866 static struct ethtool_ops emac_ethtool_ops
= {
1867 .get_settings
= emac_ethtool_get_settings
,
1868 .set_settings
= emac_ethtool_set_settings
,
1869 .get_drvinfo
= emac_ethtool_get_drvinfo
,
1871 .get_regs_len
= emac_ethtool_get_regs_len
,
1872 .get_regs
= emac_ethtool_get_regs
,
1874 .nway_reset
= emac_ethtool_nway_reset
,
1876 .get_ringparam
= emac_ethtool_get_ringparam
,
1877 .get_pauseparam
= emac_ethtool_get_pauseparam
,
1879 .get_rx_csum
= emac_ethtool_get_rx_csum
,
1881 .get_strings
= emac_ethtool_get_strings
,
1882 .get_stats_count
= emac_ethtool_get_stats_count
,
1883 .get_ethtool_stats
= emac_ethtool_get_ethtool_stats
,
1885 .get_link
= ethtool_op_get_link
,
1886 .get_tx_csum
= ethtool_op_get_tx_csum
,
1887 .get_sg
= ethtool_op_get_sg
,
1890 static int emac_ioctl(struct net_device
*ndev
, struct ifreq
*rq
, int cmd
)
1892 struct ocp_enet_private
*dev
= ndev
->priv
;
1893 uint16_t *data
= (uint16_t *) & rq
->ifr_ifru
;
1895 DBG("%d: ioctl %08x" NL
, dev
->def
->index
, cmd
);
1897 if (dev
->phy
.address
< 0)
1902 case SIOCDEVPRIVATE
:
1903 data
[0] = dev
->phy
.address
;
1906 case SIOCDEVPRIVATE
+ 1:
1907 data
[3] = emac_mdio_read(ndev
, dev
->phy
.address
, data
[1]);
1911 case SIOCDEVPRIVATE
+ 2:
1912 if (!capable(CAP_NET_ADMIN
))
1914 emac_mdio_write(ndev
, dev
->phy
.address
, data
[1], data
[2]);
1921 static int __init
emac_probe(struct ocp_device
*ocpdev
)
1923 struct ocp_func_emac_data
*emacdata
= ocpdev
->def
->additions
;
1924 struct net_device
*ndev
;
1925 struct ocp_device
*maldev
;
1926 struct ocp_enet_private
*dev
;
1929 DBG("%d: probe" NL
, ocpdev
->def
->index
);
1932 printk(KERN_ERR
"emac%d: Missing additional data!\n",
1933 ocpdev
->def
->index
);
1937 /* Allocate our net_device structure */
1938 ndev
= alloc_etherdev(sizeof(struct ocp_enet_private
));
1940 printk(KERN_ERR
"emac%d: could not allocate ethernet device!\n",
1941 ocpdev
->def
->index
);
1946 dev
->ldev
= &ocpdev
->dev
;
1947 dev
->def
= ocpdev
->def
;
1948 SET_MODULE_OWNER(ndev
);
1950 /* Find MAL device we are connected to */
1952 ocp_find_device(OCP_VENDOR_IBM
, OCP_FUNC_MAL
, emacdata
->mal_idx
);
1954 printk(KERN_ERR
"emac%d: unknown mal%d device!\n",
1955 dev
->def
->index
, emacdata
->mal_idx
);
1959 dev
->mal
= ocp_get_drvdata(maldev
);
1961 printk(KERN_ERR
"emac%d: mal%d hasn't been initialized yet!\n",
1962 dev
->def
->index
, emacdata
->mal_idx
);
1967 /* Register with MAL */
1968 dev
->commac
.ops
= &emac_commac_ops
;
1969 dev
->commac
.dev
= dev
;
1970 dev
->commac
.tx_chan_mask
= MAL_CHAN_MASK(emacdata
->mal_tx_chan
);
1971 dev
->commac
.rx_chan_mask
= MAL_CHAN_MASK(emacdata
->mal_rx_chan
);
1972 err
= mal_register_commac(dev
->mal
, &dev
->commac
);
1974 printk(KERN_ERR
"emac%d: failed to register with mal%d!\n",
1975 dev
->def
->index
, emacdata
->mal_idx
);
1978 dev
->rx_skb_size
= emac_rx_skb_size(ndev
->mtu
);
1979 dev
->rx_sync_size
= emac_rx_sync_size(ndev
->mtu
);
1981 /* Get pointers to BD rings */
1983 dev
->mal
->bd_virt
+ mal_tx_bd_offset(dev
->mal
,
1984 emacdata
->mal_tx_chan
);
1986 dev
->mal
->bd_virt
+ mal_rx_bd_offset(dev
->mal
,
1987 emacdata
->mal_rx_chan
);
1989 DBG("%d: tx_desc %p" NL
, ocpdev
->def
->index
, dev
->tx_desc
);
1990 DBG("%d: rx_desc %p" NL
, ocpdev
->def
->index
, dev
->rx_desc
);
1993 memset(dev
->tx_desc
, 0, NUM_TX_BUFF
* sizeof(struct mal_descriptor
));
1994 memset(dev
->rx_desc
, 0, NUM_RX_BUFF
* sizeof(struct mal_descriptor
));
1996 /* If we depend on another EMAC for MDIO, check whether it was probed already */
1997 if (emacdata
->mdio_idx
>= 0 && emacdata
->mdio_idx
!= ocpdev
->def
->index
) {
1998 struct ocp_device
*mdiodev
=
1999 ocp_find_device(OCP_VENDOR_IBM
, OCP_FUNC_EMAC
,
2000 emacdata
->mdio_idx
);
2002 printk(KERN_ERR
"emac%d: unknown emac%d device!\n",
2003 dev
->def
->index
, emacdata
->mdio_idx
);
2007 dev
->mdio_dev
= ocp_get_drvdata(mdiodev
);
2008 if (!dev
->mdio_dev
) {
2010 "emac%d: emac%d hasn't been initialized yet!\n",
2011 dev
->def
->index
, emacdata
->mdio_idx
);
2017 /* Attach to ZMII, if needed */
2018 if ((err
= zmii_attach(dev
)) != 0)
2021 /* Attach to RGMII, if needed */
2022 if ((err
= rgmii_attach(dev
)) != 0)
2025 /* Attach to TAH, if needed */
2026 if ((err
= tah_attach(dev
)) != 0)
2031 (struct emac_regs
*)ioremap(dev
->def
->paddr
,
2032 sizeof(struct emac_regs
));
2034 printk(KERN_ERR
"emac%d: could not ioremap device registers!\n",
2040 /* Fill in MAC address */
2041 for (i
= 0; i
< 6; ++i
)
2042 ndev
->dev_addr
[i
] = emacdata
->mac_addr
[i
];
2044 /* Set some link defaults before we can find out real parameters */
2045 dev
->phy
.speed
= SPEED_100
;
2046 dev
->phy
.duplex
= DUPLEX_FULL
;
2047 dev
->phy
.autoneg
= AUTONEG_DISABLE
;
2048 dev
->phy
.pause
= dev
->phy
.asym_pause
= 0;
2049 init_timer(&dev
->link_timer
);
2050 dev
->link_timer
.function
= emac_link_timer
;
2051 dev
->link_timer
.data
= (unsigned long)dev
;
2053 /* Find PHY if any */
2054 dev
->phy
.dev
= ndev
;
2055 dev
->phy
.mode
= emacdata
->phy_mode
;
2056 if (emacdata
->phy_map
!= 0xffffffff) {
2057 u32 phy_map
= emacdata
->phy_map
| busy_phy_map
;
2060 DBG("%d: PHY maps %08x %08x" NL
, dev
->def
->index
,
2061 emacdata
->phy_map
, busy_phy_map
);
2063 EMAC_RX_CLK_TX(dev
->def
->index
);
2065 dev
->phy
.mdio_read
= emac_mdio_read
;
2066 dev
->phy
.mdio_write
= emac_mdio_write
;
2068 /* Configure EMAC with defaults so we can at least use MDIO
2069 * This is needed mostly for 440GX
2071 if (emac_phy_gpcs(dev
->phy
.mode
)) {
2073 * Make GPCS PHY address equal to EMAC index.
2074 * We probably should take into account busy_phy_map
2075 * and/or phy_map here.
2077 dev
->phy
.address
= dev
->def
->index
;
2080 emac_configure(dev
);
2082 for (i
= 0; i
< 0x20; phy_map
>>= 1, ++i
)
2083 if (!(phy_map
& 1)) {
2085 busy_phy_map
|= 1 << i
;
2087 /* Quick check if there is a PHY at the address */
2088 r
= emac_mdio_read(dev
->ndev
, i
, MII_BMCR
);
2089 if (r
== 0xffff || r
< 0)
2091 if (!mii_phy_probe(&dev
->phy
, i
))
2095 printk(KERN_WARNING
"emac%d: can't find PHY!\n",
2101 if (dev
->phy
.def
->ops
->init
)
2102 dev
->phy
.def
->ops
->init(&dev
->phy
);
2104 /* Disable any PHY features not supported by the platform */
2105 dev
->phy
.def
->features
&= ~emacdata
->phy_feat_exc
;
2107 /* Setup initial link parameters */
2108 if (dev
->phy
.features
& SUPPORTED_Autoneg
) {
2109 adv
= dev
->phy
.features
;
2110 #if !defined(CONFIG_40x)
2111 adv
|= ADVERTISED_Pause
| ADVERTISED_Asym_Pause
;
2113 /* Restart autonegotiation */
2114 dev
->phy
.def
->ops
->setup_aneg(&dev
->phy
, adv
);
2116 u32 f
= dev
->phy
.def
->features
;
2117 int speed
= SPEED_10
, fd
= DUPLEX_HALF
;
2119 /* Select highest supported speed/duplex */
2120 if (f
& SUPPORTED_1000baseT_Full
) {
2123 } else if (f
& SUPPORTED_1000baseT_Half
)
2125 else if (f
& SUPPORTED_100baseT_Full
) {
2128 } else if (f
& SUPPORTED_100baseT_Half
)
2130 else if (f
& SUPPORTED_10baseT_Full
)
2133 /* Force link parameters */
2134 dev
->phy
.def
->ops
->setup_forced(&dev
->phy
, speed
, fd
);
2139 /* PHY-less configuration.
2140 * XXX I probably should move these settings to emacdata
2142 dev
->phy
.address
= -1;
2143 dev
->phy
.features
= SUPPORTED_100baseT_Full
| SUPPORTED_MII
;
2147 /* Fill in the driver function table */
2148 ndev
->open
= &emac_open
;
2150 ndev
->hard_start_xmit
= &emac_start_xmit_sg
;
2151 ndev
->features
|= NETIF_F_IP_CSUM
| NETIF_F_SG
;
2153 ndev
->hard_start_xmit
= &emac_start_xmit
;
2154 ndev
->tx_timeout
= &emac_full_tx_reset
;
2155 ndev
->watchdog_timeo
= 5 * HZ
;
2156 ndev
->stop
= &emac_close
;
2157 ndev
->get_stats
= &emac_stats
;
2158 ndev
->set_multicast_list
= &emac_set_multicast_list
;
2159 ndev
->do_ioctl
= &emac_ioctl
;
2160 if (emac_phy_supports_gige(emacdata
->phy_mode
)) {
2161 ndev
->change_mtu
= &emac_change_mtu
;
2162 dev
->commac
.ops
= &emac_commac_sg_ops
;
2164 SET_ETHTOOL_OPS(ndev
, &emac_ethtool_ops
);
2166 netif_carrier_off(ndev
);
2167 netif_stop_queue(ndev
);
2169 err
= register_netdev(ndev
);
2171 printk(KERN_ERR
"emac%d: failed to register net device (%d)!\n",
2172 dev
->def
->index
, err
);
2176 ocp_set_drvdata(ocpdev
, dev
);
2178 printk("%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2179 ndev
->name
, dev
->def
->index
,
2180 ndev
->dev_addr
[0], ndev
->dev_addr
[1], ndev
->dev_addr
[2],
2181 ndev
->dev_addr
[3], ndev
->dev_addr
[4], ndev
->dev_addr
[5]);
2183 if (dev
->phy
.address
>= 0)
2184 printk("%s: found %s PHY (0x%02x)\n", ndev
->name
,
2185 dev
->phy
.def
->name
, dev
->phy
.address
);
2187 emac_dbg_register(dev
->def
->index
, dev
);
2191 iounmap((void *)dev
->emacp
);
2193 tah_fini(dev
->tah_dev
);
2195 rgmii_fini(dev
->rgmii_dev
, dev
->rgmii_input
);
2197 zmii_fini(dev
->zmii_dev
, dev
->zmii_input
);
2199 mal_unregister_commac(dev
->mal
, &dev
->commac
);
2205 static struct ocp_device_id emac_ids
[] = {
2206 { .vendor
= OCP_VENDOR_IBM
, .function
= OCP_FUNC_EMAC
},
2207 { .vendor
= OCP_VENDOR_INVALID
}
2210 static struct ocp_driver emac_driver
= {
2212 .id_table
= emac_ids
,
2213 .probe
= emac_probe
,
2214 .remove
= emac_remove
,
2217 static int __init
emac_init(void)
2219 printk(KERN_INFO DRV_DESC
", version " DRV_VERSION
"\n");
2227 if (ocp_register_driver(&emac_driver
)) {
2229 ocp_unregister_driver(&emac_driver
);
2239 static void __exit
emac_exit(void)
2242 ocp_unregister_driver(&emac_driver
);
2247 module_init(emac_init
);
2248 module_exit(emac_exit
);