/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/net_tstamp.h>

#include <asm/mpc85xx.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#define TX_TIMEOUT      (1*HZ)
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll_rx(struct napi_struct *napi, int budget);
static int gfar_poll_tx(struct napi_struct *napi, int budget);
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
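
/* Set up one Rx buffer descriptor: mark it empty and interrupt-generating,
 * and set the WRAP flag on the last descriptor of the ring.
 */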
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}
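
/* (Re)initialize the BD rings: reset the software ring state for every
 * Tx/Rx queue and make sure each Rx descriptor has an skb attached,
 * allocating new ones where needed.
 */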
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					return -ENOMEM;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}

	}

	return 0;
}
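
/* Allocate a single DMA-coherent region holding all Tx and Rx BD rings
 * back to back, plus the per-queue skb pointer arrays, then hand the
 * rings to gfar_init_bds().
 */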
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff =
			kmalloc_array(rx_queue->rx_ring_size,
				      sizeof(*rx_queue->rx_skbuff),
				      GFP_KERNEL);
		if (!rx_queue->rx_skbuff)
			goto cleanup;

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}
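
/* Program the TBASEn/RBASEn registers with the DMA base address of each
 * queue's descriptor ring, so the controller knows where to fetch BDs.
 */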
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}
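
/* Derive the Rx buffer size from the MTU plus the headers in use,
 * rounded up to the next INCREMENTAL_BUFFER_SIZE boundary.  For example,
 * assuming INCREMENTAL_BUFFER_SIZE is 512: an MTU of 1500 gives
 * 1500 + 14 (ETH_HLEN) = 1514 bytes, which rounds up to 1536.
 */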
static void gfar_rx_buff_size_config(struct gfar_private *priv)
{
	int frame_size = priv->ndev->mtu + ETH_HLEN;

	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en)
		priv->uses_rxfcb = 1;

	if (priv->uses_rxfcb)
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
		     INCREMENTAL_BUFFER_SIZE;

	priv->rx_buffer_size = frame_size;
}
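
/* Build up RCTRL from the current settings (filer, promiscuous mode,
 * checksum offload, extended hash, padding, timestamping, VLAN
 * extraction) and write it to the hardware in one go.
 */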
static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		if (priv->poll_mode == GFAR_SQ_POLLING)
			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
		else /* GFAR_MQ_POLLING */
			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}
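
/* Build up TCTRL: Tx checksum offload, priority vs. weighted round-robin
 * queue scheduling, and VLAN tag insertion.
 */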
static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}
static void gfar_configure_coalescing(struct gfar_private *priv,
				      unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}
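
/* Aggregate the per-queue software counters into the netdev stats */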
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes   += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes   = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}
static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};
static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}
void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}
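
/* Allocate and minimally initialize the per-queue control structures */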
static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}
static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}
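
/* Parse one interrupt group node: map its registers, pick up its IRQs,
 * and work out from the bit maps which Rx/Tx queues it services.
 */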
static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
		    gfar_irq(grp, RX)->irq == NO_IRQ ||
		    gfar_irq(grp, ER)->irq == NO_IRQ)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		u32 *rxq_mask, *txq_mask;

		rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
		txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);

		if (priv->poll_mode == GFAR_SQ_POLLING) {
			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		} else { /* GFAR_MQ_POLLING */
			grp->rx_bit_map = rxq_mask ?
			*rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = txq_mask ?
			*txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
	 * right to left, so we need to revert the 8 bits to get the q index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		if (!grp->rx_queue)
			grp->rx_queue = priv->rx_queue[i];
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		if (!grp->tx_queue)
			grp->tx_queue = priv->tx_queue[i];
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}
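
/* Read the device tree node: determine the operating and polling modes
 * and the queue counts, allocate the net_device, and pull in the MAC
 * address, stashing and PHY properties.
 */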
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;
	unsigned short mode, poll_mode;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	if (of_device_is_compatible(np, "fsl,etsec2")) {
		mode = MQ_MG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	} else {
		mode = SQ_SG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	}

	/* parse the num of HW tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);

	if (mode == SQ_SG_MODE) {
		num_tx_qs = 1;
		num_rx_qs = 1;
	} else { /* MQ_MG_MODE */
		/* get the actual number of supported groups */
		unsigned int num_grps = of_get_available_child_count(np);

		if (num_grps == 0 || num_grps > MAXGROUPS) {
			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
				num_grps);
			pr_err("Cannot do alloc_etherdev, aborting\n");
			return -EINVAL;
		}

		if (poll_mode == GFAR_SQ_POLLING) {
			num_tx_qs = num_grps; /* one txq per int group */
			num_rx_qs = num_grps; /* one rxq per int group */
		} else { /* GFAR_MQ_POLLING */
			num_tx_qs = tx_queues ? *tx_queues : 1;
			num_rx_qs = rx_queues ? *rx_queues : 1;
		}
	}

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->mode = mode;
	priv->poll_mode = poll_mode;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (priv->mode == MQ_MG_MODE) {
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else { /* SQ_SG_MODE */
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				      FSL_GIANFAR_DEV_HAS_CSUM |
				      FSL_GIANFAR_DEV_HAS_VLAN |
				      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				      FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id.  The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	return err;
}
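
/* SIOCSHWTSTAMP: turn hardware Tx/Rx time stamping on or off */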
static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			priv->hwts_rx_en = 0;
			reset_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			priv->hwts_rx_en = 1;
			reset_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	config.flags = 0;
	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (priv->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_set(dev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return gfar_hwtstamp_get(dev, rq);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}
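
/* Populate the Rx filer: a default match rule at the top, one rule
 * cluster per IPv4/IPv6 x TCP/UDP traffic class below it, and no-match
 * rules in all remaining slots.
 */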
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}
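
/* Detect silicon revisions that need software workarounds, keyed off
 * the PVR and SVR registers.
 */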
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}
void gfar_mac_reset(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(3);

	/* the soft reset bit is not self-resetting, so we need to
	 * clear it before resuming normal operation
	 */
	gfar_write(&regs->maccfg1, 0);

	udelay(3);

	/* Compute rx_buff_size based on config flags */
	gfar_rx_buff_size_config(priv);

	/* Initialize the max receive frame/buffer lengths */
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length
	 */
	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
	    gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;

	gfar_write(&regs->maccfg2, tempval);

	/* Clear mac addr hash registers */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	if (priv->extended_hash)
		gfar_clear_exact_match(priv->ndev);

	gfar_mac_rx_config(priv);

	gfar_mac_tx_config(priv);

	gfar_set_mac_address(priv->ndev);

	gfar_set_multi(priv->ndev);

	/* clear ievent and imask before configuring coalescing */
	gfar_ints_disable(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing_all(priv);
}
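
/* One-time hardware setup at probe time: halt the DMA engine, reset the
 * MAC, clear the RMON MIB counters and program the stashing, FIFO and
 * interrupt steering defaults.
 */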
static void gfar_hw_init(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 attrs;

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 */
	gfar_halt(priv);

	gfar_mac_reset(priv);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing
	 * depending on driver parameters
	 */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	/* FIFO configs */
	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);

	/* Program the interrupt steering regs, only for MG devices */
	if (priv->num_grps > 1)
		gfar_write_isrg(priv);
}
static void __init gfar_init_addr_hash_table(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;
	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}
}
/* Set up the ethernet device structure, private data,
 * and anything else we need before we start
 */
static int gfar_probe(struct platform_device *ofdev)
{
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	int err = 0, i;

	err = gfar_of_init(ofdev, &dev);
	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->dev = &ofdev->dev;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	platform_set_drvdata(ofdev, priv);

	gfar_detect_errata(priv);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi ...We are registering NAPI for each grp */
	for (i = 0; i < priv->num_grps; i++) {
		if (priv->poll_mode == GFAR_SQ_POLLING) {
			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
				       gfar_poll_tx_sq, 2);
		} else { /* GFAR_MQ_POLLING */
			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
				       gfar_poll_rx, GFAR_DEV_WEIGHT);
			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
				       gfar_poll_tx, 2);
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
				   NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_CTAG_RX;
		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	gfar_init_addr_hash_table(priv);

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		priv->padding = 8;

	if (dev->features & NETIF_F_IP_CSUM ||
	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->needed_headroom = GMAC_FCB_LEN;

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* always enable rx filer */
	priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
	if (priv->num_tx_queues == 1)
		priv->prio_sched_en = 1;

	set_bit(GFAR_DOWN, &priv->state);

	gfar_hw_init(priv);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	device_init_wakeup(&dev->dev,
			   priv->device_flags &
			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar_priv_grp *grp = &priv->gfargrp[i];
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_tx");
			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_rx");
			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_er");
		} else
			strcpy(gfar_irq(grp, TX)->name, dev->name);
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks.
	 */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_gfar_dev(priv);

	return err;
}
static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = platform_get_drvdata(ofdev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);

	return 0;
}
#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
			   (priv->device_flags &
			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {

		local_irq_save(flags);
		lock_tx_qs(priv);

		gfar_halt_nodisable(priv);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}
static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
			   (priv->device_flags &
			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(priv);

	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);

		return 0;
	}

	if (gfar_init_bds(ndev)) {
		free_skb_resources(priv);
		return -ENOMEM;
	}

	gfar_mac_reset(priv);

	gfar_init_tx_rx_base(priv);

	gfar_start(priv);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif
/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
			return PHY_INTERFACE_MODE_RMII;
		}
		else {
			phy_interface_t interface = priv->interface;

			/* This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}
/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		GFAR_SUPPORTED_GBIT : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
							 interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	return 0;
}
/* Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
				    "device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/* If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off(for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
		  ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR,
		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
		  BMCR_SPEED1000);
}
static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/* Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
		return 0;

	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}
/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	gfar_ints_disable(priv);

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
	    (DMACTRL_GRS | DMACTRL_GTS)) {
		int ret;

		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&regs->dmactrl, tempval);

		do {
			ret = spin_event_timeout(((gfar_read(&regs->ievent) &
				 (IEVENT_GRSC | IEVENT_GTSC)) ==
				 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
			if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
				ret = __gfar_is_rx_idle(priv);
		} while (!ret);
	}
}
/* Halt the receive and transmit queues */
void gfar_halt(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Disable the Rx/Tx hw queues */
	gfar_write(&regs->rqueue, 0);
	gfar_write(&regs->tqueue, 0);

	mdelay(10);

	gfar_halt_nodisable(priv);

	/* Disable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}
void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);

	smp_mb__before_atomic();
	set_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_atomic();

	disable_napi(priv);

	/* disable ints and gracefully shut down Rx/Tx DMA */
	gfar_halt(priv);

	phy_stop(priv->phydev);

	free_skb_resources(priv);
}
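
/* Unmap and free every skb still attached to a Tx BD, then release the
 * skb pointer array itself.
 */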
static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(priv->dev, txbdp->bufPtr,
				 txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(priv->dev, txbdp->bufPtr,
				       txbdp->length, DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
	tx_queue->tx_skbuff = NULL;
}
static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;

	rxbdp = rx_queue->rx_bd_base;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(priv->dev, rxbdp->bufPtr,
					 priv->rx_buffer_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}
	kfree(rx_queue->rx_skbuff);
	rx_queue->rx_skbuff = NULL;
}
/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff
 */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		struct netdev_queue *txq;

		tx_queue = priv->tx_queue[i];
		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
		netdev_tx_reset_queue(txq);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(priv->dev,
			  sizeof(struct txbd8) * priv->total_tx_ring_size +
			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
			  priv->tx_queue[0]->tx_bd_base,
			  priv->tx_queue[0]->tx_bd_dma_base);
}
void gfar_start(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx/Tx hw queues */
	gfar_write(&regs->rqueue, priv->rqueue);
	gfar_write(&regs->tqueue, priv->tqueue);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
	}

	/* Enable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	gfar_ints_enable(priv);

	priv->ndev->trans_start = jiffies; /* prevent tx timeout */
}
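
/* Release a group's Tx, Rx and Error IRQs */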
static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(gfar_irq(grp, TX)->irq, grp);
	free_irq(gfar_irq(grp, RX)->irq, grp);
	free_irq(gfar_irq(grp, ER)->irq, grp);
}
static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one
	 */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive
		 */
		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
				  gfar_irq(grp, ER)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, ER)->irq);

			goto err_irq_fail;
		}
		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto tx_irq_fail;
		}
		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
				  gfar_irq(grp, RX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, RX)->irq);
			goto rx_irq_fail;
		}
	} else {
		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto err_irq_fail;
		}
	}

	return 0;

rx_irq_fail:
	free_irq(gfar_irq(grp, TX)->irq, grp);
tx_irq_fail:
	free_irq(gfar_irq(grp, ER)->irq, grp);
err_irq_fail:
	return err;

}
static void gfar_free_irq(struct gfar_private *priv)
{
	int i;

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
				 &priv->gfargrp[i]);
	}
}
static int gfar_request_irq(struct gfar_private *priv)
{
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			return err;
		}
	}

	return 0;
}
/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	int err;

	gfar_mac_reset(priv);

	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_tx_rx_base(priv);

	smp_mb__before_atomic();
	clear_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_atomic();

	/* Start Rx/Tx DMA and enable the interrupts */
	gfar_start(priv);

	phy_start(priv->phydev);

	enable_napi(priv);

	netif_tx_wake_all_queues(ndev);

	return 0;
}
/* Called when something needs to use the ethernet device
 * Returns 0 for success.
 */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	err = init_phy(dev);
	if (err)
		return err;

	err = gfar_request_irq(priv);
	if (err)
		return err;

	err = startup_gfar(dev);
	if (err)
		return err;

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}
static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
				    int fcb_length)
{
	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	u8 flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is
	 * And provide the already calculated phcs
	 */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr
	 */
	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}
void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
				      struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}
/* eTSEC12: csum generation not supported for some fcb offsets */
static inline bool gfar_csum_errata_12(struct gfar_private *priv,
				       unsigned long fcb_addr)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
	       (fcb_addr % 0x20) > 0x18);
}

/* eTSEC76: csum generation for frames larger than 2500 may
 * cause excess delays before start of transmission
 */
static inline bool gfar_csum_errata_76(struct gfar_private *priv,
				       unsigned int len)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
	       (len > 2500));
}
/* This is called by the kernel when a frame is ready for transmission.
 * It is pointed to by the dev->hard_start_xmit function pointer
 */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
	u32 lstatus;
	int i, rq = 0;
	int do_tstamp, do_csum, do_vlan;
	u32 bufaddr;
	unsigned long flags;
	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;

	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
	do_vlan = vlan_tx_tag_present(skb);
	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		    priv->hwts_tx_en;

	if (do_csum || do_vlan)
		fcb_len = GMAC_FCB_LEN;

	/* check if time stamp should be generated */
	if (unlikely(do_tstamp))
		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;

	/* make space for additional header when fcb is needed */
	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, fcb_len);
		if (!skb_new) {
			dev->stats.tx_errors++;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (skb->sk)
			skb_set_owner_w(skb_new, skb->sk);
		dev_consume_skb_any(skb);
		skb = skb_new;
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	bytes_sent = skb->len;
	tx_queue->stats.tx_bytes += bytes_sent;
	/* keep Tx bytes on wire for BQL accounting */
	GFAR_CB(skb)->bytes_sent = bytes_sent;
	tx_queue->stats.tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = txbdp->lstatus;

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
						 tx_queue->tx_ring_size);

	if (nr_frags == 0) {
		if (unlikely(do_tstamp))
			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
							  TXBD_INTERRUPT);
		else
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			unsigned int frag_len;
			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			frag_len = skb_shinfo(skb)->frags[i].size;

			lstatus = txbdp->lstatus | frag_len |
				  BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = skb_frag_dma_map(priv->dev,
						   &skb_shinfo(skb)->frags[i],
						   0, frag_len, DMA_TO_DEVICE);

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = bufaddr;
			txbdp->lstatus = lstatus;
		}

		lstatus = txbdp_start->lstatus;
	}

	/* Add TxPAL between FCB and frame if required */
	if (unlikely(do_tstamp)) {
		skb_push(skb, GMAC_TXPAL_LEN);
		memset(skb->data, 0, GMAC_TXPAL_LEN);
	}

	/* Add TxFCB if required */
	if (fcb_len) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	/* Set up checksumming */
	if (do_csum) {
		gfar_tx_checksum(skb, fcb, fcb_len);

		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
			__skb_pull(skb, GMAC_FCB_LEN);
			skb_checksum_help(skb);
			if (do_vlan || do_tstamp) {
				/* put back a new fcb for vlan/tstamp TOE */
				fcb = gfar_add_fcb(skb);
			} else {
				/* Tx TOE not used */
				lstatus &= ~(BD_LFLAG(TXBD_TOE));
				fcb = NULL;
			}
		}
	}

	if (do_vlan)
		gfar_tx_vlan(skb, fcb);

	/* Setup tx hardware time stamping if requested */
	if (unlikely(do_tstamp)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		fcb->ptp = 1;
	}

	txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
					     skb_headlen(skb), DMA_TO_DEVICE);

	/* If time stamping is requested one additional TxBD must be set up. The
	 * first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
	 * the full frame length.
	 */
	if (unlikely(do_tstamp)) {
		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
					 (skb_headlen(skb) - fcb_len);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}

	netdev_tx_sent_queue(txq, bytes_sent);

	/* We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree. Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow,
	 * and once we've got needed space, it cannot suddenly disappear.
	 *
	 * The lock also protects us from gfar_error(), which can modify
	 * regs->tstat and thus retrigger the transfers, which is why we
	 * also must grab the lock before setting ready bit for the first
	 * to be transmitted BD.
	 */
	spin_lock_irqsave(&tx_queue->txlock, flags);

	/* The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction.  At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio();

	txbdp_start->lstatus = lstatus;

	eieio(); /* force lstatus write before tx_skbuff */

	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary)
	 */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_txbds);

	/* If the next BD still needs to be cleaned up, then the bds
	 * are full.  We need to tell the kernel to stop sending us stuff.
	 */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	return NETDEV_TX_OK;
}
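/* Illustrative sketch of the BD accounting used above: a frame consumes
 * one TxBD for the linear part, one per page fragment, and one more
 * when a hardware Tx timestamp is requested (the first BD then covers
 * only the FCB and the extra BD carries the frame data). A hypothetical
 * standalone restatement:
 */
static inline unsigned int gfar_example_txbds_needed(unsigned int nr_frags,
						     bool do_tstamp)
{
	return nr_frags + (do_tstamp ? 2 : 1);
}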
/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	gfar_free_irq(priv);

	return 0;
}
/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gfar_private *priv = netdev_priv(dev);
	int frame_size = new_mtu + ETH_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		netif_err(priv, drv, dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	dev->mtu = new_mtu;

	if (dev->flags & IFF_UP)
		startup_gfar(dev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return 0;
}
void reset_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	stop_gfar(ndev);
	startup_gfar(ndev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);
}
/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);
	reset_gfar(priv->ndev);
}
static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}
static void gfar_align_skb(struct sk_buff *skb)
{
	/* We need the data buffer to be aligned properly.  We will
	 * reserve as many bytes as needed to align the data properly.
	 */
	skb_reserve(skb, RXBUF_ALIGNMENT -
		    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
}
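/* Worked example (illustrative): with RXBUF_ALIGNMENT == 64 and
 * skb->data ending in 0x28, skb_reserve() above skips
 * 64 - 0x28 = 0x18 bytes, moving data to the next 64-byte boundary.
 * Note that already-aligned data is moved by a full RXBUF_ALIGNMENT,
 * which is why gfar_alloc_skb() over-allocates by that amount. The
 * hypothetical helper below computes the same pad:
 */
static inline unsigned long gfar_example_align_pad(unsigned long data)
{
	/* bytes to reserve so (data + pad) is RXBUF_ALIGNMENT-aligned */
	return RXBUF_ALIGNMENT - (data & (RXBUF_ALIGNMENT - 1));
}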
/* Interrupt Handler for Transmit complete */
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct netdev_queue *txq;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;
	u32 lstatus;
	size_t buflen;

	txq = netdev_get_tx_queue(dev, tqi);
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		unsigned long flags;

		frags = skb_shinfo(skb)->nr_frags;

		/* When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = lbdp->lstatus;

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
		    (lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
		} else
			buflen = bdp->length;

		dma_unmap_single(priv->dev, bdp->bufPtr,
				 buflen, DMA_TO_DEVICE);

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
			skb_tstamp_tx(skb, &shhwtstamps);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next;
		}

		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(priv->dev, bdp->bufPtr,
				       bdp->length, DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		bytes_sent += GFAR_CB(skb)->bytes_sent;

		dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			      TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock_irqsave(&tx_queue->txlock, flags);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (tx_queue->num_txbdfree &&
	    netif_tx_queue_stopped(txq) &&
	    !(test_bit(GFAR_DOWN, &priv->state)))
		netif_wake_subqueue(priv->ndev, tqi);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	netdev_tx_completed_queue(txq, howmany, bytes_sent);
}
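/* Illustrative note on the completion test in gfar_clean_tx_ring(): a
 * descriptor whose TXBD_READY bit is still set together with a non-zero
 * length field is still owned by the controller, so the cleanup loop
 * stops there. A hypothetical restatement of the predicate:
 */
static inline bool gfar_example_tx_done(u32 lstatus)
{
	return !((lstatus & BD_LFLAG(TXBD_READY)) &&
		 (lstatus & BD_LENGTH_MASK));
}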
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb)
{
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	dma_addr_t buf;

	buf = dma_map_single(priv->dev, skb->data,
			     priv->rx_buffer_size, DMA_FROM_DEVICE);
	gfar_init_rxbdp(rx_queue, bdp, buf);
}
static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
	if (!skb)
		return NULL;

	gfar_align_skb(skb);

	return skb;
}
struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	return gfar_alloc_skb(dev);
}
static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		atomic64_inc(&estats->rx_trunc);

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			atomic64_inc(&estats->rx_large);
		else
			atomic64_inc(&estats->rx_short);
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		atomic64_inc(&estats->rx_nonoctet);
	}
	if (status & RXBD_CRCERR) {
		atomic64_inc(&estats->rx_crcerr);
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		atomic64_inc(&estats->rx_overrun);
		stats->rx_crc_errors++;
	}
}
irqreturn_t gfar_receive(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask;

	if (likely(napi_schedule_prep(&grp->napi_rx))) {
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_RX_DISABLED;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_rx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
	}

	return IRQ_HANDLED;
}
/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask;

	if (likely(napi_schedule_prep(&grp->napi_tx))) {
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_TX_DISABLED;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_tx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
	}

	return IRQ_HANDLED;
}
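/* Illustrative sketch of the pattern shared by gfar_receive() and
 * gfar_transmit(): when napi_schedule_prep() wins, the handler masks
 * the corresponding event sources in IMASK (under grplock) and defers
 * the work to NAPI; the poll routine re-enables them on completion.
 * The hypothetical helper below shows only the mask arithmetic:
 */
static inline u32 gfar_example_mask_rx(u32 imask)
{
	/* IMASK_RX_DISABLED has the Rx event bits cleared, so the AND
	 * removes the Rx sources while leaving the others alone.
	 */
	return imask & IMASK_RX_DISABLED;
}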
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, the stack
	 * will have to verify the checksum itself.
	 */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}
/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	/* fcb is at the beginning if it exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb.
	 * Remove the padded bytes, if there are any.
	 */
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
		skb_pull(skb, amount_pull);
	}

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	if (dev->features & NETIF_F_RXCSUM)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* There's a need to check for NETIF_F_HW_VLAN_CTAG_RX here.
	 * Even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo randomly set.
	 */
	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
	    fcb->flags & RXFCB_VLN)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);

	/* Send the packet up the stack */
	napi_gro_receive(napi, skb);
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled.
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;

		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(priv->dev, bdp->bufPtr,
				 priv->rx_buffer_size, DMA_FROM_DEVICE);

		if (unlikely(!(bdp->status & RXBD_ERR) &&
			     bdp->length > priv->rx_buffer_size))
			bdp->status = RXBD_LARGE;

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
			     bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb)
				dev_kfree_skb(skb);
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				pkt_len = bdp->length - ETH_FCS_LEN;
				/* Remove the FCS from the packet length */
				skb_put(skb, pkt_len);
				rx_queue->stats.rx_bytes += pkt_len;
				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull,
						   &rx_queue->grp->napi_rx);
			} else {
				netif_warn(priv, rx_err, dev, "Missing skb!\n");
				rx_queue->stats.rx_dropped++;
				atomic64_inc(&priv->extra_stats.rx_skbmissing);
			}
		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
	int work_done = 0;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

	work_done = gfar_clean_rx_ring(rx_queue, budget);

	if (work_done < budget) {
		u32 imask;

		napi_complete(napi);
		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
	u32 imask;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	/* run Tx cleanup to completion */
	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
		gfar_clean_tx_ring(tx_queue);

	napi_complete(napi);

	spin_lock_irq(&gfargrp->grplock);
	imask = gfar_read(&regs->imask);
	imask |= IMASK_TX_DEFAULT;
	gfar_write(&regs->imask, imask);
	spin_unlock_irq(&gfargrp->grplock);

	return 0;
}
static int gfar_poll_rx(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int work_done = 0, work_done_per_q = 0;
	int i, budget_per_q = 0;
	unsigned long rstat_rxf;
	int num_act_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;

	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
	if (num_act_queues)
		budget_per_q = budget/num_act_queues;

	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
		/* skip queue if not active */
		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
			continue;

		rx_queue = priv->rx_queue[i];
		work_done_per_q =
			gfar_clean_rx_ring(rx_queue, budget_per_q);
		work_done += work_done_per_q;

		/* finished processing this queue */
		if (work_done_per_q < budget_per_q) {
			/* clear active queue hw indication */
			gfar_write(&regs->rstat,
				   RSTAT_CLEAR_RXF0 >> i);
			num_act_queues--;

			if (!num_act_queues)
				break;
		}
	}

	if (!num_act_queues) {
		u32 imask;

		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}
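/* Worked example (illustrative): with a NAPI budget of 64 and three
 * queues flagged in RSTAT, budget_per_q = 64 / 3 = 21 frames per active
 * queue in this poll; a queue that uses less than its share has its
 * RXF bit acked and drops out of num_act_queues. A hypothetical
 * restatement of the split:
 */
static inline int gfar_example_budget_per_q(int budget, int num_act_queues)
{
	return num_act_queues ? budget / num_act_queues : 0;
}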
static int gfar_poll_tx(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	int has_tx_work = 0;
	int i;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
		tx_queue = priv->tx_queue[i];
		/* run Tx cleanup to completion */
		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
			gfar_clean_tx_ring(tx_queue);
			has_tx_work = 1;
		}
	}

	if (has_tx_work) {
		u32 imask;

		napi_complete(napi);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_TX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			disable_irq(gfar_irq(grp, RX)->irq);
			disable_irq(gfar_irq(grp, ER)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, ER)->irq);
			enable_irq(gfar_irq(grp, RX)->irq);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	}
}
#endif
/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}
static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
	struct phy_device *phydev = priv->phydev;
	u32 val = 0;

	if (!phydev->duplex)
		return val;

	if (!priv->pause_aneg_en) {
		if (priv->tx_pause_en)
			val |= MACCFG1_TX_FLOW;
		if (priv->rx_pause_en)
			val |= MACCFG1_RX_FLOW;
	} else {
		u16 lcl_adv, rmt_adv;
		u8 flowctrl;

		/* get link partner capabilities */
		rmt_adv = 0;
		if (phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = mii_advertise_flowctrl(phydev->advertising);

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (flowctrl & FLOW_CTRL_TX)
			val |= MACCFG1_TX_FLOW;
		if (flowctrl & FLOW_CTRL_RX)
			val |= MACCFG1_RX_FLOW;
	}

	return val;
}
/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	if (test_bit(GFAR_RESETTING, &priv->state))
		return;

	if (phydev->link) {
		u32 tempval1 = gfar_read(&regs->maccfg1);
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100
				 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, dev,
					   "Ack! Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
		tempval1 |= gfar_get_flowctrl_cfg(priv);

		gfar_write(&regs->maccfg1, tempval1);
		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
}
/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed)
 */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them
		 */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}
/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception
 */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 *    do a CRC on it (little endian), and reverse the bits of the
 *    result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 *    table.  The table is controlled through 8 32-bit registers:
 *    gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 *    entry 255.  This means that the 3 most significant bits of the
 *    hash index select which gaddr register to use, and the other 5
 *    bits indicate which bit (assuming an IBM numbering scheme, which
 *    for PowerPC (tm) is usually the case) in the register holds
 *    the entry.
 */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31 - whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}
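/* Worked example (illustrative, CRC value hypothetical): with an 8-bit
 * hash_width and a CRC of 0xb6000000 (top byte 0xb6 = 0b10110110),
 * whichreg = top 3 bits = 0b101 = 5 (gaddr5) and whichbit = next
 * 5 bits = 0b10110 = 22, so value = 1 << (31 - 22) = 1 << 9 in IBM
 * bit order. The hypothetical helper restates the bit selection:
 */
static inline u32 gfar_example_hash_value(u32 crc, int width)
{
	u8 whichbit = (crc >> (32 - width)) & 0x1f;

	return 1 << (31 - whichbit);
}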
/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	char tmpbuf[ETH_ALEN];
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz
	 * little endian is silly
	 */
	for (idx = 0; idx < ETH_ALEN; idx++)
		tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}
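/* Worked example (illustrative): for the hypothetical address
 * 00:04:9f:01:02:03, tmpbuf becomes 03 02 01 9f 04 00, so on a
 * big-endian bus the first register is written with 0x0302019f and the
 * upper half of the second with 0x0400 (only the top two octets of
 * that register appear to be significant). A hypothetical helper for
 * the first value, assuming big-endian reads:
 */
static inline u32 gfar_example_addr_low(const u8 *addr)
{
	/* value written to the first station-address register:
	 * the last four octets of the address, reversed
	 */
	return (addr[5] << 24) | (addr[4] << 16) | (addr[3] << 8) | addr[2];
}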
/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			atomic64_inc(&priv->extra_stats.tx_underrun);

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_bsy);

		gfar_receive(irq, grp_id);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_babr);

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		atomic64_inc(&priv->extra_stats.eberr);
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		atomic64_inc(&priv->extra_stats.tx_babt);
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}
static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);
/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

module_platform_driver(gfar_driver);