Commit | Line | Data |
---|---|---|
0bbaf069 | 1 | /* |
1da177e4 LT |
2 | * drivers/net/gianfar.c |
3 | * | |
4 | * Gianfar Ethernet Driver | |
7f7f5316 AF |
5 | * This driver is designed for the non-CPM ethernet controllers |
6 | * on the 85xx and 83xx family of integrated processors | |
1da177e4 LT |
7 | * Based on 8260_io/fcc_enet.c |
8 | * | |
9 | * Author: Andy Fleming | |
4c8d3d99 | 10 | * Maintainer: Kumar Gala |
a12f801d | 11 | * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> |
1da177e4 | 12 | * |
a12f801d SG |
13 | * Copyright 2002-2009 Freescale Semiconductor, Inc. |
14 | * Copyright 2007 MontaVista Software, Inc. | |
1da177e4 LT |
15 | * |
16 | * This program is free software; you can redistribute it and/or modify it | |
17 | * under the terms of the GNU General Public License as published by the | |
18 | * Free Software Foundation; either version 2 of the License, or (at your | |
19 | * option) any later version. | |
20 | * | |
21 | * Gianfar: AKA Lambda Draconis, "Dragon" | |
22 | * RA 11 31 24.2 | |
23 | * Dec +69 19 52 | |
24 | * V 3.84 | |
25 | * B-V +1.62 | |
26 | * | |
27 | * Theory of operation | |
0bbaf069 | 28 | * |
b31a1d8b AF |
29 | * The driver is initialized through of_device. Configuration information |
30 | * is therefore conveyed through an OF-style device tree. | |
1da177e4 LT |
31 | * |
32 | * The Gianfar Ethernet Controller uses a ring of buffer | |
33 | * descriptors. The beginning is indicated by a register | |
0bbaf069 KG |
34 | * pointing to the physical address of the start of the ring. |
35 | * The end is determined by a "wrap" bit being set in the | |
1da177e4 LT |
36 | * last descriptor of the ring. |
37 | * | |
38 | * When a packet is received, the RXF bit in the | |
0bbaf069 | 39 | * IEVENT register is set, triggering an interrupt when the |
1da177e4 LT |
40 | * corresponding bit in the IMASK register is also set (if |
41 | * interrupt coalescing is active, then the interrupt may not | |
42 | * happen immediately, but will wait until either a set number | |
bb40dcbb | 43 | * of frames or amount of time have passed). In NAPI, the |
1da177e4 | 44 | * interrupt handler will signal there is work to be done, and |
0aa1538f | 45 | * exit. This method will start at the last known empty |
0bbaf069 | 46 | * descriptor, and process every subsequent descriptor until there |
1da177e4 LT |
47 | * are none left with data (NAPI will stop after a set number of |
48 | * packets to give time to other tasks, but will eventually | |
49 | * process all the packets). The data arrives inside a | |
50 | * pre-allocated skb, and so after the skb is passed up to the | |
51 | * stack, a new skb must be allocated, and the address field in | |
52 | * the buffer descriptor must be updated to indicate this new | |
53 | * skb. | |
54 | * | |
55 | * When the kernel requests that a packet be transmitted, the | |
56 | * driver starts where it left off last time, and points the | |
57 | * descriptor at the buffer which was passed in. The driver | |
58 | * then informs the DMA engine that there are packets ready to | |
59 | * be transmitted. Once the controller is finished transmitting | |
60 | * the packet, an interrupt may be triggered (under the same | |
61 | * conditions as for reception, but depending on the TXF bit). | |
62 | * The driver then cleans up the buffer. | |
63 | */ | |
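The block comment above sketches the descriptor-ring model in prose. As a rough, self-contained illustration of that model (not driver code: the demo_* names, flag values, and ring size are invented for this sketch, and in the real driver ownership transfer is done by the eTSEC hardware via the RXBD_EMPTY bit in struct rxbd8), a wrap-terminated ring can be walked like this:

```c
/* Standalone illustration only: host-side model of walking a descriptor
 * ring whose end is marked by a "wrap" bit in the last descriptor. */
#include <stdio.h>

#define DEMO_RXBD_EMPTY 0x8000u	/* descriptor still owned by "hardware" */
#define DEMO_RXBD_WRAP  0x2000u	/* last descriptor in the ring */
#define DEMO_RING_SIZE  4

struct demo_rxbd {
	unsigned int status;
	unsigned int len;
};

int main(void)
{
	struct demo_rxbd ring[DEMO_RING_SIZE];
	struct demo_rxbd *cur;
	int i;

	/* Init: every descriptor empty, wrap bit only on the last one */
	for (i = 0; i < DEMO_RING_SIZE; i++) {
		ring[i].status = DEMO_RXBD_EMPTY;
		ring[i].len = 0;
	}
	ring[DEMO_RING_SIZE - 1].status |= DEMO_RXBD_WRAP;

	/* Pretend "hardware" filled the first two descriptors */
	ring[0].status &= ~DEMO_RXBD_EMPTY; ring[0].len = 64;
	ring[1].status &= ~DEMO_RXBD_EMPTY; ring[1].len = 128;

	/* Software walk: start at the last known empty descriptor, process
	 * until an empty one is found, re-arm each descriptor, and follow
	 * the wrap bit back to the base of the ring. */
	cur = ring;
	while (!(cur->status & DEMO_RXBD_EMPTY)) {
		printf("processed %u byte frame\n", cur->len);
		cur->status |= DEMO_RXBD_EMPTY;	/* hand it back */
		cur = (cur->status & DEMO_RXBD_WRAP) ? ring : cur + 1;
	}
	return 0;
}
```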
64 | ||
1da177e4 | 65 | #include <linux/kernel.h> |
1da177e4 LT |
66 | #include <linux/string.h> |
67 | #include <linux/errno.h> | |
bb40dcbb | 68 | #include <linux/unistd.h> |
1da177e4 LT |
69 | #include <linux/slab.h> |
70 | #include <linux/interrupt.h> | |
71 | #include <linux/init.h> | |
72 | #include <linux/delay.h> | |
73 | #include <linux/netdevice.h> | |
74 | #include <linux/etherdevice.h> | |
75 | #include <linux/skbuff.h> | |
0bbaf069 | 76 | #include <linux/if_vlan.h> |
1da177e4 LT |
77 | #include <linux/spinlock.h> |
78 | #include <linux/mm.h> | |
fe192a49 | 79 | #include <linux/of_mdio.h> |
b31a1d8b | 80 | #include <linux/of_platform.h> |
0bbaf069 KG |
81 | #include <linux/ip.h> |
82 | #include <linux/tcp.h> | |
83 | #include <linux/udp.h> | |
9c07b884 | 84 | #include <linux/in.h> |
cc772ab7 | 85 | #include <linux/net_tstamp.h> |
1da177e4 LT |
86 | |
87 | #include <asm/io.h> | |
88 | #include <asm/irq.h> | |
89 | #include <asm/uaccess.h> | |
90 | #include <linux/module.h> | |
1da177e4 LT |
91 | #include <linux/dma-mapping.h> |
92 | #include <linux/crc32.h> | |
bb40dcbb AF |
93 | #include <linux/mii.h> |
94 | #include <linux/phy.h> | |
b31a1d8b AF |
95 | #include <linux/phy_fixed.h> |
96 | #include <linux/of.h> | |
1da177e4 LT |
97 | |
98 | #include "gianfar.h" | |
1577ecef | 99 | #include "fsl_pq_mdio.h" |
1da177e4 LT |
100 | |
101 | #define TX_TIMEOUT (1*HZ) | |
1da177e4 LT |
102 | #undef BRIEF_GFAR_ERRORS |
103 | #undef VERBOSE_GFAR_ERRORS | |
104 | ||
1da177e4 | 105 | const char gfar_driver_name[] = "Gianfar Ethernet"; |
7f7f5316 | 106 | const char gfar_driver_version[] = "1.3"; |
1da177e4 | 107 | |
1da177e4 LT |
108 | static int gfar_enet_open(struct net_device *dev); |
109 | static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); | |
ab939905 | 110 | static void gfar_reset_task(struct work_struct *work); |
1da177e4 LT |
111 | static void gfar_timeout(struct net_device *dev); |
112 | static int gfar_close(struct net_device *dev); | |
815b97c6 | 113 | struct sk_buff *gfar_new_skb(struct net_device *dev); |
a12f801d | 114 | static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, |
815b97c6 | 115 | struct sk_buff *skb); |
1da177e4 LT |
116 | static int gfar_set_mac_address(struct net_device *dev); |
117 | static int gfar_change_mtu(struct net_device *dev, int new_mtu); | |
7d12e780 DH |
118 | static irqreturn_t gfar_error(int irq, void *dev_id); |
119 | static irqreturn_t gfar_transmit(int irq, void *dev_id); | |
120 | static irqreturn_t gfar_interrupt(int irq, void *dev_id); | |
1da177e4 LT |
121 | static void adjust_link(struct net_device *dev); |
122 | static void init_registers(struct net_device *dev); | |
123 | static int init_phy(struct net_device *dev); | |
b31a1d8b AF |
124 | static int gfar_probe(struct of_device *ofdev, |
125 | const struct of_device_id *match); | |
126 | static int gfar_remove(struct of_device *ofdev); | |
bb40dcbb | 127 | static void free_skb_resources(struct gfar_private *priv); |
1da177e4 LT |
128 | static void gfar_set_multi(struct net_device *dev); |
129 | static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); | |
d3c12873 | 130 | static void gfar_configure_serdes(struct net_device *dev); |
bea3348e | 131 | static int gfar_poll(struct napi_struct *napi, int budget); |
f2d71c2d VW |
132 | #ifdef CONFIG_NET_POLL_CONTROLLER |
133 | static void gfar_netpoll(struct net_device *dev); | |
134 | #endif | |
a12f801d SG |
135 | int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); |
136 | static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); | |
2c2db48a DH |
137 | static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, |
138 | int amount_pull); | |
0bbaf069 KG |
139 | static void gfar_vlan_rx_register(struct net_device *netdev, |
140 | struct vlan_group *grp); | |
7f7f5316 | 141 | void gfar_halt(struct net_device *dev); |
d87eb127 | 142 | static void gfar_halt_nodisable(struct net_device *dev); |
7f7f5316 AF |
143 | void gfar_start(struct net_device *dev); |
144 | static void gfar_clear_exact_match(struct net_device *dev); | |
145 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); | |
26ccfc37 | 146 | static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
1da177e4 | 147 | |
1da177e4 LT |
148 | MODULE_AUTHOR("Freescale Semiconductor, Inc"); |
149 | MODULE_DESCRIPTION("Gianfar Ethernet Driver"); | |
150 | MODULE_LICENSE("GPL"); | |
151 | ||
a12f801d | 152 | static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, |
8a102fe0 AV |
153 | dma_addr_t buf) |
154 | { | |
8a102fe0 AV |
155 | u32 lstatus; |
156 | ||
157 | bdp->bufPtr = buf; | |
158 | ||
159 | lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT); | |
a12f801d | 160 | if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) |
8a102fe0 AV |
161 | lstatus |= BD_LFLAG(RXBD_WRAP); |
162 | ||
163 | eieio(); | |
164 | ||
165 | bdp->lstatus = lstatus; | |
166 | } | |
167 | ||
8728327e | 168 | static int gfar_init_bds(struct net_device *ndev) |
826aa4a0 | 169 | { |
8728327e | 170 | struct gfar_private *priv = netdev_priv(ndev); |
a12f801d SG |
171 | struct gfar_priv_tx_q *tx_queue = NULL; |
172 | struct gfar_priv_rx_q *rx_queue = NULL; | |
826aa4a0 AV |
173 | struct txbd8 *txbdp; |
174 | struct rxbd8 *rxbdp; | |
fba4ed03 | 175 | int i, j; |
a12f801d | 176 | |
fba4ed03 SG |
177 | for (i = 0; i < priv->num_tx_queues; i++) { |
178 | tx_queue = priv->tx_queue[i]; | |
179 | /* Initialize some variables in our dev structure */ | |
180 | tx_queue->num_txbdfree = tx_queue->tx_ring_size; | |
181 | tx_queue->dirty_tx = tx_queue->tx_bd_base; | |
182 | tx_queue->cur_tx = tx_queue->tx_bd_base; | |
183 | tx_queue->skb_curtx = 0; | |
184 | tx_queue->skb_dirtytx = 0; | |
185 | ||
186 | /* Initialize Transmit Descriptor Ring */ | |
187 | txbdp = tx_queue->tx_bd_base; | |
188 | for (j = 0; j < tx_queue->tx_ring_size; j++) { | |
189 | txbdp->lstatus = 0; | |
190 | txbdp->bufPtr = 0; | |
191 | txbdp++; | |
192 | } | |
8728327e | 193 | |
fba4ed03 SG |
194 | /* Set the last descriptor in the ring to indicate wrap */ |
195 | txbdp--; | |
196 | txbdp->status |= TXBD_WRAP; | |
8728327e AV |
197 | } |
198 | ||
fba4ed03 SG |
199 | for (i = 0; i < priv->num_rx_queues; i++) { |
200 | rx_queue = priv->rx_queue[i]; | |
201 | rx_queue->cur_rx = rx_queue->rx_bd_base; | |
202 | rx_queue->skb_currx = 0; | |
203 | rxbdp = rx_queue->rx_bd_base; | |
8728327e | 204 | |
fba4ed03 SG |
205 | for (j = 0; j < rx_queue->rx_ring_size; j++) { |
206 | struct sk_buff *skb = rx_queue->rx_skbuff[j]; | |
8728327e | 207 | |
fba4ed03 SG |
208 | if (skb) { |
209 | gfar_init_rxbdp(rx_queue, rxbdp, | |
210 | rxbdp->bufPtr); | |
211 | } else { | |
212 | skb = gfar_new_skb(ndev); | |
213 | if (!skb) { | |
214 | pr_err("%s: Can't allocate RX buffers\n", | |
215 | ndev->name); | |
216 | goto err_rxalloc_fail; | |
217 | } | |
218 | rx_queue->rx_skbuff[j] = skb; | |
219 | ||
220 | gfar_new_rxbdp(rx_queue, rxbdp, skb); | |
8728327e | 221 | } |
8728327e | 222 | |
fba4ed03 | 223 | rxbdp++; |
8728327e AV |
224 | } |
225 | ||
8728327e AV |
226 | } |
227 | ||
228 | return 0; | |
fba4ed03 SG |
229 | |
230 | err_rxalloc_fail: | |
231 | free_skb_resources(priv); | |
232 | return -ENOMEM; | |
8728327e AV |
233 | } |
234 | ||
235 | static int gfar_alloc_skb_resources(struct net_device *ndev) | |
236 | { | |
826aa4a0 | 237 | void *vaddr; |
fba4ed03 SG |
238 | dma_addr_t addr; |
239 | int i, j, k; | |
826aa4a0 AV |
240 | struct gfar_private *priv = netdev_priv(ndev); |
241 | struct device *dev = &priv->ofdev->dev; | |
a12f801d SG |
242 | struct gfar_priv_tx_q *tx_queue = NULL; |
243 | struct gfar_priv_rx_q *rx_queue = NULL; | |
244 | ||
fba4ed03 SG |
245 | priv->total_tx_ring_size = 0; |
246 | for (i = 0; i < priv->num_tx_queues; i++) | |
247 | priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; | |
248 | ||
249 | priv->total_rx_ring_size = 0; | |
250 | for (i = 0; i < priv->num_rx_queues; i++) | |
251 | priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; | |
826aa4a0 AV |
252 | |
253 | /* Allocate memory for the buffer descriptors */ | |
8728327e | 254 | vaddr = dma_alloc_coherent(dev, |
fba4ed03 SG |
255 | sizeof(struct txbd8) * priv->total_tx_ring_size + |
256 | sizeof(struct rxbd8) * priv->total_rx_ring_size, | |
257 | &addr, GFP_KERNEL); | |
826aa4a0 AV |
258 | if (!vaddr) { |
259 | if (netif_msg_ifup(priv)) | |
260 | pr_err("%s: Could not allocate buffer descriptors!\n", | |
261 | ndev->name); | |
262 | return -ENOMEM; | |
263 | } | |
264 | ||
fba4ed03 SG |
265 | for (i = 0; i < priv->num_tx_queues; i++) { |
266 | tx_queue = priv->tx_queue[i]; | |
267 | tx_queue->tx_bd_base = (struct txbd8 *) vaddr; | |
268 | tx_queue->tx_bd_dma_base = addr; | |
269 | tx_queue->dev = ndev; | |
270 | /* enet DMA only understands physical addresses */ | |
271 | addr += sizeof(struct txbd8) *tx_queue->tx_ring_size; | |
272 | vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size; | |
273 | } | |
826aa4a0 | 274 | |
826aa4a0 | 275 | /* Start the rx descriptor ring where the tx ring leaves off */ |
fba4ed03 SG |
276 | for (i = 0; i < priv->num_rx_queues; i++) { |
277 | rx_queue = priv->rx_queue[i]; | |
278 | rx_queue->rx_bd_base = (struct rxbd8 *) vaddr; | |
279 | rx_queue->rx_bd_dma_base = addr; | |
280 | rx_queue->dev = ndev; | |
281 | addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; | |
282 | vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; | |
283 | } | |
826aa4a0 AV |
284 | |
285 | /* Setup the skbuff rings */ | |
fba4ed03 SG |
286 | for (i = 0; i < priv->num_tx_queues; i++) { |
287 | tx_queue = priv->tx_queue[i]; | |
288 | tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) * | |
a12f801d | 289 | tx_queue->tx_ring_size, GFP_KERNEL); |
fba4ed03 SG |
290 | if (!tx_queue->tx_skbuff) { |
291 | if (netif_msg_ifup(priv)) | |
292 | pr_err("%s: Could not allocate tx_skbuff\n", | |
293 | ndev->name); | |
294 | goto cleanup; | |
295 | } | |
826aa4a0 | 296 | |
fba4ed03 SG |
297 | for (k = 0; k < tx_queue->tx_ring_size; k++) |
298 | tx_queue->tx_skbuff[k] = NULL; | |
299 | } | |
826aa4a0 | 300 | |
fba4ed03 SG |
301 | for (i = 0; i < priv->num_rx_queues; i++) { |
302 | rx_queue = priv->rx_queue[i]; | |
303 | rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) * | |
a12f801d | 304 | rx_queue->rx_ring_size, GFP_KERNEL); |
826aa4a0 | 305 | |
fba4ed03 SG |
306 | if (!rx_queue->rx_skbuff) { |
307 | if (netif_msg_ifup(priv)) | |
308 | pr_err("%s: Could not allocate rx_skbuff\n", | |
309 | ndev->name); | |
310 | goto cleanup; | |
311 | } | |
312 | ||
313 | for (j = 0; j < rx_queue->rx_ring_size; j++) | |
314 | rx_queue->rx_skbuff[j] = NULL; | |
315 | } | |
826aa4a0 | 316 | |
8728327e AV |
317 | if (gfar_init_bds(ndev)) |
318 | goto cleanup; | |
826aa4a0 AV |
319 | |
320 | return 0; | |
321 | ||
322 | cleanup: | |
323 | free_skb_resources(priv); | |
324 | return -ENOMEM; | |
325 | } | |
326 | ||
fba4ed03 SG |
327 | static void gfar_init_tx_rx_base(struct gfar_private *priv) |
328 | { | |
46ceb60c | 329 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
18294ad1 | 330 | u32 __iomem *baddr; |
fba4ed03 SG |
331 | int i; |
332 | ||
333 | baddr = ®s->tbase0; | |
334 | for(i = 0; i < priv->num_tx_queues; i++) { | |
335 | gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base); | |
336 | baddr += 2; | |
337 | } | |
338 | ||
339 | baddr = ®s->rbase0; | |
340 | for(i = 0; i < priv->num_rx_queues; i++) { | |
341 | gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); | |
342 | baddr += 2; | |
343 | } | |
344 | } | |
345 | ||
826aa4a0 AV |
346 | static void gfar_init_mac(struct net_device *ndev) |
347 | { | |
348 | struct gfar_private *priv = netdev_priv(ndev); | |
46ceb60c | 349 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
826aa4a0 AV |
350 | u32 rctrl = 0; |
351 | u32 tctrl = 0; | |
352 | u32 attrs = 0; | |
353 | ||
fba4ed03 SG |
354 | /* write the tx/rx base registers */ |
355 | gfar_init_tx_rx_base(priv); | |
32c513bc | 356 | |
826aa4a0 | 357 | /* Configure the coalescing support */ |
46ceb60c | 358 | gfar_configure_coalescing(priv, 0xFF, 0xFF); |
fba4ed03 | 359 | |
1ccb8389 | 360 | if (priv->rx_filer_enable) { |
fba4ed03 | 361 | rctrl |= RCTRL_FILREN; |
1ccb8389 SG |
362 | /* Program the RIR0 reg with the required distribution */ |
363 | gfar_write(®s->rir0, DEFAULT_RIR0); | |
364 | } | |
826aa4a0 AV |
365 | |
366 | if (priv->rx_csum_enable) | |
367 | rctrl |= RCTRL_CHECKSUMMING; | |
368 | ||
369 | if (priv->extended_hash) { | |
370 | rctrl |= RCTRL_EXTHASH; | |
371 | ||
372 | gfar_clear_exact_match(ndev); | |
373 | rctrl |= RCTRL_EMEN; | |
374 | } | |
375 | ||
376 | if (priv->padding) { | |
377 | rctrl &= ~RCTRL_PAL_MASK; | |
378 | rctrl |= RCTRL_PADDING(priv->padding); | |
379 | } | |
380 | ||
cc772ab7 MR |
381 | /* Insert receive time stamps into padding alignment bytes */ |
382 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) { | |
383 | rctrl &= ~RCTRL_PAL_MASK; | |
384 | rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE | RCTRL_PADDING(8); | |
385 | priv->padding = 8; | |
386 | } | |
387 | ||
826aa4a0 AV |
388 | /* keep vlan related bits if it's enabled */ |
389 | if (priv->vlgrp) { | |
390 | rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; | |
391 | tctrl |= TCTRL_VLINS; | |
392 | } | |
393 | ||
394 | /* Init rctrl based on our settings */ | |
395 | gfar_write(®s->rctrl, rctrl); | |
396 | ||
397 | if (ndev->features & NETIF_F_IP_CSUM) | |
398 | tctrl |= TCTRL_INIT_CSUM; | |
399 | ||
fba4ed03 SG |
400 | tctrl |= TCTRL_TXSCHED_PRIO; |
401 | ||
826aa4a0 AV |
402 | gfar_write(®s->tctrl, tctrl); |
403 | ||
404 | /* Set the extraction length and index */ | |
405 | attrs = ATTRELI_EL(priv->rx_stash_size) | | |
406 | ATTRELI_EI(priv->rx_stash_index); | |
407 | ||
408 | gfar_write(®s->attreli, attrs); | |
409 | ||
410 | /* Start with defaults, and add stashing or locking | |
411 | * depending on the appropriate variables */ | |
412 | attrs = ATTR_INIT_SETTINGS; | |
413 | ||
414 | if (priv->bd_stash_en) | |
415 | attrs |= ATTR_BDSTASH; | |
416 | ||
417 | if (priv->rx_stash_size != 0) | |
418 | attrs |= ATTR_BUFSTASH; | |
419 | ||
420 | gfar_write(®s->attr, attrs); | |
421 | ||
422 | gfar_write(®s->fifo_tx_thr, priv->fifo_threshold); | |
423 | gfar_write(®s->fifo_tx_starve, priv->fifo_starve); | |
424 | gfar_write(®s->fifo_tx_starve_shutoff, priv->fifo_starve_off); | |
425 | } | |
426 | ||
a7f38041 SG |
427 | static struct net_device_stats *gfar_get_stats(struct net_device *dev) |
428 | { | |
429 | struct gfar_private *priv = netdev_priv(dev); | |
430 | struct netdev_queue *txq; | |
431 | unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; | |
432 | unsigned long tx_packets = 0, tx_bytes = 0; | |
433 | int i = 0; | |
434 | ||
435 | for (i = 0; i < priv->num_rx_queues; i++) { | |
436 | rx_packets += priv->rx_queue[i]->stats.rx_packets; | |
437 | rx_bytes += priv->rx_queue[i]->stats.rx_bytes; | |
438 | rx_dropped += priv->rx_queue[i]->stats.rx_dropped; | |
439 | } | |
440 | ||
441 | dev->stats.rx_packets = rx_packets; | |
442 | dev->stats.rx_bytes = rx_bytes; | |
443 | dev->stats.rx_dropped = rx_dropped; | |
444 | ||
445 | for (i = 0; i < priv->num_tx_queues; i++) { | |
446 | txq = netdev_get_tx_queue(dev, i); | |
447 | tx_bytes += txq->tx_bytes; | |
448 | tx_packets += txq->tx_packets; | |
449 | } | |
450 | ||
451 | dev->stats.tx_bytes = tx_bytes; | |
452 | dev->stats.tx_packets = tx_packets; | |
453 | ||
454 | return &dev->stats; | |
455 | } | |
456 | ||
26ccfc37 AF |
457 | static const struct net_device_ops gfar_netdev_ops = { |
458 | .ndo_open = gfar_enet_open, | |
459 | .ndo_start_xmit = gfar_start_xmit, | |
460 | .ndo_stop = gfar_close, | |
461 | .ndo_change_mtu = gfar_change_mtu, | |
462 | .ndo_set_multicast_list = gfar_set_multi, | |
463 | .ndo_tx_timeout = gfar_timeout, | |
464 | .ndo_do_ioctl = gfar_ioctl, | |
a7f38041 | 465 | .ndo_get_stats = gfar_get_stats, |
26ccfc37 | 466 | .ndo_vlan_rx_register = gfar_vlan_rx_register, |
240c102d BH |
467 | .ndo_set_mac_address = eth_mac_addr, |
468 | .ndo_validate_addr = eth_validate_addr, | |
26ccfc37 AF |
469 | #ifdef CONFIG_NET_POLL_CONTROLLER |
470 | .ndo_poll_controller = gfar_netpoll, | |
471 | #endif | |
472 | }; | |
473 | ||
7a8b3372 SG |
474 | unsigned int ftp_rqfpr[MAX_FILER_IDX + 1]; |
475 | unsigned int ftp_rqfcr[MAX_FILER_IDX + 1]; | |
476 | ||
fba4ed03 SG |
477 | void lock_rx_qs(struct gfar_private *priv) |
478 | { | |
479 | int i = 0x0; | |
480 | ||
481 | for (i = 0; i < priv->num_rx_queues; i++) | |
482 | spin_lock(&priv->rx_queue[i]->rxlock); | |
483 | } | |
484 | ||
485 | void lock_tx_qs(struct gfar_private *priv) | |
486 | { | |
487 | int i = 0x0; | |
488 | ||
489 | for (i = 0; i < priv->num_tx_queues; i++) | |
490 | spin_lock(&priv->tx_queue[i]->txlock); | |
491 | } | |
492 | ||
493 | void unlock_rx_qs(struct gfar_private *priv) | |
494 | { | |
495 | int i = 0x0; | |
496 | ||
497 | for (i = 0; i < priv->num_rx_queues; i++) | |
498 | spin_unlock(&priv->rx_queue[i]->rxlock); | |
499 | } | |
500 | ||
501 | void unlock_tx_qs(struct gfar_private *priv) | |
502 | { | |
503 | int i = 0x0; | |
504 | ||
505 | for (i = 0; i < priv->num_tx_queues; i++) | |
506 | spin_unlock(&priv->tx_queue[i]->txlock); | |
507 | } | |
508 | ||
7f7f5316 AF |
509 | /* Returns 1 if incoming frames use an FCB */ |
510 | static inline int gfar_uses_fcb(struct gfar_private *priv) | |
0bbaf069 | 511 | { |
cc772ab7 MR |
512 | return priv->vlgrp || priv->rx_csum_enable || |
513 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER); | |
0bbaf069 | 514 | } |
bb40dcbb | 515 | |
fba4ed03 SG |
516 | static void free_tx_pointers(struct gfar_private *priv) |
517 | { | |
518 | int i = 0; | |
519 | ||
520 | for (i = 0; i < priv->num_tx_queues; i++) | |
521 | kfree(priv->tx_queue[i]); | |
522 | } | |
523 | ||
524 | static void free_rx_pointers(struct gfar_private *priv) | |
525 | { | |
526 | int i = 0; | |
527 | ||
528 | for (i = 0; i < priv->num_rx_queues; i++) | |
529 | kfree(priv->rx_queue[i]); | |
530 | } | |
531 | ||
46ceb60c SG |
532 | static void unmap_group_regs(struct gfar_private *priv) |
533 | { | |
534 | int i = 0; | |
535 | ||
536 | for (i = 0; i < MAXGROUPS; i++) | |
537 | if (priv->gfargrp[i].regs) | |
538 | iounmap(priv->gfargrp[i].regs); | |
539 | } | |
540 | ||
541 | static void disable_napi(struct gfar_private *priv) | |
542 | { | |
543 | int i = 0; | |
544 | ||
545 | for (i = 0; i < priv->num_grps; i++) | |
546 | napi_disable(&priv->gfargrp[i].napi); | |
547 | } | |
548 | ||
549 | static void enable_napi(struct gfar_private *priv) | |
550 | { | |
551 | int i = 0; | |
552 | ||
553 | for (i = 0; i < priv->num_grps; i++) | |
554 | napi_enable(&priv->gfargrp[i].napi); | |
555 | } | |
556 | ||
557 | static int gfar_parse_group(struct device_node *np, | |
558 | struct gfar_private *priv, const char *model) | |
559 | { | |
560 | u32 *queue_mask; | |
46ceb60c | 561 | |
7ce97d4f | 562 | priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0); |
46ceb60c SG |
563 | if (!priv->gfargrp[priv->num_grps].regs) |
564 | return -ENOMEM; | |
565 | ||
566 | priv->gfargrp[priv->num_grps].interruptTransmit = | |
567 | irq_of_parse_and_map(np, 0); | |
568 | ||
569 | /* If we aren't the FEC we have multiple interrupts */ | |
570 | if (model && strcasecmp(model, "FEC")) { | |
571 | priv->gfargrp[priv->num_grps].interruptReceive = | |
572 | irq_of_parse_and_map(np, 1); | |
573 | priv->gfargrp[priv->num_grps].interruptError = | |
574 | irq_of_parse_and_map(np,2); | |
575 | if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 || | |
576 | priv->gfargrp[priv->num_grps].interruptReceive < 0 || | |
577 | priv->gfargrp[priv->num_grps].interruptError < 0) { | |
578 | return -EINVAL; | |
579 | } | |
580 | } | |
581 | ||
582 | priv->gfargrp[priv->num_grps].grp_id = priv->num_grps; | |
583 | priv->gfargrp[priv->num_grps].priv = priv; | |
584 | spin_lock_init(&priv->gfargrp[priv->num_grps].grplock); | |
585 | if(priv->mode == MQ_MG_MODE) { | |
586 | queue_mask = (u32 *)of_get_property(np, | |
587 | "fsl,rx-bit-map", NULL); | |
588 | priv->gfargrp[priv->num_grps].rx_bit_map = | |
589 | queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps); | |
590 | queue_mask = (u32 *)of_get_property(np, | |
591 | "fsl,tx-bit-map", NULL); | |
592 | priv->gfargrp[priv->num_grps].tx_bit_map = | |
593 | queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); | |
594 | } else { | |
595 | priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF; | |
596 | priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF; | |
597 | } | |
598 | priv->num_grps++; | |
599 | ||
600 | return 0; | |
601 | } | |
602 | ||
fba4ed03 | 603 | static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) |
b31a1d8b | 604 | { |
b31a1d8b AF |
605 | const char *model; |
606 | const char *ctype; | |
607 | const void *mac_addr; | |
fba4ed03 SG |
608 | int err = 0, i; |
609 | struct net_device *dev = NULL; | |
610 | struct gfar_private *priv = NULL; | |
611 | struct device_node *np = ofdev->node; | |
46ceb60c | 612 | struct device_node *child = NULL; |
4d7902f2 AF |
613 | const u32 *stash; |
614 | const u32 *stash_len; | |
615 | const u32 *stash_idx; | |
fba4ed03 SG |
616 | unsigned int num_tx_qs, num_rx_qs; |
617 | u32 *tx_queues, *rx_queues; | |
b31a1d8b AF |
618 | |
619 | if (!np || !of_device_is_available(np)) | |
620 | return -ENODEV; | |
621 | ||
fba4ed03 SG |
622 | /* parse the num of tx and rx queues */ |
623 | tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL); | |
624 | num_tx_qs = tx_queues ? *tx_queues : 1; | |
625 | ||
626 | if (num_tx_qs > MAX_TX_QS) { | |
627 | printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n", | |
628 | num_tx_qs, MAX_TX_QS); | |
629 | printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n"); | |
630 | return -EINVAL; | |
631 | } | |
632 | ||
633 | rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL); | |
634 | num_rx_qs = rx_queues ? *rx_queues : 1; | |
635 | ||
636 | if (num_rx_qs > MAX_RX_QS) { | |
637 | printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n", | |
638 | num_rx_qs, MAX_RX_QS); |
639 | printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n"); | |
640 | return -EINVAL; | |
641 | } | |
642 | ||
643 | *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs); | |
644 | dev = *pdev; | |
645 | if (NULL == dev) | |
646 | return -ENOMEM; | |
647 | ||
648 | priv = netdev_priv(dev); | |
649 | priv->node = ofdev->node; | |
650 | priv->ndev = dev; | |
651 | ||
652 | dev->num_tx_queues = num_tx_qs; | |
653 | dev->real_num_tx_queues = num_tx_qs; | |
654 | priv->num_tx_queues = num_tx_qs; | |
655 | priv->num_rx_queues = num_rx_qs; | |
46ceb60c | 656 | priv->num_grps = 0x0; |
b31a1d8b AF |
657 | |
658 | model = of_get_property(np, "model", NULL); | |
659 | ||
46ceb60c SG |
660 | for (i = 0; i < MAXGROUPS; i++) |
661 | priv->gfargrp[i].regs = NULL; | |
b31a1d8b | 662 | |
46ceb60c SG |
663 | /* Parse and initialize group specific information */ |
664 | if (of_device_is_compatible(np, "fsl,etsec2")) { | |
665 | priv->mode = MQ_MG_MODE; | |
666 | for_each_child_of_node(np, child) { | |
667 | err = gfar_parse_group(child, priv, model); | |
668 | if (err) | |
669 | goto err_grp_init; | |
b31a1d8b | 670 | } |
46ceb60c SG |
671 | } else { |
672 | priv->mode = SQ_SG_MODE; | |
673 | err = gfar_parse_group(np, priv, model); | |
674 | if(err) | |
675 | goto err_grp_init; | |
b31a1d8b AF |
676 | } |
677 | ||
fba4ed03 SG |
678 | for (i = 0; i < priv->num_tx_queues; i++) |
679 | priv->tx_queue[i] = NULL; | |
680 | for (i = 0; i < priv->num_rx_queues; i++) | |
681 | priv->rx_queue[i] = NULL; | |
682 | ||
683 | for (i = 0; i < priv->num_tx_queues; i++) { | |
ed130589 | 684 | priv->tx_queue[i] = (struct gfar_priv_tx_q *)kzalloc( |
fba4ed03 SG |
685 | sizeof (struct gfar_priv_tx_q), GFP_KERNEL); |
686 | if (!priv->tx_queue[i]) { | |
687 | err = -ENOMEM; | |
688 | goto tx_alloc_failed; | |
689 | } | |
690 | priv->tx_queue[i]->tx_skbuff = NULL; | |
691 | priv->tx_queue[i]->qindex = i; | |
692 | priv->tx_queue[i]->dev = dev; | |
693 | spin_lock_init(&(priv->tx_queue[i]->txlock)); | |
694 | } | |
695 | ||
696 | for (i = 0; i < priv->num_rx_queues; i++) { | |
ed130589 | 697 | priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc( |
fba4ed03 SG |
698 | sizeof (struct gfar_priv_rx_q), GFP_KERNEL); |
699 | if (!priv->rx_queue[i]) { | |
700 | err = -ENOMEM; | |
701 | goto rx_alloc_failed; | |
702 | } | |
703 | priv->rx_queue[i]->rx_skbuff = NULL; | |
704 | priv->rx_queue[i]->qindex = i; | |
705 | priv->rx_queue[i]->dev = dev; | |
706 | spin_lock_init(&(priv->rx_queue[i]->rxlock)); | |
707 | } | |
708 | ||
709 | ||
4d7902f2 AF |
710 | stash = of_get_property(np, "bd-stash", NULL); |
711 | ||
a12f801d | 712 | if (stash) { |
4d7902f2 AF |
713 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; |
714 | priv->bd_stash_en = 1; | |
715 | } | |
716 | ||
717 | stash_len = of_get_property(np, "rx-stash-len", NULL); | |
718 | ||
719 | if (stash_len) | |
720 | priv->rx_stash_size = *stash_len; | |
721 | ||
722 | stash_idx = of_get_property(np, "rx-stash-idx", NULL); | |
723 | ||
724 | if (stash_idx) | |
725 | priv->rx_stash_index = *stash_idx; | |
726 | ||
727 | if (stash_len || stash_idx) | |
728 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; | |
729 | ||
b31a1d8b AF |
730 | mac_addr = of_get_mac_address(np); |
731 | if (mac_addr) | |
732 | memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN); | |
733 | ||
734 | if (model && !strcasecmp(model, "TSEC")) | |
735 | priv->device_flags = | |
736 | FSL_GIANFAR_DEV_HAS_GIGABIT | | |
737 | FSL_GIANFAR_DEV_HAS_COALESCE | | |
738 | FSL_GIANFAR_DEV_HAS_RMON | | |
739 | FSL_GIANFAR_DEV_HAS_MULTI_INTR; | |
740 | if (model && !strcasecmp(model, "eTSEC")) | |
741 | priv->device_flags = | |
742 | FSL_GIANFAR_DEV_HAS_GIGABIT | | |
743 | FSL_GIANFAR_DEV_HAS_COALESCE | | |
744 | FSL_GIANFAR_DEV_HAS_RMON | | |
745 | FSL_GIANFAR_DEV_HAS_MULTI_INTR | | |
2c2db48a | 746 | FSL_GIANFAR_DEV_HAS_PADDING | |
b31a1d8b AF |
747 | FSL_GIANFAR_DEV_HAS_CSUM | |
748 | FSL_GIANFAR_DEV_HAS_VLAN | | |
749 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | | |
cc772ab7 MR |
750 | FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | |
751 | FSL_GIANFAR_DEV_HAS_TIMER; | |
b31a1d8b AF |
752 | |
753 | ctype = of_get_property(np, "phy-connection-type", NULL); | |
754 | ||
755 | /* We only care about rgmii-id. The rest are autodetected */ | |
756 | if (ctype && !strcmp(ctype, "rgmii-id")) | |
757 | priv->interface = PHY_INTERFACE_MODE_RGMII_ID; | |
758 | else | |
759 | priv->interface = PHY_INTERFACE_MODE_MII; | |
760 | ||
761 | if (of_get_property(np, "fsl,magic-packet", NULL)) | |
762 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; | |
763 | ||
fe192a49 | 764 | priv->phy_node = of_parse_phandle(np, "phy-handle", 0); |
b31a1d8b AF |
765 | |
766 | /* Find the TBI PHY. If it's not there, we don't support SGMII */ | |
fe192a49 | 767 | priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); |
b31a1d8b AF |
768 | |
769 | return 0; | |
770 | ||
fba4ed03 SG |
771 | rx_alloc_failed: |
772 | free_rx_pointers(priv); | |
773 | tx_alloc_failed: | |
774 | free_tx_pointers(priv); | |
46ceb60c SG |
775 | err_grp_init: |
776 | unmap_group_regs(priv); | |
fba4ed03 | 777 | free_netdev(dev); |
b31a1d8b AF |
778 | return err; |
779 | } | |
780 | ||
cc772ab7 MR |
781 | static int gfar_hwtstamp_ioctl(struct net_device *netdev, |
782 | struct ifreq *ifr, int cmd) | |
783 | { | |
784 | struct hwtstamp_config config; | |
785 | struct gfar_private *priv = netdev_priv(netdev); | |
786 | ||
787 | if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) | |
788 | return -EFAULT; | |
789 | ||
790 | /* reserved for future extensions */ | |
791 | if (config.flags) | |
792 | return -EINVAL; | |
793 | ||
f0ee7acf MR |
794 | switch (config.tx_type) { |
795 | case HWTSTAMP_TX_OFF: | |
796 | priv->hwts_tx_en = 0; | |
797 | break; | |
798 | case HWTSTAMP_TX_ON: | |
799 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) | |
800 | return -ERANGE; | |
801 | priv->hwts_tx_en = 1; | |
802 | break; | |
803 | default: | |
cc772ab7 | 804 | return -ERANGE; |
f0ee7acf | 805 | } |
cc772ab7 MR |
806 | |
807 | switch (config.rx_filter) { | |
808 | case HWTSTAMP_FILTER_NONE: | |
809 | priv->hwts_rx_en = 0; | |
810 | break; | |
811 | default: | |
812 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) | |
813 | return -ERANGE; | |
814 | priv->hwts_rx_en = 1; | |
815 | config.rx_filter = HWTSTAMP_FILTER_ALL; | |
816 | break; | |
817 | } | |
818 | ||
819 | return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? | |
820 | -EFAULT : 0; | |
821 | } | |
822 | ||
0faac9f7 CW |
823 | /* Ioctl MII Interface */ |
824 | static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |
825 | { | |
826 | struct gfar_private *priv = netdev_priv(dev); | |
827 | ||
828 | if (!netif_running(dev)) | |
829 | return -EINVAL; | |
830 | ||
cc772ab7 MR |
831 | if (cmd == SIOCSHWTSTAMP) |
832 | return gfar_hwtstamp_ioctl(dev, rq, cmd); | |
833 | ||
0faac9f7 CW |
834 | if (!priv->phydev) |
835 | return -ENODEV; | |
836 | ||
837 | return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); | |
838 | } | |
839 | ||
fba4ed03 SG |
840 | static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs) |
841 | { | |
842 | unsigned int new_bit_map = 0x0; | |
843 | int mask = 0x1 << (max_qs - 1), i; | |
844 | for (i = 0; i < max_qs; i++) { | |
845 | if (bit_map & mask) | |
846 | new_bit_map = new_bit_map + (1 << i); | |
847 | mask = mask >> 0x1; | |
848 | } | |
849 | return new_bit_map; | |
850 | } | |
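As a quick standalone check of the reversal (the helper is copied here under a demo_ name purely so the snippet compiles on its own; the values are illustrative): a map whose MSB (queue 0) is set comes back with bit 0 set, which is where for_each_set_bit will look for it.

```c
#include <assert.h>

/* Copy of the reversal logic above, for test purposes only. */
static unsigned int demo_reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0, mask = 1u << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map |= 1u << i;
		mask >>= 1;
	}
	return new_bit_map;
}

int main(void)
{
	/* Queue 0 in the MSB position moves to bit 0. */
	assert(demo_reverse_bitmap(0x80, 8) == 0x01);
	/* Bits 7, 6 and 0 become bits 0, 1 and 7. */
	assert(demo_reverse_bitmap(0xC1, 8) == 0x83);
	return 0;
}
```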
7a8b3372 | 851 | |
18294ad1 AV |
852 | static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, |
853 | u32 class) | |
7a8b3372 SG |
854 | { |
855 | u32 rqfpr = FPR_FILER_MASK; | |
856 | u32 rqfcr = 0x0; | |
857 | ||
858 | rqfar--; | |
859 | rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT; | |
860 | ftp_rqfpr[rqfar] = rqfpr; | |
861 | ftp_rqfcr[rqfar] = rqfcr; | |
862 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | |
863 | ||
864 | rqfar--; | |
865 | rqfcr = RQFCR_CMP_NOMATCH; | |
866 | ftp_rqfpr[rqfar] = rqfpr; | |
867 | ftp_rqfcr[rqfar] = rqfcr; | |
868 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | |
869 | ||
870 | rqfar--; | |
871 | rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND; | |
872 | rqfpr = class; | |
873 | ftp_rqfcr[rqfar] = rqfcr; | |
874 | ftp_rqfpr[rqfar] = rqfpr; | |
875 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | |
876 | ||
877 | rqfar--; | |
878 | rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND; | |
879 | rqfpr = class; | |
880 | ftp_rqfcr[rqfar] = rqfcr; | |
881 | ftp_rqfpr[rqfar] = rqfpr; | |
882 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | |
883 | ||
884 | return rqfar; | |
885 | } | |
886 | ||
887 | static void gfar_init_filer_table(struct gfar_private *priv) | |
888 | { | |
889 | int i = 0x0; | |
890 | u32 rqfar = MAX_FILER_IDX; | |
891 | u32 rqfcr = 0x0; | |
892 | u32 rqfpr = FPR_FILER_MASK; | |
893 | ||
894 | /* Default rule */ | |
895 | rqfcr = RQFCR_CMP_MATCH; | |
896 | ftp_rqfcr[rqfar] = rqfcr; | |
897 | ftp_rqfpr[rqfar] = rqfpr; | |
898 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | |
899 | ||
900 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6); | |
901 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP); | |
902 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP); | |
903 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4); | |
904 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP); | |
905 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP); | |
906 | ||
907 | /* cur_filer_idx indicates the first non-masked rule */ |
908 | priv->cur_filer_idx = rqfar; | |
909 | ||
910 | /* Rest are masked rules */ | |
911 | rqfcr = RQFCR_CMP_NOMATCH; | |
912 | for (i = 0; i < rqfar; i++) { | |
913 | ftp_rqfcr[i] = rqfcr; | |
914 | ftp_rqfpr[i] = rqfpr; | |
915 | gfar_write_filer(priv, i, rqfcr, rqfpr); | |
916 | } | |
917 | } | |
918 | ||
bb40dcbb AF |
919 | /* Set up the ethernet device structure, private data, |
920 | * and anything else we need before we start */ | |
b31a1d8b AF |
921 | static int gfar_probe(struct of_device *ofdev, |
922 | const struct of_device_id *match) | |
1da177e4 LT |
923 | { |
924 | u32 tempval; | |
925 | struct net_device *dev = NULL; | |
926 | struct gfar_private *priv = NULL; | |
f4983704 | 927 | struct gfar __iomem *regs = NULL; |
46ceb60c | 928 | int err = 0, i, grp_idx = 0; |
c50a5d9a | 929 | int len_devname; |
fba4ed03 | 930 | u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0; |
46ceb60c | 931 | u32 isrg = 0; |
18294ad1 | 932 | u32 __iomem *baddr; |
1da177e4 | 933 | |
fba4ed03 | 934 | err = gfar_of_init(ofdev, &dev); |
1da177e4 | 935 | |
fba4ed03 SG |
936 | if (err) |
937 | return err; | |
1da177e4 LT |
938 | |
939 | priv = netdev_priv(dev); | |
4826857f KG |
940 | priv->ndev = dev; |
941 | priv->ofdev = ofdev; | |
b31a1d8b | 942 | priv->node = ofdev->node; |
4826857f | 943 | SET_NETDEV_DEV(dev, &ofdev->dev); |
1da177e4 | 944 | |
d87eb127 | 945 | spin_lock_init(&priv->bflock); |
ab939905 | 946 | INIT_WORK(&priv->reset_task, gfar_reset_task); |
1da177e4 | 947 | |
b31a1d8b | 948 | dev_set_drvdata(&ofdev->dev, priv); |
46ceb60c | 949 | regs = priv->gfargrp[0].regs; |
1da177e4 LT |
950 | |
951 | /* Stop the DMA engine now, in case it was running before */ | |
952 | /* (The firmware could have used it, and left it running). */ | |
257d938a | 953 | gfar_halt(dev); |
1da177e4 LT |
954 | |
955 | /* Reset MAC layer */ | |
f4983704 | 956 | gfar_write(®s->maccfg1, MACCFG1_SOFT_RESET); |
1da177e4 | 957 | |
b98ac702 AF |
958 | /* We need to delay at least 3 TX clocks */ |
959 | udelay(2); | |
960 | ||
1da177e4 | 961 | tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); |
f4983704 | 962 | gfar_write(®s->maccfg1, tempval); |
1da177e4 LT |
963 | |
964 | /* Initialize MACCFG2. */ | |
f4983704 | 965 | gfar_write(®s->maccfg2, MACCFG2_INIT_SETTINGS); |
1da177e4 LT |
966 | |
967 | /* Initialize ECNTRL */ | |
f4983704 | 968 | gfar_write(®s->ecntrl, ECNTRL_INIT_SETTINGS); |
1da177e4 | 969 | |
1da177e4 | 970 | /* Set the dev->base_addr to the gfar reg region */ |
f4983704 | 971 | dev->base_addr = (unsigned long) regs; |
1da177e4 | 972 | |
b31a1d8b | 973 | SET_NETDEV_DEV(dev, &ofdev->dev); |
1da177e4 LT |
974 | |
975 | /* Fill in the dev structure */ | |
1da177e4 | 976 | dev->watchdog_timeo = TX_TIMEOUT; |
1da177e4 | 977 | dev->mtu = 1500; |
26ccfc37 | 978 | dev->netdev_ops = &gfar_netdev_ops; |
0bbaf069 KG |
979 | dev->ethtool_ops = &gfar_ethtool_ops; |
980 | ||
fba4ed03 | 981 | /* Register for NAPI ... we are registering NAPI for each group */
46ceb60c SG |
982 | for (i = 0; i < priv->num_grps; i++) |
983 | netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT); | |
a12f801d | 984 | |
b31a1d8b | 985 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { |
0bbaf069 | 986 | priv->rx_csum_enable = 1; |
4669bc90 | 987 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA; |
0bbaf069 KG |
988 | } else |
989 | priv->rx_csum_enable = 0; | |
990 | ||
991 | priv->vlgrp = NULL; | |
1da177e4 | 992 | |
26ccfc37 | 993 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) |
0bbaf069 | 994 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; |
0bbaf069 | 995 | |
b31a1d8b | 996 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { |
0bbaf069 KG |
997 | priv->extended_hash = 1; |
998 | priv->hash_width = 9; | |
999 | ||
f4983704 SG |
1000 | priv->hash_regs[0] = ®s->igaddr0; |
1001 | priv->hash_regs[1] = ®s->igaddr1; | |
1002 | priv->hash_regs[2] = ®s->igaddr2; | |
1003 | priv->hash_regs[3] = ®s->igaddr3; | |
1004 | priv->hash_regs[4] = ®s->igaddr4; | |
1005 | priv->hash_regs[5] = ®s->igaddr5; | |
1006 | priv->hash_regs[6] = ®s->igaddr6; | |
1007 | priv->hash_regs[7] = ®s->igaddr7; | |
1008 | priv->hash_regs[8] = ®s->gaddr0; | |
1009 | priv->hash_regs[9] = ®s->gaddr1; | |
1010 | priv->hash_regs[10] = ®s->gaddr2; | |
1011 | priv->hash_regs[11] = ®s->gaddr3; | |
1012 | priv->hash_regs[12] = ®s->gaddr4; | |
1013 | priv->hash_regs[13] = ®s->gaddr5; | |
1014 | priv->hash_regs[14] = ®s->gaddr6; | |
1015 | priv->hash_regs[15] = ®s->gaddr7; | |
0bbaf069 KG |
1016 | |
1017 | } else { | |
1018 | priv->extended_hash = 0; | |
1019 | priv->hash_width = 8; | |
1020 | ||
f4983704 SG |
1021 | priv->hash_regs[0] = ®s->gaddr0; |
1022 | priv->hash_regs[1] = ®s->gaddr1; | |
1023 | priv->hash_regs[2] = ®s->gaddr2; | |
1024 | priv->hash_regs[3] = ®s->gaddr3; | |
1025 | priv->hash_regs[4] = ®s->gaddr4; | |
1026 | priv->hash_regs[5] = ®s->gaddr5; | |
1027 | priv->hash_regs[6] = ®s->gaddr6; | |
1028 | priv->hash_regs[7] = ®s->gaddr7; | |
0bbaf069 KG |
1029 | } |
1030 | ||
b31a1d8b | 1031 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) |
0bbaf069 KG |
1032 | priv->padding = DEFAULT_PADDING; |
1033 | else | |
1034 | priv->padding = 0; | |
1035 | ||
cc772ab7 MR |
1036 | if (dev->features & NETIF_F_IP_CSUM || |
1037 | priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) | |
0bbaf069 | 1038 | dev->hard_header_len += GMAC_FCB_LEN; |
1da177e4 | 1039 | |
46ceb60c SG |
1040 | /* Program the isrg regs only if number of grps > 1 */ |
1041 | if (priv->num_grps > 1) { | |
1042 | baddr = ®s->isrg0; | |
1043 | for (i = 0; i < priv->num_grps; i++) { | |
1044 | isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX); | |
1045 | isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX); | |
1046 | gfar_write(baddr, isrg); | |
1047 | baddr++; | |
1048 | isrg = 0x0; | |
1049 | } | |
1050 | } | |
1051 | ||
fba4ed03 | 1052 | /* Need to reverse the bit maps as bit_map's MSB is q0 |
984b3f57 | 1053 | * but for_each_set_bit parses from right to left, which
fba4ed03 | 1054 | * basically reverses the queue numbers */ |
46ceb60c SG |
1055 | for (i = 0; i< priv->num_grps; i++) { |
1056 | priv->gfargrp[i].tx_bit_map = reverse_bitmap( | |
1057 | priv->gfargrp[i].tx_bit_map, MAX_TX_QS); | |
1058 | priv->gfargrp[i].rx_bit_map = reverse_bitmap( | |
1059 | priv->gfargrp[i].rx_bit_map, MAX_RX_QS); | |
1060 | } | |
1061 | ||
1062 | /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, | |
1063 | * also assign queues to groups */ | |
1064 | for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { | |
1065 | priv->gfargrp[grp_idx].num_rx_queues = 0x0; | |
984b3f57 | 1066 | for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, |
46ceb60c SG |
1067 | priv->num_rx_queues) { |
1068 | priv->gfargrp[grp_idx].num_rx_queues++; | |
1069 | priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx]; | |
1070 | rstat = rstat | (RSTAT_CLEAR_RHALT >> i); | |
1071 | rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); | |
1072 | } | |
1073 | priv->gfargrp[grp_idx].num_tx_queues = 0x0; | |
984b3f57 | 1074 | for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map, |
46ceb60c SG |
1075 | priv->num_tx_queues) { |
1076 | priv->gfargrp[grp_idx].num_tx_queues++; | |
1077 | priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx]; | |
1078 | tstat = tstat | (TSTAT_CLEAR_THALT >> i); | |
1079 | tqueue = tqueue | (TQUEUE_EN0 >> i); | |
1080 | } | |
1081 | priv->gfargrp[grp_idx].rstat = rstat; | |
1082 | priv->gfargrp[grp_idx].tstat = tstat; | |
1083 | rstat = tstat =0; | |
fba4ed03 | 1084 | } |
fba4ed03 SG |
1085 | |
1086 | gfar_write(®s->rqueue, rqueue); | |
1087 | gfar_write(®s->tqueue, tqueue); | |
1088 | ||
1da177e4 | 1089 | priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; |
1da177e4 | 1090 | |
a12f801d | 1091 | /* Initializing some of the rx/tx queue level parameters */ |
fba4ed03 SG |
1092 | for (i = 0; i < priv->num_tx_queues; i++) { |
1093 | priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; | |
1094 | priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; | |
1095 | priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; | |
1096 | priv->tx_queue[i]->txic = DEFAULT_TXIC; | |
1097 | } | |
a12f801d | 1098 | |
fba4ed03 SG |
1099 | for (i = 0; i < priv->num_rx_queues; i++) { |
1100 | priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; | |
1101 | priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; | |
1102 | priv->rx_queue[i]->rxic = DEFAULT_RXIC; | |
1103 | } | |
1da177e4 | 1104 | |
1ccb8389 SG |
1105 | /* enable filer if using multiple RX queues*/ |
1106 | if(priv->num_rx_queues > 1) | |
1107 | priv->rx_filer_enable = 1; | |
0bbaf069 KG |
1108 | /* Enable most messages by default */ |
1109 | priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; | |
1110 | ||
d3eab82b TP |
1111 | /* Carrier starts down, phylib will bring it up */ |
1112 | netif_carrier_off(dev); | |
1113 | ||
1da177e4 LT |
1114 | err = register_netdev(dev); |
1115 | ||
1116 | if (err) { | |
1117 | printk(KERN_ERR "%s: Cannot register net device, aborting.\n", | |
1118 | dev->name); | |
1119 | goto register_fail; | |
1120 | } | |
1121 | ||
2884e5cc AV |
1122 | device_init_wakeup(&dev->dev, |
1123 | priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); | |
1124 | ||
c50a5d9a DH |
1125 | /* fill out IRQ number and name fields */ |
1126 | len_devname = strlen(dev->name); | |
46ceb60c SG |
1127 | for (i = 0; i < priv->num_grps; i++) { |
1128 | strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name, | |
1129 | len_devname); | |
1130 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { | |
1131 | strncpy(&priv->gfargrp[i].int_name_tx[len_devname], | |
1132 | "_g", sizeof("_g")); | |
1133 | priv->gfargrp[i].int_name_tx[ | |
1134 | strlen(priv->gfargrp[i].int_name_tx)] = i+48; | |
1135 | strncpy(&priv->gfargrp[i].int_name_tx[strlen( | |
1136 | priv->gfargrp[i].int_name_tx)], | |
1137 | "_tx", sizeof("_tx") + 1); | |
1138 | ||
1139 | strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name, | |
1140 | len_devname); | |
1141 | strncpy(&priv->gfargrp[i].int_name_rx[len_devname], | |
1142 | "_g", sizeof("_g")); | |
1143 | priv->gfargrp[i].int_name_rx[ | |
1144 | strlen(priv->gfargrp[i].int_name_rx)] = i+48; | |
1145 | strncpy(&priv->gfargrp[i].int_name_rx[strlen( | |
1146 | priv->gfargrp[i].int_name_rx)], | |
1147 | "_rx", sizeof("_rx") + 1); | |
1148 | ||
1149 | strncpy(&priv->gfargrp[i].int_name_er[0], dev->name, | |
1150 | len_devname); | |
1151 | strncpy(&priv->gfargrp[i].int_name_er[len_devname], | |
1152 | "_g", sizeof("_g")); | |
1153 | priv->gfargrp[i].int_name_er[strlen( | |
1154 | priv->gfargrp[i].int_name_er)] = i+48; | |
1155 | strncpy(&priv->gfargrp[i].int_name_er[strlen(\ | |
1156 | priv->gfargrp[i].int_name_er)], | |
1157 | "_er", sizeof("_er") + 1); | |
1158 | } else | |
1159 | priv->gfargrp[i].int_name_tx[len_devname] = '\0'; | |
1160 | } | |
c50a5d9a | 1161 | |
7a8b3372 SG |
1162 | /* Initialize the filer table */ |
1163 | gfar_init_filer_table(priv); | |
1164 | ||
7f7f5316 AF |
1165 | /* Create all the sysfs files */ |
1166 | gfar_init_sysfs(dev); | |
1167 | ||
1da177e4 | 1168 | /* Print out the device info */ |
e174961c | 1169 | printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr); |
1da177e4 LT |
1170 | |
1171 | /* Even more device info helps when determining which kernel */ | |
7f7f5316 | 1172 | /* provided which set of benchmarks. */ |
1da177e4 | 1173 | printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); |
fba4ed03 | 1174 | for (i = 0; i < priv->num_rx_queues; i++) |
ddc01b3b | 1175 | printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n", |
fba4ed03 SG |
1176 | dev->name, i, priv->rx_queue[i]->rx_ring_size); |
1177 | for(i = 0; i < priv->num_tx_queues; i++) | |
ddc01b3b | 1178 | printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n", |
fba4ed03 | 1179 | dev->name, i, priv->tx_queue[i]->tx_ring_size); |
1da177e4 LT |
1180 | |
1181 | return 0; | |
1182 | ||
1183 | register_fail: | |
46ceb60c | 1184 | unmap_group_regs(priv); |
fba4ed03 SG |
1185 | free_tx_pointers(priv); |
1186 | free_rx_pointers(priv); | |
fe192a49 GL |
1187 | if (priv->phy_node) |
1188 | of_node_put(priv->phy_node); | |
1189 | if (priv->tbi_node) | |
1190 | of_node_put(priv->tbi_node); | |
1da177e4 | 1191 | free_netdev(dev); |
bb40dcbb | 1192 | return err; |
1da177e4 LT |
1193 | } |
1194 | ||
b31a1d8b | 1195 | static int gfar_remove(struct of_device *ofdev) |
1da177e4 | 1196 | { |
b31a1d8b | 1197 | struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); |
1da177e4 | 1198 | |
fe192a49 GL |
1199 | if (priv->phy_node) |
1200 | of_node_put(priv->phy_node); | |
1201 | if (priv->tbi_node) | |
1202 | of_node_put(priv->tbi_node); | |
1203 | ||
b31a1d8b | 1204 | dev_set_drvdata(&ofdev->dev, NULL); |
1da177e4 | 1205 | |
d9d8e041 | 1206 | unregister_netdev(priv->ndev); |
46ceb60c | 1207 | unmap_group_regs(priv); |
4826857f | 1208 | free_netdev(priv->ndev); |
1da177e4 LT |
1209 | |
1210 | return 0; | |
1211 | } | |
1212 | ||
d87eb127 | 1213 | #ifdef CONFIG_PM |
be926fc4 AV |
1214 | |
1215 | static int gfar_suspend(struct device *dev) | |
d87eb127 | 1216 | { |
be926fc4 AV |
1217 | struct gfar_private *priv = dev_get_drvdata(dev); |
1218 | struct net_device *ndev = priv->ndev; | |
46ceb60c | 1219 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
d87eb127 SW |
1220 | unsigned long flags; |
1221 | u32 tempval; | |
1222 | ||
1223 | int magic_packet = priv->wol_en && | |
b31a1d8b | 1224 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); |
d87eb127 | 1225 | |
be926fc4 | 1226 | netif_device_detach(ndev); |
d87eb127 | 1227 | |
be926fc4 | 1228 | if (netif_running(ndev)) { |
fba4ed03 SG |
1229 | |
1230 | local_irq_save(flags); | |
1231 | lock_tx_qs(priv); | |
1232 | lock_rx_qs(priv); | |
d87eb127 | 1233 | |
be926fc4 | 1234 | gfar_halt_nodisable(ndev); |
d87eb127 SW |
1235 | |
1236 | /* Disable Tx, and Rx if wake-on-LAN is disabled. */ | |
f4983704 | 1237 | tempval = gfar_read(®s->maccfg1); |
d87eb127 SW |
1238 | |
1239 | tempval &= ~MACCFG1_TX_EN; | |
1240 | ||
1241 | if (!magic_packet) | |
1242 | tempval &= ~MACCFG1_RX_EN; | |
1243 | ||
f4983704 | 1244 | gfar_write(®s->maccfg1, tempval); |
d87eb127 | 1245 | |
fba4ed03 SG |
1246 | unlock_rx_qs(priv); |
1247 | unlock_tx_qs(priv); | |
1248 | local_irq_restore(flags); | |
d87eb127 | 1249 | |
46ceb60c | 1250 | disable_napi(priv); |
d87eb127 SW |
1251 | |
1252 | if (magic_packet) { | |
1253 | /* Enable interrupt on Magic Packet */ | |
f4983704 | 1254 | gfar_write(®s->imask, IMASK_MAG); |
d87eb127 SW |
1255 | |
1256 | /* Enable Magic Packet mode */ | |
f4983704 | 1257 | tempval = gfar_read(®s->maccfg2); |
d87eb127 | 1258 | tempval |= MACCFG2_MPEN; |
f4983704 | 1259 | gfar_write(®s->maccfg2, tempval); |
d87eb127 SW |
1260 | } else { |
1261 | phy_stop(priv->phydev); | |
1262 | } | |
1263 | } | |
1264 | ||
1265 | return 0; | |
1266 | } | |
1267 | ||
be926fc4 | 1268 | static int gfar_resume(struct device *dev) |
d87eb127 | 1269 | { |
be926fc4 AV |
1270 | struct gfar_private *priv = dev_get_drvdata(dev); |
1271 | struct net_device *ndev = priv->ndev; | |
46ceb60c | 1272 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
d87eb127 SW |
1273 | unsigned long flags; |
1274 | u32 tempval; | |
1275 | int magic_packet = priv->wol_en && | |
b31a1d8b | 1276 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); |
d87eb127 | 1277 | |
be926fc4 AV |
1278 | if (!netif_running(ndev)) { |
1279 | netif_device_attach(ndev); | |
d87eb127 SW |
1280 | return 0; |
1281 | } | |
1282 | ||
1283 | if (!magic_packet && priv->phydev) | |
1284 | phy_start(priv->phydev); | |
1285 | ||
1286 | /* Disable Magic Packet mode, in case something | |
1287 | * else woke us up. | |
1288 | */ | |
fba4ed03 SG |
1289 | local_irq_save(flags); |
1290 | lock_tx_qs(priv); | |
1291 | lock_rx_qs(priv); | |
d87eb127 | 1292 | |
f4983704 | 1293 | tempval = gfar_read(®s->maccfg2); |
d87eb127 | 1294 | tempval &= ~MACCFG2_MPEN; |
f4983704 | 1295 | gfar_write(®s->maccfg2, tempval); |
d87eb127 | 1296 | |
be926fc4 | 1297 | gfar_start(ndev); |
d87eb127 | 1298 | |
fba4ed03 SG |
1299 | unlock_rx_qs(priv); |
1300 | unlock_tx_qs(priv); | |
1301 | local_irq_restore(flags); | |
d87eb127 | 1302 | |
be926fc4 AV |
1303 | netif_device_attach(ndev); |
1304 | ||
46ceb60c | 1305 | enable_napi(priv); |
be926fc4 AV |
1306 | |
1307 | return 0; | |
1308 | } | |
1309 | ||
1310 | static int gfar_restore(struct device *dev) | |
1311 | { | |
1312 | struct gfar_private *priv = dev_get_drvdata(dev); | |
1313 | struct net_device *ndev = priv->ndev; | |
1314 | ||
1315 | if (!netif_running(ndev)) | |
1316 | return 0; | |
1317 | ||
1318 | gfar_init_bds(ndev); | |
1319 | init_registers(ndev); | |
1320 | gfar_set_mac_address(ndev); | |
1321 | gfar_init_mac(ndev); | |
1322 | gfar_start(ndev); | |
1323 | ||
1324 | priv->oldlink = 0; | |
1325 | priv->oldspeed = 0; | |
1326 | priv->oldduplex = -1; | |
1327 | ||
1328 | if (priv->phydev) | |
1329 | phy_start(priv->phydev); | |
d87eb127 | 1330 | |
be926fc4 | 1331 | netif_device_attach(ndev); |
5ea681d4 | 1332 | enable_napi(priv); |
d87eb127 SW |
1333 | |
1334 | return 0; | |
1335 | } | |
be926fc4 AV |
1336 | |
1337 | static struct dev_pm_ops gfar_pm_ops = { | |
1338 | .suspend = gfar_suspend, | |
1339 | .resume = gfar_resume, | |
1340 | .freeze = gfar_suspend, | |
1341 | .thaw = gfar_resume, | |
1342 | .restore = gfar_restore, | |
1343 | }; | |
1344 | ||
1345 | #define GFAR_PM_OPS (&gfar_pm_ops) | |
1346 | ||
d87eb127 | 1347 | #else |
be926fc4 AV |
1348 | |
1349 | #define GFAR_PM_OPS NULL | |
be926fc4 | 1350 | |
d87eb127 | 1351 | #endif |
1da177e4 | 1352 | |
e8a2b6a4 AF |
1353 | /* Reads the controller's registers to determine what interface |
1354 | * connects it to the PHY. | |
1355 | */ | |
1356 | static phy_interface_t gfar_get_interface(struct net_device *dev) | |
1357 | { | |
1358 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 1359 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
f4983704 SG |
1360 | u32 ecntrl; |
1361 | ||
f4983704 | 1362 | ecntrl = gfar_read(®s->ecntrl); |
e8a2b6a4 AF |
1363 | |
1364 | if (ecntrl & ECNTRL_SGMII_MODE) | |
1365 | return PHY_INTERFACE_MODE_SGMII; | |
1366 | ||
1367 | if (ecntrl & ECNTRL_TBI_MODE) { | |
1368 | if (ecntrl & ECNTRL_REDUCED_MODE) | |
1369 | return PHY_INTERFACE_MODE_RTBI; | |
1370 | else | |
1371 | return PHY_INTERFACE_MODE_TBI; | |
1372 | } | |
1373 | ||
1374 | if (ecntrl & ECNTRL_REDUCED_MODE) { | |
1375 | if (ecntrl & ECNTRL_REDUCED_MII_MODE) | |
1376 | return PHY_INTERFACE_MODE_RMII; | |
7132ab7f | 1377 | else { |
b31a1d8b | 1378 | phy_interface_t interface = priv->interface; |
7132ab7f AF |
1379 | |
1380 | /* | |
1381 | * This isn't autodetected right now, so it must | |
1382 | * be set by the device tree or platform code. | |
1383 | */ | |
1384 | if (interface == PHY_INTERFACE_MODE_RGMII_ID) | |
1385 | return PHY_INTERFACE_MODE_RGMII_ID; | |
1386 | ||
e8a2b6a4 | 1387 | return PHY_INTERFACE_MODE_RGMII; |
7132ab7f | 1388 | } |
e8a2b6a4 AF |
1389 | } |
1390 | ||
b31a1d8b | 1391 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) |
e8a2b6a4 AF |
1392 | return PHY_INTERFACE_MODE_GMII; |
1393 | ||
1394 | return PHY_INTERFACE_MODE_MII; | |
1395 | } | |
1396 | ||
1397 | ||
bb40dcbb AF |
1398 | /* Initializes driver's PHY state, and attaches to the PHY. |
1399 | * Returns 0 on success. | |
1da177e4 LT |
1400 | */ |
1401 | static int init_phy(struct net_device *dev) | |
1402 | { | |
1403 | struct gfar_private *priv = netdev_priv(dev); | |
bb40dcbb | 1404 | uint gigabit_support = |
b31a1d8b | 1405 | priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? |
bb40dcbb | 1406 | SUPPORTED_1000baseT_Full : 0; |
e8a2b6a4 | 1407 | phy_interface_t interface; |
1da177e4 LT |
1408 | |
1409 | priv->oldlink = 0; | |
1410 | priv->oldspeed = 0; | |
1411 | priv->oldduplex = -1; | |
1412 | ||
e8a2b6a4 AF |
1413 | interface = gfar_get_interface(dev); |
1414 | ||
1db780f8 AV |
1415 | priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, |
1416 | interface); | |
1417 | if (!priv->phydev) | |
1418 | priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link, | |
1419 | interface); | |
1420 | if (!priv->phydev) { | |
1421 | dev_err(&dev->dev, "could not attach to PHY\n"); | |
1422 | return -ENODEV; | |
fe192a49 | 1423 | } |
1da177e4 | 1424 | |
d3c12873 KJ |
1425 | if (interface == PHY_INTERFACE_MODE_SGMII) |
1426 | gfar_configure_serdes(dev); | |
1427 | ||
bb40dcbb | 1428 | /* Remove any features not supported by the controller */ |
fe192a49 GL |
1429 | priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support); |
1430 | priv->phydev->advertising = priv->phydev->supported; | |
1da177e4 LT |
1431 | |
1432 | return 0; | |
1da177e4 LT |
1433 | } |
1434 | ||
d0313587 PG |
1435 | /* |
1436 | * Initialize TBI PHY interface for communicating with the | |
1437 | * SERDES lynx PHY on the chip. We communicate with this PHY | |
1438 | * through the MDIO bus on each controller, treating it as a | |
1439 | * "normal" PHY at the address found in the TBIPA register. We assume | |
1440 | * that the TBIPA register is valid. Either the MDIO bus code will set | |
1441 | * it to a value that doesn't conflict with other PHYs on the bus, or the | |
1442 | * value doesn't matter, as there are no other PHYs on the bus. | |
1443 | */ | |
d3c12873 KJ |
1444 | static void gfar_configure_serdes(struct net_device *dev) |
1445 | { | |
1446 | struct gfar_private *priv = netdev_priv(dev); | |
fe192a49 GL |
1447 | struct phy_device *tbiphy; |
1448 | ||
1449 | if (!priv->tbi_node) { | |
1450 | dev_warn(&dev->dev, "error: SGMII mode requires that the " | |
1451 | "device tree specify a tbi-handle\n"); | |
1452 | return; | |
1453 | } | |
c132419e | 1454 | |
fe192a49 GL |
1455 | tbiphy = of_phy_find_device(priv->tbi_node); |
1456 | if (!tbiphy) { | |
1457 | dev_err(&dev->dev, "error: Could not get TBI device\n"); | |
b31a1d8b AF |
1458 | return; |
1459 | } | |
d3c12873 | 1460 | |
b31a1d8b AF |
1461 | /* |
1462 | * If the link is already up, we must already be ok, and don't need to | |
bdb59f94 TP |
1463 | * configure and reset the TBI<->SerDes link. Maybe U-Boot configured |
1464 | * everything for us? Resetting it takes the link down and requires | |
1465 | * several seconds for it to come back. | |
1466 | */ | |
fe192a49 | 1467 | if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) |
b31a1d8b | 1468 | return; |
d3c12873 | 1469 | |
d0313587 | 1470 | /* Single clk mode, mii mode off (for serdes communication) */ |
fe192a49 | 1471 | phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); |
d3c12873 | 1472 | |
fe192a49 | 1473 | phy_write(tbiphy, MII_ADVERTISE, |
d3c12873 KJ |
1474 | ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | |
1475 | ADVERTISE_1000XPSE_ASYM); | |
1476 | ||
fe192a49 | 1477 | phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE | |
d3c12873 KJ |
1478 | BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); |
1479 | } | |
1480 | ||
1da177e4 LT |
1481 | static void init_registers(struct net_device *dev) |
1482 | { | |
1483 | struct gfar_private *priv = netdev_priv(dev); | |
f4983704 | 1484 | struct gfar __iomem *regs = NULL; |
46ceb60c | 1485 | int i = 0; |
1da177e4 | 1486 | |
46ceb60c SG |
1487 | for (i = 0; i < priv->num_grps; i++) { |
1488 | regs = priv->gfargrp[i].regs; | |
1489 | /* Clear IEVENT */ | |
1490 | gfar_write(®s->ievent, IEVENT_INIT_CLEAR); | |
1da177e4 | 1491 | |
46ceb60c SG |
1492 | /* Initialize IMASK */ |
1493 | gfar_write(®s->imask, IMASK_INIT_CLEAR); | |
1494 | } | |
1da177e4 | 1495 | |
46ceb60c | 1496 | regs = priv->gfargrp[0].regs; |
1da177e4 | 1497 | /* Init hash registers to zero */ |
f4983704 SG |
1498 | gfar_write(®s->igaddr0, 0); |
1499 | gfar_write(®s->igaddr1, 0); | |
1500 | gfar_write(®s->igaddr2, 0); | |
1501 | gfar_write(®s->igaddr3, 0); | |
1502 | gfar_write(®s->igaddr4, 0); | |
1503 | gfar_write(®s->igaddr5, 0); | |
1504 | gfar_write(®s->igaddr6, 0); | |
1505 | gfar_write(®s->igaddr7, 0); | |
1506 | ||
1507 | gfar_write(®s->gaddr0, 0); | |
1508 | gfar_write(®s->gaddr1, 0); | |
1509 | gfar_write(®s->gaddr2, 0); | |
1510 | gfar_write(®s->gaddr3, 0); | |
1511 | gfar_write(®s->gaddr4, 0); | |
1512 | gfar_write(®s->gaddr5, 0); | |
1513 | gfar_write(®s->gaddr6, 0); | |
1514 | gfar_write(®s->gaddr7, 0); | |
1da177e4 | 1515 | |
1da177e4 | 1516 | /* Zero out the rmon mib registers if the device has them */ |
b31a1d8b | 1517 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { |
f4983704 | 1518 | memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib)); |
1da177e4 LT |
1519 | |
1520 | /* Mask off the CAM interrupts */ | |
f4983704 SG |
1521 | gfar_write(®s->rmon.cam1, 0xffffffff); |
1522 | gfar_write(®s->rmon.cam2, 0xffffffff); | |
1da177e4 LT |
1523 | } |
1524 | ||
1525 | /* Initialize the max receive buffer length */ | |
f4983704 | 1526 | gfar_write(®s->mrblr, priv->rx_buffer_size); |
1da177e4 | 1527 | |
1da177e4 | 1528 | /* Initialize the Minimum Frame Length Register */ |
f4983704 | 1529 | gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); |
1da177e4 LT |
1530 | } |
1531 | ||
0bbaf069 KG |
1532 | |
1533 | /* Halt the receive and transmit queues, without disabling Rx/Tx in MACCFG1 */ |
d87eb127 | 1534 | static void gfar_halt_nodisable(struct net_device *dev) |
1da177e4 LT |
1535 | { |
1536 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 1537 | struct gfar __iomem *regs = NULL; |
1da177e4 | 1538 | u32 tempval; |
46ceb60c | 1539 | int i = 0; |
1da177e4 | 1540 | |
46ceb60c SG |
1541 | for (i = 0; i < priv->num_grps; i++) { |
1542 | regs = priv->gfargrp[i].regs; | |
1543 | /* Mask all interrupts */ | |
1544 | gfar_write(®s->imask, IMASK_INIT_CLEAR); | |
1da177e4 | 1545 | |
46ceb60c SG |
1546 | /* Clear all interrupts */ |
1547 | gfar_write(®s->ievent, IEVENT_INIT_CLEAR); | |
1548 | } | |
1da177e4 | 1549 | |
46ceb60c | 1550 | regs = priv->gfargrp[0].regs; |
1da177e4 | 1551 | /* Stop the DMA, and wait for it to stop */ |
f4983704 | 1552 | tempval = gfar_read(®s->dmactrl); |
1da177e4 LT |
1553 | if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) |
1554 | != (DMACTRL_GRS | DMACTRL_GTS)) { | |
1555 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); | |
f4983704 | 1556 | gfar_write(®s->dmactrl, tempval); |
1da177e4 | 1557 | |
761ed01b AF |
1558 | spin_event_timeout(((gfar_read(®s->ievent) & |
1559 | (IEVENT_GRSC | IEVENT_GTSC)) == | |
1560 | (IEVENT_GRSC | IEVENT_GTSC)), -1, 0); | |
1da177e4 | 1561 | } |
d87eb127 | 1562 | } |
d87eb127 SW |
1563 | |
1564 | /* Halt the receive and transmit queues */ | |
1565 | void gfar_halt(struct net_device *dev) | |
1566 | { | |
1567 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 1568 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
d87eb127 | 1569 | u32 tempval; |
1da177e4 | 1570 | |
2a54adc3 SW |
1571 | gfar_halt_nodisable(dev); |
1572 | ||
1da177e4 LT |
1573 | /* Disable Rx and Tx */ |
1574 | tempval = gfar_read(®s->maccfg1); | |
1575 | tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); | |
1576 | gfar_write(®s->maccfg1, tempval); | |
0bbaf069 KG |
1577 | } |
1578 | ||
46ceb60c SG |
1579 | static void free_grp_irqs(struct gfar_priv_grp *grp) |
1580 | { | |
1581 | free_irq(grp->interruptError, grp); | |
1582 | free_irq(grp->interruptTransmit, grp); | |
1583 | free_irq(grp->interruptReceive, grp); | |
1584 | } | |
1585 | ||
0bbaf069 KG |
1586 | void stop_gfar(struct net_device *dev) |
1587 | { | |
1588 | struct gfar_private *priv = netdev_priv(dev); | |
0bbaf069 | 1589 | unsigned long flags; |
46ceb60c | 1590 | int i; |
0bbaf069 | 1591 | |
bb40dcbb AF |
1592 | phy_stop(priv->phydev); |
1593 | ||
a12f801d | 1594 | |
0bbaf069 | 1595 | /* Lock it down */ |
fba4ed03 SG |
1596 | local_irq_save(flags); |
1597 | lock_tx_qs(priv); | |
1598 | lock_rx_qs(priv); | |
0bbaf069 | 1599 | |
0bbaf069 | 1600 | gfar_halt(dev); |
1da177e4 | 1601 | |
fba4ed03 SG |
1602 | unlock_rx_qs(priv); |
1603 | unlock_tx_qs(priv); | |
1604 | local_irq_restore(flags); | |
1da177e4 LT |
1605 | |
1606 | /* Free the IRQs */ | |
b31a1d8b | 1607 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
46ceb60c SG |
1608 | for (i = 0; i < priv->num_grps; i++) |
1609 | free_grp_irqs(&priv->gfargrp[i]); | |
1da177e4 | 1610 | } else { |
46ceb60c SG |
1611 | for (i = 0; i < priv->num_grps; i++) |
1612 | free_irq(priv->gfargrp[i].interruptTransmit, | |
1613 | &priv->gfargrp[i]); | |
1da177e4 LT |
1614 | } |
1615 | ||
1616 | free_skb_resources(priv); | |
1da177e4 LT |
1617 | } |
1618 | ||
fba4ed03 | 1619 | static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) |
1da177e4 | 1620 | { |
1da177e4 | 1621 | struct txbd8 *txbdp; |
fba4ed03 | 1622 | struct gfar_private *priv = netdev_priv(tx_queue->dev); |
4669bc90 | 1623 | int i, j; |
1da177e4 | 1624 | |
a12f801d | 1625 | txbdp = tx_queue->tx_bd_base; |
1da177e4 | 1626 | |
a12f801d SG |
1627 | for (i = 0; i < tx_queue->tx_ring_size; i++) { |
1628 | if (!tx_queue->tx_skbuff[i]) | |
4669bc90 | 1629 | continue; |
1da177e4 | 1630 | |
4826857f | 1631 | dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, |
4669bc90 DH |
1632 | txbdp->length, DMA_TO_DEVICE); |
1633 | txbdp->lstatus = 0; | |
fba4ed03 SG |
1634 | for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; |
1635 | j++) { | |
4669bc90 | 1636 | txbdp++; |
4826857f | 1637 | dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, |
4669bc90 | 1638 | txbdp->length, DMA_TO_DEVICE); |
1da177e4 | 1639 | } |
ad5da7ab | 1640 | txbdp++; |
a12f801d SG |
1641 | dev_kfree_skb_any(tx_queue->tx_skbuff[i]); |
1642 | tx_queue->tx_skbuff[i] = NULL; | |
1da177e4 | 1643 | } |
a12f801d | 1644 | kfree(tx_queue->tx_skbuff); |
fba4ed03 | 1645 | } |
1da177e4 | 1646 | |
fba4ed03 SG |
1647 | static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) |
1648 | { | |
1649 | struct rxbd8 *rxbdp; | |
1650 | struct gfar_private *priv = netdev_priv(rx_queue->dev); | |
1651 | int i; | |
1da177e4 | 1652 | |
fba4ed03 | 1653 | rxbdp = rx_queue->rx_bd_base; |
1da177e4 | 1654 | |
a12f801d SG |
1655 | for (i = 0; i < rx_queue->rx_ring_size; i++) { |
1656 | if (rx_queue->rx_skbuff[i]) { | |
fba4ed03 SG |
1657 | dma_unmap_single(&priv->ofdev->dev, |
1658 | rxbdp->bufPtr, priv->rx_buffer_size, | |
e69edd21 | 1659 | DMA_FROM_DEVICE); |
a12f801d SG |
1660 | dev_kfree_skb_any(rx_queue->rx_skbuff[i]); |
1661 | rx_queue->rx_skbuff[i] = NULL; | |
1da177e4 | 1662 | } |
e69edd21 AV |
1663 | rxbdp->lstatus = 0; |
1664 | rxbdp->bufPtr = 0; | |
1665 | rxbdp++; | |
1da177e4 | 1666 | } |
a12f801d | 1667 | kfree(rx_queue->rx_skbuff); |
fba4ed03 | 1668 | } |
e69edd21 | 1669 | |
fba4ed03 SG |
1670 | /* If there are any tx skbs or rx skbs still around, free them. |
1671 | * Then free tx_skbuff and rx_skbuff */ | |
1672 | static void free_skb_resources(struct gfar_private *priv) | |
1673 | { | |
1674 | struct gfar_priv_tx_q *tx_queue = NULL; | |
1675 | struct gfar_priv_rx_q *rx_queue = NULL; | |
1676 | int i; | |
1677 | ||
1678 | /* Go through all the buffer descriptors and free their data buffers */ | |
1679 | for (i = 0; i < priv->num_tx_queues; i++) { | |
1680 | tx_queue = priv->tx_queue[i]; | |
7c0d10d3 | 1681 | if (tx_queue->tx_skbuff) |
fba4ed03 SG |
1682 | free_skb_tx_queue(tx_queue); |
1683 | } | |
1684 | ||
1685 | for (i = 0; i < priv->num_rx_queues; i++) { | |
1686 | rx_queue = priv->rx_queue[i]; | |
7c0d10d3 | 1687 | if (rx_queue->rx_skbuff) |
fba4ed03 SG |
1688 | free_skb_rx_queue(rx_queue); |
1689 | } | |
1690 | ||
1691 | dma_free_coherent(&priv->ofdev->dev, | |
1692 | sizeof(struct txbd8) * priv->total_tx_ring_size + | |
1693 | sizeof(struct rxbd8) * priv->total_rx_ring_size, | |
1694 | priv->tx_queue[0]->tx_bd_base, | |
1695 | priv->tx_queue[0]->tx_bd_dma_base); | |
7df9c43f | 1696 | skb_queue_purge(&priv->rx_recycle); |
1da177e4 LT |
1697 | } |
1698 | ||
0bbaf069 KG |
1699 | void gfar_start(struct net_device *dev) |
1700 | { | |
1701 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 1702 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
0bbaf069 | 1703 | u32 tempval; |
46ceb60c | 1704 | int i = 0; |
0bbaf069 KG |
1705 | |
1706 | /* Enable Rx and Tx in MACCFG1 */ | |
1707 | tempval = gfar_read(®s->maccfg1); | |
1708 | tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); | |
1709 | gfar_write(®s->maccfg1, tempval); | |
1710 | ||
1711 | /* Initialize DMACTRL to have WWR and WOP */ | |
f4983704 | 1712 | tempval = gfar_read(®s->dmactrl); |
0bbaf069 | 1713 | tempval |= DMACTRL_INIT_SETTINGS; |
f4983704 | 1714 | gfar_write(®s->dmactrl, tempval); |
0bbaf069 | 1715 | |
0bbaf069 | 1716 | /* Make sure we aren't stopped */ |
f4983704 | 1717 | tempval = gfar_read(®s->dmactrl); |
0bbaf069 | 1718 | tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); |
f4983704 | 1719 | gfar_write(®s->dmactrl, tempval); |
0bbaf069 | 1720 | |
46ceb60c SG |
1721 | for (i = 0; i < priv->num_grps; i++) { |
1722 | regs = priv->gfargrp[i].regs; | |
1723 | /* Clear THLT/RHLT, so that the DMA starts polling now */ | |
1724 | gfar_write(®s->tstat, priv->gfargrp[i].tstat); | |
1725 | gfar_write(®s->rstat, priv->gfargrp[i].rstat); | |
1726 | /* Unmask the interrupts we look for */ | |
1727 | gfar_write(®s->imask, IMASK_DEFAULT); | |
1728 | } | |
12dea57b | 1729 | |
1ae5dc34 | 1730 | dev->trans_start = jiffies; /* prevent tx timeout */ |
0bbaf069 KG |
1731 | } |
1732 | ||
46ceb60c | 1733 | void gfar_configure_coalescing(struct gfar_private *priv, |
18294ad1 | 1734 | unsigned long tx_mask, unsigned long rx_mask) |
1da177e4 | 1735 | { |
46ceb60c | 1736 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
18294ad1 | 1737 | u32 __iomem *baddr; |
46ceb60c | 1738 | int i = 0; |
1da177e4 | 1739 | |
46ceb60c SG |
1740 | /* Backward compatible case ---- even if we enable |
1741 | * multiple queues, there's only a single reg to program |
1742 | */ | |
1743 | gfar_write(®s->txic, 0); | |
1744 | if (likely(priv->tx_queue[0]->txcoalescing)) |
1745 | gfar_write(®s->txic, priv->tx_queue[0]->txic); | |
1da177e4 | 1746 | |
46ceb60c SG |
1747 | gfar_write(®s->rxic, 0); |
1748 | if (unlikely(priv->rx_queue[0]->rxcoalescing)) |
1749 | gfar_write(®s->rxic, priv->rx_queue[0]->rxic); | |
815b97c6 | 1750 | |
46ceb60c SG |
1751 | if (priv->mode == MQ_MG_MODE) { |
1752 | baddr = ®s->txic0; | |
984b3f57 | 1753 | for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { |
46ceb60c SG |
1754 | if (likely(priv->tx_queue[i]->txcoalescing)) { |
1755 | gfar_write(baddr + i, 0); | |
1756 | gfar_write(baddr + i, priv->tx_queue[i]->txic); | |
1757 | } | |
1758 | } | |
1759 | ||
1760 | baddr = ®s->rxic0; | |
984b3f57 | 1761 | for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { |
46ceb60c SG |
1762 | if (likely(priv->rx_queue[i]->rxcoalescing)) { |
1763 | gfar_write(baddr + i, 0); | |
1764 | gfar_write(baddr + i, priv->rx_queue[i]->rxic); | |
1765 | } | |
1766 | } | |
1767 | } | |
1768 | } | |
1769 | ||
1770 | static int register_grp_irqs(struct gfar_priv_grp *grp) | |
1771 | { | |
1772 | struct gfar_private *priv = grp->priv; | |
1773 | struct net_device *dev = priv->ndev; | |
1774 | int err; | |
1da177e4 | 1775 | |
1da177e4 LT |
1776 | /* If the device has multiple interrupts, register for |
1777 | * them. Otherwise, only register for the one */ | |
b31a1d8b | 1778 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
0bbaf069 | 1779 | /* Install our interrupt handlers for Error, |
1da177e4 | 1780 | * Transmit, and Receive */ |
46ceb60c SG |
1781 | if ((err = request_irq(grp->interruptError, gfar_error, 0, |
1782 | grp->int_name_er, grp)) < 0) { |
0bbaf069 | 1783 | if (netif_msg_intr(priv)) |
46ceb60c SG |
1784 | printk(KERN_ERR "%s: Can't get IRQ %d\n", |
1785 | dev->name, grp->interruptError); | |
1786 | ||
1787 | goto err_irq_fail; | |
1da177e4 LT |
1788 | } |
1789 | ||
46ceb60c SG |
1790 | if ((err = request_irq(grp->interruptTransmit, gfar_transmit, |
1791 | 0, grp->int_name_tx, grp)) < 0) { | |
0bbaf069 | 1792 | if (netif_msg_intr(priv)) |
46ceb60c SG |
1793 | printk(KERN_ERR "%s: Can't get IRQ %d\n", |
1794 | dev->name, grp->interruptTransmit); | |
1da177e4 LT |
1795 | goto tx_irq_fail; |
1796 | } | |
1797 | ||
46ceb60c SG |
1798 | if ((err = request_irq(grp->interruptReceive, gfar_receive, 0, |
1799 | grp->int_name_rx, grp)) < 0) { | |
0bbaf069 | 1800 | if (netif_msg_intr(priv)) |
46ceb60c SG |
1801 | printk(KERN_ERR "%s: Can't get IRQ %d\n", |
1802 | dev->name, grp->interruptReceive); | |
1da177e4 LT |
1803 | goto rx_irq_fail; |
1804 | } | |
1805 | } else { | |
46ceb60c SG |
1806 | if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0, |
1807 | grp->int_name_tx, grp)) < 0) { | |
0bbaf069 | 1808 | if (netif_msg_intr(priv)) |
46ceb60c SG |
1809 | printk(KERN_ERR "%s: Can't get IRQ %d\n", |
1810 | dev->name, grp->interruptTransmit); | |
1da177e4 LT |
1811 | goto err_irq_fail; |
1812 | } | |
1813 | } | |
1814 | ||
46ceb60c SG |
1815 | return 0; |
1816 | ||
1817 | rx_irq_fail: | |
1818 | free_irq(grp->interruptTransmit, grp); | |
1819 | tx_irq_fail: | |
1820 | free_irq(grp->interruptError, grp); | |
1821 | err_irq_fail: | |
1822 | return err; | |
1823 | ||
1824 | } | |
1825 | ||
1826 | /* Bring the controller up and running */ | |
1827 | int startup_gfar(struct net_device *ndev) | |
1828 | { | |
1829 | struct gfar_private *priv = netdev_priv(ndev); | |
1830 | struct gfar __iomem *regs = NULL; | |
1831 | int err, i, j; | |
1832 | ||
1833 | for (i = 0; i < priv->num_grps; i++) { | |
1834 | regs = priv->gfargrp[i].regs; |
1835 | gfar_write(®s->imask, IMASK_INIT_CLEAR); | |
1836 | } | |
1837 | ||
1838 | regs = priv->gfargrp[0].regs; |
1839 | err = gfar_alloc_skb_resources(ndev); | |
1840 | if (err) | |
1841 | return err; | |
1842 | ||
1843 | gfar_init_mac(ndev); | |
1844 | ||
1845 | for (i = 0; i < priv->num_grps; i++) { | |
1846 | err = register_grp_irqs(&priv->gfargrp[i]); | |
1847 | if (err) { | |
1848 | for (j = 0; j < i; j++) | |
1849 | free_grp_irqs(&priv->gfargrp[j]); | |
1850 | goto irq_fail; | |
1851 | } | |
1852 | } | |
1853 | ||
7f7f5316 | 1854 | /* Start the controller */ |
ccc05c6e | 1855 | gfar_start(ndev); |
1da177e4 | 1856 | |
826aa4a0 AV |
1857 | phy_start(priv->phydev); |
1858 | ||
46ceb60c SG |
1859 | gfar_configure_coalescing(priv, 0xFF, 0xFF); |
1860 | ||
1da177e4 LT |
1861 | return 0; |
1862 | ||
46ceb60c | 1863 | irq_fail: |
e69edd21 | 1864 | free_skb_resources(priv); |
1da177e4 LT |
1865 | return err; |
1866 | } | |
1867 | ||
1868 | /* Called when something needs to use the ethernet device */ | |
1869 | /* Returns 0 for success. */ | |
1870 | static int gfar_enet_open(struct net_device *dev) | |
1871 | { | |
94e8cc35 | 1872 | struct gfar_private *priv = netdev_priv(dev); |
1da177e4 LT |
1873 | int err; |
1874 | ||
46ceb60c | 1875 | enable_napi(priv); |
bea3348e | 1876 | |
0fd56bb5 AF |
1877 | skb_queue_head_init(&priv->rx_recycle); |
1878 | ||
1da177e4 LT |
1879 | /* Initialize a bunch of registers */ |
1880 | init_registers(dev); | |
1881 | ||
1882 | gfar_set_mac_address(dev); | |
1883 | ||
1884 | err = init_phy(dev); | |
1885 | ||
a12f801d | 1886 | if (err) { |
46ceb60c | 1887 | disable_napi(priv); |
1da177e4 | 1888 | return err; |
bea3348e | 1889 | } |
1da177e4 LT |
1890 | |
1891 | err = startup_gfar(dev); | |
db0e8e3f | 1892 | if (err) { |
46ceb60c | 1893 | disable_napi(priv); |
db0e8e3f AV |
1894 | return err; |
1895 | } | |
1da177e4 | 1896 | |
fba4ed03 | 1897 | netif_tx_start_all_queues(dev); |
1da177e4 | 1898 | |
2884e5cc AV |
1899 | device_set_wakeup_enable(&dev->dev, priv->wol_en); |
1900 | ||
1da177e4 LT |
1901 | return err; |
1902 | } | |
1903 | ||
54dc79fe | 1904 | static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) |
0bbaf069 | 1905 | { |
54dc79fe | 1906 | struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN); |
6c31d55f KG |
1907 | |
1908 | memset(fcb, 0, GMAC_FCB_LEN); | |
0bbaf069 | 1909 | |
0bbaf069 KG |
1910 | return fcb; |
1911 | } | |
1912 | ||
1913 | static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb) | |
1914 | { | |
7f7f5316 | 1915 | u8 flags = 0; |
0bbaf069 KG |
1916 | |
1917 | /* If we're here, it's an IP packet with a TCP or UDP |
1918 | * payload. We set it to checksum, using a pseudo-header | |
1919 | * we provide | |
1920 | */ | |
7f7f5316 | 1921 | flags = TXFCB_DEFAULT; |
0bbaf069 | 1922 | |
7f7f5316 AF |
1923 | /* Tell the controller what the protocol is */ |
1924 | /* And provide the already calculated phcs */ | |
eddc9ec5 | 1925 | if (ip_hdr(skb)->protocol == IPPROTO_UDP) { |
7f7f5316 | 1926 | flags |= TXFCB_UDP; |
4bedb452 | 1927 | fcb->phcs = udp_hdr(skb)->check; |
7f7f5316 | 1928 | } else |
8da32de5 | 1929 | fcb->phcs = tcp_hdr(skb)->check; |
0bbaf069 KG |
1930 | |
1931 | /* l3os is the distance between the start of the | |
1932 | * frame (skb->data) and the start of the IP hdr. | |
1933 | * l4os is the distance between the start of the | |
1934 | * l3 hdr and the l4 hdr */ | |
bbe735e4 | 1935 | fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN); |
cfe1fc77 | 1936 | fcb->l4os = skb_network_header_len(skb); |
0bbaf069 | 1937 | |
7f7f5316 | 1938 | fcb->flags = flags; |
0bbaf069 KG |
1939 | } |
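/*
 * Worked example of the two offsets above, assuming an untagged
 * IPv4/TCP frame with the 8-byte FCB (GMAC_FCB_LEN) already pushed
 * by gfar_add_fcb(): skb->data points at the FCB, so
 * skb_network_offset() is GMAC_FCB_LEN + ETH_HLEN = 8 + 14 = 22,
 * giving l3os = 22 - 8 = 14; l4os is the IP header length itself,
 * 20 for a header with no options.
 */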
1940 | ||
7f7f5316 | 1941 | inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) |
0bbaf069 | 1942 | { |
7f7f5316 | 1943 | fcb->flags |= TXFCB_VLN; |
0bbaf069 KG |
1944 | fcb->vlctl = vlan_tx_tag_get(skb); |
1945 | } | |
1946 | ||
4669bc90 DH |
1947 | static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, |
1948 | struct txbd8 *base, int ring_size) | |
1949 | { | |
1950 | struct txbd8 *new_bd = bdp + stride; | |
1951 | ||
1952 | return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; | |
1953 | } | |
1954 | ||
1955 | static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, | |
1956 | int ring_size) | |
1957 | { | |
1958 | return skip_txbd(bdp, 1, base, ring_size); | |
1959 | } | |
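/*
 * Illustration of the wrap arithmetic in skip_txbd(), using
 * hypothetical values: with base at index 0, ring_size = 256 and
 * bdp at base + 254, skip_txbd(bdp, 4, base, 256) computes
 * new_bd = base + 258, detects that it lies past base + 256 and
 * returns base + 2; the descriptor ring is treated as circular
 * with a single compare and subtract instead of a modulo.
 */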
1960 | ||
1da177e4 LT |
1961 | /* This is called by the kernel when a frame is ready for transmission. */ |
1962 | /* It is pointed to by the dev->hard_start_xmit function pointer */ | |
1963 | static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
1964 | { | |
1965 | struct gfar_private *priv = netdev_priv(dev); | |
a12f801d | 1966 | struct gfar_priv_tx_q *tx_queue = NULL; |
fba4ed03 | 1967 | struct netdev_queue *txq; |
f4983704 | 1968 | struct gfar __iomem *regs = NULL; |
0bbaf069 | 1969 | struct txfcb *fcb = NULL; |
f0ee7acf | 1970 | struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; |
5a5efed4 | 1971 | u32 lstatus; |
f0ee7acf | 1972 | int i, rq = 0, do_tstamp = 0; |
4669bc90 | 1973 | u32 bufaddr; |
fef6108d | 1974 | unsigned long flags; |
f0ee7acf MR |
1975 | unsigned int nr_frags, nr_txbds, length; |
1976 | union skb_shared_tx *shtx; | |
fba4ed03 SG |
1977 | |
1978 | rq = skb->queue_mapping; | |
1979 | tx_queue = priv->tx_queue[rq]; | |
1980 | txq = netdev_get_tx_queue(dev, rq); | |
a12f801d | 1981 | base = tx_queue->tx_bd_base; |
46ceb60c | 1982 | regs = tx_queue->grp->regs; |
f0ee7acf MR |
1983 | shtx = skb_tx(skb); |
1984 | ||
1985 | /* check if time stamp should be generated */ | |
1986 | if (unlikely(shtx->hardware && priv->hwts_tx_en)) | |
1987 | do_tstamp = 1; | |
4669bc90 | 1988 | |
5b28beaf LY |
1989 | /* make space for additional header when fcb is needed */ |
1990 | if (((skb->ip_summed == CHECKSUM_PARTIAL) || | |
f0ee7acf MR |
1991 | (priv->vlgrp && vlan_tx_tag_present(skb)) || |
1992 | unlikely(do_tstamp)) && | |
5b28beaf | 1993 | (skb_headroom(skb) < GMAC_FCB_LEN)) { |
54dc79fe SH |
1994 | struct sk_buff *skb_new; |
1995 | ||
1996 | skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN); | |
1997 | if (!skb_new) { | |
1998 | dev->stats.tx_errors++; | |
bd14ba84 | 1999 | kfree_skb(skb); |
54dc79fe SH |
2000 | return NETDEV_TX_OK; |
2001 | } | |
2002 | kfree_skb(skb); | |
2003 | skb = skb_new; | |
2004 | } | |
2005 | ||
4669bc90 DH |
2006 | /* total number of fragments in the SKB */ |
2007 | nr_frags = skb_shinfo(skb)->nr_frags; | |
2008 | ||
f0ee7acf MR |
2009 | /* calculate the required number of TxBDs for this skb */ |
2010 | if (unlikely(do_tstamp)) | |
2011 | nr_txbds = nr_frags + 2; | |
2012 | else | |
2013 | nr_txbds = nr_frags + 1; | |
2014 | ||
4669bc90 | 2015 | /* check if there is space to queue this packet */ |
f0ee7acf | 2016 | if (nr_txbds > tx_queue->num_txbdfree) { |
4669bc90 | 2017 | /* no space, stop the queue */ |
fba4ed03 | 2018 | netif_tx_stop_queue(txq); |
4669bc90 | 2019 | dev->stats.tx_fifo_errors++; |
4669bc90 DH |
2020 | return NETDEV_TX_BUSY; |
2021 | } | |
1da177e4 LT |
2022 | |
2023 | /* Update transmit stats */ | |
a7f38041 SG |
2024 | txq->tx_bytes += skb->len; |
2025 | txq->tx_packets++; |
1da177e4 | 2026 | |
a12f801d | 2027 | txbdp = txbdp_start = tx_queue->cur_tx; |
f0ee7acf MR |
2028 | lstatus = txbdp->lstatus; |
2029 | ||
2030 | /* Time stamp insertion requires one additional TxBD */ | |
2031 | if (unlikely(do_tstamp)) | |
2032 | txbdp_tstamp = txbdp = next_txbd(txbdp, base, | |
2033 | tx_queue->tx_ring_size); | |
1da177e4 | 2034 | |
4669bc90 | 2035 | if (nr_frags == 0) { |
f0ee7acf MR |
2036 | if (unlikely(do_tstamp)) |
2037 | txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST | | |
2038 | TXBD_INTERRUPT); | |
2039 | else | |
2040 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | |
4669bc90 DH |
2041 | } else { |
2042 | /* Place the fragment addresses and lengths into the TxBDs */ | |
2043 | for (i = 0; i < nr_frags; i++) { | |
2044 | /* Point at the next BD, wrapping as needed */ | |
a12f801d | 2045 | txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
4669bc90 DH |
2046 | |
2047 | length = skb_shinfo(skb)->frags[i].size; | |
2048 | ||
2049 | lstatus = txbdp->lstatus | length | | |
2050 | BD_LFLAG(TXBD_READY); | |
2051 | ||
2052 | /* Handle the last BD specially */ | |
2053 | if (i == nr_frags - 1) | |
2054 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | |
1da177e4 | 2055 | |
4826857f | 2056 | bufaddr = dma_map_page(&priv->ofdev->dev, |
4669bc90 DH |
2057 | skb_shinfo(skb)->frags[i].page, |
2058 | skb_shinfo(skb)->frags[i].page_offset, | |
2059 | length, | |
2060 | DMA_TO_DEVICE); | |
2061 | ||
2062 | /* set the TxBD length and buffer pointer */ | |
2063 | txbdp->bufPtr = bufaddr; | |
2064 | txbdp->lstatus = lstatus; | |
2065 | } | |
2066 | ||
2067 | lstatus = txbdp_start->lstatus; | |
2068 | } | |
1da177e4 | 2069 | |
0bbaf069 | 2070 | /* Set up checksumming */ |
12dea57b | 2071 | if (CHECKSUM_PARTIAL == skb->ip_summed) { |
54dc79fe SH |
2072 | fcb = gfar_add_fcb(skb); |
2073 | lstatus |= BD_LFLAG(TXBD_TOE); | |
2074 | gfar_tx_checksum(skb, fcb); | |
0bbaf069 KG |
2075 | } |
2076 | ||
77ecaf2d | 2077 | if (priv->vlgrp && vlan_tx_tag_present(skb)) { |
54dc79fe SH |
2078 | if (unlikely(NULL == fcb)) { |
2079 | fcb = gfar_add_fcb(skb); | |
5a5efed4 | 2080 | lstatus |= BD_LFLAG(TXBD_TOE); |
7f7f5316 | 2081 | } |
54dc79fe SH |
2082 | |
2083 | gfar_tx_vlan(skb, fcb); | |
0bbaf069 KG |
2084 | } |
2085 | ||
f0ee7acf MR |
2086 | /* Setup tx hardware time stamping if requested */ |
2087 | if (unlikely(do_tstamp)) { | |
2088 | shtx->in_progress = 1; | |
2089 | if (fcb == NULL) | |
2090 | fcb = gfar_add_fcb(skb); | |
2091 | fcb->ptp = 1; | |
2092 | lstatus |= BD_LFLAG(TXBD_TOE); | |
2093 | } | |
2094 | ||
4826857f | 2095 | txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, |
4669bc90 | 2096 | skb_headlen(skb), DMA_TO_DEVICE); |
1da177e4 | 2097 | |
f0ee7acf MR |
2098 | /* |
2099 | * If time stamping is requested, one additional TxBD must be set up. The |
2100 | * first TxBD points to the FCB and must have a data length of | |
2101 | * GMAC_FCB_LEN. The second TxBD points to the actual frame data with | |
2102 | * the full frame length. | |
2103 | */ | |
2104 | if (unlikely(do_tstamp)) { | |
2105 | txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN; | |
2106 | txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) | | |
2107 | (skb_headlen(skb) - GMAC_FCB_LEN); | |
2108 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; | |
2109 | } else { | |
2110 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); | |
2111 | } | |
1da177e4 | 2112 | |
a3bc1f11 AV |
2113 | /* |
2114 | * We can work in parallel with gfar_clean_tx_ring(), except | |
2115 | * when modifying num_txbdfree. Note that we didn't grab the lock | |
2116 | * when we were reading num_txbdfree and checking for available |
2117 | * space; that's because outside of this function it can only grow, |
2118 | * and once we've got needed space, it cannot suddenly disappear. | |
2119 | * | |
2120 | * The lock also protects us from gfar_error(), which can modify | |
2121 | * regs->tstat and thus retrigger the transfers, which is why we | |
2122 | * also must grab the lock before setting the ready bit for the |
2123 | * first BD to be transmitted. |
2124 | */ | |
2125 | spin_lock_irqsave(&tx_queue->txlock, flags); | |
2126 | ||
4669bc90 DH |
2127 | /* |
2128 | * The powerpc-specific eieio() is used, as wmb() has too strong | |
3b6330ce SW |
2129 | * semantics (it requires synchronization between cacheable and |
2130 | * uncacheable mappings, which eieio doesn't provide and which we | |
2131 | * don't need), thus requiring a more expensive sync instruction. At | |
2132 | * some point, the set of architecture-independent barrier functions | |
2133 | * should be expanded to include weaker barriers. | |
2134 | */ | |
3b6330ce | 2135 | eieio(); |
7f7f5316 | 2136 | |
4669bc90 DH |
2137 | txbdp_start->lstatus = lstatus; |
2138 | ||
0eddba52 AV |
2139 | eieio(); /* force lstatus write before tx_skbuff */ |
2140 | ||
2141 | tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; | |
2142 | ||
4669bc90 DH |
2143 | /* Update the current skb pointer to the next entry we will use |
2144 | * (wrapping if necessary) */ | |
a12f801d SG |
2145 | tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & |
2146 | TX_RING_MOD_MASK(tx_queue->tx_ring_size); | |
4669bc90 | 2147 | |
a12f801d | 2148 | tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
4669bc90 DH |
2149 | |
2150 | /* reduce TxBD free count */ | |
f0ee7acf | 2151 | tx_queue->num_txbdfree -= (nr_txbds); |
4669bc90 | 2152 | |
1da177e4 LT |
2153 | /* If the next BD still needs to be cleaned up, then the bds |
2154 | are full. We need to tell the kernel to stop sending us stuff. */ | |
a12f801d | 2155 | if (!tx_queue->num_txbdfree) { |
fba4ed03 | 2156 | netif_tx_stop_queue(txq); |
1da177e4 | 2157 | |
09f75cd7 | 2158 | dev->stats.tx_fifo_errors++; |
1da177e4 LT |
2159 | } |
2160 | ||
1da177e4 | 2161 | /* Tell the DMA to go go go */ |
fba4ed03 | 2162 | gfar_write(®s->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); |
1da177e4 LT |
2163 | |
2164 | /* Unlock priv */ | |
a12f801d | 2165 | spin_unlock_irqrestore(&tx_queue->txlock, flags); |
1da177e4 | 2166 | |
54dc79fe | 2167 | return NETDEV_TX_OK; |
1da177e4 LT |
2168 | } |
2169 | ||
2170 | /* Stops the kernel queue, and halts the controller */ | |
2171 | static int gfar_close(struct net_device *dev) | |
2172 | { | |
2173 | struct gfar_private *priv = netdev_priv(dev); | |
bea3348e | 2174 | |
46ceb60c | 2175 | disable_napi(priv); |
bea3348e | 2176 | |
ab939905 | 2177 | cancel_work_sync(&priv->reset_task); |
1da177e4 LT |
2178 | stop_gfar(dev); |
2179 | ||
bb40dcbb AF |
2180 | /* Disconnect from the PHY */ |
2181 | phy_disconnect(priv->phydev); | |
2182 | priv->phydev = NULL; | |
1da177e4 | 2183 | |
fba4ed03 | 2184 | netif_tx_stop_all_queues(dev); |
1da177e4 LT |
2185 | |
2186 | return 0; | |
2187 | } | |
2188 | ||
1da177e4 | 2189 | /* Changes the mac address if the controller is not running. */ |
f162b9d5 | 2190 | static int gfar_set_mac_address(struct net_device *dev) |
1da177e4 | 2191 | { |
7f7f5316 | 2192 | gfar_set_mac_for_addr(dev, 0, dev->dev_addr); |
1da177e4 LT |
2193 | |
2194 | return 0; | |
2195 | } | |
2196 | ||
2197 | ||
0bbaf069 KG |
2198 | /* Enables and disables VLAN insertion/extraction */ |
2199 | static void gfar_vlan_rx_register(struct net_device *dev, | |
2200 | struct vlan_group *grp) | |
2201 | { | |
2202 | struct gfar_private *priv = netdev_priv(dev); | |
f4983704 | 2203 | struct gfar __iomem *regs = NULL; |
0bbaf069 KG |
2204 | unsigned long flags; |
2205 | u32 tempval; | |
2206 | ||
46ceb60c | 2207 | regs = priv->gfargrp[0].regs; |
fba4ed03 SG |
2208 | local_irq_save(flags); |
2209 | lock_rx_qs(priv); | |
0bbaf069 | 2210 | |
cd1f55a5 | 2211 | priv->vlgrp = grp; |
0bbaf069 KG |
2212 | |
2213 | if (grp) { | |
2214 | /* Enable VLAN tag insertion */ | |
f4983704 | 2215 | tempval = gfar_read(®s->tctrl); |
0bbaf069 KG |
2216 | tempval |= TCTRL_VLINS; |
2217 | ||
f4983704 | 2218 | gfar_write(®s->tctrl, tempval); |
6aa20a22 | 2219 | |
0bbaf069 | 2220 | /* Enable VLAN tag extraction */ |
f4983704 | 2221 | tempval = gfar_read(®s->rctrl); |
77ecaf2d | 2222 | tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); |
f4983704 | 2223 | gfar_write(®s->rctrl, tempval); |
0bbaf069 KG |
2224 | } else { |
2225 | /* Disable VLAN tag insertion */ | |
f4983704 | 2226 | tempval = gfar_read(®s->tctrl); |
0bbaf069 | 2227 | tempval &= ~TCTRL_VLINS; |
f4983704 | 2228 | gfar_write(®s->tctrl, tempval); |
0bbaf069 KG |
2229 | |
2230 | /* Disable VLAN tag extraction */ | |
f4983704 | 2231 | tempval = gfar_read(®s->rctrl); |
0bbaf069 | 2232 | tempval &= ~RCTRL_VLEX; |
77ecaf2d DH |
2233 | /* If parsing is no longer required, then disable the parser */ |
2234 | if (tempval & RCTRL_REQ_PARSER) | |
2235 | tempval |= RCTRL_PRSDEP_INIT; | |
2236 | else | |
2237 | tempval &= ~RCTRL_PRSDEP_INIT; | |
f4983704 | 2238 | gfar_write(®s->rctrl, tempval); |
0bbaf069 KG |
2239 | } |
2240 | ||
77ecaf2d DH |
2241 | gfar_change_mtu(dev, dev->mtu); |
2242 | ||
fba4ed03 SG |
2243 | unlock_rx_qs(priv); |
2244 | local_irq_restore(flags); | |
0bbaf069 KG |
2245 | } |
2246 | ||
1da177e4 LT |
2247 | static int gfar_change_mtu(struct net_device *dev, int new_mtu) |
2248 | { | |
2249 | int tempsize, tempval; | |
2250 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 2251 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
1da177e4 | 2252 | int oldsize = priv->rx_buffer_size; |
0bbaf069 KG |
2253 | int frame_size = new_mtu + ETH_HLEN; |
2254 | ||
77ecaf2d | 2255 | if (priv->vlgrp) |
faa89577 | 2256 | frame_size += VLAN_HLEN; |
0bbaf069 | 2257 | |
1da177e4 | 2258 | if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { |
0bbaf069 KG |
2259 | if (netif_msg_drv(priv)) |
2260 | printk(KERN_ERR "%s: Invalid MTU setting\n", | |
2261 | dev->name); | |
1da177e4 LT |
2262 | return -EINVAL; |
2263 | } | |
2264 | ||
77ecaf2d DH |
2265 | if (gfar_uses_fcb(priv)) |
2266 | frame_size += GMAC_FCB_LEN; | |
2267 | ||
2268 | frame_size += priv->padding; | |
2269 | ||
1da177e4 LT |
2270 | tempsize = |
2271 | (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + | |
2272 | INCREMENTAL_BUFFER_SIZE; | |
2273 | ||
2274 | /* Only stop and start the controller if it isn't already | |
7f7f5316 | 2275 | * stopped, and we changed something */ |
1da177e4 LT |
2276 | if ((oldsize != tempsize) && (dev->flags & IFF_UP)) |
2277 | stop_gfar(dev); | |
2278 | ||
2279 | priv->rx_buffer_size = tempsize; | |
2280 | ||
2281 | dev->mtu = new_mtu; | |
2282 | ||
f4983704 SG |
2283 | gfar_write(®s->mrblr, priv->rx_buffer_size); |
2284 | gfar_write(®s->maxfrm, priv->rx_buffer_size); | |
1da177e4 LT |
2285 | |
2286 | /* If the mtu is larger than the max size for standard | |
2287 | * ethernet frames (i.e., a jumbo frame), then set maccfg2 |
2288 | * to allow huge frames, and to check the length */ | |
f4983704 | 2289 | tempval = gfar_read(®s->maccfg2); |
1da177e4 LT |
2290 | |
2291 | if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE) | |
2292 | tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); | |
2293 | else | |
2294 | tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); | |
2295 | ||
f4983704 | 2296 | gfar_write(®s->maccfg2, tempval); |
1da177e4 LT |
2297 | |
2298 | if ((oldsize != tempsize) && (dev->flags & IFF_UP)) | |
2299 | startup_gfar(dev); | |
2300 | ||
2301 | return 0; | |
2302 | } | |
2303 | ||
ab939905 | 2304 | /* gfar_reset_task gets scheduled when a packet has not been |
1da177e4 LT |
2305 | * transmitted after a set amount of time. |
2306 | * For now, assume that clearing out all the structures, and | |
ab939905 SS |
2307 | * starting over will fix the problem. |
2308 | */ | |
2309 | static void gfar_reset_task(struct work_struct *work) | |
1da177e4 | 2310 | { |
ab939905 SS |
2311 | struct gfar_private *priv = container_of(work, struct gfar_private, |
2312 | reset_task); | |
4826857f | 2313 | struct net_device *dev = priv->ndev; |
1da177e4 LT |
2314 | |
2315 | if (dev->flags & IFF_UP) { | |
fba4ed03 | 2316 | netif_tx_stop_all_queues(dev); |
1da177e4 LT |
2317 | stop_gfar(dev); |
2318 | startup_gfar(dev); | |
fba4ed03 | 2319 | netif_tx_start_all_queues(dev); |
1da177e4 LT |
2320 | } |
2321 | ||
263ba320 | 2322 | netif_tx_schedule_all(dev); |
1da177e4 LT |
2323 | } |
2324 | ||
ab939905 SS |
2325 | static void gfar_timeout(struct net_device *dev) |
2326 | { | |
2327 | struct gfar_private *priv = netdev_priv(dev); | |
2328 | ||
2329 | dev->stats.tx_errors++; | |
2330 | schedule_work(&priv->reset_task); | |
2331 | } | |
2332 | ||
1da177e4 | 2333 | /* Reclaim buffers from completed transmits; returns how many were cleaned */ |
a12f801d | 2334 | static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) |
1da177e4 | 2335 | { |
a12f801d | 2336 | struct net_device *dev = tx_queue->dev; |
d080cd63 | 2337 | struct gfar_private *priv = netdev_priv(dev); |
a12f801d | 2338 | struct gfar_priv_rx_q *rx_queue = NULL; |
f0ee7acf | 2339 | struct txbd8 *bdp, *next = NULL; |
4669bc90 | 2340 | struct txbd8 *lbdp = NULL; |
a12f801d | 2341 | struct txbd8 *base = tx_queue->tx_bd_base; |
4669bc90 DH |
2342 | struct sk_buff *skb; |
2343 | int skb_dirtytx; | |
a12f801d | 2344 | int tx_ring_size = tx_queue->tx_ring_size; |
f0ee7acf | 2345 | int frags = 0, nr_txbds = 0; |
4669bc90 | 2346 | int i; |
d080cd63 | 2347 | int howmany = 0; |
4669bc90 | 2348 | u32 lstatus; |
f0ee7acf MR |
2349 | size_t buflen; |
2350 | union skb_shared_tx *shtx; | |
1da177e4 | 2351 | |
fba4ed03 | 2352 | rx_queue = priv->rx_queue[tx_queue->qindex]; |
a12f801d SG |
2353 | bdp = tx_queue->dirty_tx; |
2354 | skb_dirtytx = tx_queue->skb_dirtytx; | |
1da177e4 | 2355 | |
a12f801d | 2356 | while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { |
a3bc1f11 AV |
2357 | unsigned long flags; |
2358 | ||
4669bc90 | 2359 | frags = skb_shinfo(skb)->nr_frags; |
f0ee7acf MR |
2360 | |
2361 | /* | |
2362 | * When time stamping, one additional TxBD must be freed. | |
2363 | * Also, we need to dma_unmap_single() the TxPAL. | |
2364 | */ | |
2365 | shtx = skb_tx(skb); | |
2366 | if (unlikely(shtx->in_progress)) | |
2367 | nr_txbds = frags + 2; | |
2368 | else | |
2369 | nr_txbds = frags + 1; | |
2370 | ||
2371 | lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); | |
1da177e4 | 2372 | |
4669bc90 | 2373 | lstatus = lbdp->lstatus; |
1da177e4 | 2374 | |
4669bc90 DH |
2375 | /* Only clean completed frames */ |
2376 | if ((lstatus & BD_LFLAG(TXBD_READY)) && | |
2377 | (lstatus & BD_LENGTH_MASK)) | |
2378 | break; | |
2379 | ||
f0ee7acf MR |
2380 | if (unlikely(shtx->in_progress)) { |
2381 | next = next_txbd(bdp, base, tx_ring_size); | |
2382 | buflen = next->length + GMAC_FCB_LEN; | |
2383 | } else | |
2384 | buflen = bdp->length; | |
2385 | ||
2386 | dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, | |
2387 | buflen, DMA_TO_DEVICE); | |
2388 | ||
2389 | if (unlikely(shtx->in_progress)) { | |
2390 | struct skb_shared_hwtstamps shhwtstamps; | |
2391 | u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7); | |
2392 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); | |
2393 | shhwtstamps.hwtstamp = ns_to_ktime(*ns); | |
2394 | skb_tstamp_tx(skb, &shhwtstamps); | |
2395 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); | |
2396 | bdp = next; | |
2397 | } | |
81183059 | 2398 | |
4669bc90 DH |
2399 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); |
2400 | bdp = next_txbd(bdp, base, tx_ring_size); | |
d080cd63 | 2401 | |
4669bc90 | 2402 | for (i = 0; i < frags; i++) { |
4826857f | 2403 | dma_unmap_page(&priv->ofdev->dev, |
4669bc90 DH |
2404 | bdp->bufPtr, |
2405 | bdp->length, | |
2406 | DMA_TO_DEVICE); | |
2407 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); | |
2408 | bdp = next_txbd(bdp, base, tx_ring_size); | |
2409 | } | |
1da177e4 | 2410 | |
0fd56bb5 AF |
2411 | /* |
2412 | * If there's room in the queue (limited to rx_ring_size entries) |
2413 | * and the skb is the right size, we add it back into the recycle pool |
2414 | */ | |
a12f801d | 2415 | if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && |
0fd56bb5 AF |
2416 | skb_recycle_check(skb, priv->rx_buffer_size + |
2417 | RXBUF_ALIGNMENT)) | |
2418 | __skb_queue_head(&priv->rx_recycle, skb); | |
2419 | else | |
2420 | dev_kfree_skb_any(skb); | |
2421 | ||
a12f801d | 2422 | tx_queue->tx_skbuff[skb_dirtytx] = NULL; |
d080cd63 | 2423 | |
4669bc90 DH |
2424 | skb_dirtytx = (skb_dirtytx + 1) & |
2425 | TX_RING_MOD_MASK(tx_ring_size); | |
2426 | ||
2427 | howmany++; | |
a3bc1f11 | 2428 | spin_lock_irqsave(&tx_queue->txlock, flags); |
f0ee7acf | 2429 | tx_queue->num_txbdfree += nr_txbds; |
a3bc1f11 | 2430 | spin_unlock_irqrestore(&tx_queue->txlock, flags); |
4669bc90 | 2431 | } |
1da177e4 | 2432 | |
4669bc90 | 2433 | /* If we freed a buffer, we can restart transmission, if necessary */ |
fba4ed03 SG |
2434 | if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree) |
2435 | netif_wake_subqueue(dev, tx_queue->qindex); | |
1da177e4 | 2436 | |
4669bc90 | 2437 | /* Update dirty indicators */ |
a12f801d SG |
2438 | tx_queue->skb_dirtytx = skb_dirtytx; |
2439 | tx_queue->dirty_tx = bdp; | |
1da177e4 | 2440 | |
d080cd63 DH |
2441 | return howmany; |
2442 | } | |
2443 | ||
f4983704 | 2444 | static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) |
d080cd63 | 2445 | { |
a6d0b91a AV |
2446 | unsigned long flags; |
2447 | ||
fba4ed03 SG |
2448 | spin_lock_irqsave(&gfargrp->grplock, flags); |
2449 | if (napi_schedule_prep(&gfargrp->napi)) { | |
f4983704 | 2450 | gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED); |
fba4ed03 | 2451 | __napi_schedule(&gfargrp->napi); |
8707bdd4 JP |
2452 | } else { |
2453 | /* | |
2454 | * Clear IEVENT, so interrupts aren't called again | |
2455 | * because of the packets that have already arrived. | |
2456 | */ | |
f4983704 | 2457 | gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK); |
2f448911 | 2458 | } |
fba4ed03 | 2459 | spin_unlock_irqrestore(&gfargrp->grplock, flags); |
a6d0b91a | 2460 | |
8c7396ae | 2461 | } |
1da177e4 | 2462 | |
8c7396ae | 2463 | /* Interrupt Handler for Transmit complete */ |
f4983704 | 2464 | static irqreturn_t gfar_transmit(int irq, void *grp_id) |
8c7396ae | 2465 | { |
f4983704 | 2466 | gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); |
1da177e4 LT |
2467 | return IRQ_HANDLED; |
2468 | } | |
2469 | ||
a12f801d | 2470 | static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, |
815b97c6 AF |
2471 | struct sk_buff *skb) |
2472 | { | |
a12f801d | 2473 | struct net_device *dev = rx_queue->dev; |
815b97c6 | 2474 | struct gfar_private *priv = netdev_priv(dev); |
8a102fe0 | 2475 | dma_addr_t buf; |
815b97c6 | 2476 | |
8a102fe0 AV |
2477 | buf = dma_map_single(&priv->ofdev->dev, skb->data, |
2478 | priv->rx_buffer_size, DMA_FROM_DEVICE); | |
a12f801d | 2479 | gfar_init_rxbdp(rx_queue, bdp, buf); |
815b97c6 AF |
2480 | } |
2481 | ||
2482 | ||
2483 | struct sk_buff * gfar_new_skb(struct net_device *dev) | |
1da177e4 | 2484 | { |
7f7f5316 | 2485 | unsigned int alignamount; |
1da177e4 LT |
2486 | struct gfar_private *priv = netdev_priv(dev); |
2487 | struct sk_buff *skb = NULL; | |
1da177e4 | 2488 | |
0fd56bb5 AF |
2489 | skb = __skb_dequeue(&priv->rx_recycle); |
2490 | if (!skb) | |
2491 | skb = netdev_alloc_skb(dev, | |
2492 | priv->rx_buffer_size + RXBUF_ALIGNMENT); | |
1da177e4 | 2493 | |
815b97c6 | 2494 | if (!skb) |
1da177e4 LT |
2495 | return NULL; |
2496 | ||
7f7f5316 | 2497 | alignamount = RXBUF_ALIGNMENT - |
bea3348e | 2498 | (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)); |
7f7f5316 | 2499 | |
1da177e4 LT |
2500 | /* We need the data buffer to be aligned properly. We will reserve |
2501 | * as many bytes as needed to align the data properly | |
2502 | */ | |
7f7f5316 | 2503 | skb_reserve(skb, alignamount); |
a6d36d56 | 2504 | GFAR_CB(skb)->alignamount = alignamount; |
1da177e4 | 2505 | |
1da177e4 LT |
2506 | return skb; |
2507 | } | |
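/*
 * Alignment example for the skb_reserve() above, assuming
 * RXBUF_ALIGNMENT is 64: if skb->data happens to end in 0x28,
 * alignamount = 64 - (0x28 & 63) = 24, so the payload start is
 * padded forward to the next 64-byte boundary. A buffer that is
 * already aligned still gets a full 64 bytes reserved, since
 * 64 - 0 = 64.
 */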
2508 | ||
298e1a9e | 2509 | static inline void count_errors(unsigned short status, struct net_device *dev) |
1da177e4 | 2510 | { |
298e1a9e | 2511 | struct gfar_private *priv = netdev_priv(dev); |
09f75cd7 | 2512 | struct net_device_stats *stats = &dev->stats; |
1da177e4 LT |
2513 | struct gfar_extra_stats *estats = &priv->extra_stats; |
2514 | ||
2515 | /* If the packet was truncated, none of the other errors | |
2516 | * matter */ | |
2517 | if (status & RXBD_TRUNCATED) { | |
2518 | stats->rx_length_errors++; | |
2519 | ||
2520 | estats->rx_trunc++; | |
2521 | ||
2522 | return; | |
2523 | } | |
2524 | /* Count the errors, if there were any */ | |
2525 | if (status & (RXBD_LARGE | RXBD_SHORT)) { | |
2526 | stats->rx_length_errors++; | |
2527 | ||
2528 | if (status & RXBD_LARGE) | |
2529 | estats->rx_large++; | |
2530 | else | |
2531 | estats->rx_short++; | |
2532 | } | |
2533 | if (status & RXBD_NONOCTET) { | |
2534 | stats->rx_frame_errors++; | |
2535 | estats->rx_nonoctet++; | |
2536 | } | |
2537 | if (status & RXBD_CRCERR) { | |
2538 | estats->rx_crcerr++; | |
2539 | stats->rx_crc_errors++; | |
2540 | } | |
2541 | if (status & RXBD_OVERRUN) { | |
2542 | estats->rx_overrun++; | |
2543 | stats->rx_crc_errors++; | |
2544 | } | |
2545 | } | |
2546 | ||
f4983704 | 2547 | irqreturn_t gfar_receive(int irq, void *grp_id) |
1da177e4 | 2548 | { |
f4983704 | 2549 | gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); |
1da177e4 LT |
2550 | return IRQ_HANDLED; |
2551 | } | |
2552 | ||
0bbaf069 KG |
2553 | static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) |
2554 | { | |
2555 | /* If valid headers were found, and valid sums | |
2556 | * were verified, then we tell the kernel that no | |
2557 | * checksumming is necessary. Otherwise, it is */ | |
7f7f5316 | 2558 | if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) |
0bbaf069 KG |
2559 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
2560 | else | |
2561 | skb->ip_summed = CHECKSUM_NONE; | |
2562 | } | |
2563 | ||
2564 | ||
1da177e4 LT |
2565 | /* gfar_process_frame() -- handle one incoming packet if skb |
2566 | * isn't NULL. */ | |
2567 | static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, | |
2c2db48a | 2568 | int amount_pull) |
1da177e4 LT |
2569 | { |
2570 | struct gfar_private *priv = netdev_priv(dev); | |
0bbaf069 | 2571 | struct rxfcb *fcb = NULL; |
1da177e4 | 2572 | |
2c2db48a | 2573 | int ret; |
1da177e4 | 2574 | |
2c2db48a DH |
2575 | /* fcb is at the beginning if it exists */ |
2576 | fcb = (struct rxfcb *)skb->data; | |
0bbaf069 | 2577 | |
2c2db48a DH |
2578 | /* Remove the FCB from the skb */ |
2579 | /* Remove the padded bytes, if there are any */ | |
f74dac08 SG |
2580 | if (amount_pull) { |
2581 | skb_record_rx_queue(skb, fcb->rq); | |
2c2db48a | 2582 | skb_pull(skb, amount_pull); |
f74dac08 | 2583 | } |
0bbaf069 | 2584 | |
cc772ab7 MR |
2585 | /* Get receive timestamp from the skb */ |
2586 | if (priv->hwts_rx_en) { | |
2587 | struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); | |
2588 | u64 *ns = (u64 *) skb->data; | |
2589 | memset(shhwtstamps, 0, sizeof(*shhwtstamps)); | |
2590 | shhwtstamps->hwtstamp = ns_to_ktime(*ns); | |
2591 | } | |
2592 | ||
2593 | if (priv->padding) | |
2594 | skb_pull(skb, priv->padding); | |
2595 | ||
2c2db48a DH |
2596 | if (priv->rx_csum_enable) |
2597 | gfar_rx_checksum(skb, fcb); | |
0bbaf069 | 2598 | |
2c2db48a DH |
2599 | /* Tell the skb what kind of packet this is */ |
2600 | skb->protocol = eth_type_trans(skb, dev); | |
1da177e4 | 2601 | |
2c2db48a DH |
2602 | /* Send the packet up the stack */ |
2603 | if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN))) | |
2604 | ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl); | |
2605 | else | |
2606 | ret = netif_receive_skb(skb); | |
0bbaf069 | 2607 | |
2c2db48a DH |
2608 | if (NET_RX_DROP == ret) |
2609 | priv->extra_stats.kernel_dropped++; | |
1da177e4 LT |
2610 | |
2611 | return 0; | |
2612 | } | |
2613 | ||
2614 | /* gfar_clean_rx_ring() -- Processes each frame in the rx ring | |
0bbaf069 | 2615 | * until the budget/quota has been reached. Returns the number |
1da177e4 LT |
2616 | * of frames handled |
2617 | */ | |
a12f801d | 2618 | int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) |
1da177e4 | 2619 | { |
a12f801d | 2620 | struct net_device *dev = rx_queue->dev; |
31de198b | 2621 | struct rxbd8 *bdp, *base; |
1da177e4 | 2622 | struct sk_buff *skb; |
2c2db48a DH |
2623 | int pkt_len; |
2624 | int amount_pull; | |
1da177e4 LT |
2625 | int howmany = 0; |
2626 | struct gfar_private *priv = netdev_priv(dev); | |
2627 | ||
2628 | /* Get the first full descriptor */ | |
a12f801d SG |
2629 | bdp = rx_queue->cur_rx; |
2630 | base = rx_queue->rx_bd_base; | |
1da177e4 | 2631 | |
cc772ab7 | 2632 | amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0); |
2c2db48a | 2633 | |
1da177e4 | 2634 | while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { |
815b97c6 | 2635 | struct sk_buff *newskb; |
3b6330ce | 2636 | rmb(); |
815b97c6 AF |
2637 | |
2638 | /* Add another skb for the future */ | |
2639 | newskb = gfar_new_skb(dev); | |
2640 | ||
a12f801d | 2641 | skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; |
1da177e4 | 2642 | |
4826857f | 2643 | dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, |
81183059 AF |
2644 | priv->rx_buffer_size, DMA_FROM_DEVICE); |
2645 | ||
815b97c6 AF |
2646 | /* We drop the frame if we failed to allocate a new buffer */ |
2647 | if (unlikely(!newskb || !(bdp->status & RXBD_LAST) || | |
2648 | bdp->status & RXBD_ERR)) { | |
2649 | count_errors(bdp->status, dev); | |
2650 | ||
2651 | if (unlikely(!newskb)) | |
2652 | newskb = skb; | |
4e2fd555 LB |
2653 | else if (skb) { |
2654 | /* | |
a6d36d56 | 2655 | * We need to un-reserve() the skb to what it |
4e2fd555 LB |
2656 | * was before gfar_new_skb() re-aligned |
2657 | * it to an RXBUF_ALIGNMENT boundary | |
2658 | * before we put the skb back on the | |
2659 | * recycle list. | |
2660 | */ | |
a6d36d56 | 2661 | skb_reserve(skb, -GFAR_CB(skb)->alignamount); |
0fd56bb5 | 2662 | __skb_queue_head(&priv->rx_recycle, skb); |
4e2fd555 | 2663 | } |
815b97c6 | 2664 | } else { |
1da177e4 | 2665 | /* Increment the number of packets */ |
a7f38041 | 2666 | rx_queue->stats.rx_packets++; |
1da177e4 LT |
2667 | howmany++; |
2668 | ||
2c2db48a DH |
2669 | if (likely(skb)) { |
2670 | pkt_len = bdp->length - ETH_FCS_LEN; | |
2671 | /* Remove the FCS from the packet length */ | |
2672 | skb_put(skb, pkt_len); | |
a7f38041 | 2673 | rx_queue->stats.rx_bytes += pkt_len; |
f74dac08 | 2674 | skb_record_rx_queue(skb, rx_queue->qindex); |
2c2db48a DH |
2675 | gfar_process_frame(dev, skb, amount_pull); |
2676 | ||
2677 | } else { | |
2678 | if (netif_msg_rx_err(priv)) | |
2679 | printk(KERN_WARNING | |
2680 | "%s: Missing skb!\n", dev->name); | |
a7f38041 | 2681 | rx_queue->stats.rx_dropped++; |
2c2db48a DH |
2682 | priv->extra_stats.rx_skbmissing++; |
2683 | } | |
1da177e4 | 2684 | |
1da177e4 LT |
2685 | } |
2686 | ||
a12f801d | 2687 | rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; |
1da177e4 | 2688 | |
815b97c6 | 2689 | /* Setup the new bdp */ |
a12f801d | 2690 | gfar_new_rxbdp(rx_queue, bdp, newskb); |
1da177e4 LT |
2691 | |
2692 | /* Update to the next pointer */ | |
a12f801d | 2693 | bdp = next_bd(bdp, base, rx_queue->rx_ring_size); |
1da177e4 LT |
2694 | |
2695 | /* update to point at the next skb */ | |
a12f801d SG |
2696 | rx_queue->skb_currx = |
2697 | (rx_queue->skb_currx + 1) & | |
2698 | RX_RING_MOD_MASK(rx_queue->rx_ring_size); | |
1da177e4 LT |
2699 | } |
2700 | ||
2701 | /* Update the current rxbd pointer to be the next one */ | |
a12f801d | 2702 | rx_queue->cur_rx = bdp; |
1da177e4 | 2703 | |
1da177e4 LT |
2704 | return howmany; |
2705 | } | |
2706 | ||
bea3348e | 2707 | static int gfar_poll(struct napi_struct *napi, int budget) |
1da177e4 | 2708 | { |
fba4ed03 SG |
2709 | struct gfar_priv_grp *gfargrp = container_of(napi, |
2710 | struct gfar_priv_grp, napi); | |
2711 | struct gfar_private *priv = gfargrp->priv; | |
46ceb60c | 2712 | struct gfar __iomem *regs = gfargrp->regs; |
a12f801d | 2713 | struct gfar_priv_tx_q *tx_queue = NULL; |
fba4ed03 SG |
2714 | struct gfar_priv_rx_q *rx_queue = NULL; |
2715 | int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0; | |
18294ad1 AV |
2716 | int tx_cleaned = 0, i, left_over_budget = budget; |
2717 | unsigned long serviced_queues = 0; | |
fba4ed03 | 2718 | int num_queues = 0; |
d080cd63 | 2719 | |
fba4ed03 SG |
2720 | num_queues = gfargrp->num_rx_queues; |
2721 | budget_per_queue = budget/num_queues; | |
2722 | ||
8c7396ae DH |
2723 | /* Clear IEVENT, so interrupts aren't called again |
2724 | * because of the packets that have already arrived */ | |
f4983704 | 2725 | gfar_write(®s->ievent, IEVENT_RTX_MASK); |
8c7396ae | 2726 | |
fba4ed03 | 2727 | while (num_queues && left_over_budget) { |
1da177e4 | 2728 | |
fba4ed03 SG |
2729 | budget_per_queue = left_over_budget/num_queues; |
2730 | left_over_budget = 0; | |
2731 | ||
984b3f57 | 2732 | for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { |
fba4ed03 SG |
2733 | if (test_bit(i, &serviced_queues)) |
2734 | continue; | |
2735 | rx_queue = priv->rx_queue[i]; | |
2736 | tx_queue = priv->tx_queue[rx_queue->qindex]; | |
2737 | ||
a3bc1f11 | 2738 | tx_cleaned += gfar_clean_tx_ring(tx_queue); |
fba4ed03 SG |
2739 | rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue, |
2740 | budget_per_queue); | |
2741 | rx_cleaned += rx_cleaned_per_queue; | |
2742 | if (rx_cleaned_per_queue < budget_per_queue) { |
2743 | left_over_budget = left_over_budget + | |
2744 | (budget_per_queue - rx_cleaned_per_queue); | |
2745 | set_bit(i, &serviced_queues); | |
2746 | num_queues--; | |
2747 | } | |
2748 | } | |
2749 | } | |
1da177e4 | 2750 | |
42199884 AF |
2751 | if (tx_cleaned) |
2752 | return budget; | |
2753 | ||
2754 | if (rx_cleaned < budget) { | |
288379f0 | 2755 | napi_complete(napi); |
1da177e4 LT |
2756 | |
2757 | /* Clear the halt bit in RSTAT */ | |
fba4ed03 | 2758 | gfar_write(®s->rstat, gfargrp->rstat); |
1da177e4 | 2759 | |
f4983704 | 2760 | gfar_write(®s->imask, IMASK_DEFAULT); |
1da177e4 LT |
2761 | |
2762 | /* If we are coalescing interrupts, update the timer */ | |
2763 | /* Otherwise, clear it */ | |
46ceb60c SG |
2764 | gfar_configure_coalescing(priv, |
2765 | gfargrp->rx_bit_map, gfargrp->tx_bit_map); | |
1da177e4 LT |
2766 | } |
2767 | ||
42199884 | 2768 | return rx_cleaned; |
1da177e4 | 2769 | } |
1da177e4 | 2770 | |
f2d71c2d VW |
2771 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2772 | /* | |
2773 | * Polling 'interrupt' - used by things like netconsole to send skbs | |
2774 | * without having to re-enable interrupts. It's not called while | |
2775 | * the interrupt routine is executing. | |
2776 | */ | |
2777 | static void gfar_netpoll(struct net_device *dev) | |
2778 | { | |
2779 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 2780 | int i = 0; |
f2d71c2d VW |
2781 | |
2782 | /* If the device has multiple interrupts, run tx/rx */ | |
b31a1d8b | 2783 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
46ceb60c SG |
2784 | for (i = 0; i < priv->num_grps; i++) { |
2785 | disable_irq(priv->gfargrp[i].interruptTransmit); | |
2786 | disable_irq(priv->gfargrp[i].interruptReceive); | |
2787 | disable_irq(priv->gfargrp[i].interruptError); | |
2788 | gfar_interrupt(priv->gfargrp[i].interruptTransmit, | |
2789 | &priv->gfargrp[i]); | |
2790 | enable_irq(priv->gfargrp[i].interruptError); | |
2791 | enable_irq(priv->gfargrp[i].interruptReceive); | |
2792 | enable_irq(priv->gfargrp[i].interruptTransmit); | |
2793 | } | |
f2d71c2d | 2794 | } else { |
46ceb60c SG |
2795 | for (i = 0; i < priv->num_grps; i++) { |
2796 | disable_irq(priv->gfargrp[i].interruptTransmit); | |
2797 | gfar_interrupt(priv->gfargrp[i].interruptTransmit, | |
2798 | &priv->gfargrp[i]); | |
2799 | enable_irq(priv->gfargrp[i].interruptTransmit); | |
43de004b | 2800 | } |
f2d71c2d VW |
2801 | } |
2802 | } | |
2803 | #endif | |
2804 | ||
1da177e4 | 2805 | /* The interrupt handler for devices with one interrupt */ |
f4983704 | 2806 | static irqreturn_t gfar_interrupt(int irq, void *grp_id) |
1da177e4 | 2807 | { |
f4983704 | 2808 | struct gfar_priv_grp *gfargrp = grp_id; |
1da177e4 LT |
2809 | |
2810 | /* Save ievent for future reference */ | |
f4983704 | 2811 | u32 events = gfar_read(&gfargrp->regs->ievent); |
1da177e4 | 2812 | |
1da177e4 | 2813 | /* Check for reception */ |
538cc7ee | 2814 | if (events & IEVENT_RX_MASK) |
f4983704 | 2815 | gfar_receive(irq, grp_id); |
1da177e4 LT |
2816 | |
2817 | /* Check for transmit completion */ | |
538cc7ee | 2818 | if (events & IEVENT_TX_MASK) |
f4983704 | 2819 | gfar_transmit(irq, grp_id); |
1da177e4 | 2820 | |
538cc7ee SS |
2821 | /* Check for errors */ |
2822 | if (events & IEVENT_ERR_MASK) | |
f4983704 | 2823 | gfar_error(irq, grp_id); |
1da177e4 LT |
2824 | |
2825 | return IRQ_HANDLED; | |
2826 | } | |
2827 | ||
1da177e4 LT |
2828 | /* Called every time the controller might need to be made |
2829 | * aware of new link state. The PHY code conveys this | |
bb40dcbb | 2830 | * information through variables in the phydev structure, and this |
1da177e4 LT |
2831 | * function converts those variables into the appropriate |
2832 | * register values, and can bring down the device if needed. | |
2833 | */ | |
2834 | static void adjust_link(struct net_device *dev) | |
2835 | { | |
2836 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 2837 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
bb40dcbb AF |
2838 | unsigned long flags; |
2839 | struct phy_device *phydev = priv->phydev; | |
2840 | int new_state = 0; | |
2841 | ||
fba4ed03 SG |
2842 | local_irq_save(flags); |
2843 | lock_tx_qs(priv); | |
2844 | ||
bb40dcbb AF |
2845 | if (phydev->link) { |
2846 | u32 tempval = gfar_read(®s->maccfg2); | |
7f7f5316 | 2847 | u32 ecntrl = gfar_read(®s->ecntrl); |
1da177e4 | 2848 | |
1da177e4 LT |
2849 | /* Now we make sure that we can be in full duplex mode. |
2850 | * If not, we operate in half-duplex mode. */ | |
bb40dcbb AF |
2851 | if (phydev->duplex != priv->oldduplex) { |
2852 | new_state = 1; | |
2853 | if (!(phydev->duplex)) | |
1da177e4 | 2854 | tempval &= ~(MACCFG2_FULL_DUPLEX); |
bb40dcbb | 2855 | else |
1da177e4 | 2856 | tempval |= MACCFG2_FULL_DUPLEX; |
1da177e4 | 2857 | |
bb40dcbb | 2858 | priv->oldduplex = phydev->duplex; |
1da177e4 LT |
2859 | } |
2860 | ||
bb40dcbb AF |
2861 | if (phydev->speed != priv->oldspeed) { |
2862 | new_state = 1; | |
2863 | switch (phydev->speed) { | |
1da177e4 | 2864 | case 1000: |
1da177e4 LT |
2865 | tempval = |
2866 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); | |
f430e49e LY |
2867 | |
2868 | ecntrl &= ~(ECNTRL_R100); | |
1da177e4 LT |
2869 | break; |
2870 | case 100: | |
2871 | case 10: | |
1da177e4 LT |
2872 | tempval = |
2873 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); | |
7f7f5316 AF |
2874 | |
2875 | /* Reduced mode distinguishes | |
2876 | * between 10 and 100 */ | |
2877 | if (phydev->speed == SPEED_100) | |
2878 | ecntrl |= ECNTRL_R100; | |
2879 | else | |
2880 | ecntrl &= ~(ECNTRL_R100); | |
1da177e4 LT |
2881 | break; |
2882 | default: | |
0bbaf069 KG |
2883 | if (netif_msg_link(priv)) |
2884 | printk(KERN_WARNING | |
bb40dcbb AF |
2885 | "%s: Ack! Speed (%d) is not 10/100/1000!\n", |
2886 | dev->name, phydev->speed); | |
1da177e4 LT |
2887 | break; |
2888 | } | |
2889 | ||
bb40dcbb | 2890 | priv->oldspeed = phydev->speed; |
1da177e4 LT |
2891 | } |
2892 | ||
bb40dcbb | 2893 | gfar_write(®s->maccfg2, tempval); |
7f7f5316 | 2894 | gfar_write(®s->ecntrl, ecntrl); |
bb40dcbb | 2895 | |
1da177e4 | 2896 | if (!priv->oldlink) { |
bb40dcbb | 2897 | new_state = 1; |
1da177e4 | 2898 | priv->oldlink = 1; |
1da177e4 | 2899 | } |
bb40dcbb AF |
2900 | } else if (priv->oldlink) { |
2901 | new_state = 1; | |
2902 | priv->oldlink = 0; | |
2903 | priv->oldspeed = 0; | |
2904 | priv->oldduplex = -1; | |
1da177e4 | 2905 | } |
1da177e4 | 2906 | |
bb40dcbb AF |
2907 | if (new_state && netif_msg_link(priv)) |
2908 | phy_print_status(phydev); | |
fba4ed03 SG |
2909 | unlock_tx_qs(priv); |
2910 | local_irq_restore(flags); | |
bb40dcbb | 2911 | } |
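/*
 * Illustrative sketch (not compiled): how a PHY-reported speed/duplex
 * pair maps onto MACCFG2/ECNTRL, mirroring adjust_link() above.  The
 * EX_* constants are placeholders for this example only; the driver's
 * real definitions live in gianfar.h.
 */
#if 0
#define EX_MACCFG2_FULL_DUPLEX	0x00000001	/* placeholder value */
#define EX_MACCFG2_IF		0x00000300	/* placeholder value */
#define EX_MACCFG2_MII		0x00000100	/* placeholder value */
#define EX_MACCFG2_GMII		0x00000200	/* placeholder value */
#define EX_ECNTRL_R100		0x00000008	/* placeholder value */

static void ex_link_to_regs(int speed, int full_duplex,
			    u32 *maccfg2, u32 *ecntrl)
{
	/* Duplex is a single bit in MACCFG2 */
	if (full_duplex)
		*maccfg2 |= EX_MACCFG2_FULL_DUPLEX;
	else
		*maccfg2 &= ~EX_MACCFG2_FULL_DUPLEX;

	/* The interface field selects GMII for gigabit and MII otherwise;
	 * in reduced mode ECNTRL_R100 distinguishes 100 from 10 Mbit/s. */
	if (speed == 1000) {
		*maccfg2 = (*maccfg2 & ~EX_MACCFG2_IF) | EX_MACCFG2_GMII;
		*ecntrl &= ~EX_ECNTRL_R100;
	} else {
		*maccfg2 = (*maccfg2 & ~EX_MACCFG2_IF) | EX_MACCFG2_MII;
		if (speed == 100)
			*ecntrl |= EX_ECNTRL_R100;
		else
			*ecntrl &= ~EX_ECNTRL_R100;
	}
}
#endif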
1da177e4 LT |
2912 | |
2913 | /* Update the hash table based on the current list of multicast | |
2914 | * addresses we subscribe to. Also, change the promiscuity of | |
2915 | * the device based on the flags (this function is called | |
2916 | * whenever dev->flags is changed). */ |
2917 | static void gfar_set_multi(struct net_device *dev) | |
2918 | { | |
22bedad3 | 2919 | struct netdev_hw_addr *ha; |
1da177e4 | 2920 | struct gfar_private *priv = netdev_priv(dev); |
46ceb60c | 2921 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
1da177e4 LT |
2922 | u32 tempval; |
2923 | ||
a12f801d | 2924 | if (dev->flags & IFF_PROMISC) { |
1da177e4 LT |
2925 | /* Set RCTRL to PROM */ |
2926 | tempval = gfar_read(®s->rctrl); | |
2927 | tempval |= RCTRL_PROM; | |
2928 | gfar_write(®s->rctrl, tempval); | |
2929 | } else { | |
2930 | /* Set RCTRL to not PROM */ | |
2931 | tempval = gfar_read(®s->rctrl); | |
2932 | tempval &= ~(RCTRL_PROM); | |
2933 | gfar_write(®s->rctrl, tempval); | |
2934 | } | |
6aa20a22 | 2935 | |
a12f801d | 2936 | if (dev->flags & IFF_ALLMULTI) { |
1da177e4 | 2937 | /* Set the hash to rx all multicast frames */ |
0bbaf069 KG |
2938 | gfar_write(®s->igaddr0, 0xffffffff); |
2939 | gfar_write(®s->igaddr1, 0xffffffff); | |
2940 | gfar_write(®s->igaddr2, 0xffffffff); | |
2941 | gfar_write(®s->igaddr3, 0xffffffff); | |
2942 | gfar_write(®s->igaddr4, 0xffffffff); | |
2943 | gfar_write(®s->igaddr5, 0xffffffff); | |
2944 | gfar_write(®s->igaddr6, 0xffffffff); | |
2945 | gfar_write(®s->igaddr7, 0xffffffff); | |
1da177e4 LT |
2946 | gfar_write(®s->gaddr0, 0xffffffff); |
2947 | gfar_write(®s->gaddr1, 0xffffffff); | |
2948 | gfar_write(®s->gaddr2, 0xffffffff); | |
2949 | gfar_write(®s->gaddr3, 0xffffffff); | |
2950 | gfar_write(®s->gaddr4, 0xffffffff); | |
2951 | gfar_write(®s->gaddr5, 0xffffffff); | |
2952 | gfar_write(®s->gaddr6, 0xffffffff); | |
2953 | gfar_write(®s->gaddr7, 0xffffffff); | |
2954 | } else { | |
7f7f5316 AF |
2955 | int em_num; |
2956 | int idx; | |
2957 | ||
1da177e4 | 2958 | /* zero out the hash */ |
0bbaf069 KG |
2959 | gfar_write(®s->igaddr0, 0x0); |
2960 | gfar_write(®s->igaddr1, 0x0); | |
2961 | gfar_write(®s->igaddr2, 0x0); | |
2962 | gfar_write(®s->igaddr3, 0x0); | |
2963 | gfar_write(®s->igaddr4, 0x0); | |
2964 | gfar_write(®s->igaddr5, 0x0); | |
2965 | gfar_write(®s->igaddr6, 0x0); | |
2966 | gfar_write(®s->igaddr7, 0x0); | |
1da177e4 LT |
2967 | gfar_write(®s->gaddr0, 0x0); |
2968 | gfar_write(®s->gaddr1, 0x0); | |
2969 | gfar_write(®s->gaddr2, 0x0); | |
2970 | gfar_write(®s->gaddr3, 0x0); | |
2971 | gfar_write(®s->gaddr4, 0x0); | |
2972 | gfar_write(®s->gaddr5, 0x0); | |
2973 | gfar_write(®s->gaddr6, 0x0); | |
2974 | gfar_write(®s->gaddr7, 0x0); | |
2975 | ||
7f7f5316 AF |
2976 | /* If we have extended hash tables, we need to |
2977 | * clear the exact match registers to prepare for | |
2978 | * setting them */ | |
2979 | if (priv->extended_hash) { | |
2980 | em_num = GFAR_EM_NUM + 1; | |
2981 | gfar_clear_exact_match(dev); | |
2982 | idx = 1; | |
2983 | } else { | |
2984 | idx = 0; | |
2985 | em_num = 0; | |
2986 | } | |
2987 | ||
4cd24eaf | 2988 | if (netdev_mc_empty(dev)) |
1da177e4 LT |
2989 | return; |
2990 | ||
2991 | /* Parse the list, and set the appropriate bits */ | |
22bedad3 | 2992 | netdev_for_each_mc_addr(ha, dev) { |
7f7f5316 | 2993 | if (idx < em_num) { |
22bedad3 | 2994 | gfar_set_mac_for_addr(dev, idx, ha->addr); |
7f7f5316 AF |
2995 | idx++; |
2996 | } else | |
22bedad3 | 2997 | gfar_set_hash_for_addr(dev, ha->addr); |
1da177e4 LT |
2998 | } |
2999 | } | |
1da177e4 LT |
3000 | } |
3001 | ||
7f7f5316 AF |
3002 | |
3003 | /* Clears each of the exact match registers to zero, so they | |
3004 | * don't interfere with normal reception */ | |
3005 | static void gfar_clear_exact_match(struct net_device *dev) | |
3006 | { | |
3007 | int idx; | |
3008 | u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0}; |
3009 | ||
3010 | for (idx = 1; idx < GFAR_EM_NUM + 1; idx++) |
3011 | gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr); | |
3012 | } | |
3013 | ||
1da177e4 LT |
3014 | /* Set the appropriate hash bit for the given addr */ |
3015 | /* The algorithm works like so: | |
3016 | * 1) Take the Destination Address (ie the multicast address), and | |
3017 | * do a CRC on it (little endian), and reverse the bits of the | |
3018 | * result. | |
3019 | * 2) Use the 8 most significant bits as a hash into a 256-entry | |
3020 | * table. The table is controlled through 8 32-bit registers: | |
3021 | * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is | |
3022 | * entry 255. This means that the 3 most significant bits in the |
3023 | * hash index select which gaddr register to use, and the 5 other bits |
3024 | * indicate which bit (assuming an IBM numbering scheme, which | |
3025 | * for PowerPC (tm) is usually the case) in the register holds | |
3026 | * the entry. */ | |
3027 | static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) | |
3028 | { | |
3029 | u32 tempval; | |
3030 | struct gfar_private *priv = netdev_priv(dev); | |
1da177e4 | 3031 | u32 result = ether_crc(MAC_ADDR_LEN, addr); |
0bbaf069 KG |
3032 | int width = priv->hash_width; |
3033 | u8 whichbit = (result >> (32 - width)) & 0x1f; | |
3034 | u8 whichreg = result >> (32 - width + 5); | |
1da177e4 LT |
3035 | u32 value = (1 << (31-whichbit)); |
3036 | ||
0bbaf069 | 3037 | tempval = gfar_read(priv->hash_regs[whichreg]); |
1da177e4 | 3038 | tempval |= value; |
0bbaf069 | 3039 | gfar_write(priv->hash_regs[whichreg], tempval); |
1da177e4 LT |
3040 | } |
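/*
 * Illustrative sketch (not compiled): the index math used by
 * gfar_set_hash_for_addr() above.  Given the 32-bit CRC of the address
 * and the configured hash width, the top three (or four) bits pick one
 * of the hash registers and the next five bits pick a bit within it
 * (bit 0 being the register's MSB, per the IBM numbering noted above).
 */
#if 0
static void ex_hash_index(u32 crc_result, int width,
			  u8 *reg_index, u32 *reg_value)
{
	u8 whichbit = (crc_result >> (32 - width)) & 0x1f;
	u8 whichreg = crc_result >> (32 - width + 5);

	*reg_index = whichreg;
	*reg_value = 1 << (31 - whichbit);
}

/*
 * Worked example with width == 8: a CRC of 0xd3000000 has top byte 0xd3
 * (binary 110 10011), so whichreg == 6 and whichbit == 19, i.e. set bit
 * (31 - 19) == 12 of hash register 6.
 */
#endif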
3041 | ||
7f7f5316 AF |
3042 | |
3043 | /* There are multiple MAC Address register pairs on some controllers | |
3044 | * This function sets the num'th pair to a given address |
3045 | */ | |
3046 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr) | |
3047 | { | |
3048 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 3049 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
7f7f5316 AF |
3050 | int idx; |
3051 | char tmpbuf[MAC_ADDR_LEN]; | |
3052 | u32 tempval; | |
f4983704 | 3053 | u32 __iomem *macptr = ®s->macstnaddr1; |
7f7f5316 AF |
3054 | |
3055 | macptr += num*2; | |
3056 | ||
3057 | /* Now copy it into the mac registers backwards, since the */ |
3058 | /* controller stores the address in reversed (little endian) byte order */ |
3059 | for (idx = 0; idx < MAC_ADDR_LEN; idx++) | |
3060 | tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx]; | |
3061 | ||
3062 | gfar_write(macptr, *((u32 *) (tmpbuf))); | |
3063 | ||
3064 | tempval = *((u32 *) (tmpbuf + 4)); | |
3065 | ||
3066 | gfar_write(macptr+1, tempval); | |
3067 | } | |
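/*
 * Worked example (illustrative only): for addr == 00:04:9f:01:02:03 the
 * reversed buffer is 03 02 01 9f 04 00, so on the big-endian PowerPC
 * targets this driver runs on the first register is written with
 * 0x0302019f and the second with 0x0400xxxx.  Only the upper 16 bits of
 * the second register carry address bytes; the hardware is assumed to
 * ignore the low half.
 */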
3068 | ||
1da177e4 | 3069 | /* GFAR error interrupt handler */ |
f4983704 | 3070 | static irqreturn_t gfar_error(int irq, void *grp_id) |
1da177e4 | 3071 | { |
f4983704 SG |
3072 | struct gfar_priv_grp *gfargrp = grp_id; |
3073 | struct gfar __iomem *regs = gfargrp->regs; | |
3074 | struct gfar_private *priv = gfargrp->priv; |
3075 | struct net_device *dev = priv->ndev; | |
1da177e4 LT |
3076 | |
3077 | /* Save ievent for future reference */ | |
f4983704 | 3078 | u32 events = gfar_read(®s->ievent); |
1da177e4 LT |
3079 | |
3080 | /* Clear IEVENT */ | |
f4983704 | 3081 | gfar_write(®s->ievent, events & IEVENT_ERR_MASK); |
d87eb127 SW |
3082 | |
3083 | /* Magic Packet is not an error. */ | |
b31a1d8b | 3084 | if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && |
d87eb127 SW |
3085 | (events & IEVENT_MAG)) |
3086 | events &= ~IEVENT_MAG; | |
1da177e4 LT |
3087 | |
3088 | /* Log the error interrupt, if debug messages are enabled */ |
0bbaf069 KG |
3089 | if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) |
3090 | printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n", | |
f4983704 | 3091 | dev->name, events, gfar_read(®s->imask)); |
1da177e4 LT |
3092 | |
3093 | /* Update the error counters */ | |
3094 | if (events & IEVENT_TXE) { | |
09f75cd7 | 3095 | dev->stats.tx_errors++; |
1da177e4 LT |
3096 | |
3097 | if (events & IEVENT_LC) | |
09f75cd7 | 3098 | dev->stats.tx_window_errors++; |
1da177e4 | 3099 | if (events & IEVENT_CRL) |
09f75cd7 | 3100 | dev->stats.tx_aborted_errors++; |
1da177e4 | 3101 | if (events & IEVENT_XFUN) { |
836cf7fa AV |
3102 | unsigned long flags; |
3103 | ||
0bbaf069 | 3104 | if (netif_msg_tx_err(priv)) |
538cc7ee SS |
3105 | printk(KERN_DEBUG "%s: TX FIFO underrun, " |
3106 | "packet dropped.\n", dev->name); | |
09f75cd7 | 3107 | dev->stats.tx_dropped++; |
1da177e4 LT |
3108 | priv->extra_stats.tx_underrun++; |
3109 | ||
836cf7fa AV |
3110 | local_irq_save(flags); |
3111 | lock_tx_qs(priv); | |
3112 | ||
1da177e4 | 3113 | /* Reactivate the Tx Queues */ |
fba4ed03 | 3114 | gfar_write(®s->tstat, gfargrp->tstat); |
836cf7fa AV |
3115 | |
3116 | unlock_tx_qs(priv); | |
3117 | local_irq_restore(flags); | |
1da177e4 | 3118 | } |
0bbaf069 KG |
3119 | if (netif_msg_tx_err(priv)) |
3120 | printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); | |
1da177e4 LT |
3121 | } |
3122 | if (events & IEVENT_BSY) { | |
09f75cd7 | 3123 | dev->stats.rx_errors++; |
1da177e4 LT |
3124 | priv->extra_stats.rx_bsy++; |
3125 | ||
f4983704 | 3126 | gfar_receive(irq, grp_id); |
1da177e4 | 3127 | |
0bbaf069 | 3128 | if (netif_msg_rx_err(priv)) |
538cc7ee | 3129 | printk(KERN_DEBUG "%s: busy error (rstat: %x)\n", |
f4983704 | 3130 | dev->name, gfar_read(®s->rstat)); |
1da177e4 LT |
3131 | } |
3132 | if (events & IEVENT_BABR) { | |
09f75cd7 | 3133 | dev->stats.rx_errors++; |
1da177e4 LT |
3134 | priv->extra_stats.rx_babr++; |
3135 | ||
0bbaf069 | 3136 | if (netif_msg_rx_err(priv)) |
538cc7ee | 3137 | printk(KERN_DEBUG "%s: babbling RX error\n", dev->name); |
1da177e4 LT |
3138 | } |
3139 | if (events & IEVENT_EBERR) { | |
3140 | priv->extra_stats.eberr++; | |
0bbaf069 | 3141 | if (netif_msg_rx_err(priv)) |
538cc7ee | 3142 | printk(KERN_DEBUG "%s: bus error\n", dev->name); |
1da177e4 | 3143 | } |
0bbaf069 | 3144 | if ((events & IEVENT_RXC) && netif_msg_rx_status(priv)) |
538cc7ee | 3145 | printk(KERN_DEBUG "%s: control frame\n", dev->name); |
1da177e4 LT |
3146 | |
3147 | if (events & IEVENT_BABT) { | |
3148 | priv->extra_stats.tx_babt++; | |
0bbaf069 | 3149 | if (netif_msg_tx_err(priv)) |
538cc7ee | 3150 | printk(KERN_DEBUG "%s: babbling TX error\n", dev->name); |
1da177e4 LT |
3151 | } |
3152 | return IRQ_HANDLED; | |
3153 | } | |
3154 | ||
b31a1d8b AF |
3155 | static struct of_device_id gfar_match[] = |
3156 | { | |
3157 | { | |
3158 | .type = "network", | |
3159 | .compatible = "gianfar", | |
3160 | }, | |
46ceb60c SG |
3161 | { |
3162 | .compatible = "fsl,etsec2", | |
3163 | }, | |
b31a1d8b AF |
3164 | {}, |
3165 | }; | |
e72701ac | 3166 | MODULE_DEVICE_TABLE(of, gfar_match); |
b31a1d8b | 3167 | |
1da177e4 | 3168 | /* Structure for a device driver */ |
b31a1d8b AF |
3169 | static struct of_platform_driver gfar_driver = { |
3170 | .name = "fsl-gianfar", | |
3171 | .match_table = gfar_match, | |
3172 | ||
1da177e4 LT |
3173 | .probe = gfar_probe, |
3174 | .remove = gfar_remove, | |
be926fc4 | 3175 | .driver.pm = GFAR_PM_OPS, |
1da177e4 LT |
3176 | }; |
3177 | ||
3178 | static int __init gfar_init(void) | |
3179 | { | |
1577ecef | 3180 | return of_register_platform_driver(&gfar_driver); |
1da177e4 LT |
3181 | } |
3182 | ||
3183 | static void __exit gfar_exit(void) | |
3184 | { | |
b31a1d8b | 3185 | of_unregister_platform_driver(&gfar_driver); |
1da177e4 LT |
3186 | } |
3187 | ||
3188 | module_init(gfar_init); | |
3189 | module_exit(gfar_exit); | |
3190 |