/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */

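/*
 * Illustrative sketch (not part of the driver): a minimal walk of such a
 * descriptor ring, with hypothetical EMPTY/WRAP status flags standing in
 * for the RXBD_* definitions used below, looks roughly like:
 *
 *	bdp = ring_base;
 *	while (!(bdp->status & EMPTY)) {
 *		process(bdp);
 *		bdp = (bdp->status & WRAP) ? ring_base : bdp + 1;
 *	}
 *
 * The real receive path (gfar_clean_rx_ring) follows this shape, with
 * buffer refill and the NAPI budget added on top.
 */
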
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
                struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
                int amount_pull);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
                const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

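/* Initialize a single RX buffer descriptor: point it at the DMA buffer,
 * mark it empty, and set the WRAP flag if it is the last descriptor in
 * the ring. The eieio() orders the writes before the descriptor is
 * handed back to the hardware. */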
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
                dma_addr_t buf)
{
        u32 lstatus;

        bdp->bufPtr = buf;

        lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
        if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
                lstatus |= BD_LFLAG(RXBD_WRAP);

        eieio();

        bdp->lstatus = lstatus;
}

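/* (Re)initialize all descriptor rings: reset TX ring state and mark every
 * TX descriptor clean, then ensure each RX descriptor has a pre-allocated
 * skb, allocating fresh ones where needed. */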
static int gfar_init_bds(struct net_device *ndev)
{
        struct gfar_private *priv = netdev_priv(ndev);
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;
        struct txbd8 *txbdp;
        struct rxbd8 *rxbdp;
        int i, j;

        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
                /* Initialize some variables in our dev structure */
                tx_queue->num_txbdfree = tx_queue->tx_ring_size;
                tx_queue->dirty_tx = tx_queue->tx_bd_base;
                tx_queue->cur_tx = tx_queue->tx_bd_base;
                tx_queue->skb_curtx = 0;
                tx_queue->skb_dirtytx = 0;

                /* Initialize Transmit Descriptor Ring */
                txbdp = tx_queue->tx_bd_base;
                for (j = 0; j < tx_queue->tx_ring_size; j++) {
                        txbdp->lstatus = 0;
                        txbdp->bufPtr = 0;
                        txbdp++;
                }

                /* Set the last descriptor in the ring to indicate wrap */
                txbdp--;
                txbdp->status |= TXBD_WRAP;
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
                rx_queue->cur_rx = rx_queue->rx_bd_base;
                rx_queue->skb_currx = 0;
                rxbdp = rx_queue->rx_bd_base;

                for (j = 0; j < rx_queue->rx_ring_size; j++) {
                        struct sk_buff *skb = rx_queue->rx_skbuff[j];

                        if (skb) {
                                gfar_init_rxbdp(rx_queue, rxbdp,
                                                rxbdp->bufPtr);
                        } else {
                                skb = gfar_new_skb(ndev);
                                if (!skb) {
                                        netdev_err(ndev, "Can't allocate RX buffers\n");
                                        goto err_rxalloc_fail;
                                }
                                rx_queue->rx_skbuff[j] = skb;

                                gfar_new_rxbdp(rx_queue, rxbdp, skb);
                        }

                        rxbdp++;
                }

        }

        return 0;

err_rxalloc_fail:
        free_skb_resources(priv);
        return -ENOMEM;
}

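/* Allocate one DMA-coherent region for all TX and RX descriptor rings,
 * carve it up per queue, allocate the per-queue skb pointer arrays, and
 * finish by initializing the rings via gfar_init_bds(). */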
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
        void *vaddr;
        dma_addr_t addr;
        int i, j, k;
        struct gfar_private *priv = netdev_priv(ndev);
        struct device *dev = &priv->ofdev->dev;
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;

        priv->total_tx_ring_size = 0;
        for (i = 0; i < priv->num_tx_queues; i++)
                priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

        priv->total_rx_ring_size = 0;
        for (i = 0; i < priv->num_rx_queues; i++)
                priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

        /* Allocate memory for the buffer descriptors */
        vaddr = dma_alloc_coherent(dev,
                        sizeof(struct txbd8) * priv->total_tx_ring_size +
                        sizeof(struct rxbd8) * priv->total_rx_ring_size,
                        &addr, GFP_KERNEL);
        if (!vaddr) {
                netif_err(priv, ifup, ndev,
                          "Could not allocate buffer descriptors!\n");
                return -ENOMEM;
        }

        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
                tx_queue->tx_bd_base = vaddr;
                tx_queue->tx_bd_dma_base = addr;
                tx_queue->dev = ndev;
                /* enet DMA only understands physical addresses */
                addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
                vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
        }

        /* Start the rx descriptor ring where the tx ring leaves off */
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
                rx_queue->rx_bd_base = vaddr;
                rx_queue->rx_bd_dma_base = addr;
                rx_queue->dev = ndev;
                addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
                vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
        }

        /* Setup the skbuff rings */
        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
                tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
                                tx_queue->tx_ring_size, GFP_KERNEL);
                if (!tx_queue->tx_skbuff) {
                        netif_err(priv, ifup, ndev,
                                  "Could not allocate tx_skbuff\n");
                        goto cleanup;
                }

                for (k = 0; k < tx_queue->tx_ring_size; k++)
                        tx_queue->tx_skbuff[k] = NULL;
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
                rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
                                rx_queue->rx_ring_size, GFP_KERNEL);

                if (!rx_queue->rx_skbuff) {
                        netif_err(priv, ifup, ndev,
                                  "Could not allocate rx_skbuff\n");
                        goto cleanup;
                }

                for (j = 0; j < rx_queue->rx_ring_size; j++)
                        rx_queue->rx_skbuff[j] = NULL;
        }

        if (gfar_init_bds(ndev))
                goto cleanup;

        return 0;

cleanup:
        free_skb_resources(priv);
        return -ENOMEM;
}

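/* Program the TBASEn/RBASEn registers with each queue's descriptor ring
 * DMA base address (the registers sit two words apart, hence baddr += 2). */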
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 __iomem *baddr;
        int i;

        baddr = &regs->tbase0;
        for (i = 0; i < priv->num_tx_queues; i++) {
                gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
                baddr += 2;
        }

        baddr = &regs->rbase0;
        for (i = 0; i < priv->num_rx_queues; i++) {
                gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
                baddr += 2;
        }
}

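/* Program the MAC with our settings: ring base registers, interrupt
 * coalescing, RCTRL/TCTRL (filing, checksumming, VLAN extraction,
 * timestamping, padding), and the stashing attributes. */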
static void gfar_init_mac(struct net_device *ndev)
{
        struct gfar_private *priv = netdev_priv(ndev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 rctrl = 0;
        u32 tctrl = 0;
        u32 attrs = 0;

        /* write the tx/rx base registers */
        gfar_init_tx_rx_base(priv);

        /* Configure the coalescing support */
        gfar_configure_coalescing(priv, 0xFF, 0xFF);

        if (priv->rx_filer_enable) {
                rctrl |= RCTRL_FILREN;
                /* Program the RIR0 reg with the required distribution */
                gfar_write(&regs->rir0, DEFAULT_RIR0);
        }

        if (ndev->features & NETIF_F_RXCSUM)
                rctrl |= RCTRL_CHECKSUMMING;

        if (priv->extended_hash) {
                rctrl |= RCTRL_EXTHASH;

                gfar_clear_exact_match(ndev);
                rctrl |= RCTRL_EMEN;
        }

        if (priv->padding) {
                rctrl &= ~RCTRL_PAL_MASK;
                rctrl |= RCTRL_PADDING(priv->padding);
        }

        /* Insert receive time stamps into padding alignment bytes */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
                rctrl &= ~RCTRL_PAL_MASK;
                rctrl |= RCTRL_PADDING(8);
                priv->padding = 8;
        }

        /* Enable HW time stamping if requested from user space */
        if (priv->hwts_rx_en)
                rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

        if (ndev->features & NETIF_F_HW_VLAN_RX)
                rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

        /* Init rctrl based on our settings */
        gfar_write(&regs->rctrl, rctrl);

        if (ndev->features & NETIF_F_IP_CSUM)
                tctrl |= TCTRL_INIT_CSUM;

        tctrl |= TCTRL_TXSCHED_PRIO;

        gfar_write(&regs->tctrl, tctrl);

        /* Set the extraction length and index */
        attrs = ATTRELI_EL(priv->rx_stash_size) |
                ATTRELI_EI(priv->rx_stash_index);

        gfar_write(&regs->attreli, attrs);

        /* Start with defaults, and add stashing or locking
         * depending on the appropriate variables */
        attrs = ATTR_INIT_SETTINGS;

        if (priv->bd_stash_en)
                attrs |= ATTR_BDSTASH;

        if (priv->rx_stash_size != 0)
                attrs |= ATTR_BUFSTASH;

        gfar_write(&regs->attr, attrs);

        gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
        gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
        gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}

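/* Sum the per-queue counters into the aggregate netdev statistics */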
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
        unsigned long tx_packets = 0, tx_bytes = 0;
        int i = 0;

        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_packets += priv->rx_queue[i]->stats.rx_packets;
                rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
                rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
        }

        dev->stats.rx_packets = rx_packets;
        dev->stats.rx_bytes = rx_bytes;
        dev->stats.rx_dropped = rx_dropped;

        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
                tx_packets += priv->tx_queue[i]->stats.tx_packets;
        }

        dev->stats.tx_bytes = tx_bytes;
        dev->stats.tx_packets = tx_packets;

        return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
        .ndo_open = gfar_enet_open,
        .ndo_start_xmit = gfar_start_xmit,
        .ndo_stop = gfar_close,
        .ndo_change_mtu = gfar_change_mtu,
        .ndo_set_features = gfar_set_features,
        .ndo_set_multicast_list = gfar_set_multi,
        .ndo_tx_timeout = gfar_timeout,
        .ndo_do_ioctl = gfar_ioctl,
        .ndo_get_stats = gfar_get_stats,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = gfar_netpoll,
#endif
};

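/* Helpers to take or release all RX/TX queue locks at once; callers
 * such as the suspend/resume paths below disable local interrupts
 * around these. */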
void lock_rx_qs(struct gfar_private *priv)
{
        int i = 0x0;

        for (i = 0; i < priv->num_rx_queues; i++)
                spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
        int i = 0x0;

        for (i = 0; i < priv->num_tx_queues; i++)
                spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
        int i = 0x0;

        for (i = 0; i < priv->num_rx_queues; i++)
                spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
        int i = 0x0;

        for (i = 0; i < priv->num_tx_queues; i++)
                spin_unlock(&priv->tx_queue[i]->txlock);
}

static bool gfar_is_vlan_on(struct gfar_private *priv)
{
        return (priv->ndev->features & NETIF_F_HW_VLAN_RX) ||
               (priv->ndev->features & NETIF_F_HW_VLAN_TX);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
        return gfar_is_vlan_on(priv) ||
               (priv->ndev->features & NETIF_F_RXCSUM) ||
               (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}

static void free_tx_pointers(struct gfar_private *priv)
{
        int i = 0;

        for (i = 0; i < priv->num_tx_queues; i++)
                kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
        int i = 0;

        for (i = 0; i < priv->num_rx_queues; i++)
                kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
        int i = 0;

        for (i = 0; i < MAXGROUPS; i++)
                if (priv->gfargrp[i].regs)
                        iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
        int i = 0;

        for (i = 0; i < priv->num_grps; i++)
                napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
        int i = 0;

        for (i = 0; i < priv->num_grps; i++)
                napi_enable(&priv->gfargrp[i].napi);
}

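/* Map one interrupt group's registers and IRQs from the device tree,
 * and record which RX/TX queues the group services (per-group bit maps
 * in multi-group mode, all queues otherwise). */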
static int gfar_parse_group(struct device_node *np,
                struct gfar_private *priv, const char *model)
{
        u32 *queue_mask;

        priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
        if (!priv->gfargrp[priv->num_grps].regs)
                return -ENOMEM;

        priv->gfargrp[priv->num_grps].interruptTransmit =
                        irq_of_parse_and_map(np, 0);

        /* If we aren't the FEC we have multiple interrupts */
        if (model && strcasecmp(model, "FEC")) {
                priv->gfargrp[priv->num_grps].interruptReceive =
                        irq_of_parse_and_map(np, 1);
                priv->gfargrp[priv->num_grps].interruptError =
                        irq_of_parse_and_map(np, 2);
                if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
                    priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ ||
                    priv->gfargrp[priv->num_grps].interruptError == NO_IRQ)
                        return -EINVAL;
        }

        priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
        priv->gfargrp[priv->num_grps].priv = priv;
        spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
        if (priv->mode == MQ_MG_MODE) {
                queue_mask = (u32 *)of_get_property(np,
                                "fsl,rx-bit-map", NULL);
                priv->gfargrp[priv->num_grps].rx_bit_map =
                        queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
                queue_mask = (u32 *)of_get_property(np,
                                "fsl,tx-bit-map", NULL);
                priv->gfargrp[priv->num_grps].tx_bit_map =
                        queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
        } else {
                priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
                priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
        }
        priv->num_grps++;

        return 0;
}

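/* Read the controller's device tree node: queue counts, register groups,
 * per-queue structures, stashing options, MAC address, model, and PHY
 * handles. Called from gfar_probe() before any hardware setup. */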
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
        const char *model;
        const char *ctype;
        const void *mac_addr;
        int err = 0, i;
        struct net_device *dev = NULL;
        struct gfar_private *priv = NULL;
        struct device_node *np = ofdev->dev.of_node;
        struct device_node *child = NULL;
        const u32 *stash;
        const u32 *stash_len;
        const u32 *stash_idx;
        unsigned int num_tx_qs, num_rx_qs;
        u32 *tx_queues, *rx_queues;

        if (!np || !of_device_is_available(np))
                return -ENODEV;

        /* parse the num of tx and rx queues */
        tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
        num_tx_qs = tx_queues ? *tx_queues : 1;

        if (num_tx_qs > MAX_TX_QS) {
                pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
                       num_tx_qs, MAX_TX_QS);
                pr_err("Cannot do alloc_etherdev, aborting\n");
                return -EINVAL;
        }

        rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
        num_rx_qs = rx_queues ? *rx_queues : 1;

        if (num_rx_qs > MAX_RX_QS) {
                pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
                       num_rx_qs, MAX_RX_QS);
                pr_err("Cannot do alloc_etherdev, aborting\n");
                return -EINVAL;
        }

        *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
        dev = *pdev;
        if (NULL == dev)
                return -ENOMEM;

        priv = netdev_priv(dev);
        priv->node = ofdev->dev.of_node;
        priv->ndev = dev;

        priv->num_tx_queues = num_tx_qs;
        netif_set_real_num_rx_queues(dev, num_rx_qs);
        priv->num_rx_queues = num_rx_qs;
        priv->num_grps = 0x0;

        /* Init Rx queue filer rule set linked list */
        INIT_LIST_HEAD(&priv->rx_list.list);
        priv->rx_list.count = 0;
        mutex_init(&priv->rx_queue_access);

        model = of_get_property(np, "model", NULL);

        for (i = 0; i < MAXGROUPS; i++)
                priv->gfargrp[i].regs = NULL;

        /* Parse and initialize group specific information */
        if (of_device_is_compatible(np, "fsl,etsec2")) {
                priv->mode = MQ_MG_MODE;
                for_each_child_of_node(np, child) {
                        err = gfar_parse_group(child, priv, model);
                        if (err)
                                goto err_grp_init;
                }
        } else {
                priv->mode = SQ_SG_MODE;
                err = gfar_parse_group(np, priv, model);
                if (err)
                        goto err_grp_init;
        }

        for (i = 0; i < priv->num_tx_queues; i++)
                priv->tx_queue[i] = NULL;
        for (i = 0; i < priv->num_rx_queues; i++)
                priv->rx_queue[i] = NULL;

        for (i = 0; i < priv->num_tx_queues; i++) {
                priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
                                            GFP_KERNEL);
                if (!priv->tx_queue[i]) {
                        err = -ENOMEM;
                        goto tx_alloc_failed;
                }
                priv->tx_queue[i]->tx_skbuff = NULL;
                priv->tx_queue[i]->qindex = i;
                priv->tx_queue[i]->dev = dev;
                spin_lock_init(&(priv->tx_queue[i]->txlock));
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
                                            GFP_KERNEL);
                if (!priv->rx_queue[i]) {
                        err = -ENOMEM;
                        goto rx_alloc_failed;
                }
                priv->rx_queue[i]->rx_skbuff = NULL;
                priv->rx_queue[i]->qindex = i;
                priv->rx_queue[i]->dev = dev;
                spin_lock_init(&(priv->rx_queue[i]->rxlock));
        }

        stash = of_get_property(np, "bd-stash", NULL);

        if (stash) {
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
                priv->bd_stash_en = 1;
        }

        stash_len = of_get_property(np, "rx-stash-len", NULL);

        if (stash_len)
                priv->rx_stash_size = *stash_len;

        stash_idx = of_get_property(np, "rx-stash-idx", NULL);

        if (stash_idx)
                priv->rx_stash_index = *stash_idx;

        if (stash_len || stash_idx)
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

        mac_addr = of_get_mac_address(np);
        if (mac_addr)
                memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

        if (model && !strcasecmp(model, "TSEC"))
                priv->device_flags =
                        FSL_GIANFAR_DEV_HAS_GIGABIT |
                        FSL_GIANFAR_DEV_HAS_COALESCE |
                        FSL_GIANFAR_DEV_HAS_RMON |
                        FSL_GIANFAR_DEV_HAS_MULTI_INTR;
        if (model && !strcasecmp(model, "eTSEC"))
                priv->device_flags =
                        FSL_GIANFAR_DEV_HAS_GIGABIT |
                        FSL_GIANFAR_DEV_HAS_COALESCE |
                        FSL_GIANFAR_DEV_HAS_RMON |
                        FSL_GIANFAR_DEV_HAS_MULTI_INTR |
                        FSL_GIANFAR_DEV_HAS_PADDING |
                        FSL_GIANFAR_DEV_HAS_CSUM |
                        FSL_GIANFAR_DEV_HAS_VLAN |
                        FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
                        FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
                        FSL_GIANFAR_DEV_HAS_TIMER;

        ctype = of_get_property(np, "phy-connection-type", NULL);

        /* We only care about rgmii-id. The rest are autodetected */
        if (ctype && !strcmp(ctype, "rgmii-id"))
                priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
        else
                priv->interface = PHY_INTERFACE_MODE_MII;

        if (of_get_property(np, "fsl,magic-packet", NULL))
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

        priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

        /* Find the TBI PHY. If it's not there, we don't support SGMII */
        priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

        return 0;

rx_alloc_failed:
        free_rx_pointers(priv);
tx_alloc_failed:
        free_tx_pointers(priv);
err_grp_init:
        unmap_group_regs(priv);
        free_netdev(dev);
        return err;
}

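/* Handle SIOCSHWTSTAMP: switch hardware TX/RX time stamping on or off.
 * Changing the RX setting requires restarting the interface, since the
 * receive path layout (padding/FCB) depends on it. */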
static int gfar_hwtstamp_ioctl(struct net_device *netdev,
                struct ifreq *ifr, int cmd)
{
        struct hwtstamp_config config;
        struct gfar_private *priv = netdev_priv(netdev);

        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;

        /* reserved for future extensions */
        if (config.flags)
                return -EINVAL;

        switch (config.tx_type) {
        case HWTSTAMP_TX_OFF:
                priv->hwts_tx_en = 0;
                break;
        case HWTSTAMP_TX_ON:
                if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
                        return -ERANGE;
                priv->hwts_tx_en = 1;
                break;
        default:
                return -ERANGE;
        }

        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                if (priv->hwts_rx_en) {
                        stop_gfar(netdev);
                        priv->hwts_rx_en = 0;
                        startup_gfar(netdev);
                }
                break;
        default:
                if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
                        return -ERANGE;
                if (!priv->hwts_rx_en) {
                        stop_gfar(netdev);
                        priv->hwts_rx_en = 1;
                        startup_gfar(netdev);
                }
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        }

        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
                -EFAULT : 0;
}

/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct gfar_private *priv = netdev_priv(dev);

        if (!netif_running(dev))
                return -EINVAL;

        if (cmd == SIOCSHWTSTAMP)
                return gfar_hwtstamp_ioctl(dev, rq, cmd);

        if (!priv->phydev)
                return -ENODEV;

        return phy_mii_ioctl(priv->phydev, rq, cmd);
}

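/* Mirror the low max_qs bits of bit_map, so that the hardware's
 * MSB-is-q0 convention matches for_each_set_bit(), which scans from
 * bit 0 upward. */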
static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
        unsigned int new_bit_map = 0x0;
        int mask = 0x1 << (max_qs - 1), i;

        for (i = 0; i < max_qs; i++) {
                if (bit_map & mask)
                        new_bit_map = new_bit_map + (1 << i);
                mask = mask >> 0x1;
        }
        return new_bit_map;
}

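/* Write one filer "cluster" of rules for the given RQFPR class.
 * Entries are filled from high indices downward; the updated
 * (decremented) rqfar index is returned to the caller. */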
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
                u32 class)
{
        u32 rqfpr = FPR_FILER_MASK;
        u32 rqfcr = 0x0;

        rqfar--;
        rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        rqfar--;
        rqfcr = RQFCR_CMP_NOMATCH;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        rqfar--;
        rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
        rqfpr = class;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        rqfar--;
        rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
        rqfpr = class;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
        int i = 0x0;
        u32 rqfar = MAX_FILER_IDX;
        u32 rqfcr = 0x0;
        u32 rqfpr = FPR_FILER_MASK;

        /* Default rule */
        rqfcr = RQFCR_CMP_MATCH;
        priv->ftp_rqfcr[rqfar] = rqfcr;
        priv->ftp_rqfpr[rqfar] = rqfpr;
        gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
        rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

        /* cur_filer_idx indicates the first non-masked rule */
        priv->cur_filer_idx = rqfar;

        /* Rest are masked rules */
        rqfcr = RQFCR_CMP_NOMATCH;
        for (i = 0; i < rqfar; i++) {
                priv->ftp_rqfcr[i] = rqfcr;
                priv->ftp_rqfpr[i] = rqfpr;
                gfar_write_filer(priv, i, rqfcr, rqfpr);
        }
}

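/* Identify silicon revisions that need errata workarounds by decoding
 * the processor (PVR) and system (SVR) version registers. */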
static void gfar_detect_errata(struct gfar_private *priv)
{
        struct device *dev = &priv->ofdev->dev;
        unsigned int pvr = mfspr(SPRN_PVR);
        unsigned int svr = mfspr(SPRN_SVR);
        unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
        unsigned int rev = svr & 0xffff;

        /* MPC8313 Rev 2.0 and higher; All MPC837x */
        if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
            (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
                priv->errata |= GFAR_ERRATA_74;

        /* MPC8313 and MPC837x all rev */
        if ((pvr == 0x80850010 && mod == 0x80b0) ||
            (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
                priv->errata |= GFAR_ERRATA_76;

        /* MPC8313 and MPC837x all rev */
        if ((pvr == 0x80850010 && mod == 0x80b0) ||
            (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
                priv->errata |= GFAR_ERRATA_A002;

        /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
        if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
            (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
                priv->errata |= GFAR_ERRATA_12;

        if (priv->errata)
                dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
                         priv->errata);
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *ofdev)
{
        u32 tempval;
        struct net_device *dev = NULL;
        struct gfar_private *priv = NULL;
        struct gfar __iomem *regs = NULL;
        int err = 0, i, grp_idx = 0;
        int len_devname;
        u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
        u32 isrg = 0;
        u32 __iomem *baddr;

        err = gfar_of_init(ofdev, &dev);

        if (err)
                return err;

        priv = netdev_priv(dev);
        priv->ndev = dev;
        priv->ofdev = ofdev;
        priv->node = ofdev->dev.of_node;
        SET_NETDEV_DEV(dev, &ofdev->dev);

        spin_lock_init(&priv->bflock);
        INIT_WORK(&priv->reset_task, gfar_reset_task);

        dev_set_drvdata(&ofdev->dev, priv);
        regs = priv->gfargrp[0].regs;

        gfar_detect_errata(priv);

        /* Stop the DMA engine now, in case it was running before */
        /* (The firmware could have used it, and left it running). */
        gfar_halt(dev);

        /* Reset MAC layer */
        gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

        /* We need to delay at least 3 TX clocks */
        udelay(2);

        tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
        gfar_write(&regs->maccfg1, tempval);

        /* Initialize MACCFG2. */
        tempval = MACCFG2_INIT_SETTINGS;
        if (gfar_has_errata(priv, GFAR_ERRATA_74))
                tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
        gfar_write(&regs->maccfg2, tempval);

        /* Initialize ECNTRL */
        gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

        /* Set the dev->base_addr to the gfar reg region */
        dev->base_addr = (unsigned long) regs;

        SET_NETDEV_DEV(dev, &ofdev->dev);

        /* Fill in the dev structure */
        dev->watchdog_timeo = TX_TIMEOUT;
        dev->mtu = 1500;
        dev->netdev_ops = &gfar_netdev_ops;
        dev->ethtool_ops = &gfar_ethtool_ops;

        /* Register for napi ...We are registering NAPI for each grp */
        for (i = 0; i < priv->num_grps; i++)
                netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
                dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
                        NETIF_F_RXCSUM;
                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
                        NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
        }

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
                dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
                dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
        }

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
                priv->extended_hash = 1;
                priv->hash_width = 9;

                priv->hash_regs[0] = &regs->igaddr0;
                priv->hash_regs[1] = &regs->igaddr1;
                priv->hash_regs[2] = &regs->igaddr2;
                priv->hash_regs[3] = &regs->igaddr3;
                priv->hash_regs[4] = &regs->igaddr4;
                priv->hash_regs[5] = &regs->igaddr5;
                priv->hash_regs[6] = &regs->igaddr6;
                priv->hash_regs[7] = &regs->igaddr7;
                priv->hash_regs[8] = &regs->gaddr0;
                priv->hash_regs[9] = &regs->gaddr1;
                priv->hash_regs[10] = &regs->gaddr2;
                priv->hash_regs[11] = &regs->gaddr3;
                priv->hash_regs[12] = &regs->gaddr4;
                priv->hash_regs[13] = &regs->gaddr5;
                priv->hash_regs[14] = &regs->gaddr6;
                priv->hash_regs[15] = &regs->gaddr7;

        } else {
                priv->extended_hash = 0;
                priv->hash_width = 8;

                priv->hash_regs[0] = &regs->gaddr0;
                priv->hash_regs[1] = &regs->gaddr1;
                priv->hash_regs[2] = &regs->gaddr2;
                priv->hash_regs[3] = &regs->gaddr3;
                priv->hash_regs[4] = &regs->gaddr4;
                priv->hash_regs[5] = &regs->gaddr5;
                priv->hash_regs[6] = &regs->gaddr6;
                priv->hash_regs[7] = &regs->gaddr7;
        }

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
                priv->padding = DEFAULT_PADDING;
        else
                priv->padding = 0;

        if (dev->features & NETIF_F_IP_CSUM ||
            priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
                dev->hard_header_len += GMAC_FCB_LEN;

        /* Program the isrg regs only if number of grps > 1 */
        if (priv->num_grps > 1) {
                baddr = &regs->isrg0;
                for (i = 0; i < priv->num_grps; i++) {
                        isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
                        isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
                        gfar_write(baddr, isrg);
                        baddr++;
                        isrg = 0x0;
                }
        }

        /* Need to reverse the bit maps as bit_map's MSB is q0
         * but, for_each_set_bit parses from right to left, which
         * basically reverses the queue numbers */
        for (i = 0; i < priv->num_grps; i++) {
                priv->gfargrp[i].tx_bit_map = reverse_bitmap(
                                priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
                priv->gfargrp[i].rx_bit_map = reverse_bitmap(
                                priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
        }

        /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
         * also assign queues to groups */
        for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
                priv->gfargrp[grp_idx].num_rx_queues = 0x0;
                for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
                                 priv->num_rx_queues) {
                        priv->gfargrp[grp_idx].num_rx_queues++;
                        priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
                        rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
                        rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
                }
                priv->gfargrp[grp_idx].num_tx_queues = 0x0;
                for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
                                 priv->num_tx_queues) {
                        priv->gfargrp[grp_idx].num_tx_queues++;
                        priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
                        tstat = tstat | (TSTAT_CLEAR_THALT >> i);
                        tqueue = tqueue | (TQUEUE_EN0 >> i);
                }
                priv->gfargrp[grp_idx].rstat = rstat;
                priv->gfargrp[grp_idx].tstat = tstat;
                rstat = tstat = 0;
        }

        gfar_write(&regs->rqueue, rqueue);
        gfar_write(&regs->tqueue, tqueue);

        priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

        /* Initializing some of the rx/tx queue level parameters */
        for (i = 0; i < priv->num_tx_queues; i++) {
                priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
                priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
                priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
                priv->tx_queue[i]->txic = DEFAULT_TXIC;
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
                priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
                priv->rx_queue[i]->rxic = DEFAULT_RXIC;
        }

        /* always enable rx filer */
        priv->rx_filer_enable = 1;
        /* Enable most messages by default */
        priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

        /* Carrier starts down, phylib will bring it up */
        netif_carrier_off(dev);

        err = register_netdev(dev);

        if (err) {
                pr_err("%s: Cannot register net device, aborting\n", dev->name);
                goto register_fail;
        }

        device_init_wakeup(&dev->dev,
                priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

        /* fill out IRQ number and name fields */
        len_devname = strlen(dev->name);
        for (i = 0; i < priv->num_grps; i++) {
                strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
                                len_devname);
                if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                        strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
                                "_g", sizeof("_g"));
                        priv->gfargrp[i].int_name_tx[
                                strlen(priv->gfargrp[i].int_name_tx)] = i+48;
                        strncpy(&priv->gfargrp[i].int_name_tx[strlen(
                                priv->gfargrp[i].int_name_tx)],
                                "_tx", sizeof("_tx") + 1);

                        strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
                                        len_devname);
                        strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
                                        "_g", sizeof("_g"));
                        priv->gfargrp[i].int_name_rx[
                                strlen(priv->gfargrp[i].int_name_rx)] = i+48;
                        strncpy(&priv->gfargrp[i].int_name_rx[strlen(
                                priv->gfargrp[i].int_name_rx)],
                                "_rx", sizeof("_rx") + 1);

                        strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
                                        len_devname);
                        strncpy(&priv->gfargrp[i].int_name_er[len_devname],
                                        "_g", sizeof("_g"));
                        priv->gfargrp[i].int_name_er[strlen(
                                priv->gfargrp[i].int_name_er)] = i+48;
                        strncpy(&priv->gfargrp[i].int_name_er[strlen(
                                priv->gfargrp[i].int_name_er)],
                                "_er", sizeof("_er") + 1);
                } else
                        priv->gfargrp[i].int_name_tx[len_devname] = '\0';
        }

        /* Initialize the filer table */
        gfar_init_filer_table(priv);

        /* Create all the sysfs files */
        gfar_init_sysfs(dev);

        /* Print out the device info */
        netdev_info(dev, "mac: %pM\n", dev->dev_addr);

        /* Even more device info helps when determining which kernel */
        /* provided which set of benchmarks. */
        netdev_info(dev, "Running with NAPI enabled\n");
        for (i = 0; i < priv->num_rx_queues; i++)
                netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
                            i, priv->rx_queue[i]->rx_ring_size);
        for (i = 0; i < priv->num_tx_queues; i++)
                netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
                            i, priv->tx_queue[i]->tx_ring_size);

        return 0;

register_fail:
        unmap_group_regs(priv);
        free_tx_pointers(priv);
        free_rx_pointers(priv);
        if (priv->phy_node)
                of_node_put(priv->phy_node);
        if (priv->tbi_node)
                of_node_put(priv->tbi_node);
        free_netdev(dev);
        return err;
}

static int gfar_remove(struct platform_device *ofdev)
{
        struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

        if (priv->phy_node)
                of_node_put(priv->phy_node);
        if (priv->tbi_node)
                of_node_put(priv->tbi_node);

        dev_set_drvdata(&ofdev->dev, NULL);

        unregister_netdev(priv->ndev);
        unmap_group_regs(priv);
        free_netdev(priv->ndev);

        return 0;
}

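/* Power management: suspend halts DMA and disables the transmitter, and
 * the receiver too unless wake-on-LAN via magic packet is enabled;
 * resume undoes this. */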
#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
        struct gfar_private *priv = dev_get_drvdata(dev);
        struct net_device *ndev = priv->ndev;
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        unsigned long flags;
        u32 tempval;

        int magic_packet = priv->wol_en &&
                (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

        netif_device_detach(ndev);

        if (netif_running(ndev)) {

                local_irq_save(flags);
                lock_tx_qs(priv);
                lock_rx_qs(priv);

                gfar_halt_nodisable(ndev);

                /* Disable Tx, and Rx if wake-on-LAN is disabled. */
                tempval = gfar_read(&regs->maccfg1);

                tempval &= ~MACCFG1_TX_EN;

                if (!magic_packet)
                        tempval &= ~MACCFG1_RX_EN;

                gfar_write(&regs->maccfg1, tempval);

                unlock_rx_qs(priv);
                unlock_tx_qs(priv);
                local_irq_restore(flags);

                disable_napi(priv);

                if (magic_packet) {
                        /* Enable interrupt on Magic Packet */
                        gfar_write(&regs->imask, IMASK_MAG);

                        /* Enable Magic Packet mode */
                        tempval = gfar_read(&regs->maccfg2);
                        tempval |= MACCFG2_MPEN;
                        gfar_write(&regs->maccfg2, tempval);
                } else {
                        phy_stop(priv->phydev);
                }
        }

        return 0;
}

static int gfar_resume(struct device *dev)
{
        struct gfar_private *priv = dev_get_drvdata(dev);
        struct net_device *ndev = priv->ndev;
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        unsigned long flags;
        u32 tempval;
        int magic_packet = priv->wol_en &&
                (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

        if (!netif_running(ndev)) {
                netif_device_attach(ndev);
                return 0;
        }

        if (!magic_packet && priv->phydev)
                phy_start(priv->phydev);

        /* Disable Magic Packet mode, in case something
         * else woke us up.
         */
        local_irq_save(flags);
        lock_tx_qs(priv);
        lock_rx_qs(priv);

        tempval = gfar_read(&regs->maccfg2);
        tempval &= ~MACCFG2_MPEN;
        gfar_write(&regs->maccfg2, tempval);

        gfar_start(ndev);

        unlock_rx_qs(priv);
        unlock_tx_qs(priv);
        local_irq_restore(flags);

        netif_device_attach(ndev);

        enable_napi(priv);

        return 0;
}

static int gfar_restore(struct device *dev)
{
        struct gfar_private *priv = dev_get_drvdata(dev);
        struct net_device *ndev = priv->ndev;

        if (!netif_running(ndev))
                return 0;

        gfar_init_bds(ndev);
        init_registers(ndev);
        gfar_set_mac_address(ndev);
        gfar_init_mac(ndev);
        gfar_start(ndev);

        priv->oldlink = 0;
        priv->oldspeed = 0;
        priv->oldduplex = -1;

        if (priv->phydev)
                phy_start(priv->phydev);

        netif_device_attach(ndev);
        enable_napi(priv);

        return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
        .suspend = gfar_suspend,
        .resume = gfar_resume,
        .freeze = gfar_suspend,
        .thaw = gfar_resume,
        .restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif

e8a2b6a4 AF |
1402 | /* Reads the controller's registers to determine what interface |
1403 | * connects it to the PHY. | |
1404 | */ | |
1405 | static phy_interface_t gfar_get_interface(struct net_device *dev) | |
1406 | { | |
1407 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 1408 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
f4983704 SG |
1409 | u32 ecntrl; |
1410 | ||
f4983704 | 1411 | ecntrl = gfar_read(®s->ecntrl); |
e8a2b6a4 AF |
1412 | |
1413 | if (ecntrl & ECNTRL_SGMII_MODE) | |
1414 | return PHY_INTERFACE_MODE_SGMII; | |
1415 | ||
1416 | if (ecntrl & ECNTRL_TBI_MODE) { | |
1417 | if (ecntrl & ECNTRL_REDUCED_MODE) | |
1418 | return PHY_INTERFACE_MODE_RTBI; | |
1419 | else | |
1420 | return PHY_INTERFACE_MODE_TBI; | |
1421 | } | |
1422 | ||
1423 | if (ecntrl & ECNTRL_REDUCED_MODE) { | |
1424 | if (ecntrl & ECNTRL_REDUCED_MII_MODE) | |
1425 | return PHY_INTERFACE_MODE_RMII; | |
7132ab7f | 1426 | else { |
b31a1d8b | 1427 | phy_interface_t interface = priv->interface; |
7132ab7f AF |
1428 | |
1429 | /* | |
1430 | * This isn't autodetected right now, so it must | |
1431 | * be set by the device tree or platform code. | |
1432 | */ | |
1433 | if (interface == PHY_INTERFACE_MODE_RGMII_ID) | |
1434 | return PHY_INTERFACE_MODE_RGMII_ID; | |
1435 | ||
e8a2b6a4 | 1436 | return PHY_INTERFACE_MODE_RGMII; |
7132ab7f | 1437 | } |
e8a2b6a4 AF |
1438 | } |
1439 | ||
b31a1d8b | 1440 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) |
e8a2b6a4 AF |
1441 | return PHY_INTERFACE_MODE_GMII; |
1442 | ||
1443 | return PHY_INTERFACE_MODE_MII; | |
1444 | } | |
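/* For reference, the decode above prefers SGMII, then (R)TBI, then the
 * reduced-pin RMII/RGMII variants, and finally falls back to GMII vs.
 * MII based on the gigabit capability flag.
 */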
1445 | ||
1446 | ||
bb40dcbb AF |
1447 | /* Initializes driver's PHY state, and attaches to the PHY. |
1448 | * Returns 0 on success. | |
1da177e4 LT |
1449 | */ |
1450 | static int init_phy(struct net_device *dev) | |
1451 | { | |
1452 | struct gfar_private *priv = netdev_priv(dev); | |
bb40dcbb | 1453 | uint gigabit_support = |
b31a1d8b | 1454 | priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? |
bb40dcbb | 1455 | SUPPORTED_1000baseT_Full : 0; |
e8a2b6a4 | 1456 | phy_interface_t interface; |
1da177e4 LT |
1457 | |
1458 | priv->oldlink = 0; | |
1459 | priv->oldspeed = 0; | |
1460 | priv->oldduplex = -1; | |
1461 | ||
e8a2b6a4 AF |
1462 | interface = gfar_get_interface(dev); |
1463 | ||
1db780f8 AV |
1464 | priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, |
1465 | interface); | |
1466 | if (!priv->phydev) | |
1467 | priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link, | |
1468 | interface); | |
1469 | if (!priv->phydev) { | |
1470 | dev_err(&dev->dev, "could not attach to PHY\n"); | |
1471 | return -ENODEV; | |
fe192a49 | 1472 | } |
1da177e4 | 1473 | |
d3c12873 KJ |
1474 | if (interface == PHY_INTERFACE_MODE_SGMII) |
1475 | gfar_configure_serdes(dev); | |
1476 | ||
bb40dcbb | 1477 | /* Remove any features not supported by the controller */ |
fe192a49 GL |
1478 | priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support); |
1479 | priv->phydev->advertising = priv->phydev->supported; | |
1da177e4 LT |
1480 | |
1481 | return 0; | |
1da177e4 LT |
1482 | } |
1483 | ||
d0313587 PG |
1484 | /* |
1485 | * Initialize TBI PHY interface for communicating with the | |
1486 | * SERDES lynx PHY on the chip. We communicate with this PHY | |
1487 | * through the MDIO bus on each controller, treating it as a | |
1488 | * "normal" PHY at the address found in the TBIPA register. We assume | |
1489 | * that the TBIPA register is valid. Either the MDIO bus code will set | |
1490 | * it to a value that doesn't conflict with other PHYs on the bus, or the | |
1491 | * value doesn't matter, as there are no other PHYs on the bus. | |
1492 | */ | |
d3c12873 KJ |
1493 | static void gfar_configure_serdes(struct net_device *dev) |
1494 | { | |
1495 | struct gfar_private *priv = netdev_priv(dev); | |
fe192a49 GL |
1496 | struct phy_device *tbiphy; |
1497 | ||
1498 | if (!priv->tbi_node) { | |
1499 | dev_warn(&dev->dev, "error: SGMII mode requires that the " | |
1500 | "device tree specify a tbi-handle\n"); | |
1501 | return; | |
1502 | } | |
c132419e | 1503 | |
fe192a49 GL |
1504 | tbiphy = of_phy_find_device(priv->tbi_node); |
1505 | if (!tbiphy) { | |
1506 | dev_err(&dev->dev, "error: Could not get TBI device\n"); | |
b31a1d8b AF |
1507 | return; |
1508 | } | |
d3c12873 | 1509 | |
b31a1d8b AF |
1510 | /* |
1511 | * If the link is already up, we must already be ok, and don't need to | |
bdb59f94 TP |
1512 | * configure and reset the TBI<->SerDes link. Maybe U-Boot configured |
1513 | * everything for us? Resetting it takes the link down and requires | |
1514 | * several seconds for it to come back. | |
1515 | */ | |
fe192a49 | 1516 | if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) |
b31a1d8b | 1517 | return; |
d3c12873 | 1518 | |
d0313587 | 1519 | /* Single clk mode, mii mode off(for serdes communication) */ |
fe192a49 | 1520 | phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); |
d3c12873 | 1521 | |
fe192a49 | 1522 | phy_write(tbiphy, MII_ADVERTISE, |
d3c12873 KJ |
1523 | ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | |
1524 | ADVERTISE_1000XPSE_ASYM); | |
1525 | ||
fe192a49 | 1526 | phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE | |
d3c12873 KJ |
1527 | BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); |
1528 | } | |
1529 | ||
1da177e4 LT |
1530 | static void init_registers(struct net_device *dev) |
1531 | { | |
1532 | struct gfar_private *priv = netdev_priv(dev); | |
f4983704 | 1533 | struct gfar __iomem *regs = NULL; |
46ceb60c | 1534 | int i = 0; |
1da177e4 | 1535 | |
46ceb60c SG |
1536 | for (i = 0; i < priv->num_grps; i++) { |
1537 | regs = priv->gfargrp[i].regs; | |
1538 | /* Clear IEVENT */ | |
1539 | gfar_write(®s->ievent, IEVENT_INIT_CLEAR); | |
1da177e4 | 1540 | |
46ceb60c SG |
1541 | /* Initialize IMASK */ |
1542 | gfar_write(®s->imask, IMASK_INIT_CLEAR); | |
1543 | } | |
1da177e4 | 1544 | |
46ceb60c | 1545 | regs = priv->gfargrp[0].regs; |
1da177e4 | 1546 | /* Init hash registers to zero */ |
f4983704 SG |
1547 | gfar_write(®s->igaddr0, 0); |
1548 | gfar_write(®s->igaddr1, 0); | |
1549 | gfar_write(®s->igaddr2, 0); | |
1550 | gfar_write(®s->igaddr3, 0); | |
1551 | gfar_write(®s->igaddr4, 0); | |
1552 | gfar_write(®s->igaddr5, 0); | |
1553 | gfar_write(®s->igaddr6, 0); | |
1554 | gfar_write(®s->igaddr7, 0); | |
1555 | ||
1556 | gfar_write(®s->gaddr0, 0); | |
1557 | gfar_write(®s->gaddr1, 0); | |
1558 | gfar_write(®s->gaddr2, 0); | |
1559 | gfar_write(®s->gaddr3, 0); | |
1560 | gfar_write(®s->gaddr4, 0); | |
1561 | gfar_write(®s->gaddr5, 0); | |
1562 | gfar_write(®s->gaddr6, 0); | |
1563 | gfar_write(®s->gaddr7, 0); | |
1da177e4 | 1564 | |
1da177e4 | 1565 | /* Zero out the rmon mib registers if the device has them */
b31a1d8b | 1566 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { |
f4983704 | 1567 | memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
1da177e4 LT |
1568 | |
1569 | /* Mask off the CAM interrupts */ | |
f4983704 SG |
1570 | gfar_write(®s->rmon.cam1, 0xffffffff); |
1571 | gfar_write(®s->rmon.cam2, 0xffffffff); | |
1da177e4 LT |
1572 | } |
1573 | ||
1574 | /* Initialize the max receive buffer length */ | |
f4983704 | 1575 | gfar_write(®s->mrblr, priv->rx_buffer_size); |
1da177e4 | 1576 | |
1da177e4 | 1577 | /* Initialize the Minimum Frame Length Register */ |
f4983704 | 1578 | gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); |
1da177e4 LT |
1579 | } |
1580 | ||
511d934f AV |
1581 | static int __gfar_is_rx_idle(struct gfar_private *priv) |
1582 | { | |
1583 | u32 res; | |
1584 | ||
1585 | /* | |
1586 | * Normally TSEC should not hang on GRS commands, so we should
1587 | * actually wait for the IEVENT_GRSC flag.
1588 | */ | |
1589 | if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002))) | |
1590 | return 0; | |
1591 | ||
1592 | /* | |
1593 | * Read the eTSEC register at offset 0xD1C. If bits 7-14 are | |
1594 | * the same as bits 23-30, the eTSEC Rx is assumed to be idle | |
1595 | * and the Rx can be safely reset. | |
1596 | */ | |
1597 | res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c); | |
1598 | res &= 0x7f807f80; | |
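/* 0x7f80 selects bits 7-14 within each 16-bit half, so the halfword
 * comparison below checks exactly the two bit ranges named above.
 */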
1599 | if ((res & 0xffff) == (res >> 16)) | |
1600 | return 1; | |
1601 | ||
1602 | return 0; | |
1603 | } | |
0bbaf069 KG |
1604 | |
1605 | /* Halt the receive and transmit queues */ | |
d87eb127 | 1606 | static void gfar_halt_nodisable(struct net_device *dev) |
1da177e4 LT |
1607 | { |
1608 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 1609 | struct gfar __iomem *regs = NULL; |
1da177e4 | 1610 | u32 tempval; |
46ceb60c | 1611 | int i = 0; |
1da177e4 | 1612 | |
46ceb60c SG |
1613 | for (i = 0; i < priv->num_grps; i++) { |
1614 | regs = priv->gfargrp[i].regs; | |
1615 | /* Mask all interrupts */ | |
1616 | gfar_write(®s->imask, IMASK_INIT_CLEAR); | |
1da177e4 | 1617 | |
46ceb60c SG |
1618 | /* Clear all interrupts */ |
1619 | gfar_write(®s->ievent, IEVENT_INIT_CLEAR); | |
1620 | } | |
1da177e4 | 1621 | |
46ceb60c | 1622 | regs = priv->gfargrp[0].regs; |
1da177e4 | 1623 | /* Stop the DMA, and wait for it to stop */ |
f4983704 | 1624 | tempval = gfar_read(®s->dmactrl); |
1da177e4 LT |
1625 | if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) |
1626 | != (DMACTRL_GRS | DMACTRL_GTS)) { | |
511d934f AV |
1627 | int ret; |
1628 | ||
1da177e4 | 1629 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); |
f4983704 | 1630 | gfar_write(®s->dmactrl, tempval); |
1da177e4 | 1631 | |
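/* spin_event_timeout() is a powerpc helper that polls its condition,
 * giving up after roughly the given number of microseconds, and
 * returns the condition's final value; a zero return here means the
 * graceful-stop events never asserted, so we fall back to the A002
 * Rx-idle check above.
 */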
511d934f AV |
1632 | do { |
1633 | ret = spin_event_timeout(((gfar_read(®s->ievent) & | |
1634 | (IEVENT_GRSC | IEVENT_GTSC)) == | |
1635 | (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0); | |
1636 | if (!ret && !(gfar_read(®s->ievent) & IEVENT_GRSC)) | |
1637 | ret = __gfar_is_rx_idle(priv); | |
1638 | } while (!ret); | |
1da177e4 | 1639 | } |
d87eb127 | 1640 | } |
d87eb127 SW |
1641 | |
1642 | /* Halt the receive and transmit queues */ | |
1643 | void gfar_halt(struct net_device *dev) | |
1644 | { | |
1645 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 1646 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
d87eb127 | 1647 | u32 tempval; |
1da177e4 | 1648 | |
2a54adc3 SW |
1649 | gfar_halt_nodisable(dev); |
1650 | ||
1da177e4 LT |
1651 | /* Disable Rx and Tx */ |
1652 | tempval = gfar_read(®s->maccfg1); | |
1653 | tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); | |
1654 | gfar_write(®s->maccfg1, tempval); | |
0bbaf069 KG |
1655 | } |
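/* A minimal usage sketch (illustrative only): reconfiguration paths in
 * this driver bracket register updates with the halt/start pair, e.g.
 *
 *	gfar_halt(dev);
 *	... reprogram MAC/DMA registers ...
 *	gfar_start(dev);
 *
 * as stop_gfar() and the suspend/resume path above do.
 */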
1656 | ||
46ceb60c SG |
1657 | static void free_grp_irqs(struct gfar_priv_grp *grp) |
1658 | { | |
1659 | free_irq(grp->interruptError, grp); | |
1660 | free_irq(grp->interruptTransmit, grp); | |
1661 | free_irq(grp->interruptReceive, grp); | |
1662 | } | |
1663 | ||
0bbaf069 KG |
1664 | void stop_gfar(struct net_device *dev) |
1665 | { | |
1666 | struct gfar_private *priv = netdev_priv(dev); | |
0bbaf069 | 1667 | unsigned long flags; |
46ceb60c | 1668 | int i; |
0bbaf069 | 1669 | |
bb40dcbb AF |
1670 | phy_stop(priv->phydev); |
1671 | ||
a12f801d | 1672 | |
0bbaf069 | 1673 | /* Lock it down */ |
fba4ed03 SG |
1674 | local_irq_save(flags); |
1675 | lock_tx_qs(priv); | |
1676 | lock_rx_qs(priv); | |
0bbaf069 | 1677 | |
0bbaf069 | 1678 | gfar_halt(dev); |
1da177e4 | 1679 | |
fba4ed03 SG |
1680 | unlock_rx_qs(priv); |
1681 | unlock_tx_qs(priv); | |
1682 | local_irq_restore(flags); | |
1da177e4 LT |
1683 | |
1684 | /* Free the IRQs */ | |
b31a1d8b | 1685 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
46ceb60c SG |
1686 | for (i = 0; i < priv->num_grps; i++) |
1687 | free_grp_irqs(&priv->gfargrp[i]); | |
1da177e4 | 1688 | } else { |
46ceb60c SG |
1689 | for (i = 0; i < priv->num_grps; i++) |
1690 | free_irq(priv->gfargrp[i].interruptTransmit, | |
1691 | &priv->gfargrp[i]); | |
1da177e4 LT |
1692 | } |
1693 | ||
1694 | free_skb_resources(priv); | |
1da177e4 LT |
1695 | } |
1696 | ||
fba4ed03 | 1697 | static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) |
1da177e4 | 1698 | { |
1da177e4 | 1699 | struct txbd8 *txbdp; |
fba4ed03 | 1700 | struct gfar_private *priv = netdev_priv(tx_queue->dev); |
4669bc90 | 1701 | int i, j; |
1da177e4 | 1702 | |
a12f801d | 1703 | txbdp = tx_queue->tx_bd_base; |
1da177e4 | 1704 | |
a12f801d SG |
1705 | for (i = 0; i < tx_queue->tx_ring_size; i++) { |
1706 | if (!tx_queue->tx_skbuff[i]) | |
4669bc90 | 1707 | continue; |
1da177e4 | 1708 | |
4826857f | 1709 | dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, |
4669bc90 DH |
1710 | txbdp->length, DMA_TO_DEVICE); |
1711 | txbdp->lstatus = 0; | |
fba4ed03 SG |
1712 | for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; |
1713 | j++) { | |
4669bc90 | 1714 | txbdp++; |
4826857f | 1715 | dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, |
4669bc90 | 1716 | txbdp->length, DMA_TO_DEVICE); |
1da177e4 | 1717 | } |
ad5da7ab | 1718 | txbdp++; |
a12f801d SG |
1719 | dev_kfree_skb_any(tx_queue->tx_skbuff[i]); |
1720 | tx_queue->tx_skbuff[i] = NULL; | |
1da177e4 | 1721 | } |
a12f801d | 1722 | kfree(tx_queue->tx_skbuff); |
fba4ed03 | 1723 | } |
1da177e4 | 1724 | |
fba4ed03 SG |
1725 | static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) |
1726 | { | |
1727 | struct rxbd8 *rxbdp; | |
1728 | struct gfar_private *priv = netdev_priv(rx_queue->dev); | |
1729 | int i; | |
1da177e4 | 1730 | |
fba4ed03 | 1731 | rxbdp = rx_queue->rx_bd_base; |
1da177e4 | 1732 | |
a12f801d SG |
1733 | for (i = 0; i < rx_queue->rx_ring_size; i++) { |
1734 | if (rx_queue->rx_skbuff[i]) { | |
fba4ed03 SG |
1735 | dma_unmap_single(&priv->ofdev->dev, |
1736 | rxbdp->bufPtr, priv->rx_buffer_size, | |
e69edd21 | 1737 | DMA_FROM_DEVICE); |
a12f801d SG |
1738 | dev_kfree_skb_any(rx_queue->rx_skbuff[i]); |
1739 | rx_queue->rx_skbuff[i] = NULL; | |
1da177e4 | 1740 | } |
e69edd21 AV |
1741 | rxbdp->lstatus = 0; |
1742 | rxbdp->bufPtr = 0; | |
1743 | rxbdp++; | |
1da177e4 | 1744 | } |
a12f801d | 1745 | kfree(rx_queue->rx_skbuff); |
fba4ed03 | 1746 | } |
e69edd21 | 1747 | |
fba4ed03 SG |
1748 | /* If there are any tx skbs or rx skbs still around, free them. |
1749 | * Then free tx_skbuff and rx_skbuff */ | |
1750 | static void free_skb_resources(struct gfar_private *priv) | |
1751 | { | |
1752 | struct gfar_priv_tx_q *tx_queue = NULL; | |
1753 | struct gfar_priv_rx_q *rx_queue = NULL; | |
1754 | int i; | |
1755 | ||
1756 | /* Go through all the buffer descriptors and free their data buffers */ | |
1757 | for (i = 0; i < priv->num_tx_queues; i++) { | |
1758 | tx_queue = priv->tx_queue[i]; | |
7c0d10d3 | 1759 | if (tx_queue->tx_skbuff)
fba4ed03 SG |
1760 | free_skb_tx_queue(tx_queue); |
1761 | } | |
1762 | ||
1763 | for (i = 0; i < priv->num_rx_queues; i++) { | |
1764 | rx_queue = priv->rx_queue[i]; | |
7c0d10d3 | 1765 | if (rx_queue->rx_skbuff)
fba4ed03 SG |
1766 | free_skb_rx_queue(rx_queue); |
1767 | } | |
1768 | ||
1769 | dma_free_coherent(&priv->ofdev->dev, | |
1770 | sizeof(struct txbd8) * priv->total_tx_ring_size + | |
1771 | sizeof(struct rxbd8) * priv->total_rx_ring_size, | |
1772 | priv->tx_queue[0]->tx_bd_base, | |
1773 | priv->tx_queue[0]->tx_bd_dma_base); | |
7df9c43f | 1774 | skb_queue_purge(&priv->rx_recycle); |
1da177e4 LT |
1775 | } |
1776 | ||
0bbaf069 KG |
1777 | void gfar_start(struct net_device *dev) |
1778 | { | |
1779 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 1780 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
0bbaf069 | 1781 | u32 tempval; |
46ceb60c | 1782 | int i = 0; |
0bbaf069 KG |
1783 | |
1784 | /* Enable Rx and Tx in MACCFG1 */ | |
1785 | tempval = gfar_read(®s->maccfg1); | |
1786 | tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); | |
1787 | gfar_write(®s->maccfg1, tempval); | |
1788 | ||
1789 | /* Initialize DMACTRL to have WWR and WOP */ | |
f4983704 | 1790 | tempval = gfar_read(®s->dmactrl); |
0bbaf069 | 1791 | tempval |= DMACTRL_INIT_SETTINGS; |
f4983704 | 1792 | gfar_write(®s->dmactrl, tempval); |
0bbaf069 | 1793 | |
0bbaf069 | 1794 | /* Make sure we aren't stopped */ |
f4983704 | 1795 | tempval = gfar_read(®s->dmactrl); |
0bbaf069 | 1796 | tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); |
f4983704 | 1797 | gfar_write(®s->dmactrl, tempval); |
0bbaf069 | 1798 | |
46ceb60c SG |
1799 | for (i = 0; i < priv->num_grps; i++) { |
1800 | regs = priv->gfargrp[i].regs; | |
1801 | /* Clear THLT/RHLT, so that the DMA starts polling now */ | |
1802 | gfar_write(®s->tstat, priv->gfargrp[i].tstat); | |
1803 | gfar_write(®s->rstat, priv->gfargrp[i].rstat); | |
1804 | /* Unmask the interrupts we look for */ | |
1805 | gfar_write(®s->imask, IMASK_DEFAULT); | |
1806 | } | |
12dea57b | 1807 | |
1ae5dc34 | 1808 | dev->trans_start = jiffies; /* prevent tx timeout */ |
0bbaf069 KG |
1809 | } |
1810 | ||
46ceb60c | 1811 | void gfar_configure_coalescing(struct gfar_private *priv, |
18294ad1 | 1812 | unsigned long tx_mask, unsigned long rx_mask) |
1da177e4 | 1813 | { |
46ceb60c | 1814 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
18294ad1 | 1815 | u32 __iomem *baddr; |
46ceb60c | 1816 | int i = 0; |
1da177e4 | 1817 | |
46ceb60c SG |
1818 | /* Backward compatible case -- even if we enable
1819 | * multiple queues, there's only a single reg to program
1820 | */ | |
1821 | gfar_write(®s->txic, 0); | |
1822 | if (likely(priv->tx_queue[0]->txcoalescing))
1823 | gfar_write(®s->txic, priv->tx_queue[0]->txic); | |
1da177e4 | 1824 | |
46ceb60c SG |
1825 | gfar_write(®s->rxic, 0); |
1826 | if (unlikely(priv->rx_queue[0]->rxcoalescing))
1827 | gfar_write(®s->rxic, priv->rx_queue[0]->rxic); | |
815b97c6 | 1828 | |
46ceb60c SG |
1829 | if (priv->mode == MQ_MG_MODE) { |
1830 | baddr = ®s->txic0; | |
984b3f57 | 1831 | for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { |
46ceb60c SG |
1832 | if (likely(priv->tx_queue[i]->txcoalescing)) { |
1833 | gfar_write(baddr + i, 0); | |
1834 | gfar_write(baddr + i, priv->tx_queue[i]->txic); | |
1835 | } | |
1836 | } | |
1837 | ||
1838 | baddr = ®s->rxic0; | |
984b3f57 | 1839 | for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { |
46ceb60c SG |
1840 | if (likely(priv->rx_queue[i]->rxcoalescing)) { |
1841 | gfar_write(baddr + i, 0); | |
1842 | gfar_write(baddr + i, priv->rx_queue[i]->rxic); | |
1843 | } | |
1844 | } | |
1845 | } | |
1846 | } | |
1847 | ||
1848 | static int register_grp_irqs(struct gfar_priv_grp *grp) | |
1849 | { | |
1850 | struct gfar_private *priv = grp->priv; | |
1851 | struct net_device *dev = priv->ndev; | |
1852 | int err; | |
1da177e4 | 1853 | |
1da177e4 LT |
1854 | /* If the device has multiple interrupts, register for |
1855 | * them. Otherwise, only register for the one */ | |
b31a1d8b | 1856 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
0bbaf069 | 1857 | /* Install our interrupt handlers for Error, |
1da177e4 | 1858 | * Transmit, and Receive */ |
46ceb60c SG |
1859 | if ((err = request_irq(grp->interruptError, gfar_error, 0, |
1860 | grp->int_name_er,grp)) < 0) { | |
59deab26 JP |
1861 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
1862 | grp->interruptError); | |
46ceb60c | 1863 | |
2145f1af | 1864 | goto err_irq_fail; |
1da177e4 LT |
1865 | } |
1866 | ||
46ceb60c SG |
1867 | if ((err = request_irq(grp->interruptTransmit, gfar_transmit, |
1868 | 0, grp->int_name_tx, grp)) < 0) { | |
59deab26 JP |
1869 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
1870 | grp->interruptTransmit); | |
1da177e4 LT |
1871 | goto tx_irq_fail; |
1872 | } | |
1873 | ||
46ceb60c SG |
1874 | if ((err = request_irq(grp->interruptReceive, gfar_receive, 0, |
1875 | grp->int_name_rx, grp)) < 0) { | |
59deab26 JP |
1876 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
1877 | grp->interruptReceive); | |
1da177e4 LT |
1878 | goto rx_irq_fail; |
1879 | } | |
1880 | } else { | |
46ceb60c SG |
1881 | if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0, |
1882 | grp->int_name_tx, grp)) < 0) { | |
59deab26 JP |
1883 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
1884 | grp->interruptTransmit); | |
1da177e4 LT |
1885 | goto err_irq_fail; |
1886 | } | |
1887 | } | |
1888 | ||
46ceb60c SG |
1889 | return 0; |
1890 | ||
1891 | rx_irq_fail: | |
1892 | free_irq(grp->interruptTransmit, grp); | |
1893 | tx_irq_fail: | |
1894 | free_irq(grp->interruptError, grp); | |
1895 | err_irq_fail: | |
1896 | return err; | |
1897 | ||
1898 | } | |
1899 | ||
1900 | /* Bring the controller up and running */ | |
1901 | int startup_gfar(struct net_device *ndev) | |
1902 | { | |
1903 | struct gfar_private *priv = netdev_priv(ndev); | |
1904 | struct gfar __iomem *regs = NULL; | |
1905 | int err, i, j; | |
1906 | ||
1907 | for (i = 0; i < priv->num_grps; i++) { | |
1908 | regs = priv->gfargrp[i].regs;
1909 | gfar_write(®s->imask, IMASK_INIT_CLEAR); | |
1910 | } | |
1911 | ||
1912 | regs = priv->gfargrp[0].regs;
1913 | err = gfar_alloc_skb_resources(ndev); | |
1914 | if (err) | |
1915 | return err; | |
1916 | ||
1917 | gfar_init_mac(ndev); | |
1918 | ||
1919 | for (i = 0; i < priv->num_grps; i++) { | |
1920 | err = register_grp_irqs(&priv->gfargrp[i]); | |
1921 | if (err) { | |
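/* Unwind: release the IRQs of the groups registered so far */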
1922 | for (j = 0; j < i; j++) | |
1923 | free_grp_irqs(&priv->gfargrp[j]); | |
ff76015f | 1924 | goto irq_fail; |
46ceb60c SG |
1925 | } |
1926 | } | |
1927 | ||
7f7f5316 | 1928 | /* Start the controller */ |
ccc05c6e | 1929 | gfar_start(ndev); |
1da177e4 | 1930 | |
826aa4a0 AV |
1931 | phy_start(priv->phydev); |
1932 | ||
46ceb60c SG |
1933 | gfar_configure_coalescing(priv, 0xFF, 0xFF); |
1934 | ||
1da177e4 LT |
1935 | return 0; |
1936 | ||
46ceb60c | 1937 | irq_fail: |
e69edd21 | 1938 | free_skb_resources(priv); |
1da177e4 LT |
1939 | return err; |
1940 | } | |
1941 | ||
1942 | /* Called when something needs to use the ethernet device */ | |
1943 | /* Returns 0 for success. */ | |
1944 | static int gfar_enet_open(struct net_device *dev) | |
1945 | { | |
94e8cc35 | 1946 | struct gfar_private *priv = netdev_priv(dev); |
1da177e4 LT |
1947 | int err; |
1948 | ||
46ceb60c | 1949 | enable_napi(priv); |
bea3348e | 1950 | |
0fd56bb5 AF |
1951 | skb_queue_head_init(&priv->rx_recycle); |
1952 | ||
1da177e4 LT |
1953 | /* Initialize a bunch of registers */ |
1954 | init_registers(dev); | |
1955 | ||
1956 | gfar_set_mac_address(dev); | |
1957 | ||
1958 | err = init_phy(dev); | |
1959 | ||
a12f801d | 1960 | if (err) { |
46ceb60c | 1961 | disable_napi(priv); |
1da177e4 | 1962 | return err; |
bea3348e | 1963 | } |
1da177e4 LT |
1964 | |
1965 | err = startup_gfar(dev); | |
db0e8e3f | 1966 | if (err) { |
46ceb60c | 1967 | disable_napi(priv); |
db0e8e3f AV |
1968 | return err; |
1969 | } | |
1da177e4 | 1970 | |
fba4ed03 | 1971 | netif_tx_start_all_queues(dev); |
1da177e4 | 1972 | |
2884e5cc AV |
1973 | device_set_wakeup_enable(&dev->dev, priv->wol_en); |
1974 | ||
1da177e4 LT |
1975 | return err; |
1976 | } | |
1977 | ||
54dc79fe | 1978 | static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) |
0bbaf069 | 1979 | { |
54dc79fe | 1980 | struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN); |
6c31d55f KG |
1981 | |
1982 | memset(fcb, 0, GMAC_FCB_LEN); | |
0bbaf069 | 1983 | |
0bbaf069 KG |
1984 | return fcb; |
1985 | } | |
1986 | ||
1987 | static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb) | |
1988 | { | |
7f7f5316 | 1989 | u8 flags = 0; |
0bbaf069 KG |
1990 | |
1991 | /* If we're here, it's an IP packet with a TCP or UDP
1992 | * payload. We set it to checksum, using a pseudo-header | |
1993 | * we provide | |
1994 | */ | |
7f7f5316 | 1995 | flags = TXFCB_DEFAULT; |
0bbaf069 | 1996 | |
7f7f5316 AF |
1997 | /* Tell the controller what the protocol is */ |
1998 | /* And provide the already calculated phcs */ | |
eddc9ec5 | 1999 | if (ip_hdr(skb)->protocol == IPPROTO_UDP) { |
7f7f5316 | 2000 | flags |= TXFCB_UDP; |
4bedb452 | 2001 | fcb->phcs = udp_hdr(skb)->check; |
7f7f5316 | 2002 | } else |
8da32de5 | 2003 | fcb->phcs = tcp_hdr(skb)->check; |
0bbaf069 KG |
2004 | |
2005 | /* l3os is the distance between the start of the | |
2006 | * frame (skb->data) and the start of the IP hdr. | |
2007 | * l4os is the distance between the start of the | |
2008 | * l3 hdr and the l4 hdr */ | |
bbe735e4 | 2009 | fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN); |
cfe1fc77 | 2010 | fcb->l4os = skb_network_header_len(skb); |
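/* For an untagged IPv4 frame with no IP options this works out to
 * l3os == ETH_HLEN (14) and l4os == 20; the FCB that was pushed in
 * front of the frame cancels out of the network-header offset.
 */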
0bbaf069 | 2011 | |
7f7f5316 | 2012 | fcb->flags = flags; |
0bbaf069 KG |
2013 | } |
2014 | ||
7f7f5316 | 2015 | inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
0bbaf069 | 2016 | { |
7f7f5316 | 2017 | fcb->flags |= TXFCB_VLN; |
0bbaf069 KG |
2018 | fcb->vlctl = vlan_tx_tag_get(skb); |
2019 | } | |
2020 | ||
4669bc90 DH |
2021 | static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, |
2022 | struct txbd8 *base, int ring_size) | |
2023 | { | |
2024 | struct txbd8 *new_bd = bdp + stride; | |
2025 | ||
2026 | return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; | |
2027 | } | |
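/* For example, with ring_size == 8 and bdp == base + 6,
 * skip_txbd(bdp, 3, base, 8) wraps past the end and yields base + 1.
 */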
2028 | ||
2029 | static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, | |
2030 | int ring_size) | |
2031 | { | |
2032 | return skip_txbd(bdp, 1, base, ring_size); | |
2033 | } | |
2034 | ||
1da177e4 LT |
2035 | /* This is called by the kernel when a frame is ready for transmission. */ |
2036 | /* It is pointed to by the dev->hard_start_xmit function pointer */ | |
2037 | static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
2038 | { | |
2039 | struct gfar_private *priv = netdev_priv(dev); | |
a12f801d | 2040 | struct gfar_priv_tx_q *tx_queue = NULL; |
fba4ed03 | 2041 | struct netdev_queue *txq; |
f4983704 | 2042 | struct gfar __iomem *regs = NULL; |
0bbaf069 | 2043 | struct txfcb *fcb = NULL; |
f0ee7acf | 2044 | struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; |
5a5efed4 | 2045 | u32 lstatus; |
f0ee7acf | 2046 | int i, rq = 0, do_tstamp = 0; |
4669bc90 | 2047 | u32 bufaddr; |
fef6108d | 2048 | unsigned long flags; |
f0ee7acf | 2049 | unsigned int nr_frags, nr_txbds, length; |
fba4ed03 | 2050 | |
deb90eac AV |
2051 | /* |
2052 | * TOE=1 frames larger than 2500 bytes may see excess delays | |
2053 | * before start of transmission. | |
2054 | */ | |
2055 | if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) && | |
2056 | skb->ip_summed == CHECKSUM_PARTIAL && | |
2057 | skb->len > 2500)) { | |
2058 | int ret; | |
2059 | ||
2060 | ret = skb_checksum_help(skb); | |
2061 | if (ret) | |
2062 | return ret; | |
2063 | } | |
2064 | ||
fba4ed03 SG |
2065 | rq = skb->queue_mapping; |
2066 | tx_queue = priv->tx_queue[rq]; | |
2067 | txq = netdev_get_tx_queue(dev, rq); | |
a12f801d | 2068 | base = tx_queue->tx_bd_base; |
46ceb60c | 2069 | regs = tx_queue->grp->regs; |
f0ee7acf MR |
2070 | |
2071 | /* check if time stamp should be generated */ | |
2244d07b OH |
2072 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && |
2073 | priv->hwts_tx_en)) | |
f0ee7acf | 2074 | do_tstamp = 1; |
4669bc90 | 2075 | |
5b28beaf LY |
2076 | /* make space for additional header when fcb is needed */ |
2077 | if (((skb->ip_summed == CHECKSUM_PARTIAL) || | |
eab6d18d | 2078 | vlan_tx_tag_present(skb) || |
f0ee7acf | 2079 | unlikely(do_tstamp)) && |
5b28beaf | 2080 | (skb_headroom(skb) < GMAC_FCB_LEN)) { |
54dc79fe SH |
2081 | struct sk_buff *skb_new; |
2082 | ||
2083 | skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN); | |
2084 | if (!skb_new) { | |
2085 | dev->stats.tx_errors++; | |
bd14ba84 | 2086 | kfree_skb(skb); |
54dc79fe SH |
2087 | return NETDEV_TX_OK; |
2088 | } | |
2089 | kfree_skb(skb); | |
2090 | skb = skb_new; | |
2091 | } | |
2092 | ||
4669bc90 DH |
2093 | /* total number of fragments in the SKB */ |
2094 | nr_frags = skb_shinfo(skb)->nr_frags; | |
2095 | ||
f0ee7acf MR |
2096 | /* calculate the required number of TxBDs for this skb */ |
2097 | if (unlikely(do_tstamp)) | |
2098 | nr_txbds = nr_frags + 2; | |
2099 | else | |
2100 | nr_txbds = nr_frags + 1; | |
2101 | ||
4669bc90 | 2102 | /* check if there is space to queue this packet */ |
f0ee7acf | 2103 | if (nr_txbds > tx_queue->num_txbdfree) { |
4669bc90 | 2104 | /* no space, stop the queue */ |
fba4ed03 | 2105 | netif_tx_stop_queue(txq); |
4669bc90 | 2106 | dev->stats.tx_fifo_errors++; |
4669bc90 DH |
2107 | return NETDEV_TX_BUSY; |
2108 | } | |
1da177e4 LT |
2109 | |
2110 | /* Update transmit stats */ | |
1ac9ad13 ED |
2111 | tx_queue->stats.tx_bytes += skb->len; |
2112 | tx_queue->stats.tx_packets++; | |
1da177e4 | 2113 | |
a12f801d | 2114 | txbdp = txbdp_start = tx_queue->cur_tx; |
f0ee7acf MR |
2115 | lstatus = txbdp->lstatus; |
2116 | ||
2117 | /* Time stamp insertion requires one additional TxBD */ | |
2118 | if (unlikely(do_tstamp)) | |
2119 | txbdp_tstamp = txbdp = next_txbd(txbdp, base, | |
2120 | tx_queue->tx_ring_size); | |
1da177e4 | 2121 | |
4669bc90 | 2122 | if (nr_frags == 0) { |
f0ee7acf MR |
2123 | if (unlikely(do_tstamp)) |
2124 | txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST | | |
2125 | TXBD_INTERRUPT); | |
2126 | else | |
2127 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | |
4669bc90 DH |
2128 | } else { |
2129 | /* Place the fragment addresses and lengths into the TxBDs */ | |
2130 | for (i = 0; i < nr_frags; i++) { | |
2131 | /* Point at the next BD, wrapping as needed */ | |
a12f801d | 2132 | txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
4669bc90 DH |
2133 | |
2134 | length = skb_shinfo(skb)->frags[i].size; | |
2135 | ||
2136 | lstatus = txbdp->lstatus | length | | |
2137 | BD_LFLAG(TXBD_READY); | |
2138 | ||
2139 | /* Handle the last BD specially */ | |
2140 | if (i == nr_frags - 1) | |
2141 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | |
1da177e4 | 2142 | |
4826857f | 2143 | bufaddr = dma_map_page(&priv->ofdev->dev, |
4669bc90 DH |
2144 | skb_shinfo(skb)->frags[i].page, |
2145 | skb_shinfo(skb)->frags[i].page_offset, | |
2146 | length, | |
2147 | DMA_TO_DEVICE); | |
2148 | ||
2149 | /* set the TxBD length and buffer pointer */ | |
2150 | txbdp->bufPtr = bufaddr; | |
2151 | txbdp->lstatus = lstatus; | |
2152 | } | |
2153 | ||
2154 | lstatus = txbdp_start->lstatus; | |
2155 | } | |
1da177e4 | 2156 | |
0bbaf069 | 2157 | /* Set up checksumming */ |
12dea57b | 2158 | if (CHECKSUM_PARTIAL == skb->ip_summed) { |
54dc79fe | 2159 | fcb = gfar_add_fcb(skb); |
4363c2fd AD |
2160 | /* as specified by errata */ |
2161 | if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) | |
2162 | && ((unsigned long)fcb % 0x20) > 0x18)) { | |
2163 | __skb_pull(skb, GMAC_FCB_LEN); | |
2164 | skb_checksum_help(skb); | |
2165 | } else { | |
2166 | lstatus |= BD_LFLAG(TXBD_TOE); | |
2167 | gfar_tx_checksum(skb, fcb); | |
2168 | } | |
0bbaf069 KG |
2169 | } |
2170 | ||
eab6d18d | 2171 | if (vlan_tx_tag_present(skb)) { |
54dc79fe SH |
2172 | if (unlikely(NULL == fcb)) { |
2173 | fcb = gfar_add_fcb(skb); | |
5a5efed4 | 2174 | lstatus |= BD_LFLAG(TXBD_TOE); |
7f7f5316 | 2175 | } |
54dc79fe SH |
2176 | |
2177 | gfar_tx_vlan(skb, fcb); | |
0bbaf069 KG |
2178 | } |
2179 | ||
f0ee7acf MR |
2180 | /* Setup tx hardware time stamping if requested */ |
2181 | if (unlikely(do_tstamp)) { | |
2244d07b | 2182 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
f0ee7acf MR |
2183 | if (fcb == NULL) |
2184 | fcb = gfar_add_fcb(skb); | |
2185 | fcb->ptp = 1; | |
2186 | lstatus |= BD_LFLAG(TXBD_TOE); | |
2187 | } | |
2188 | ||
4826857f | 2189 | txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, |
4669bc90 | 2190 | skb_headlen(skb), DMA_TO_DEVICE); |
1da177e4 | 2191 | |
f0ee7acf MR |
2192 | /* |
2193 | * If time stamping is requested one additional TxBD must be set up. The | |
2194 | * first TxBD points to the FCB and must have a data length of | |
2195 | * GMAC_FCB_LEN. The second TxBD points to the actual frame data with | |
2196 | * the full frame length. | |
2197 | */ | |
2198 | if (unlikely(do_tstamp)) { | |
2199 | txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN; | |
2200 | txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) | | |
2201 | (skb_headlen(skb) - GMAC_FCB_LEN); | |
2202 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; | |
2203 | } else { | |
2204 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); | |
2205 | } | |
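/* Resulting layout for a timestamped, unfragmented frame:
 *
 *	txbdp_start  -> FCB only	(length == GMAC_FCB_LEN)
 *	txbdp_tstamp -> frame data	(length == headlen - GMAC_FCB_LEN)
 *
 * matching the two-TxBD scheme described above.
 */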
1da177e4 | 2206 | |
a3bc1f11 AV |
2207 | /* |
2208 | * We can work in parallel with gfar_clean_tx_ring(), except | |
2209 | * when modifying num_txbdfree. Note that we didn't grab the lock | |
2210 | * when we were reading the num_txbdfree and checking for available | |
2211 | * space, that's because outside of this function it can only grow, | |
2212 | * and once we've got needed space, it cannot suddenly disappear. | |
2213 | * | |
2214 | * The lock also protects us from gfar_error(), which can modify | |
2215 | * regs->tstat and thus retrigger the transfers, which is why we | |
2216 | * also must grab the lock before setting ready bit for the first | |
2217 | * to be transmitted BD. | |
2218 | */ | |
2219 | spin_lock_irqsave(&tx_queue->txlock, flags); | |
2220 | ||
4669bc90 DH |
2221 | /* |
2222 | * The powerpc-specific eieio() is used, as wmb() has too strong | |
3b6330ce SW |
2223 | * semantics (it requires synchronization between cacheable and |
2224 | * uncacheable mappings, which eieio doesn't provide and which we | |
2225 | * don't need), thus requiring a more expensive sync instruction. At | |
2226 | * some point, the set of architecture-independent barrier functions | |
2227 | * should be expanded to include weaker barriers. | |
2228 | */ | |
3b6330ce | 2229 | eieio(); |
7f7f5316 | 2230 | |
4669bc90 DH |
2231 | txbdp_start->lstatus = lstatus; |
2232 | ||
0eddba52 AV |
2233 | eieio(); /* force lstatus write before tx_skbuff */ |
2234 | ||
2235 | tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; | |
2236 | ||
4669bc90 DH |
2237 | /* Update the current skb pointer to the next entry we will use |
2238 | * (wrapping if necessary) */ | |
a12f801d SG |
2239 | tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & |
2240 | TX_RING_MOD_MASK(tx_queue->tx_ring_size); | |
4669bc90 | 2241 | |
a12f801d | 2242 | tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
4669bc90 DH |
2243 | |
2244 | /* reduce TxBD free count */ | |
f0ee7acf | 2245 | tx_queue->num_txbdfree -= (nr_txbds); |
1da177e4 LT |
2246 | |
2247 | /* If the next BD still needs to be cleaned up, then the bds | |
2248 | are full. We need to tell the kernel to stop sending us stuff. */ | |
a12f801d | 2249 | if (!tx_queue->num_txbdfree) { |
fba4ed03 | 2250 | netif_tx_stop_queue(txq); |
1da177e4 | 2251 | |
09f75cd7 | 2252 | dev->stats.tx_fifo_errors++; |
1da177e4 LT |
2253 | } |
2254 | ||
1da177e4 | 2255 | /* Tell the DMA to go go go */ |
fba4ed03 | 2256 | gfar_write(®s->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); |
1da177e4 LT |
2257 | |
2258 | /* Unlock priv */ | |
a12f801d | 2259 | spin_unlock_irqrestore(&tx_queue->txlock, flags); |
1da177e4 | 2260 | |
54dc79fe | 2261 | return NETDEV_TX_OK; |
1da177e4 LT |
2262 | } |
2263 | ||
2264 | /* Stops the kernel queue, and halts the controller */ | |
2265 | static int gfar_close(struct net_device *dev) | |
2266 | { | |
2267 | struct gfar_private *priv = netdev_priv(dev); | |
bea3348e | 2268 | |
46ceb60c | 2269 | disable_napi(priv); |
bea3348e | 2270 | |
ab939905 | 2271 | cancel_work_sync(&priv->reset_task); |
1da177e4 LT |
2272 | stop_gfar(dev); |
2273 | ||
bb40dcbb AF |
2274 | /* Disconnect from the PHY */ |
2275 | phy_disconnect(priv->phydev); | |
2276 | priv->phydev = NULL; | |
1da177e4 | 2277 | |
fba4ed03 | 2278 | netif_tx_stop_all_queues(dev); |
1da177e4 LT |
2279 | |
2280 | return 0; | |
2281 | } | |
2282 | ||
1da177e4 | 2283 | /* Changes the mac address if the controller is not running. */ |
f162b9d5 | 2284 | static int gfar_set_mac_address(struct net_device *dev) |
1da177e4 | 2285 | { |
7f7f5316 | 2286 | gfar_set_mac_for_addr(dev, 0, dev->dev_addr); |
1da177e4 LT |
2287 | |
2288 | return 0; | |
2289 | } | |
2290 | ||
f3dc1586 SP |
2291 | /* Check if rx parser should be activated */ |
2292 | void gfar_check_rx_parser_mode(struct gfar_private *priv) | |
2293 | { | |
2294 | struct gfar __iomem *regs; | |
2295 | u32 tempval; | |
2296 | ||
2297 | regs = priv->gfargrp[0].regs; | |
2298 | ||
2299 | tempval = gfar_read(®s->rctrl); | |
2300 | /* If parse is no longer required, then disable parser */ | |
2301 | if (tempval & RCTRL_REQ_PARSER) | |
2302 | tempval |= RCTRL_PRSDEP_INIT; | |
2303 | else | |
2304 | tempval &= ~RCTRL_PRSDEP_INIT; | |
2305 | gfar_write(®s->rctrl, tempval); | |
2306 | } | |
2307 | ||
0bbaf069 | 2308 | /* Enables and disables VLAN insertion/extraction */ |
87c288c6 | 2309 | void gfar_vlan_mode(struct net_device *dev, u32 features) |
0bbaf069 KG |
2310 | { |
2311 | struct gfar_private *priv = netdev_priv(dev); | |
f4983704 | 2312 | struct gfar __iomem *regs = NULL; |
0bbaf069 KG |
2313 | unsigned long flags; |
2314 | u32 tempval; | |
2315 | ||
46ceb60c | 2316 | regs = priv->gfargrp[0].regs; |
fba4ed03 SG |
2317 | local_irq_save(flags); |
2318 | lock_rx_qs(priv); | |
0bbaf069 | 2319 | |
87c288c6 | 2320 | if (features & NETIF_F_HW_VLAN_TX) { |
0bbaf069 | 2321 | /* Enable VLAN tag insertion */ |
f4983704 | 2322 | tempval = gfar_read(®s->tctrl); |
0bbaf069 | 2323 | tempval |= TCTRL_VLINS; |
f4983704 | 2324 | gfar_write(®s->tctrl, tempval); |
0bbaf069 KG |
2325 | } else { |
2326 | /* Disable VLAN tag insertion */ | |
f4983704 | 2327 | tempval = gfar_read(®s->tctrl); |
0bbaf069 | 2328 | tempval &= ~TCTRL_VLINS; |
f4983704 | 2329 | gfar_write(®s->tctrl, tempval); |
87c288c6 | 2330 | } |
0bbaf069 | 2331 | |
87c288c6 JP |
2332 | if (features & NETIF_F_HW_VLAN_RX) { |
2333 | /* Enable VLAN tag extraction */ | |
2334 | tempval = gfar_read(®s->rctrl); | |
2335 | tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); | |
2336 | gfar_write(®s->rctrl, tempval); | |
2337 | } else { | |
0bbaf069 | 2338 | /* Disable VLAN tag extraction */ |
f4983704 | 2339 | tempval = gfar_read(®s->rctrl); |
0bbaf069 | 2340 | tempval &= ~RCTRL_VLEX; |
f4983704 | 2341 | gfar_write(®s->rctrl, tempval); |
f3dc1586 SP |
2342 | |
2343 | gfar_check_rx_parser_mode(priv); | |
0bbaf069 KG |
2344 | } |
2345 | ||
77ecaf2d DH |
2346 | gfar_change_mtu(dev, dev->mtu); |
2347 | ||
fba4ed03 SG |
2348 | unlock_rx_qs(priv); |
2349 | local_irq_restore(flags); | |
0bbaf069 KG |
2350 | } |
2351 | ||
1da177e4 LT |
2352 | static int gfar_change_mtu(struct net_device *dev, int new_mtu) |
2353 | { | |
2354 | int tempsize, tempval; | |
2355 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 2356 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
1da177e4 | 2357 | int oldsize = priv->rx_buffer_size; |
0bbaf069 KG |
2358 | int frame_size = new_mtu + ETH_HLEN; |
2359 | ||
87c288c6 | 2360 | if (gfar_is_vlan_on(priv)) |
faa89577 | 2361 | frame_size += VLAN_HLEN; |
0bbaf069 | 2362 | |
1da177e4 | 2363 | if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { |
59deab26 | 2364 | netif_err(priv, drv, dev, "Invalid MTU setting\n"); |
1da177e4 LT |
2365 | return -EINVAL; |
2366 | } | |
2367 | ||
77ecaf2d DH |
2368 | if (gfar_uses_fcb(priv)) |
2369 | frame_size += GMAC_FCB_LEN; | |
2370 | ||
2371 | frame_size += priv->padding; | |
2372 | ||
1da177e4 LT |
2373 | tempsize = |
2374 | (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + | |
2375 | INCREMENTAL_BUFFER_SIZE; | |
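/* This rounds frame_size up to the next multiple of
 * INCREMENTAL_BUFFER_SIZE (which must be a power of two for the mask
 * to work); e.g. a 1522-byte frame with a 512-byte increment yields
 * a 1536-byte buffer.
 */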
2376 | ||
2377 | /* Only stop and start the controller if it isn't already | |
7f7f5316 | 2378 | * stopped, and we changed something */ |
1da177e4 LT |
2379 | if ((oldsize != tempsize) && (dev->flags & IFF_UP)) |
2380 | stop_gfar(dev); | |
2381 | ||
2382 | priv->rx_buffer_size = tempsize; | |
2383 | ||
2384 | dev->mtu = new_mtu; | |
2385 | ||
f4983704 SG |
2386 | gfar_write(®s->mrblr, priv->rx_buffer_size); |
2387 | gfar_write(®s->maxfrm, priv->rx_buffer_size); | |
1da177e4 LT |
2388 | |
2389 | /* If the mtu is larger than the max size for standard | |
2390 | * ethernet frames (ie, a jumbo frame), then set maccfg2 | |
2391 | * to allow huge frames, and to check the length */ | |
f4983704 | 2392 | tempval = gfar_read(®s->maccfg2); |
1da177e4 | 2393 | |
7d350977 AV |
2394 | if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || |
2395 | gfar_has_errata(priv, GFAR_ERRATA_74)) | |
1da177e4 LT |
2396 | tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); |
2397 | else | |
2398 | tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); | |
2399 | ||
f4983704 | 2400 | gfar_write(®s->maccfg2, tempval); |
1da177e4 LT |
2401 | |
2402 | if ((oldsize != tempsize) && (dev->flags & IFF_UP)) | |
2403 | startup_gfar(dev); | |
2404 | ||
2405 | return 0; | |
2406 | } | |
2407 | ||
ab939905 | 2408 | /* gfar_reset_task gets scheduled when a packet has not been |
1da177e4 LT |
2409 | * transmitted after a set amount of time. |
2410 | * For now, assume that clearing out all the structures, and | |
ab939905 SS |
2411 | * starting over will fix the problem. |
2412 | */ | |
2413 | static void gfar_reset_task(struct work_struct *work) | |
1da177e4 | 2414 | { |
ab939905 SS |
2415 | struct gfar_private *priv = container_of(work, struct gfar_private, |
2416 | reset_task); | |
4826857f | 2417 | struct net_device *dev = priv->ndev; |
1da177e4 LT |
2418 | |
2419 | if (dev->flags & IFF_UP) { | |
fba4ed03 | 2420 | netif_tx_stop_all_queues(dev); |
1da177e4 LT |
2421 | stop_gfar(dev); |
2422 | startup_gfar(dev); | |
fba4ed03 | 2423 | netif_tx_start_all_queues(dev); |
1da177e4 LT |
2424 | } |
2425 | ||
263ba320 | 2426 | netif_tx_schedule_all(dev); |
1da177e4 LT |
2427 | } |
2428 | ||
ab939905 SS |
2429 | static void gfar_timeout(struct net_device *dev) |
2430 | { | |
2431 | struct gfar_private *priv = netdev_priv(dev); | |
2432 | ||
2433 | dev->stats.tx_errors++; | |
2434 | schedule_work(&priv->reset_task); | |
2435 | } | |
2436 | ||
acbc0f03 EL |
2437 | static void gfar_align_skb(struct sk_buff *skb) |
2438 | { | |
2439 | /* The data buffer must be properly aligned. Reserve just enough
2440 | * bytes to round skb->data up to the next RXBUF_ALIGNMENT boundary
2441 | */ | |
2442 | skb_reserve(skb, RXBUF_ALIGNMENT - | |
2443 | (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); | |
2444 | } | |
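/* Note that a buffer which is already aligned gets pushed forward by a
 * full RXBUF_ALIGNMENT stride rather than left in place; the extra
 * headroom is paid for at allocation time in gfar_alloc_skb().
 */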
2445 | ||
1da177e4 | 2446 | /* Interrupt Handler for Transmit complete */ |
a12f801d | 2447 | static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) |
1da177e4 | 2448 | { |
a12f801d | 2449 | struct net_device *dev = tx_queue->dev; |
d080cd63 | 2450 | struct gfar_private *priv = netdev_priv(dev); |
a12f801d | 2451 | struct gfar_priv_rx_q *rx_queue = NULL; |
f0ee7acf | 2452 | struct txbd8 *bdp, *next = NULL; |
4669bc90 | 2453 | struct txbd8 *lbdp = NULL; |
a12f801d | 2454 | struct txbd8 *base = tx_queue->tx_bd_base; |
4669bc90 DH |
2455 | struct sk_buff *skb; |
2456 | int skb_dirtytx; | |
a12f801d | 2457 | int tx_ring_size = tx_queue->tx_ring_size; |
f0ee7acf | 2458 | int frags = 0, nr_txbds = 0; |
4669bc90 | 2459 | int i; |
d080cd63 | 2460 | int howmany = 0; |
4669bc90 | 2461 | u32 lstatus; |
f0ee7acf | 2462 | size_t buflen; |
1da177e4 | 2463 | |
fba4ed03 | 2464 | rx_queue = priv->rx_queue[tx_queue->qindex]; |
a12f801d SG |
2465 | bdp = tx_queue->dirty_tx; |
2466 | skb_dirtytx = tx_queue->skb_dirtytx; | |
1da177e4 | 2467 | |
a12f801d | 2468 | while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { |
a3bc1f11 AV |
2469 | unsigned long flags; |
2470 | ||
4669bc90 | 2471 | frags = skb_shinfo(skb)->nr_frags; |
f0ee7acf MR |
2472 | |
2473 | /* | |
2474 | * When time stamping, one additional TxBD must be freed. | |
2475 | * Also, we need to dma_unmap_single() the TxPAL. | |
2476 | */ | |
2244d07b | 2477 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) |
f0ee7acf MR |
2478 | nr_txbds = frags + 2; |
2479 | else | |
2480 | nr_txbds = frags + 1; | |
2481 | ||
2482 | lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); | |
1da177e4 | 2483 | |
4669bc90 | 2484 | lstatus = lbdp->lstatus; |
1da177e4 | 2485 | |
4669bc90 DH |
2486 | /* Only clean completed frames */ |
2487 | if ((lstatus & BD_LFLAG(TXBD_READY)) && | |
2488 | (lstatus & BD_LENGTH_MASK)) | |
2489 | break; | |
2490 | ||
2244d07b | 2491 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { |
f0ee7acf MR |
2492 | next = next_txbd(bdp, base, tx_ring_size); |
2493 | buflen = next->length + GMAC_FCB_LEN; | |
2494 | } else | |
2495 | buflen = bdp->length; | |
2496 | ||
2497 | dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, | |
2498 | buflen, DMA_TO_DEVICE); | |
2499 | ||
2244d07b | 2500 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { |
f0ee7acf MR |
2501 | struct skb_shared_hwtstamps shhwtstamps; |
2502 | u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7); | |
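/* The hardware deposits the 64-bit Tx timestamp in the TxPAL area in
 * front of the frame; this recovers the 8-byte-aligned slot at (or
 * just below) skb->data + 0x10 where it lives.
 */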
2503 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); | |
2504 | shhwtstamps.hwtstamp = ns_to_ktime(*ns); | |
2505 | skb_tstamp_tx(skb, &shhwtstamps); | |
2506 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); | |
2507 | bdp = next; | |
2508 | } | |
81183059 | 2509 | |
4669bc90 DH |
2510 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); |
2511 | bdp = next_txbd(bdp, base, tx_ring_size); | |
d080cd63 | 2512 | |
4669bc90 | 2513 | for (i = 0; i < frags; i++) { |
4826857f | 2514 | dma_unmap_page(&priv->ofdev->dev, |
4669bc90 DH |
2515 | bdp->bufPtr, |
2516 | bdp->length, | |
2517 | DMA_TO_DEVICE); | |
2518 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); | |
2519 | bdp = next_txbd(bdp, base, tx_ring_size); | |
2520 | } | |
1da177e4 | 2521 | |
0fd56bb5 AF |
2522 | /* |
2523 | * If there's room in the queue (its length is capped at rx_ring_size)
2524 | * and the skb is the right size, we add it back into the recycle pool
2525 | */ | |
a12f801d | 2526 | if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && |
0fd56bb5 | 2527 | skb_recycle_check(skb, priv->rx_buffer_size + |
acbc0f03 EL |
2528 | RXBUF_ALIGNMENT)) { |
2529 | gfar_align_skb(skb); | |
cd0ea241 | 2530 | skb_queue_head(&priv->rx_recycle, skb); |
acbc0f03 | 2531 | } else |
0fd56bb5 AF |
2532 | dev_kfree_skb_any(skb); |
2533 | ||
a12f801d | 2534 | tx_queue->tx_skbuff[skb_dirtytx] = NULL; |
d080cd63 | 2535 | |
4669bc90 DH |
2536 | skb_dirtytx = (skb_dirtytx + 1) & |
2537 | TX_RING_MOD_MASK(tx_ring_size); | |
2538 | ||
2539 | howmany++; | |
a3bc1f11 | 2540 | spin_lock_irqsave(&tx_queue->txlock, flags); |
f0ee7acf | 2541 | tx_queue->num_txbdfree += nr_txbds; |
a3bc1f11 | 2542 | spin_unlock_irqrestore(&tx_queue->txlock, flags); |
4669bc90 | 2543 | } |
1da177e4 | 2544 | |
4669bc90 | 2545 | /* If we freed a buffer, we can restart transmission, if necessary */ |
fba4ed03 SG |
2546 | if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree) |
2547 | netif_wake_subqueue(dev, tx_queue->qindex); | |
1da177e4 | 2548 | |
4669bc90 | 2549 | /* Update dirty indicators */ |
a12f801d SG |
2550 | tx_queue->skb_dirtytx = skb_dirtytx; |
2551 | tx_queue->dirty_tx = bdp; | |
1da177e4 | 2552 | |
d080cd63 DH |
2553 | return howmany; |
2554 | } | |
2555 | ||
f4983704 | 2556 | static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) |
d080cd63 | 2557 | { |
a6d0b91a AV |
2558 | unsigned long flags; |
2559 | ||
fba4ed03 SG |
2560 | spin_lock_irqsave(&gfargrp->grplock, flags); |
2561 | if (napi_schedule_prep(&gfargrp->napi)) { | |
f4983704 | 2562 | gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED); |
fba4ed03 | 2563 | __napi_schedule(&gfargrp->napi); |
8707bdd4 JP |
2564 | } else { |
2565 | /* | |
2566 | * Clear IEVENT, so interrupts aren't called again | |
2567 | * because of the packets that have already arrived. | |
2568 | */ | |
f4983704 | 2569 | gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK); |
2f448911 | 2570 | } |
fba4ed03 | 2571 | spin_unlock_irqrestore(&gfargrp->grplock, flags); |
a6d0b91a | 2572 | |
8c7396ae | 2573 | } |
1da177e4 | 2574 | |
8c7396ae | 2575 | /* Interrupt Handler for Transmit complete */ |
f4983704 | 2576 | static irqreturn_t gfar_transmit(int irq, void *grp_id) |
8c7396ae | 2577 | { |
f4983704 | 2578 | gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); |
1da177e4 LT |
2579 | return IRQ_HANDLED; |
2580 | } | |
2581 | ||
a12f801d | 2582 | static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, |
815b97c6 AF |
2583 | struct sk_buff *skb) |
2584 | { | |
a12f801d | 2585 | struct net_device *dev = rx_queue->dev; |
815b97c6 | 2586 | struct gfar_private *priv = netdev_priv(dev); |
8a102fe0 | 2587 | dma_addr_t buf; |
815b97c6 | 2588 | |
8a102fe0 AV |
2589 | buf = dma_map_single(&priv->ofdev->dev, skb->data, |
2590 | priv->rx_buffer_size, DMA_FROM_DEVICE); | |
a12f801d | 2591 | gfar_init_rxbdp(rx_queue, bdp, buf); |
815b97c6 AF |
2592 | } |
2593 | ||
acbc0f03 | 2594 | static struct sk_buff * gfar_alloc_skb(struct net_device *dev) |
1da177e4 LT |
2595 | { |
2596 | struct gfar_private *priv = netdev_priv(dev); | |
2597 | struct sk_buff *skb = NULL; | |
1da177e4 | 2598 | |
acbc0f03 | 2599 | skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); |
815b97c6 | 2600 | if (!skb) |
1da177e4 LT |
2601 | return NULL; |
2602 | ||
acbc0f03 | 2603 | gfar_align_skb(skb); |
7f7f5316 | 2604 | |
acbc0f03 EL |
2605 | return skb; |
2606 | } | |
2607 | ||
2608 | struct sk_buff * gfar_new_skb(struct net_device *dev) | |
2609 | { | |
2610 | struct gfar_private *priv = netdev_priv(dev); | |
2611 | struct sk_buff *skb = NULL; | |
2612 | ||
cd0ea241 | 2613 | skb = skb_dequeue(&priv->rx_recycle); |
acbc0f03 EL |
2614 | if (!skb) |
2615 | skb = gfar_alloc_skb(dev); | |
1da177e4 | 2616 | |
1da177e4 LT |
2617 | return skb; |
2618 | } | |
2619 | ||
298e1a9e | 2620 | static inline void count_errors(unsigned short status, struct net_device *dev) |
1da177e4 | 2621 | { |
298e1a9e | 2622 | struct gfar_private *priv = netdev_priv(dev); |
09f75cd7 | 2623 | struct net_device_stats *stats = &dev->stats; |
1da177e4 LT |
2624 | struct gfar_extra_stats *estats = &priv->extra_stats; |
2625 | ||
2626 | /* If the packet was truncated, none of the other errors | |
2627 | * matter */ | |
2628 | if (status & RXBD_TRUNCATED) { | |
2629 | stats->rx_length_errors++; | |
2630 | ||
2631 | estats->rx_trunc++; | |
2632 | ||
2633 | return; | |
2634 | } | |
2635 | /* Count the errors, if there were any */ | |
2636 | if (status & (RXBD_LARGE | RXBD_SHORT)) { | |
2637 | stats->rx_length_errors++; | |
2638 | ||
2639 | if (status & RXBD_LARGE) | |
2640 | estats->rx_large++; | |
2641 | else | |
2642 | estats->rx_short++; | |
2643 | } | |
2644 | if (status & RXBD_NONOCTET) { | |
2645 | stats->rx_frame_errors++; | |
2646 | estats->rx_nonoctet++; | |
2647 | } | |
2648 | if (status & RXBD_CRCERR) { | |
2649 | estats->rx_crcerr++; | |
2650 | stats->rx_crc_errors++; | |
2651 | } | |
2652 | if (status & RXBD_OVERRUN) { | |
2653 | estats->rx_overrun++; | |
2654 | stats->rx_crc_errors++; | |
2655 | } | |
2656 | } | |
2657 | ||
f4983704 | 2658 | irqreturn_t gfar_receive(int irq, void *grp_id) |
1da177e4 | 2659 | { |
f4983704 | 2660 | gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); |
1da177e4 LT |
2661 | return IRQ_HANDLED; |
2662 | } | |
2663 | ||
0bbaf069 KG |
2664 | static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) |
2665 | { | |
2666 | /* If valid headers were found and the hardware verified
2667 | * the checksums, tell the kernel that no further checksumming
2668 | * is necessary; otherwise leave verification to the stack */
7f7f5316 | 2669 | if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) |
0bbaf069 KG |
2670 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
2671 | else | |
bc8acf2c | 2672 | skb_checksum_none_assert(skb); |
0bbaf069 KG |
2673 | } |
2674 | ||
2675 | ||
1da177e4 LT |
2676 | /* gfar_process_frame() -- handle one incoming packet if skb |
2677 | * isn't NULL. */ | |
2678 | static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, | |
2c2db48a | 2679 | int amount_pull) |
1da177e4 LT |
2680 | { |
2681 | struct gfar_private *priv = netdev_priv(dev); | |
0bbaf069 | 2682 | struct rxfcb *fcb = NULL; |
1da177e4 | 2683 | |
2c2db48a | 2684 | int ret; |
1da177e4 | 2685 | |
2c2db48a DH |
2686 | /* fcb is at the beginning if exists */ |
2687 | fcb = (struct rxfcb *)skb->data; | |
0bbaf069 | 2688 | |
2c2db48a DH |
2689 | /* Remove the FCB from the skb */ |
2690 | /* Remove the padded bytes, if there are any */ | |
f74dac08 SG |
2691 | if (amount_pull) { |
2692 | skb_record_rx_queue(skb, fcb->rq); | |
2c2db48a | 2693 | skb_pull(skb, amount_pull); |
f74dac08 | 2694 | } |
0bbaf069 | 2695 | |
cc772ab7 MR |
2696 | /* Get receive timestamp from the skb */ |
2697 | if (priv->hwts_rx_en) { | |
2698 | struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); | |
2699 | u64 *ns = (u64 *) skb->data; | |
2700 | memset(shhwtstamps, 0, sizeof(*shhwtstamps)); | |
2701 | shhwtstamps->hwtstamp = ns_to_ktime(*ns); | |
2702 | } | |
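/* The 8-byte Rx timestamp occupies the head of the padding region,
 * which is why the padding is only pulled off after reading it.
 */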
2703 | ||
2704 | if (priv->padding) | |
2705 | skb_pull(skb, priv->padding); | |
2706 | ||
8b3afe95 | 2707 | if (dev->features & NETIF_F_RXCSUM) |
2c2db48a | 2708 | gfar_rx_checksum(skb, fcb); |
0bbaf069 | 2709 | |
2c2db48a DH |
2710 | /* Tell the skb what kind of packet this is */ |
2711 | skb->protocol = eth_type_trans(skb, dev); | |
1da177e4 | 2712 | |
87c288c6 JP |
2713 | /* Set vlan tag */ |
2714 | if (fcb->flags & RXFCB_VLN) | |
2715 | __vlan_hwaccel_put_tag(skb, fcb->vlctl); | |
2716 | ||
2c2db48a | 2717 | /* Send the packet up the stack */ |
87c288c6 | 2718 | ret = netif_receive_skb(skb); |
0bbaf069 | 2719 | |
2c2db48a DH |
2720 | if (NET_RX_DROP == ret) |
2721 | priv->extra_stats.kernel_dropped++; | |
1da177e4 LT |
2722 | |
2723 | return 0; | |
2724 | } | |
2725 | ||
2726 | /* gfar_clean_rx_ring() -- Processes each frame in the rx ring | |
0bbaf069 | 2727 | * until the budget/quota has been reached. Returns the number |
1da177e4 LT |
2728 | * of frames handled |
2729 | */ | |
a12f801d | 2730 | int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) |
1da177e4 | 2731 | { |
a12f801d | 2732 | struct net_device *dev = rx_queue->dev; |
31de198b | 2733 | struct rxbd8 *bdp, *base; |
1da177e4 | 2734 | struct sk_buff *skb; |
2c2db48a DH |
2735 | int pkt_len; |
2736 | int amount_pull; | |
1da177e4 LT |
2737 | int howmany = 0; |
2738 | struct gfar_private *priv = netdev_priv(dev); | |
2739 | ||
2740 | /* Get the first full descriptor */ | |
a12f801d SG |
2741 | bdp = rx_queue->cur_rx; |
2742 | base = rx_queue->rx_bd_base; | |
1da177e4 | 2743 | |
cc772ab7 | 2744 | amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0); |
2c2db48a | 2745 | |
1da177e4 | 2746 | while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { |
815b97c6 | 2747 | struct sk_buff *newskb; |
3b6330ce | 2748 | rmb(); |
815b97c6 AF |
2749 | |
2750 | /* Add another skb for the future */ | |
2751 | newskb = gfar_new_skb(dev); | |
2752 | ||
a12f801d | 2753 | skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; |
1da177e4 | 2754 | |
4826857f | 2755 | dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, |
81183059 AF |
2756 | priv->rx_buffer_size, DMA_FROM_DEVICE); |
2757 | ||
63b88b90 AV |
2758 | if (unlikely(!(bdp->status & RXBD_ERR) && |
2759 | bdp->length > priv->rx_buffer_size)) | |
2760 | bdp->status = RXBD_LARGE; | |
2761 | ||
815b97c6 AF |
2762 | /* We drop the frame if we failed to allocate a new buffer */ |
2763 | if (unlikely(!newskb || !(bdp->status & RXBD_LAST) || | |
2764 | bdp->status & RXBD_ERR)) { | |
2765 | count_errors(bdp->status, dev); | |
2766 | ||
2767 | if (unlikely(!newskb)) | |
2768 | newskb = skb; | |
acbc0f03 | 2769 | else if (skb) |
cd0ea241 | 2770 | skb_queue_head(&priv->rx_recycle, skb); |
815b97c6 | 2771 | } else { |
1da177e4 | 2772 | /* Increment the number of packets */ |
a7f38041 | 2773 | rx_queue->stats.rx_packets++; |
1da177e4 LT |
2774 | howmany++; |
2775 | ||
2c2db48a DH |
2776 | if (likely(skb)) { |
2777 | pkt_len = bdp->length - ETH_FCS_LEN; | |
2778 | /* Remove the FCS from the packet length */ | |
2779 | skb_put(skb, pkt_len); | |
a7f38041 | 2780 | rx_queue->stats.rx_bytes += pkt_len; |
f74dac08 | 2781 | skb_record_rx_queue(skb, rx_queue->qindex); |
2c2db48a DH |
2782 | gfar_process_frame(dev, skb, amount_pull); |
2783 | ||
2784 | } else { | |
59deab26 | 2785 | netif_warn(priv, rx_err, dev, "Missing skb!\n"); |
a7f38041 | 2786 | rx_queue->stats.rx_dropped++; |
2c2db48a DH |
2787 | priv->extra_stats.rx_skbmissing++; |
2788 | } | |
1da177e4 | 2789 | |
1da177e4 LT |
2790 | } |
2791 | ||
a12f801d | 2792 | rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; |
1da177e4 | 2793 | |
815b97c6 | 2794 | /* Setup the new bdp */ |
a12f801d | 2795 | gfar_new_rxbdp(rx_queue, bdp, newskb); |
1da177e4 LT |
2796 | |
2797 | /* Update to the next pointer */ | |
a12f801d | 2798 | bdp = next_bd(bdp, base, rx_queue->rx_ring_size); |
1da177e4 LT |
2799 | |
2800 | /* update to point at the next skb */ | |
a12f801d SG |
2801 | rx_queue->skb_currx = |
2802 | (rx_queue->skb_currx + 1) & | |
2803 | RX_RING_MOD_MASK(rx_queue->rx_ring_size); | |
1da177e4 LT |
2804 | } |
2805 | ||
2806 | /* Update the current rxbd pointer to be the next one */ | |
a12f801d | 2807 | rx_queue->cur_rx = bdp; |
1da177e4 | 2808 | |
1da177e4 LT |
2809 | return howmany; |
2810 | } | |
2811 | ||
bea3348e | 2812 | static int gfar_poll(struct napi_struct *napi, int budget) |
1da177e4 | 2813 | { |
fba4ed03 SG |
2814 | struct gfar_priv_grp *gfargrp = container_of(napi, |
2815 | struct gfar_priv_grp, napi); | |
2816 | struct gfar_private *priv = gfargrp->priv; | |
46ceb60c | 2817 | struct gfar __iomem *regs = gfargrp->regs; |
a12f801d | 2818 | struct gfar_priv_tx_q *tx_queue = NULL; |
fba4ed03 SG |
2819 | struct gfar_priv_rx_q *rx_queue = NULL; |
2820 | int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0; | |
18294ad1 AV |
2821 | int tx_cleaned = 0, i, left_over_budget = budget; |
2822 | unsigned long serviced_queues = 0; | |
fba4ed03 | 2823 | int num_queues = 0; |
d080cd63 | 2824 | |
fba4ed03 SG |
2825 | num_queues = gfargrp->num_rx_queues; |
2826 | budget_per_queue = budget/num_queues; | |
2827 | ||
8c7396ae DH |
2828 | /* Clear IEVENT, so interrupts aren't called again |
2829 | * because of the packets that have already arrived */ | |
f4983704 | 2830 | gfar_write(®s->ievent, IEVENT_RTX_MASK); |
8c7396ae | 2831 | |
fba4ed03 | 2832 | while (num_queues && left_over_budget) { |
1da177e4 | 2833 | |
fba4ed03 SG |
2834 | budget_per_queue = left_over_budget/num_queues; |
2835 | left_over_budget = 0; | |
2836 | ||
984b3f57 | 2837 | for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { |
fba4ed03 SG |
2838 | if (test_bit(i, &serviced_queues)) |
2839 | continue; | |
2840 | rx_queue = priv->rx_queue[i]; | |
2841 | tx_queue = priv->tx_queue[rx_queue->qindex]; | |
2842 | ||
a3bc1f11 | 2843 | tx_cleaned += gfar_clean_tx_ring(tx_queue); |
fba4ed03 SG |
2844 | rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue, |
2845 | budget_per_queue); | |
2846 | rx_cleaned += rx_cleaned_per_queue; | |
2847 | if (rx_cleaned_per_queue < budget_per_queue) {
2848 | left_over_budget = left_over_budget + | |
2849 | (budget_per_queue - rx_cleaned_per_queue); | |
2850 | set_bit(i, &serviced_queues); | |
2851 | num_queues--; | |
2852 | } | |
2853 | } | |
2854 | } | |
1da177e4 | 2855 | |
42199884 AF |
2856 | if (tx_cleaned) |
2857 | return budget; | |
2858 | ||
2859 | if (rx_cleaned < budget) { | |
288379f0 | 2860 | napi_complete(napi); |
1da177e4 LT |
2861 | |
2862 | /* Clear the halt bit in RSTAT */ | |
fba4ed03 | 2863 | gfar_write(&regs->rstat, gfargrp->rstat);
1da177e4 | 2864 | |
f4983704 | 2865 | gfar_write(&regs->imask, IMASK_DEFAULT);
1da177e4 LT |
2866 | |
2867 | /* If we are coalescing interrupts, update the timer */ | |
2868 | /* Otherwise, clear it */ | |
46ceb60c SG |
2869 | gfar_configure_coalescing(priv, |
2870 | gfargrp->rx_bit_map, gfargrp->tx_bit_map); | |
1da177e4 LT |
2871 | } |
2872 | ||
42199884 | 2873 | return rx_cleaned; |
1da177e4 | 2874 | } |
1da177e4 | 2875 | |
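/*
 * Editor's sketch (names invented, not part of the driver): gfar_poll()
 * above splits the NAPI budget evenly across the group's RX queues and,
 * whenever a queue finishes under its quota, recycles the leftover into
 * the next pass for the queues that are still busy. The control flow in
 * isolation, with an assumed work() callback standing in for
 * gfar_clean_rx_ring():
 */
static int demo_share_budget(int budget, int total_queues,
			     int (*work)(int queue, int quota))
{
	bool serviced[8] = { false };	/* assumes total_queues <= 8 */
	int active = total_queues;
	int left_over = budget;
	int cleaned = 0, i;

	while (active && left_over) {
		int per_queue = left_over / active;

		left_over = 0;
		for (i = 0; i < total_queues; i++) {
			int done;

			if (serviced[i])
				continue;
			done = work(i, per_queue);
			cleaned += done;
			if (done < per_queue) {
				/* Queue went idle: hand back its unused
				 * quota and drop it from later passes. */
				left_over += per_queue - done;
				serviced[i] = true;
				active--;
			}
		}
	}
	return cleaned;
}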
f2d71c2d VW |
2876 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2877 | /* | |
2878 | * Polling 'interrupt' - used by things like netconsole to send skbs | |
2879 | * without having to re-enable interrupts. It's not called while | |
2880 | * the interrupt routine is executing. | |
2881 | */ | |
2882 | static void gfar_netpoll(struct net_device *dev) | |
2883 | { | |
2884 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 2885 | int i = 0; |
f2d71c2d VW |
2886 | |
2887 | /* If the device has multiple interrupts, run tx/rx */ | |
b31a1d8b | 2888 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
46ceb60c SG |
2889 | for (i = 0; i < priv->num_grps; i++) { |
2890 | disable_irq(priv->gfargrp[i].interruptTransmit); | |
2891 | disable_irq(priv->gfargrp[i].interruptReceive); | |
2892 | disable_irq(priv->gfargrp[i].interruptError); | |
2893 | gfar_interrupt(priv->gfargrp[i].interruptTransmit, | |
2894 | &priv->gfargrp[i]); | |
2895 | enable_irq(priv->gfargrp[i].interruptError); | |
2896 | enable_irq(priv->gfargrp[i].interruptReceive); | |
2897 | enable_irq(priv->gfargrp[i].interruptTransmit); | |
2898 | } | |
f2d71c2d | 2899 | } else { |
46ceb60c SG |
2900 | for (i = 0; i < priv->num_grps; i++) { |
2901 | disable_irq(priv->gfargrp[i].interruptTransmit); | |
2902 | gfar_interrupt(priv->gfargrp[i].interruptTransmit, | |
2903 | &priv->gfargrp[i]); | |
2904 | enable_irq(priv->gfargrp[i].interruptTransmit); | |
43de004b | 2905 | } |
f2d71c2d VW |
2906 | } |
2907 | } | |
2908 | #endif | |
2909 | ||
1da177e4 | 2910 | /* The interrupt handler for devices with one interrupt */ |
f4983704 | 2911 | static irqreturn_t gfar_interrupt(int irq, void *grp_id) |
1da177e4 | 2912 | { |
f4983704 | 2913 | struct gfar_priv_grp *gfargrp = grp_id; |
1da177e4 LT |
2914 | |
2915 | /* Save ievent for future reference */ | |
f4983704 | 2916 | u32 events = gfar_read(&gfargrp->regs->ievent); |
1da177e4 | 2917 | |
1da177e4 | 2918 | /* Check for reception */ |
538cc7ee | 2919 | if (events & IEVENT_RX_MASK) |
f4983704 | 2920 | gfar_receive(irq, grp_id); |
1da177e4 LT |
2921 | |
2922 | /* Check for transmit completion */ | |
538cc7ee | 2923 | if (events & IEVENT_TX_MASK) |
f4983704 | 2924 | gfar_transmit(irq, grp_id); |
1da177e4 | 2925 | |
538cc7ee SS |
2926 | /* Check for errors */ |
2927 | if (events & IEVENT_ERR_MASK) | |
f4983704 | 2928 | gfar_error(irq, grp_id); |
1da177e4 LT |
2929 | |
2930 | return IRQ_HANDLED; | |
2931 | } | |
2932 | ||
1da177e4 LT |
2933 | /* Called every time the controller might need to be made |
2934 | * aware of new link state. The PHY code conveys this | |
bb40dcbb | 2935 | * information through variables in the phydev structure, and this |
1da177e4 LT |
2936 | * function converts those variables into the appropriate |
2937 | * register values, and can bring down the device if needed. | |
2938 | */ | |
2939 | static void adjust_link(struct net_device *dev) | |
2940 | { | |
2941 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 2942 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
bb40dcbb AF |
2943 | unsigned long flags; |
2944 | struct phy_device *phydev = priv->phydev; | |
2945 | int new_state = 0; | |
2946 | ||
fba4ed03 SG |
2947 | local_irq_save(flags); |
2948 | lock_tx_qs(priv); | |
2949 | ||
bb40dcbb AF |
2950 | if (phydev->link) { |
2951 | u32 tempval = gfar_read(&regs->maccfg2);
7f7f5316 | 2952 | u32 ecntrl = gfar_read(&regs->ecntrl);
1da177e4 | 2953 | |
1da177e4 LT |
2954 | /* Now we make sure that we can be in full duplex mode. |
2955 | * If not, we operate in half-duplex mode. */ | |
bb40dcbb AF |
2956 | if (phydev->duplex != priv->oldduplex) { |
2957 | new_state = 1; | |
2958 | if (!(phydev->duplex)) | |
1da177e4 | 2959 | tempval &= ~(MACCFG2_FULL_DUPLEX); |
bb40dcbb | 2960 | else |
1da177e4 | 2961 | tempval |= MACCFG2_FULL_DUPLEX; |
1da177e4 | 2962 | |
bb40dcbb | 2963 | priv->oldduplex = phydev->duplex; |
1da177e4 LT |
2964 | } |
2965 | ||
bb40dcbb AF |
2966 | if (phydev->speed != priv->oldspeed) { |
2967 | new_state = 1; | |
2968 | switch (phydev->speed) { | |
1da177e4 | 2969 | case 1000: |
1da177e4 LT |
2970 | tempval = |
2971 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); | |
f430e49e LY |
2972 | |
2973 | ecntrl &= ~(ECNTRL_R100); | |
1da177e4 LT |
2974 | break; |
2975 | case 100: | |
2976 | case 10: | |
1da177e4 LT |
2977 | tempval = |
2978 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); | |
7f7f5316 AF |
2979 | |
2980 | /* Reduced mode distinguishes | |
2981 | * between 10 and 100 */ | |
2982 | if (phydev->speed == SPEED_100) | |
2983 | ecntrl |= ECNTRL_R100; | |
2984 | else | |
2985 | ecntrl &= ~(ECNTRL_R100); | |
1da177e4 LT |
2986 | break; |
2987 | default: | |
59deab26 JP |
2988 | netif_warn(priv, link, dev, |
2989 | "Ack! Speed (%d) is not 10/100/1000!\n", | |
2990 | phydev->speed); | |
1da177e4 LT |
2991 | break; |
2992 | } | |
2993 | ||
bb40dcbb | 2994 | priv->oldspeed = phydev->speed; |
1da177e4 LT |
2995 | } |
2996 | ||
bb40dcbb | 2997 | gfar_write(&regs->maccfg2, tempval);
7f7f5316 | 2998 | gfar_write(&regs->ecntrl, ecntrl);
bb40dcbb | 2999 | |
1da177e4 | 3000 | if (!priv->oldlink) { |
bb40dcbb | 3001 | new_state = 1; |
1da177e4 | 3002 | priv->oldlink = 1; |
1da177e4 | 3003 | } |
bb40dcbb AF |
3004 | } else if (priv->oldlink) { |
3005 | new_state = 1; | |
3006 | priv->oldlink = 0; | |
3007 | priv->oldspeed = 0; | |
3008 | priv->oldduplex = -1; | |
1da177e4 | 3009 | } |
1da177e4 | 3010 | |
bb40dcbb AF |
3011 | if (new_state && netif_msg_link(priv)) |
3012 | phy_print_status(phydev); | |
fba4ed03 SG |
3013 | unlock_tx_qs(priv); |
3014 | local_irq_restore(flags); | |
bb40dcbb | 3015 | } |
1da177e4 LT |
3016 | |
3017 | /* Update the hash table based on the current list of multicast | |
3018 | * addresses we subscribe to. Also, change the promiscuity of | |
3019 | * the device based on the flags (this function is called | |
3020 | * whenever dev->flags is changed) */
3021 | static void gfar_set_multi(struct net_device *dev) | |
3022 | { | |
22bedad3 | 3023 | struct netdev_hw_addr *ha; |
1da177e4 | 3024 | struct gfar_private *priv = netdev_priv(dev); |
46ceb60c | 3025 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
1da177e4 LT |
3026 | u32 tempval; |
3027 | ||
a12f801d | 3028 | if (dev->flags & IFF_PROMISC) { |
1da177e4 LT |
3029 | /* Set RCTRL to PROM */ |
3030 | tempval = gfar_read(&regs->rctrl);
3031 | tempval |= RCTRL_PROM; | |
3032 | gfar_write(&regs->rctrl, tempval);
3033 | } else { | |
3034 | /* Set RCTRL to not PROM */ | |
3035 | tempval = gfar_read(&regs->rctrl);
3036 | tempval &= ~(RCTRL_PROM); | |
3037 | gfar_write(&regs->rctrl, tempval);
3038 | } | |
6aa20a22 | 3039 | |
a12f801d | 3040 | if (dev->flags & IFF_ALLMULTI) { |
1da177e4 | 3041 | /* Set the hash to rx all multicast frames */ |
0bbaf069 KG |
3042 | gfar_write(&regs->igaddr0, 0xffffffff);
3043 | gfar_write(&regs->igaddr1, 0xffffffff);
3044 | gfar_write(&regs->igaddr2, 0xffffffff);
3045 | gfar_write(&regs->igaddr3, 0xffffffff);
3046 | gfar_write(&regs->igaddr4, 0xffffffff);
3047 | gfar_write(&regs->igaddr5, 0xffffffff);
3048 | gfar_write(&regs->igaddr6, 0xffffffff);
3049 | gfar_write(&regs->igaddr7, 0xffffffff);
1da177e4 LT |
3050 | gfar_write(&regs->gaddr0, 0xffffffff);
3051 | gfar_write(&regs->gaddr1, 0xffffffff);
3052 | gfar_write(&regs->gaddr2, 0xffffffff);
3053 | gfar_write(&regs->gaddr3, 0xffffffff);
3054 | gfar_write(&regs->gaddr4, 0xffffffff);
3055 | gfar_write(&regs->gaddr5, 0xffffffff);
3056 | gfar_write(&regs->gaddr6, 0xffffffff);
3057 | gfar_write(&regs->gaddr7, 0xffffffff);
3058 | } else { | |
7f7f5316 AF |
3059 | int em_num; |
3060 | int idx; | |
3061 | ||
1da177e4 | 3062 | /* zero out the hash */ |
0bbaf069 KG |
3063 | gfar_write(&regs->igaddr0, 0x0);
3064 | gfar_write(&regs->igaddr1, 0x0);
3065 | gfar_write(&regs->igaddr2, 0x0);
3066 | gfar_write(&regs->igaddr3, 0x0);
3067 | gfar_write(&regs->igaddr4, 0x0);
3068 | gfar_write(&regs->igaddr5, 0x0);
3069 | gfar_write(&regs->igaddr6, 0x0);
3070 | gfar_write(&regs->igaddr7, 0x0);
1da177e4 LT |
3071 | gfar_write(&regs->gaddr0, 0x0);
3072 | gfar_write(&regs->gaddr1, 0x0);
3073 | gfar_write(&regs->gaddr2, 0x0);
3074 | gfar_write(&regs->gaddr3, 0x0);
3075 | gfar_write(&regs->gaddr4, 0x0);
3076 | gfar_write(&regs->gaddr5, 0x0);
3077 | gfar_write(&regs->gaddr6, 0x0);
3078 | gfar_write(&regs->gaddr7, 0x0);
3079 | ||
7f7f5316 AF |
3080 | /* If we have extended hash tables, we need to |
3081 | * clear the exact match registers to prepare for | |
3082 | * setting them */ | |
3083 | if (priv->extended_hash) { | |
3084 | em_num = GFAR_EM_NUM + 1; | |
3085 | gfar_clear_exact_match(dev); | |
3086 | idx = 1; | |
3087 | } else { | |
3088 | idx = 0; | |
3089 | em_num = 0; | |
3090 | } | |
3091 | ||
4cd24eaf | 3092 | if (netdev_mc_empty(dev)) |
1da177e4 LT |
3093 | return; |
3094 | ||
3095 | /* Parse the list, and set the appropriate bits */ | |
22bedad3 | 3096 | netdev_for_each_mc_addr(ha, dev) { |
7f7f5316 | 3097 | if (idx < em_num) { |
22bedad3 | 3098 | gfar_set_mac_for_addr(dev, idx, ha->addr); |
7f7f5316 AF |
3099 | idx++; |
3100 | } else | |
22bedad3 | 3101 | gfar_set_hash_for_addr(dev, ha->addr); |
1da177e4 LT |
3102 | } |
3103 | } | |
1da177e4 LT |
3104 | } |
3105 | ||
7f7f5316 AF |
3106 | |
3107 | /* Clears each of the exact match registers to zero, so they | |
3108 | * don't interfere with normal reception */ | |
3109 | static void gfar_clear_exact_match(struct net_device *dev) | |
3110 | { | |
3111 | int idx; | |
b6bc7650 | 3112 | static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0}; |
7f7f5316 AF |
3113 | |
3114 | for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
b6bc7650 | 3115 | gfar_set_mac_for_addr(dev, idx, zero_arr); |
7f7f5316 AF |
3116 | } |
3117 | ||
1da177e4 LT |
3118 | /* Set the appropriate hash bit for the given addr */ |
3119 | /* The algorithm works like so: | |
3120 | * 1) Take the Destination Address (ie the multicast address), and | |
3121 | * do a CRC on it (little endian), and reverse the bits of the | |
3122 | * result. | |
3123 | * 2) Use the 8 most significant bits as a hash into a 256-entry | |
3124 | * table. The table is controlled through 8 32-bit registers: | |
3125 | * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is | |
3126 | * entry 255. This means that the 3 most significant bits in the
3127 | * hash index determine which gaddr register to use, and the 5 other bits
3128 | * indicate which bit (assuming an IBM numbering scheme, which | |
3129 | * for PowerPC (tm) is usually the case) in the register holds | |
3130 | * the entry. */ | |
3131 | static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) | |
3132 | { | |
3133 | u32 tempval; | |
3134 | struct gfar_private *priv = netdev_priv(dev); | |
1da177e4 | 3135 | u32 result = ether_crc(MAC_ADDR_LEN, addr); |
0bbaf069 KG |
3136 | int width = priv->hash_width; |
3137 | u8 whichbit = (result >> (32 - width)) & 0x1f; | |
3138 | u8 whichreg = result >> (32 - width + 5); | |
1da177e4 LT |
3139 | u32 value = (1 << (31 - whichbit));
3140 | ||
0bbaf069 | 3141 | tempval = gfar_read(priv->hash_regs[whichreg]); |
1da177e4 | 3142 | tempval |= value; |
0bbaf069 | 3143 | gfar_write(priv->hash_regs[whichreg], tempval); |
1da177e4 LT |
3144 | } |
3145 | ||
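/*
 * Editor's worked example for the computation above (the CRC value is
 * assumed for illustration): with the full 256-entry table,
 * priv->hash_width is 8, so for a CRC result of 0xa3000000
 * (top byte 0b10100011):
 *
 *	whichbit = (0xa3000000 >> 24) & 0x1f = 3	(low 5 of top 8 bits)
 *	whichreg =  0xa3000000 >> 29         = 5	(top 3 bits -> gaddr5)
 *	value    =  1 << (31 - 3)            = 0x10000000
 *
 * i.e. the address hashes to bit 3 (IBM numbering, MSB = bit 0) of gaddr5.
 */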
7f7f5316 AF |
3146 | |
3147 | /* There are multiple MAC address register pairs on some controllers.
3148 | * This function sets the num'th pair to a given address.
3149 | */ | |
b6bc7650 JP |
3150 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, |
3151 | const u8 *addr) | |
7f7f5316 AF |
3152 | { |
3153 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 3154 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
7f7f5316 AF |
3155 | int idx; |
3156 | char tmpbuf[8] = { 0 };	/* padded so the two 32-bit reads below stay in bounds */
3157 | u32 tempval; | |
f4983704 | 3158 | u32 __iomem *macptr = &regs->macstnaddr1;
7f7f5316 AF |
3159 | |
3160 | macptr += num * 2;
3161 | ||
3162 | /* Now copy it into the mac registers backwards, since the */
3163 | /* address registers expect the bytes in reversed order */
3164 | for (idx = 0; idx < MAC_ADDR_LEN; idx++) | |
3165 | tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx]; | |
3166 | ||
3167 | gfar_write(macptr, *((u32 *) (tmpbuf))); | |
3168 | ||
3169 | tempval = *((u32 *) (tmpbuf + 4)); | |
3170 | ||
3171 | gfar_write(macptr+1, tempval); | |
3172 | } | |
3173 | ||
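/*
 * Editor's worked example for gfar_set_mac_for_addr() above (the address
 * is made up): for 00:04:9f:01:02:03, the loop builds tmpbuf as
 * 03 02 01 9f 04 00, so on this big-endian controller the first write
 * stores 0x0302019f and the second write puts 0x0400 in the upper half
 * of the register pair (the lower half is don't-care padding).
 */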
1da177e4 | 3174 | /* GFAR error interrupt handler */ |
f4983704 | 3175 | static irqreturn_t gfar_error(int irq, void *grp_id) |
1da177e4 | 3176 | { |
f4983704 SG |
3177 | struct gfar_priv_grp *gfargrp = grp_id; |
3178 | struct gfar __iomem *regs = gfargrp->regs; | |
3179 | struct gfar_private *priv = gfargrp->priv;
3180 | struct net_device *dev = priv->ndev; | |
1da177e4 LT |
3181 | |
3182 | /* Save ievent for future reference */ | |
f4983704 | 3183 | u32 events = gfar_read(&regs->ievent);
1da177e4 LT |
3184 | |
3185 | /* Clear IEVENT */ | |
f4983704 | 3186 | gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
d87eb127 SW |
3187 | |
3188 | /* Magic Packet is not an error. */ | |
b31a1d8b | 3189 | if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && |
d87eb127 SW |
3190 | (events & IEVENT_MAG)) |
3191 | events &= ~IEVENT_MAG; | |
1da177e4 LT |
3192 | |
3193 | /* Log the error details for debugging */
0bbaf069 | 3194 | if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) |
59deab26 JP |
3195 | netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n", |
3196 | events, gfar_read(&regs->imask));
1da177e4 LT |
3197 | |
3198 | /* Update the error counters */ | |
3199 | if (events & IEVENT_TXE) { | |
09f75cd7 | 3200 | dev->stats.tx_errors++; |
1da177e4 LT |
3201 | |
3202 | if (events & IEVENT_LC) | |
09f75cd7 | 3203 | dev->stats.tx_window_errors++; |
1da177e4 | 3204 | if (events & IEVENT_CRL) |
09f75cd7 | 3205 | dev->stats.tx_aborted_errors++; |
1da177e4 | 3206 | if (events & IEVENT_XFUN) { |
836cf7fa AV |
3207 | unsigned long flags; |
3208 | ||
59deab26 JP |
3209 | netif_dbg(priv, tx_err, dev, |
3210 | "TX FIFO underrun, packet dropped\n"); | |
09f75cd7 | 3211 | dev->stats.tx_dropped++; |
1da177e4 LT |
3212 | priv->extra_stats.tx_underrun++; |
3213 | ||
836cf7fa AV |
3214 | local_irq_save(flags); |
3215 | lock_tx_qs(priv); | |
3216 | ||
1da177e4 | 3217 | /* Reactivate the Tx Queues */ |
fba4ed03 | 3218 | gfar_write(&regs->tstat, gfargrp->tstat);
836cf7fa AV |
3219 | |
3220 | unlock_tx_qs(priv); | |
3221 | local_irq_restore(flags); | |
1da177e4 | 3222 | } |
59deab26 | 3223 | netif_dbg(priv, tx_err, dev, "Transmit Error\n"); |
1da177e4 LT |
3224 | } |
3225 | if (events & IEVENT_BSY) { | |
09f75cd7 | 3226 | dev->stats.rx_errors++; |
1da177e4 LT |
3227 | priv->extra_stats.rx_bsy++; |
3228 | ||
f4983704 | 3229 | gfar_receive(irq, grp_id); |
1da177e4 | 3230 | |
59deab26 JP |
3231 | netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n", |
3232 | gfar_read(&regs->rstat));
1da177e4 LT |
3233 | } |
3234 | if (events & IEVENT_BABR) { | |
09f75cd7 | 3235 | dev->stats.rx_errors++; |
1da177e4 LT |
3236 | priv->extra_stats.rx_babr++; |
3237 | ||
59deab26 | 3238 | netif_dbg(priv, rx_err, dev, "babbling RX error\n"); |
1da177e4 LT |
3239 | } |
3240 | if (events & IEVENT_EBERR) { | |
3241 | priv->extra_stats.eberr++; | |
59deab26 | 3242 | netif_dbg(priv, rx_err, dev, "bus error\n"); |
1da177e4 | 3243 | } |
59deab26 JP |
3244 | if (events & IEVENT_RXC) |
3245 | netif_dbg(priv, rx_status, dev, "control frame\n"); | |
1da177e4 LT |
3246 | |
3247 | if (events & IEVENT_BABT) { | |
3248 | priv->extra_stats.tx_babt++; | |
59deab26 | 3249 | netif_dbg(priv, tx_err, dev, "babbling TX error\n"); |
1da177e4 LT |
3250 | } |
3251 | return IRQ_HANDLED; | |
3252 | } | |
3253 | ||
b31a1d8b AF |
3254 | static struct of_device_id gfar_match[] = {
3256 | { | |
3257 | .type = "network", | |
3258 | .compatible = "gianfar", | |
3259 | }, | |
46ceb60c SG |
3260 | { |
3261 | .compatible = "fsl,etsec2", | |
3262 | }, | |
b31a1d8b AF |
3263 | {}, |
3264 | }; | |
e72701ac | 3265 | MODULE_DEVICE_TABLE(of, gfar_match); |
b31a1d8b | 3266 | |
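/*
 * Editor's illustration: a hypothetical device-tree node that the first
 * gfar_match entry above would bind to. The unit address, reg and
 * interrupts values are invented for the example; only device_type and
 * compatible come from the match table.
 *
 *	ethernet@24000 {
 *		device_type = "network";
 *		compatible = "gianfar";
 *		reg = <0x24000 0x1000>;
 *		interrupts = <29 2 30 2 34 2>;
 *	};
 */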
1da177e4 | 3267 | /* Structure for a device driver */ |
74888760 | 3268 | static struct platform_driver gfar_driver = { |
4018294b GL |
3269 | .driver = { |
3270 | .name = "fsl-gianfar", | |
3271 | .owner = THIS_MODULE, | |
3272 | .pm = GFAR_PM_OPS, | |
3273 | .of_match_table = gfar_match, | |
3274 | }, | |
1da177e4 LT |
3275 | .probe = gfar_probe, |
3276 | .remove = gfar_remove, | |
3277 | }; | |
3278 | ||
3279 | static int __init gfar_init(void) | |
3280 | { | |
74888760 | 3281 | return platform_driver_register(&gfar_driver); |
1da177e4 LT |
3282 | } |
3283 | ||
3284 | static void __exit gfar_exit(void) | |
3285 | { | |
74888760 | 3286 | platform_driver_unregister(&gfar_driver); |
1da177e4 LT |
3287 | } |
3288 | ||
3289 | module_init(gfar_init); | |
3290 | module_exit(gfar_exit); | |
3291 |