/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time has passed). In NAPI, the
 * interrupt handler will signal that there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match);
static int gfar_remove(struct of_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

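/* Point an RX buffer descriptor at the given DMA buffer and mark it empty
 * so the controller can fill it, setting the wrap flag on the last
 * descriptor in the ring. The eieio() barrier orders the buffer pointer
 * write ahead of the status write that hands the descriptor to hardware. */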
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}

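/* (Re)initialize every TX and RX descriptor ring: clear the TX rings and
 * mark their last descriptors with TXBD_WRAP; for RX, reuse any buffers
 * still attached to the ring and allocate fresh skbs for empty slots. */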
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					pr_err("%s: Can't allocate RX buffers\n",
							ndev->name);
					goto err_rxalloc_fail;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;

err_rxalloc_fail:
	free_skb_resources(priv);
	return -ENOMEM;
}

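/* Allocate one DMA-coherent region holding all TX and RX descriptor rings
 * back to back, carve it up among the queues, allocate the per-queue
 * skbuff pointer arrays, and hand off to gfar_init_bds() to populate the
 * rings. */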
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			&addr, GFP_KERNEL);
	if (!vaddr) {
		if (netif_msg_ifup(priv))
			pr_err("%s: Could not allocate buffer descriptors!\n",
			       ndev->name);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
				tx_queue->tx_ring_size, GFP_KERNEL);
		if (!tx_queue->tx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate tx_skbuff\n",
				       ndev->name);
			goto cleanup;
		}

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
				rx_queue->rx_ring_size, GFP_KERNEL);

		if (!rx_queue->rx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate rx_skbuff\n",
				       ndev->name);
			goto cleanup;
		}

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

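/* Tell the controller where each descriptor ring starts, by writing the
 * DMA base address of every TX and RX ring into the successive
 * tbase/rbase register pairs. */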
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

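/* Program the MAC-level receive and transmit controls (RCTRL/TCTRL),
 * stashing attributes, and FIFO thresholds according to the options
 * carried in the private structure. */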
static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* keep vlan related bits if it's enabled */
	if (priv->vlgrp) {
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
		tctrl |= TCTRL_VLINS;
	}

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	tctrl |= TCTRL_TXSCHED_PRIO;

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
			ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_multicast_list = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_select_queue = gfar_select_queue,
	.ndo_vlan_rx_register = gfar_vlan_rx_register,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

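/* Software copies of the filer control/property rules; they are kept in
 * step with the hardware by the gfar_write_filer() callers below. */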
unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];

void lock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return priv->vlgrp || priv->rx_csum_enable;
}

u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	return skb_get_queue_mapping(skb);
}

static void free_tx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}

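/* Map the register block and parse the interrupts and RX/TX queue
 * bit-maps for one interrupt group, as described by the given device
 * tree node. */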
static int gfar_parse_group(struct device_node *np,
		struct gfar_private *priv, const char *model)
{
	u32 *queue_mask;
	u64 addr, size;

	addr = of_translate_address(np,
			of_get_address(np, 0, &size, NULL));
	priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);

	if (!priv->gfargrp[priv->num_grps].regs)
		return -ENOMEM;

	priv->gfargrp[priv->num_grps].interruptTransmit =
			irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp[priv->num_grps].interruptReceive =
			irq_of_parse_and_map(np, 1);
		priv->gfargrp[priv->num_grps].interruptError =
			irq_of_parse_and_map(np, 2);
		if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
			priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
			priv->gfargrp[priv->num_grps].interruptError < 0) {
			return -EINVAL;
		}
	}

	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
	priv->gfargrp[priv->num_grps].priv = priv;
	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np,
				"fsl,rx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].rx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np,
				"fsl,tx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].tx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}

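/* Walk the device tree node for this controller: allocate the net_device,
 * parse the queue counts, interrupt groups, stashing options, and MAC
 * address, and set the device_flags for the detected controller model. */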
static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
				num_tx_qs, MAX_TX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
				num_rx_qs, MAX_RX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->node = ofdev->node;
	priv->ndev = dev;

	dev->num_tx_queues = num_tx_qs;
	dev->real_num_tx_queues = num_tx_qs;
	priv->num_tx_queues = num_tx_qs;
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kmalloc(sizeof(struct gfar_priv_tx_q),
				GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kmalloc(sizeof(struct gfar_priv_rx_q),
				GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_PADDING |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id. The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	free_netdev(dev);
	return err;
}

/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
}

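/* Reverse the order of the low max_qs bits in bit_map, so that a mask
 * whose MSB refers to queue 0 can be walked with for_each_bit(), which
 * scans from bit 0 upward. */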
static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}

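/* Write the four filer rules that make up one cluster entry for the given
 * protocol class, working downward from rqfar; returns the index of the
 * last rule written. */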
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
		u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	ftp_rqfpr[rqfar] = rqfpr;
	ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	ftp_rqfpr[rqfar] = rqfpr;
	ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

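/* Program the default filer table: one catch-all match rule, one cluster
 * per IPv4/IPv6 x TCP/UDP class, and no-match rules for the rest. */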
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		ftp_rqfcr[i] = rqfcr;
		ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	int len_devname;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->node = ofdev->node;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);
	regs = priv->gfargrp[0].regs;

	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi ... we are registering NAPI for each grp */
	for (i = 0; i < priv->num_grps; i++)
		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
				GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
	} else
		priv->rx_csum_enable = 0;

	priv->vlgrp = NULL;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM)
		dev->hard_header_len += GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

	/* Need to reverse the bit maps as bit_map's MSB is q0,
	 * but for_each_bit parses from right to left, which
	 * basically reverses the queue numbers */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
		for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
		for_each_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* Enable the filer if using multiple RX queues */
	if (priv->num_rx_queues > 1)
		priv->rx_filer_enable = 1;

	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
				dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	len_devname = strlen(dev->name);
	for (i = 0; i < priv->num_grps; i++) {
		strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
				len_devname);
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_tx[
				strlen(priv->gfargrp[i].int_name_tx)] = i+48;
			strncpy(&priv->gfargrp[i].int_name_tx[strlen(
				priv->gfargrp[i].int_name_tx)],
				"_tx", sizeof("_tx") + 1);

			strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
					"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_rx[
				strlen(priv->gfargrp[i].int_name_rx)] = i+48;
			strncpy(&priv->gfargrp[i].int_name_rx[strlen(
				priv->gfargrp[i].int_name_rx)],
				"_rx", sizeof("_rx") + 1);

			strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_er[len_devname],
					"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_er[strlen(
				priv->gfargrp[i].int_name_er)] = i+48;
			strncpy(&priv->gfargrp[i].int_name_er[strlen(
				priv->gfargrp[i].int_name_er)],
				"_er", sizeof("_er") + 1);
		} else
			priv->gfargrp[i].int_name_tx[len_devname] = '\0';
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);

	/* Even more device info helps when determining which kernel */
	/* provided which set of benchmarks. */
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
	for (i = 0; i < priv->num_rx_queues; i++)
		printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
			dev->name, i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
			dev->name, i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct of_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	free_netdev(priv->ndev);

	return 0;
}

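/* Power management: on suspend the queues are halted and, if wake-on-LAN
 * is enabled, the controller is left in Magic Packet mode; resume undoes
 * this, and restore reprograms a controller that may have lost power. */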
#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {
		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt_nodisable(ndev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(ndev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev))
		return 0;

	gfar_init_bds(ndev);
	init_registers(ndev);
	gfar_set_mac_address(ndev);
	gfar_init_mac(ndev);
	gfar_start(ndev);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
{
	return gfar_suspend(&ofdev->dev);
}

static int gfar_legacy_resume(struct of_device *ofdev)
{
	return gfar_resume(&ofdev->dev);
}

#else

#define GFAR_PM_OPS NULL
#define gfar_legacy_suspend NULL
#define gfar_legacy_resume NULL

#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}

/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
							 interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	return 0;
}

/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip. We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register. We assume
 * that the TBIPA register is valid. Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
				"device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/*
	 * If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
	 * everything for us? Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off (for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}

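/* Reset the controller's interrupt, hash, and RMON MIB registers to a
 * known state, and program the receive buffer and frame length limits. */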
static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	int i = 0;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Init hash registers to zero */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
}

1468 | /* Halt the receive and transmit queues */ | |
d87eb127 | 1469 | static void gfar_halt_nodisable(struct net_device *dev) |
1da177e4 LT |
1470 | { |
1471 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 1472 | struct gfar __iomem *regs = NULL; |
1da177e4 | 1473 | u32 tempval; |
46ceb60c | 1474 | int i = 0; |
1da177e4 | 1475 | |
46ceb60c SG |
1476 | for (i = 0; i < priv->num_grps; i++) { |
1477 | regs = priv->gfargrp[i].regs; | |
1478 | /* Mask all interrupts */ | |
1479 | gfar_write(®s->imask, IMASK_INIT_CLEAR); | |
1da177e4 | 1480 | |
46ceb60c SG |
1481 | /* Clear all interrupts */ |
1482 | gfar_write(®s->ievent, IEVENT_INIT_CLEAR); | |
1483 | } | |
1da177e4 | 1484 | |
46ceb60c | 1485 | regs = priv->gfargrp[0].regs; |
1da177e4 | 1486 | /* Stop the DMA, and wait for it to stop */ |
f4983704 | 1487 | tempval = gfar_read(®s->dmactrl); |
1da177e4 LT |
1488 | if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) |
1489 | != (DMACTRL_GRS | DMACTRL_GTS)) { | |
1490 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); | |
f4983704 | 1491 | gfar_write(®s->dmactrl, tempval); |
1da177e4 | 1492 | |
f4983704 | 1493 | while (!(gfar_read(®s->ievent) & |
1da177e4 LT |
1494 | (IEVENT_GRSC | IEVENT_GTSC))) |
1495 | cpu_relax(); | |
1496 | } | |
d87eb127 | 1497 | } |
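/*
 * The graceful-stop handshake above, summarized step by step (a
 * reading of the code, not a quote from the reference manual):
 *
 *	1. set DMACTRL[GRS,GTS]    -- request graceful rx/tx stop
 *	2. poll IEVENT[GRSC,GTSC]  -- wait for the controller to ack
 *
 * gfar_start() later clears GRS/GTS again to let the DMA resume.
 */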
d87eb127 SW |
1498 | |
1499 | /* Halt the receive and transmit queues */ | |
1500 | void gfar_halt(struct net_device *dev) | |
1501 | { | |
1502 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 1503 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
d87eb127 | 1504 | u32 tempval; |
1da177e4 | 1505 | |
2a54adc3 SW |
1506 | gfar_halt_nodisable(dev); |
1507 | ||
1da177e4 LT |
1508 | /* Disable Rx and Tx */ |
1509 | tempval = gfar_read(®s->maccfg1); | |
1510 | tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); | |
1511 | gfar_write(®s->maccfg1, tempval); | |
0bbaf069 KG |
1512 | } |
1513 | ||
46ceb60c SG |
1514 | static void free_grp_irqs(struct gfar_priv_grp *grp) |
1515 | { | |
1516 | free_irq(grp->interruptError, grp); | |
1517 | free_irq(grp->interruptTransmit, grp); | |
1518 | free_irq(grp->interruptReceive, grp); | |
1519 | } | |
1520 | ||
0bbaf069 KG |
1521 | void stop_gfar(struct net_device *dev) |
1522 | { | |
1523 | struct gfar_private *priv = netdev_priv(dev); | |
0bbaf069 | 1524 | unsigned long flags; |
46ceb60c | 1525 | int i; |
0bbaf069 | 1526 | |
bb40dcbb AF |
1527 | phy_stop(priv->phydev); |
1528 | ||
a12f801d | 1529 | |
0bbaf069 | 1530 | /* Lock it down */ |
fba4ed03 SG |
1531 | local_irq_save(flags); |
1532 | lock_tx_qs(priv); | |
1533 | lock_rx_qs(priv); | |
0bbaf069 | 1534 | |
0bbaf069 | 1535 | gfar_halt(dev); |
1da177e4 | 1536 | |
fba4ed03 SG |
1537 | unlock_rx_qs(priv); |
1538 | unlock_tx_qs(priv); | |
1539 | local_irq_restore(flags); | |
1da177e4 LT |
1540 | |
1541 | /* Free the IRQs */ | |
b31a1d8b | 1542 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
46ceb60c SG |
1543 | for (i = 0; i < priv->num_grps; i++) |
1544 | free_grp_irqs(&priv->gfargrp[i]); | |
1da177e4 | 1545 | } else { |
46ceb60c SG |
1546 | for (i = 0; i < priv->num_grps; i++) |
1547 | free_irq(priv->gfargrp[i].interruptTransmit, | |
1548 | &priv->gfargrp[i]); | |
1da177e4 LT |
1549 | } |
1550 | ||
1551 | free_skb_resources(priv); | |
1da177e4 LT |
1552 | } |
1553 | ||
fba4ed03 | 1554 | static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) |
1da177e4 | 1555 | { |
1da177e4 | 1556 | struct txbd8 *txbdp; |
fba4ed03 | 1557 | struct gfar_private *priv = netdev_priv(tx_queue->dev); |
4669bc90 | 1558 | int i, j; |
1da177e4 | 1559 | |
a12f801d | 1560 | txbdp = tx_queue->tx_bd_base; |
1da177e4 | 1561 | |
a12f801d SG |
1562 | for (i = 0; i < tx_queue->tx_ring_size; i++) { |
1563 | if (!tx_queue->tx_skbuff[i]) | |
4669bc90 | 1564 | continue; |
1da177e4 | 1565 | |
4826857f | 1566 | dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, |
4669bc90 DH |
1567 | txbdp->length, DMA_TO_DEVICE); |
1568 | txbdp->lstatus = 0; | |
fba4ed03 SG |
1569 | for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; |
1570 | j++) { | |
4669bc90 | 1571 | txbdp++; |
4826857f | 1572 | dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, |
4669bc90 | 1573 | txbdp->length, DMA_TO_DEVICE); |
1da177e4 | 1574 | } |
ad5da7ab | 1575 | txbdp++; |
a12f801d SG |
1576 | dev_kfree_skb_any(tx_queue->tx_skbuff[i]); |
1577 | tx_queue->tx_skbuff[i] = NULL; | |
1da177e4 | 1578 | } |
a12f801d | 1579 | kfree(tx_queue->tx_skbuff); |
fba4ed03 | 1580 | } |
1da177e4 | 1581 | |
fba4ed03 SG |
1582 | static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) |
1583 | { | |
1584 | struct rxbd8 *rxbdp; | |
1585 | struct gfar_private *priv = netdev_priv(rx_queue->dev); | |
1586 | int i; | |
1da177e4 | 1587 | |
fba4ed03 | 1588 | rxbdp = rx_queue->rx_bd_base; |
1da177e4 | 1589 | |
a12f801d SG |
1590 | for (i = 0; i < rx_queue->rx_ring_size; i++) { |
1591 | if (rx_queue->rx_skbuff[i]) { | |
fba4ed03 SG |
1592 | dma_unmap_single(&priv->ofdev->dev, |
1593 | rxbdp->bufPtr, priv->rx_buffer_size, | |
e69edd21 | 1594 | DMA_FROM_DEVICE); |
a12f801d SG |
1595 | dev_kfree_skb_any(rx_queue->rx_skbuff[i]); |
1596 | rx_queue->rx_skbuff[i] = NULL; | |
1da177e4 | 1597 | } |
e69edd21 AV |
1598 | rxbdp->lstatus = 0; |
1599 | rxbdp->bufPtr = 0; | |
1600 | rxbdp++; | |
1da177e4 | 1601 | } |
a12f801d | 1602 | kfree(rx_queue->rx_skbuff); |
fba4ed03 | 1603 | } |
e69edd21 | 1604 | |
fba4ed03 SG |
1605 | /* If there are any tx skbs or rx skbs still around, free them. |
1606 | * Then free tx_skbuff and rx_skbuff */ | |
1607 | static void free_skb_resources(struct gfar_private *priv) | |
1608 | { | |
1609 | struct gfar_priv_tx_q *tx_queue = NULL; | |
1610 | struct gfar_priv_rx_q *rx_queue = NULL; | |
1611 | int i; | |
1612 | ||
1613 | /* Go through all the buffer descriptors and free their data buffers */ | |
1614 | for (i = 0; i < priv->num_tx_queues; i++) { | |
1615 | tx_queue = priv->tx_queue[i]; | |
1616 | if (tx_queue->tx_skbuff)
1617 | free_skb_tx_queue(tx_queue); | |
1618 | } | |
1619 | ||
1620 | for (i = 0; i < priv->num_rx_queues; i++) { | |
1621 | rx_queue = priv->rx_queue[i]; | |
1622 | if (rx_queue->rx_skbuff)
1623 | free_skb_rx_queue(rx_queue); | |
1624 | } | |
1625 | ||
1626 | dma_free_coherent(&priv->ofdev->dev, | |
1627 | sizeof(struct txbd8) * priv->total_tx_ring_size + | |
1628 | sizeof(struct rxbd8) * priv->total_rx_ring_size, | |
1629 | priv->tx_queue[0]->tx_bd_base, | |
1630 | priv->tx_queue[0]->tx_bd_dma_base); | |
1da177e4 LT |
1631 | } |
1632 | ||
0bbaf069 KG |
1633 | void gfar_start(struct net_device *dev) |
1634 | { | |
1635 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 1636 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
0bbaf069 | 1637 | u32 tempval; |
46ceb60c | 1638 | int i = 0; |
0bbaf069 KG |
1639 | |
1640 | /* Enable Rx and Tx in MACCFG1 */ | |
1641 | tempval = gfar_read(®s->maccfg1); | |
1642 | tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); | |
1643 | gfar_write(®s->maccfg1, tempval); | |
1644 | ||
1645 | /* Initialize DMACTRL to have WWR and WOP */ | |
f4983704 | 1646 | tempval = gfar_read(®s->dmactrl); |
0bbaf069 | 1647 | tempval |= DMACTRL_INIT_SETTINGS; |
f4983704 | 1648 | gfar_write(®s->dmactrl, tempval); |
0bbaf069 | 1649 | |
0bbaf069 | 1650 | /* Make sure we aren't stopped */ |
f4983704 | 1651 | tempval = gfar_read(®s->dmactrl); |
0bbaf069 | 1652 | tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); |
f4983704 | 1653 | gfar_write(®s->dmactrl, tempval); |
0bbaf069 | 1654 | |
46ceb60c SG |
1655 | for (i = 0; i < priv->num_grps; i++) { |
1656 | regs = priv->gfargrp[i].regs; | |
1657 | /* Clear THLT/RHLT, so that the DMA starts polling now */ | |
1658 | gfar_write(®s->tstat, priv->gfargrp[i].tstat); | |
1659 | gfar_write(®s->rstat, priv->gfargrp[i].rstat); | |
1660 | /* Unmask the interrupts we look for */ | |
1661 | gfar_write(®s->imask, IMASK_DEFAULT); | |
1662 | } | |
12dea57b DH |
1663 | |
1664 | dev->trans_start = jiffies; | |
0bbaf069 KG |
1665 | } |
1666 | ||
46ceb60c | 1667 | void gfar_configure_coalescing(struct gfar_private *priv, |
18294ad1 | 1668 | unsigned long tx_mask, unsigned long rx_mask) |
1da177e4 | 1669 | { |
46ceb60c | 1670 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
18294ad1 | 1671 | u32 __iomem *baddr; |
46ceb60c | 1672 | int i = 0; |
1da177e4 | 1673 | |
46ceb60c SG |
1674 | /* Backward compatible case -- even if we enable
1675 | * multiple queues, there is only a single register to program
1676 | */ | |
1677 | gfar_write(®s->txic, 0); | |
1678 | if (likely(priv->tx_queue[0]->txcoalescing))
1679 | gfar_write(®s->txic, priv->tx_queue[0]->txic); | |
1da177e4 | 1680 | |
46ceb60c SG |
1681 | gfar_write(®s->rxic, 0); |
1682 | if (unlikely(priv->rx_queue[0]->rxcoalescing))
1683 | gfar_write(®s->rxic, priv->rx_queue[0]->rxic); | |
815b97c6 | 1684 | |
46ceb60c SG |
1685 | if (priv->mode == MQ_MG_MODE) { |
1686 | baddr = ®s->txic0; | |
1687 | for_each_bit(i, &tx_mask, priv->num_tx_queues) {
1688 | if (likely(priv->tx_queue[i]->txcoalescing)) { | |
1689 | gfar_write(baddr + i, 0); | |
1690 | gfar_write(baddr + i, priv->tx_queue[i]->txic); | |
1691 | } | |
1692 | } | |
1693 | ||
1694 | baddr = ®s->rxic0; | |
1695 | for_each_bit(i, &rx_mask, priv->num_rx_queues) {
1696 | if (likely(priv->rx_queue[i]->rxcoalescing)) { | |
1697 | gfar_write(baddr + i, 0); | |
1698 | gfar_write(baddr + i, priv->rx_queue[i]->rxic); | |
1699 | } | |
1700 | } | |
1701 | } | |
1702 | } | |
1703 | ||
1704 | static int register_grp_irqs(struct gfar_priv_grp *grp) | |
1705 | { | |
1706 | struct gfar_private *priv = grp->priv; | |
1707 | struct net_device *dev = priv->ndev; | |
1708 | int err; | |
1da177e4 | 1709 | |
1da177e4 LT |
1710 | /* If the device has multiple interrupts, register for |
1711 | * them. Otherwise, only register for the one */ | |
b31a1d8b | 1712 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
0bbaf069 | 1713 | /* Install our interrupt handlers for Error, |
1da177e4 | 1714 | * Transmit, and Receive */ |
46ceb60c SG |
1715 | if ((err = request_irq(grp->interruptError, gfar_error, 0, |
1716 | grp->int_name_er, grp)) < 0) {
0bbaf069 | 1717 | if (netif_msg_intr(priv)) |
46ceb60c SG |
1718 | printk(KERN_ERR "%s: Can't get IRQ %d\n", |
1719 | dev->name, grp->interruptError); | |
1720 | ||
1721 | goto err_irq_fail; | |
1da177e4 LT |
1722 | } |
1723 | ||
46ceb60c SG |
1724 | if ((err = request_irq(grp->interruptTransmit, gfar_transmit, |
1725 | 0, grp->int_name_tx, grp)) < 0) { | |
0bbaf069 | 1726 | if (netif_msg_intr(priv)) |
46ceb60c SG |
1727 | printk(KERN_ERR "%s: Can't get IRQ %d\n", |
1728 | dev->name, grp->interruptTransmit); | |
1da177e4 LT |
1729 | goto tx_irq_fail; |
1730 | } | |
1731 | ||
46ceb60c SG |
1732 | if ((err = request_irq(grp->interruptReceive, gfar_receive, 0, |
1733 | grp->int_name_rx, grp)) < 0) { | |
0bbaf069 | 1734 | if (netif_msg_intr(priv)) |
46ceb60c SG |
1735 | printk(KERN_ERR "%s: Can't get IRQ %d\n", |
1736 | dev->name, grp->interruptReceive); | |
1da177e4 LT |
1737 | goto rx_irq_fail; |
1738 | } | |
1739 | } else { | |
46ceb60c SG |
1740 | if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0, |
1741 | grp->int_name_tx, grp)) < 0) { | |
0bbaf069 | 1742 | if (netif_msg_intr(priv)) |
46ceb60c SG |
1743 | printk(KERN_ERR "%s: Can't get IRQ %d\n", |
1744 | dev->name, grp->interruptTransmit); | |
1da177e4 LT |
1745 | goto err_irq_fail; |
1746 | } | |
1747 | } | |
1748 | ||
46ceb60c SG |
1749 | return 0; |
1750 | ||
1751 | rx_irq_fail: | |
1752 | free_irq(grp->interruptTransmit, grp); | |
1753 | tx_irq_fail: | |
1754 | free_irq(grp->interruptError, grp); | |
1755 | err_irq_fail: | |
1756 | return err; | |
1757 | ||
1758 | } | |
1759 | ||
1760 | /* Bring the controller up and running */ | |
1761 | int startup_gfar(struct net_device *ndev) | |
1762 | { | |
1763 | struct gfar_private *priv = netdev_priv(ndev); | |
1764 | struct gfar __iomem *regs = NULL; | |
1765 | int err, i, j; | |
1766 | ||
1767 | for (i = 0; i < priv->num_grps; i++) { | |
1768 | regs = priv->gfargrp[i].regs;
1769 | gfar_write(®s->imask, IMASK_INIT_CLEAR); | |
1770 | } | |
1771 | ||
1772 | regs = priv->gfargrp[0].regs;
1773 | err = gfar_alloc_skb_resources(ndev); | |
1774 | if (err) | |
1775 | return err; | |
1776 | ||
1777 | gfar_init_mac(ndev); | |
1778 | ||
1779 | for (i = 0; i < priv->num_grps; i++) { | |
1780 | err = register_grp_irqs(&priv->gfargrp[i]); | |
1781 | if (err) { | |
1782 | for (j = 0; j < i; j++) | |
1783 | free_grp_irqs(&priv->gfargrp[j]); | |
1784 | goto irq_fail; | |
1785 | } | |
1786 | } | |
1787 | ||
7f7f5316 | 1788 | /* Start the controller */ |
ccc05c6e | 1789 | gfar_start(ndev); |
1da177e4 | 1790 | |
826aa4a0 AV |
1791 | phy_start(priv->phydev); |
1792 | ||
46ceb60c SG |
1793 | gfar_configure_coalescing(priv, 0xFF, 0xFF); |
1794 | ||
1da177e4 LT |
1795 | return 0; |
1796 | ||
46ceb60c | 1797 | irq_fail: |
e69edd21 | 1798 | free_skb_resources(priv); |
1da177e4 LT |
1799 | return err; |
1800 | } | |
1801 | ||
1802 | /* Called when something needs to use the ethernet device */ | |
1803 | /* Returns 0 for success. */ | |
1804 | static int gfar_enet_open(struct net_device *dev) | |
1805 | { | |
94e8cc35 | 1806 | struct gfar_private *priv = netdev_priv(dev); |
1da177e4 LT |
1807 | int err; |
1808 | ||
46ceb60c | 1809 | enable_napi(priv); |
bea3348e | 1810 | |
0fd56bb5 AF |
1811 | skb_queue_head_init(&priv->rx_recycle); |
1812 | ||
1da177e4 LT |
1813 | /* Initialize a bunch of registers */ |
1814 | init_registers(dev); | |
1815 | ||
1816 | gfar_set_mac_address(dev); | |
1817 | ||
1818 | err = init_phy(dev); | |
1819 | ||
a12f801d | 1820 | if (err) { |
46ceb60c | 1821 | disable_napi(priv); |
1da177e4 | 1822 | return err; |
bea3348e | 1823 | } |
1da177e4 LT |
1824 | |
1825 | err = startup_gfar(dev); | |
db0e8e3f | 1826 | if (err) { |
46ceb60c | 1827 | disable_napi(priv); |
db0e8e3f AV |
1828 | return err; |
1829 | } | |
1da177e4 | 1830 | |
fba4ed03 | 1831 | netif_tx_start_all_queues(dev); |
1da177e4 | 1832 | |
2884e5cc AV |
1833 | device_set_wakeup_enable(&dev->dev, priv->wol_en); |
1834 | ||
1da177e4 LT |
1835 | return err; |
1836 | } | |
1837 | ||
54dc79fe | 1838 | static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) |
0bbaf069 | 1839 | { |
54dc79fe | 1840 | struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN); |
6c31d55f KG |
1841 | |
1842 | memset(fcb, 0, GMAC_FCB_LEN); | |
0bbaf069 | 1843 | |
0bbaf069 KG |
1844 | return fcb; |
1845 | } | |
1846 | ||
1847 | static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb) | |
1848 | { | |
7f7f5316 | 1849 | u8 flags = 0; |
0bbaf069 KG |
1850 | |
1851 | /* If we're here, it's an IP packet with a TCP or UDP
1852 | * payload. We set it to checksum, using a pseudo-header | |
1853 | * we provide | |
1854 | */ | |
7f7f5316 | 1855 | flags = TXFCB_DEFAULT; |
0bbaf069 | 1856 | |
7f7f5316 AF |
1857 | /* Tell the controller what the protocol is */ |
1858 | /* And provide the already calculated phcs */ | |
eddc9ec5 | 1859 | if (ip_hdr(skb)->protocol == IPPROTO_UDP) { |
7f7f5316 | 1860 | flags |= TXFCB_UDP; |
4bedb452 | 1861 | fcb->phcs = udp_hdr(skb)->check; |
7f7f5316 | 1862 | } else |
8da32de5 | 1863 | fcb->phcs = tcp_hdr(skb)->check; |
0bbaf069 KG |
1864 | |
1865 | /* l3os is the distance between the start of the | |
1866 | * frame (skb->data) and the start of the IP hdr. | |
1867 | * l4os is the distance between the start of the | |
1868 | * l3 hdr and the l4 hdr */ | |
bbe735e4 | 1869 | fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN); |
cfe1fc77 | 1870 | fcb->l4os = skb_network_header_len(skb); |
0bbaf069 | 1871 | |
7f7f5316 | 1872 | fcb->flags = flags; |
0bbaf069 KG |
1873 | } |
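/*
 * Illustrative layout of an untagged IPv4 frame after gfar_add_fcb()
 * has pushed the 8-byte FCB (a sketch assuming a 14-byte Ethernet
 * header and an option-less 20-byte IP header):
 *
 *	skb->data -> | TxFCB (8) | Ethernet (14) | IP (20) | TCP/UDP ...
 *
 * l3os is measured from the byte after the FCB, so it is 14 here,
 * and l4os is the IP header length, so it is 20.
 */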
1874 | ||
7f7f5316 | 1875 | inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
0bbaf069 | 1876 | { |
7f7f5316 | 1877 | fcb->flags |= TXFCB_VLN; |
0bbaf069 KG |
1878 | fcb->vlctl = vlan_tx_tag_get(skb); |
1879 | } | |
1880 | ||
4669bc90 DH |
1881 | static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, |
1882 | struct txbd8 *base, int ring_size) | |
1883 | { | |
1884 | struct txbd8 *new_bd = bdp + stride; | |
1885 | ||
1886 | return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; | |
1887 | } | |
1888 | ||
1889 | static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, | |
1890 | int ring_size) | |
1891 | { | |
1892 | return skip_txbd(bdp, 1, base, ring_size); | |
1893 | } | |
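/*
 * Worked example of the wrap-around above, for a ring of size 8 based
 * at `base` (numbers purely illustrative):
 *
 *	bdp = base + 6, stride = 3  ->  new_bd = base + 9
 *	base + 9 >= base + 8        ->  return base + 1
 *
 * i.e. stepping three descriptors forward from slot 6 lands on slot 1.
 */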
1894 | ||
1da177e4 LT |
1895 | /* This is called by the kernel when a frame is ready for transmission. */ |
1896 | /* It is pointed to by the dev->hard_start_xmit function pointer */ | |
1897 | static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
1898 | { | |
1899 | struct gfar_private *priv = netdev_priv(dev); | |
a12f801d | 1900 | struct gfar_priv_tx_q *tx_queue = NULL; |
fba4ed03 | 1901 | struct netdev_queue *txq; |
f4983704 | 1902 | struct gfar __iomem *regs = NULL; |
0bbaf069 | 1903 | struct txfcb *fcb = NULL; |
4669bc90 | 1904 | struct txbd8 *txbdp, *txbdp_start, *base; |
5a5efed4 | 1905 | u32 lstatus; |
fba4ed03 | 1906 | int i, rq = 0; |
4669bc90 | 1907 | u32 bufaddr; |
fef6108d | 1908 | unsigned long flags; |
4669bc90 DH |
1909 | unsigned int nr_frags, length; |
1910 | ||
fba4ed03 SG |
1911 | |
1912 | rq = skb->queue_mapping; | |
1913 | tx_queue = priv->tx_queue[rq]; | |
1914 | txq = netdev_get_tx_queue(dev, rq); | |
a12f801d | 1915 | base = tx_queue->tx_bd_base; |
46ceb60c | 1916 | regs = tx_queue->grp->regs; |
4669bc90 | 1917 | |
5b28beaf LY |
1918 | /* make space for the additional header when an FCB is needed */
1919 | if (((skb->ip_summed == CHECKSUM_PARTIAL) || | |
1920 | (priv->vlgrp && vlan_tx_tag_present(skb))) && | |
1921 | (skb_headroom(skb) < GMAC_FCB_LEN)) { | |
54dc79fe SH |
1922 | struct sk_buff *skb_new; |
1923 | ||
1924 | skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN); | |
1925 | if (!skb_new) { | |
1926 | dev->stats.tx_errors++; | |
bd14ba84 | 1927 | kfree_skb(skb); |
54dc79fe SH |
1928 | return NETDEV_TX_OK; |
1929 | } | |
1930 | kfree_skb(skb); | |
1931 | skb = skb_new; | |
1932 | } | |
1933 | ||
4669bc90 DH |
1934 | /* total number of fragments in the SKB */ |
1935 | nr_frags = skb_shinfo(skb)->nr_frags; | |
1936 | ||
4669bc90 | 1937 | /* check if there is space to queue this packet */ |
a12f801d | 1938 | if ((nr_frags+1) > tx_queue->num_txbdfree) { |
4669bc90 | 1939 | /* no space, stop the queue */ |
fba4ed03 | 1940 | netif_tx_stop_queue(txq); |
4669bc90 | 1941 | dev->stats.tx_fifo_errors++; |
4669bc90 DH |
1942 | return NETDEV_TX_BUSY; |
1943 | } | |
1da177e4 LT |
1944 | |
1945 | /* Update transmit stats */ | |
09f75cd7 | 1946 | dev->stats.tx_bytes += skb->len; |
1da177e4 | 1947 | |
a12f801d | 1948 | txbdp = txbdp_start = tx_queue->cur_tx; |
1da177e4 | 1949 | |
4669bc90 DH |
1950 | if (nr_frags == 0) { |
1951 | lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | |
1952 | } else { | |
1953 | /* Place the fragment addresses and lengths into the TxBDs */ | |
1954 | for (i = 0; i < nr_frags; i++) { | |
1955 | /* Point at the next BD, wrapping as needed */ | |
a12f801d | 1956 | txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
4669bc90 DH |
1957 | |
1958 | length = skb_shinfo(skb)->frags[i].size; | |
1959 | ||
1960 | lstatus = txbdp->lstatus | length | | |
1961 | BD_LFLAG(TXBD_READY); | |
1962 | ||
1963 | /* Handle the last BD specially */ | |
1964 | if (i == nr_frags - 1) | |
1965 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | |
1da177e4 | 1966 | |
4826857f | 1967 | bufaddr = dma_map_page(&priv->ofdev->dev, |
4669bc90 DH |
1968 | skb_shinfo(skb)->frags[i].page, |
1969 | skb_shinfo(skb)->frags[i].page_offset, | |
1970 | length, | |
1971 | DMA_TO_DEVICE); | |
1972 | ||
1973 | /* set the TxBD length and buffer pointer */ | |
1974 | txbdp->bufPtr = bufaddr; | |
1975 | txbdp->lstatus = lstatus; | |
1976 | } | |
1977 | ||
1978 | lstatus = txbdp_start->lstatus; | |
1979 | } | |
1da177e4 | 1980 | |
0bbaf069 | 1981 | /* Set up checksumming */ |
12dea57b | 1982 | if (CHECKSUM_PARTIAL == skb->ip_summed) { |
54dc79fe SH |
1983 | fcb = gfar_add_fcb(skb); |
1984 | lstatus |= BD_LFLAG(TXBD_TOE); | |
1985 | gfar_tx_checksum(skb, fcb); | |
0bbaf069 KG |
1986 | } |
1987 | ||
77ecaf2d | 1988 | if (priv->vlgrp && vlan_tx_tag_present(skb)) { |
54dc79fe SH |
1989 | if (unlikely(NULL == fcb)) { |
1990 | fcb = gfar_add_fcb(skb); | |
5a5efed4 | 1991 | lstatus |= BD_LFLAG(TXBD_TOE); |
7f7f5316 | 1992 | } |
54dc79fe SH |
1993 | |
1994 | gfar_tx_vlan(skb, fcb); | |
0bbaf069 KG |
1995 | } |
1996 | ||
4669bc90 | 1997 | /* setup the TxBD length and buffer pointer for the first BD */ |
a12f801d | 1998 | tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; |
4826857f | 1999 | txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, |
4669bc90 | 2000 | skb_headlen(skb), DMA_TO_DEVICE); |
1da177e4 | 2001 | |
4669bc90 | 2002 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); |
1da177e4 | 2003 | |
a3bc1f11 AV |
2004 | /* |
2005 | * We can work in parallel with gfar_clean_tx_ring(), except | |
2006 | * when modifying num_txbdfree. Note that we didn't grab the lock | |
2007 | * when we were reading the num_txbdfree and checking for available | |
2008 | * space, that's because outside of this function it can only grow, | |
2009 | * and once we've got needed space, it cannot suddenly disappear. | |
2010 | * | |
2011 | * The lock also protects us from gfar_error(), which can modify | |
2012 | * regs->tstat and thus retrigger the transfers, which is why we | |
2013 | * also must grab the lock before setting ready bit for the first | |
2014 | * to be transmitted BD. | |
2015 | */ | |
2016 | spin_lock_irqsave(&tx_queue->txlock, flags); | |
2017 | ||
4669bc90 DH |
2018 | /* |
2019 | * The powerpc-specific eieio() is used, as wmb() has too strong | |
3b6330ce SW |
2020 | * semantics (it requires synchronization between cacheable and |
2021 | * uncacheable mappings, which eieio doesn't provide and which we | |
2022 | * don't need), thus requiring a more expensive sync instruction. At | |
2023 | * some point, the set of architecture-independent barrier functions | |
2024 | * should be expanded to include weaker barriers. | |
2025 | */ | |
3b6330ce | 2026 | eieio(); |
7f7f5316 | 2027 | |
4669bc90 DH |
2028 | txbdp_start->lstatus = lstatus; |
2029 | ||
2030 | /* Update the current skb pointer to the next entry we will use | |
2031 | * (wrapping if necessary) */ | |
a12f801d SG |
2032 | tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & |
2033 | TX_RING_MOD_MASK(tx_queue->tx_ring_size); | |
4669bc90 | 2034 | |
a12f801d | 2035 | tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
4669bc90 DH |
2036 | |
2037 | /* reduce TxBD free count */ | |
a12f801d | 2038 | tx_queue->num_txbdfree -= (nr_frags + 1); |
4669bc90 DH |
2039 | |
2040 | dev->trans_start = jiffies; | |
1da177e4 LT |
2041 | |
2042 | /* If the next BD still needs to be cleaned up, then the bds | |
2043 | are full. We need to tell the kernel to stop sending us stuff. */ | |
a12f801d | 2044 | if (!tx_queue->num_txbdfree) { |
fba4ed03 | 2045 | netif_tx_stop_queue(txq); |
1da177e4 | 2046 | |
09f75cd7 | 2047 | dev->stats.tx_fifo_errors++; |
1da177e4 LT |
2048 | } |
2049 | ||
1da177e4 | 2050 | /* Tell the DMA to go go go */ |
fba4ed03 | 2051 | gfar_write(®s->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); |
1da177e4 LT |
2052 | |
2053 | /* Unlock priv */ | |
a12f801d | 2054 | spin_unlock_irqrestore(&tx_queue->txlock, flags); |
1da177e4 | 2055 | |
54dc79fe | 2056 | return NETDEV_TX_OK; |
1da177e4 LT |
2057 | } |
2058 | ||
2059 | /* Stops the kernel queue, and halts the controller */ | |
2060 | static int gfar_close(struct net_device *dev) | |
2061 | { | |
2062 | struct gfar_private *priv = netdev_priv(dev); | |
bea3348e | 2063 | |
46ceb60c | 2064 | disable_napi(priv); |
bea3348e | 2065 | |
0fd56bb5 | 2066 | skb_queue_purge(&priv->rx_recycle); |
ab939905 | 2067 | cancel_work_sync(&priv->reset_task); |
1da177e4 LT |
2068 | stop_gfar(dev); |
2069 | ||
bb40dcbb AF |
2070 | /* Disconnect from the PHY */ |
2071 | phy_disconnect(priv->phydev); | |
2072 | priv->phydev = NULL; | |
1da177e4 | 2073 | |
fba4ed03 | 2074 | netif_tx_stop_all_queues(dev); |
1da177e4 LT |
2075 | |
2076 | return 0; | |
2077 | } | |
2078 | ||
1da177e4 | 2079 | /* Changes the mac address if the controller is not running. */ |
f162b9d5 | 2080 | static int gfar_set_mac_address(struct net_device *dev) |
1da177e4 | 2081 | { |
7f7f5316 | 2082 | gfar_set_mac_for_addr(dev, 0, dev->dev_addr); |
1da177e4 LT |
2083 | |
2084 | return 0; | |
2085 | } | |
2086 | ||
2087 | ||
0bbaf069 KG |
2088 | /* Enables and disables VLAN insertion/extraction */ |
2089 | static void gfar_vlan_rx_register(struct net_device *dev, | |
2090 | struct vlan_group *grp) | |
2091 | { | |
2092 | struct gfar_private *priv = netdev_priv(dev); | |
f4983704 | 2093 | struct gfar __iomem *regs = NULL; |
0bbaf069 KG |
2094 | unsigned long flags; |
2095 | u32 tempval; | |
2096 | ||
46ceb60c | 2097 | regs = priv->gfargrp[0].regs; |
fba4ed03 SG |
2098 | local_irq_save(flags); |
2099 | lock_rx_qs(priv); | |
0bbaf069 | 2100 | |
cd1f55a5 | 2101 | priv->vlgrp = grp; |
0bbaf069 KG |
2102 | |
2103 | if (grp) { | |
2104 | /* Enable VLAN tag insertion */ | |
f4983704 | 2105 | tempval = gfar_read(®s->tctrl); |
0bbaf069 KG |
2106 | tempval |= TCTRL_VLINS; |
2107 | ||
f4983704 | 2108 | gfar_write(®s->tctrl, tempval); |
6aa20a22 | 2109 | |
0bbaf069 | 2110 | /* Enable VLAN tag extraction */ |
f4983704 | 2111 | tempval = gfar_read(®s->rctrl); |
77ecaf2d | 2112 | tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); |
f4983704 | 2113 | gfar_write(®s->rctrl, tempval); |
0bbaf069 KG |
2114 | } else { |
2115 | /* Disable VLAN tag insertion */ | |
f4983704 | 2116 | tempval = gfar_read(®s->tctrl); |
0bbaf069 | 2117 | tempval &= ~TCTRL_VLINS; |
f4983704 | 2118 | gfar_write(®s->tctrl, tempval); |
0bbaf069 KG |
2119 | |
2120 | /* Disable VLAN tag extraction */ | |
f4983704 | 2121 | tempval = gfar_read(®s->rctrl); |
0bbaf069 | 2122 | tempval &= ~RCTRL_VLEX; |
77ecaf2d DH |
2123 | /* Keep the parser enabled only if some other feature still needs it */
2124 | if (tempval & RCTRL_REQ_PARSER) | |
2125 | tempval |= RCTRL_PRSDEP_INIT; | |
2126 | else | |
2127 | tempval &= ~RCTRL_PRSDEP_INIT; | |
f4983704 | 2128 | gfar_write(®s->rctrl, tempval); |
0bbaf069 KG |
2129 | } |
2130 | ||
77ecaf2d DH |
2131 | gfar_change_mtu(dev, dev->mtu); |
2132 | ||
fba4ed03 SG |
2133 | unlock_rx_qs(priv); |
2134 | local_irq_restore(flags); | |
0bbaf069 KG |
2135 | } |
2136 | ||
1da177e4 LT |
2137 | static int gfar_change_mtu(struct net_device *dev, int new_mtu) |
2138 | { | |
2139 | int tempsize, tempval; | |
2140 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 2141 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
1da177e4 | 2142 | int oldsize = priv->rx_buffer_size; |
0bbaf069 KG |
2143 | int frame_size = new_mtu + ETH_HLEN; |
2144 | ||
77ecaf2d | 2145 | if (priv->vlgrp) |
faa89577 | 2146 | frame_size += VLAN_HLEN; |
0bbaf069 | 2147 | |
1da177e4 | 2148 | if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { |
0bbaf069 KG |
2149 | if (netif_msg_drv(priv)) |
2150 | printk(KERN_ERR "%s: Invalid MTU setting\n", | |
2151 | dev->name); | |
1da177e4 LT |
2152 | return -EINVAL; |
2153 | } | |
2154 | ||
77ecaf2d DH |
2155 | if (gfar_uses_fcb(priv)) |
2156 | frame_size += GMAC_FCB_LEN; | |
2157 | ||
2158 | frame_size += priv->padding; | |
2159 | ||
1da177e4 LT |
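/*
 * This rounds up to the next INCREMENTAL_BUFFER_SIZE boundary; e.g.
 * with 512-byte increments a 1518-byte frame gives
 * (1518 & ~511) + 512 = 1536. An exact multiple still gains a whole
 * extra increment.
 */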
2160 | tempsize = |
2161 | (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + | |
2162 | INCREMENTAL_BUFFER_SIZE; | |
2163 | ||
2164 | /* Only stop and start the controller if it isn't already | |
7f7f5316 | 2165 | * stopped, and we changed something */ |
1da177e4 LT |
2166 | if ((oldsize != tempsize) && (dev->flags & IFF_UP)) |
2167 | stop_gfar(dev); | |
2168 | ||
2169 | priv->rx_buffer_size = tempsize; | |
2170 | ||
2171 | dev->mtu = new_mtu; | |
2172 | ||
f4983704 SG |
2173 | gfar_write(®s->mrblr, priv->rx_buffer_size); |
2174 | gfar_write(®s->maxfrm, priv->rx_buffer_size); | |
1da177e4 LT |
2175 | |
2176 | /* If the mtu is larger than the max size for standard | |
2177 | * ethernet frames (ie, a jumbo frame), then set maccfg2 | |
2178 | * to allow huge frames, and to check the length */ | |
f4983704 | 2179 | tempval = gfar_read(®s->maccfg2); |
1da177e4 LT |
2180 | |
2181 | if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE) | |
2182 | tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); | |
2183 | else | |
2184 | tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); | |
2185 | ||
f4983704 | 2186 | gfar_write(®s->maccfg2, tempval); |
1da177e4 LT |
2187 | |
2188 | if ((oldsize != tempsize) && (dev->flags & IFF_UP)) | |
2189 | startup_gfar(dev); | |
2190 | ||
2191 | return 0; | |
2192 | } | |
2193 | ||
ab939905 | 2194 | /* gfar_reset_task gets scheduled when a packet has not been |
1da177e4 LT |
2195 | * transmitted after a set amount of time. |
2196 | * For now, assume that clearing out all the structures, and | |
ab939905 SS |
2197 | * starting over will fix the problem. |
2198 | */ | |
2199 | static void gfar_reset_task(struct work_struct *work) | |
1da177e4 | 2200 | { |
ab939905 SS |
2201 | struct gfar_private *priv = container_of(work, struct gfar_private, |
2202 | reset_task); | |
4826857f | 2203 | struct net_device *dev = priv->ndev; |
1da177e4 LT |
2204 | |
2205 | if (dev->flags & IFF_UP) { | |
fba4ed03 | 2206 | netif_tx_stop_all_queues(dev); |
1da177e4 LT |
2207 | stop_gfar(dev); |
2208 | startup_gfar(dev); | |
fba4ed03 | 2209 | netif_tx_start_all_queues(dev); |
1da177e4 LT |
2210 | } |
2211 | ||
263ba320 | 2212 | netif_tx_schedule_all(dev); |
1da177e4 LT |
2213 | } |
2214 | ||
ab939905 SS |
2215 | static void gfar_timeout(struct net_device *dev) |
2216 | { | |
2217 | struct gfar_private *priv = netdev_priv(dev); | |
2218 | ||
2219 | dev->stats.tx_errors++; | |
2220 | schedule_work(&priv->reset_task); | |
2221 | } | |
2222 | ||
1da177e4 | 2223 | /* Interrupt Handler for Transmit complete */ |
a12f801d | 2224 | static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) |
1da177e4 | 2225 | { |
a12f801d | 2226 | struct net_device *dev = tx_queue->dev; |
d080cd63 | 2227 | struct gfar_private *priv = netdev_priv(dev); |
a12f801d | 2228 | struct gfar_priv_rx_q *rx_queue = NULL; |
4669bc90 DH |
2229 | struct txbd8 *bdp; |
2230 | struct txbd8 *lbdp = NULL; | |
a12f801d | 2231 | struct txbd8 *base = tx_queue->tx_bd_base; |
4669bc90 DH |
2232 | struct sk_buff *skb; |
2233 | int skb_dirtytx; | |
a12f801d | 2234 | int tx_ring_size = tx_queue->tx_ring_size; |
4669bc90 DH |
2235 | int frags = 0; |
2236 | int i; | |
d080cd63 | 2237 | int howmany = 0; |
4669bc90 | 2238 | u32 lstatus; |
1da177e4 | 2239 | |
fba4ed03 | 2240 | rx_queue = priv->rx_queue[tx_queue->qindex]; |
a12f801d SG |
2241 | bdp = tx_queue->dirty_tx; |
2242 | skb_dirtytx = tx_queue->skb_dirtytx; | |
1da177e4 | 2243 | |
a12f801d | 2244 | while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { |
a3bc1f11 AV |
2245 | unsigned long flags; |
2246 | ||
4669bc90 DH |
2247 | frags = skb_shinfo(skb)->nr_frags; |
2248 | lbdp = skip_txbd(bdp, frags, base, tx_ring_size); | |
1da177e4 | 2249 | |
4669bc90 | 2250 | lstatus = lbdp->lstatus; |
1da177e4 | 2251 | |
4669bc90 DH |
2252 | /* Only clean completed frames */ |
2253 | if ((lstatus & BD_LFLAG(TXBD_READY)) && | |
2254 | (lstatus & BD_LENGTH_MASK)) | |
2255 | break; | |
2256 | ||
4826857f | 2257 | dma_unmap_single(&priv->ofdev->dev, |
4669bc90 DH |
2258 | bdp->bufPtr, |
2259 | bdp->length, | |
2260 | DMA_TO_DEVICE); | |
81183059 | 2261 | |
4669bc90 DH |
2262 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); |
2263 | bdp = next_txbd(bdp, base, tx_ring_size); | |
d080cd63 | 2264 | |
4669bc90 | 2265 | for (i = 0; i < frags; i++) { |
4826857f | 2266 | dma_unmap_page(&priv->ofdev->dev, |
4669bc90 DH |
2267 | bdp->bufPtr, |
2268 | bdp->length, | |
2269 | DMA_TO_DEVICE); | |
2270 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); | |
2271 | bdp = next_txbd(bdp, base, tx_ring_size); | |
2272 | } | |
1da177e4 | 2273 | |
0fd56bb5 AF |
2274 | /* |
2275 | * If there's room in the queue (limit it to rx_buffer_size) | |
2276 | * we add this skb back into the pool, if it's the right size | |
2277 | */ | |
a12f801d | 2278 | if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && |
0fd56bb5 AF |
2279 | skb_recycle_check(skb, priv->rx_buffer_size + |
2280 | RXBUF_ALIGNMENT)) | |
2281 | __skb_queue_head(&priv->rx_recycle, skb); | |
2282 | else | |
2283 | dev_kfree_skb_any(skb); | |
2284 | ||
a12f801d | 2285 | tx_queue->tx_skbuff[skb_dirtytx] = NULL; |
d080cd63 | 2286 | |
4669bc90 DH |
2287 | skb_dirtytx = (skb_dirtytx + 1) & |
2288 | TX_RING_MOD_MASK(tx_ring_size); | |
2289 | ||
2290 | howmany++; | |
a3bc1f11 | 2291 | spin_lock_irqsave(&tx_queue->txlock, flags); |
a12f801d | 2292 | tx_queue->num_txbdfree += frags + 1; |
a3bc1f11 | 2293 | spin_unlock_irqrestore(&tx_queue->txlock, flags); |
4669bc90 | 2294 | } |
1da177e4 | 2295 | |
4669bc90 | 2296 | /* If we freed a buffer, we can restart transmission, if necessary */ |
fba4ed03 SG |
2297 | if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree) |
2298 | netif_wake_subqueue(dev, tx_queue->qindex); | |
1da177e4 | 2299 | |
4669bc90 | 2300 | /* Update dirty indicators */ |
a12f801d SG |
2301 | tx_queue->skb_dirtytx = skb_dirtytx; |
2302 | tx_queue->dirty_tx = bdp; | |
1da177e4 | 2303 | |
d080cd63 DH |
2304 | dev->stats.tx_packets += howmany; |
2305 | ||
2306 | return howmany; | |
2307 | } | |
2308 | ||
f4983704 | 2309 | static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) |
d080cd63 | 2310 | { |
a6d0b91a AV |
2311 | unsigned long flags; |
2312 | ||
fba4ed03 SG |
2313 | spin_lock_irqsave(&gfargrp->grplock, flags); |
2314 | if (napi_schedule_prep(&gfargrp->napi)) { | |
f4983704 | 2315 | gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED); |
fba4ed03 | 2316 | __napi_schedule(&gfargrp->napi); |
8707bdd4 JP |
2317 | } else { |
2318 | /* | |
2319 | * Clear IEVENT, so interrupts aren't called again | |
2320 | * because of the packets that have already arrived. | |
2321 | */ | |
f4983704 | 2322 | gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK); |
2f448911 | 2323 | } |
fba4ed03 | 2324 | spin_unlock_irqrestore(&gfargrp->grplock, flags); |
a6d0b91a | 2325 | |
8c7396ae | 2326 | } |
1da177e4 | 2327 | |
8c7396ae | 2328 | /* Interrupt Handler for Transmit complete */ |
f4983704 | 2329 | static irqreturn_t gfar_transmit(int irq, void *grp_id) |
8c7396ae | 2330 | { |
f4983704 | 2331 | gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); |
1da177e4 LT |
2332 | return IRQ_HANDLED; |
2333 | } | |
2334 | ||
a12f801d | 2335 | static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, |
815b97c6 AF |
2336 | struct sk_buff *skb) |
2337 | { | |
a12f801d | 2338 | struct net_device *dev = rx_queue->dev; |
815b97c6 | 2339 | struct gfar_private *priv = netdev_priv(dev); |
8a102fe0 | 2340 | dma_addr_t buf; |
815b97c6 | 2341 | |
8a102fe0 AV |
2342 | buf = dma_map_single(&priv->ofdev->dev, skb->data, |
2343 | priv->rx_buffer_size, DMA_FROM_DEVICE); | |
a12f801d | 2344 | gfar_init_rxbdp(rx_queue, bdp, buf); |
815b97c6 AF |
2345 | } |
2346 | ||
2347 | ||
2348 | struct sk_buff *gfar_new_skb(struct net_device *dev)
1da177e4 | 2349 | { |
7f7f5316 | 2350 | unsigned int alignamount; |
1da177e4 LT |
2351 | struct gfar_private *priv = netdev_priv(dev); |
2352 | struct sk_buff *skb = NULL; | |
1da177e4 | 2353 | |
0fd56bb5 AF |
2354 | skb = __skb_dequeue(&priv->rx_recycle); |
2355 | if (!skb) | |
2356 | skb = netdev_alloc_skb(dev, | |
2357 | priv->rx_buffer_size + RXBUF_ALIGNMENT); | |
1da177e4 | 2358 | |
815b97c6 | 2359 | if (!skb) |
1da177e4 LT |
2360 | return NULL; |
2361 | ||
7f7f5316 | 2362 | alignamount = RXBUF_ALIGNMENT - |
bea3348e | 2363 | (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)); |
7f7f5316 | 2364 | |
1da177e4 LT |
2365 | /* The data buffer must be properly aligned; reserve
2366 | * as many bytes as needed to achieve that alignment
2367 | */ | |
7f7f5316 | 2368 | skb_reserve(skb, alignamount); |
1da177e4 | 2369 | |
1da177e4 LT |
2370 | return skb; |
2371 | } | |
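/*
 * Worked example of the skb_reserve() math above (addresses invented
 * for illustration; RXBUF_ALIGNMENT is a power of two):
 *
 *	skb->data = 0x1018, RXBUF_ALIGNMENT = 64
 *	alignamount = 64 - (0x1018 & 63) = 64 - 24 = 40
 *	after skb_reserve(skb, 40), skb->data = 0x1040 (64-byte aligned)
 *
 * Note that an already-aligned buffer still has a full RXBUF_ALIGNMENT
 * reserved, since alignamount is never reduced modulo the alignment.
 */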
2372 | ||
298e1a9e | 2373 | static inline void count_errors(unsigned short status, struct net_device *dev) |
1da177e4 | 2374 | { |
298e1a9e | 2375 | struct gfar_private *priv = netdev_priv(dev); |
09f75cd7 | 2376 | struct net_device_stats *stats = &dev->stats; |
1da177e4 LT |
2377 | struct gfar_extra_stats *estats = &priv->extra_stats; |
2378 | ||
2379 | /* If the packet was truncated, none of the other errors | |
2380 | * matter */ | |
2381 | if (status & RXBD_TRUNCATED) { | |
2382 | stats->rx_length_errors++; | |
2383 | ||
2384 | estats->rx_trunc++; | |
2385 | ||
2386 | return; | |
2387 | } | |
2388 | /* Count the errors, if there were any */ | |
2389 | if (status & (RXBD_LARGE | RXBD_SHORT)) { | |
2390 | stats->rx_length_errors++; | |
2391 | ||
2392 | if (status & RXBD_LARGE) | |
2393 | estats->rx_large++; | |
2394 | else | |
2395 | estats->rx_short++; | |
2396 | } | |
2397 | if (status & RXBD_NONOCTET) { | |
2398 | stats->rx_frame_errors++; | |
2399 | estats->rx_nonoctet++; | |
2400 | } | |
2401 | if (status & RXBD_CRCERR) { | |
2402 | estats->rx_crcerr++; | |
2403 | stats->rx_crc_errors++; | |
2404 | } | |
2405 | if (status & RXBD_OVERRUN) { | |
2406 | estats->rx_overrun++; | |
2407 | stats->rx_over_errors++;
2408 | } | |
2409 | } | |
2410 | ||
f4983704 | 2411 | irqreturn_t gfar_receive(int irq, void *grp_id) |
1da177e4 | 2412 | { |
f4983704 | 2413 | gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); |
1da177e4 LT |
2414 | return IRQ_HANDLED; |
2415 | } | |
2416 | ||
0bbaf069 KG |
2417 | static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) |
2418 | { | |
2419 | /* If valid headers were found, and valid sums | |
2420 | * were verified, then we tell the kernel that no | |
2421 | * checksumming is necessary. Otherwise, leave it to the stack */
7f7f5316 | 2422 | if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) |
0bbaf069 KG |
2423 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
2424 | else | |
2425 | skb->ip_summed = CHECKSUM_NONE; | |
2426 | } | |
2427 | ||
2428 | ||
1da177e4 LT |
2429 | /* gfar_process_frame() -- handle one incoming packet if skb |
2430 | * isn't NULL. */ | |
2431 | static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, | |
2c2db48a | 2432 | int amount_pull) |
1da177e4 LT |
2433 | { |
2434 | struct gfar_private *priv = netdev_priv(dev); | |
0bbaf069 | 2435 | struct rxfcb *fcb = NULL; |
1da177e4 | 2436 | |
2c2db48a | 2437 | int ret; |
1da177e4 | 2438 | |
2c2db48a DH |
2439 | /* The FCB sits at the beginning of the frame, if one exists */
2440 | fcb = (struct rxfcb *)skb->data; | |
0bbaf069 | 2441 | |
2c2db48a | 2442 | /* Record the Rx queue this frame arrived on (from the FCB) */
fba4ed03 | 2443 | skb_set_queue_mapping(skb, fcb->rq); |
2c2db48a DH |
2444 | /* Pull off the FCB and any padding bytes */
2445 | if (amount_pull) | |
2446 | skb_pull(skb, amount_pull); | |
0bbaf069 | 2447 | |
2c2db48a DH |
2448 | if (priv->rx_csum_enable) |
2449 | gfar_rx_checksum(skb, fcb); | |
0bbaf069 | 2450 | |
2c2db48a DH |
2451 | /* Tell the skb what kind of packet this is */ |
2452 | skb->protocol = eth_type_trans(skb, dev); | |
1da177e4 | 2453 | |
2c2db48a DH |
2454 | /* Send the packet up the stack */ |
2455 | if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN))) | |
2456 | ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl); | |
2457 | else | |
2458 | ret = netif_receive_skb(skb); | |
0bbaf069 | 2459 | |
2c2db48a DH |
2460 | if (NET_RX_DROP == ret) |
2461 | priv->extra_stats.kernel_dropped++; | |
1da177e4 LT |
2462 | |
2463 | return 0; | |
2464 | } | |
2465 | ||
2466 | /* gfar_clean_rx_ring() -- Processes each frame in the rx ring | |
0bbaf069 | 2467 | * until the budget/quota has been reached. Returns the number |
1da177e4 LT |
2468 | * of frames handled |
2469 | */ | |
a12f801d | 2470 | int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) |
1da177e4 | 2471 | { |
a12f801d | 2472 | struct net_device *dev = rx_queue->dev; |
31de198b | 2473 | struct rxbd8 *bdp, *base; |
1da177e4 | 2474 | struct sk_buff *skb; |
2c2db48a DH |
2475 | int pkt_len; |
2476 | int amount_pull; | |
1da177e4 LT |
2477 | int howmany = 0; |
2478 | struct gfar_private *priv = netdev_priv(dev); | |
2479 | ||
2480 | /* Get the first full descriptor */ | |
a12f801d SG |
2481 | bdp = rx_queue->cur_rx; |
2482 | base = rx_queue->rx_bd_base; | |
1da177e4 | 2483 | |
2c2db48a DH |
2484 | amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) + |
2485 | priv->padding; | |
2486 | ||
1da177e4 | 2487 | while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { |
815b97c6 | 2488 | struct sk_buff *newskb; |
3b6330ce | 2489 | rmb(); |
815b97c6 AF |
2490 | |
2491 | /* Add another skb for the future */ | |
2492 | newskb = gfar_new_skb(dev); | |
2493 | ||
a12f801d | 2494 | skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; |
1da177e4 | 2495 | |
4826857f | 2496 | dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, |
81183059 AF |
2497 | priv->rx_buffer_size, DMA_FROM_DEVICE); |
2498 | ||
815b97c6 AF |
2499 | /* We drop the frame if we failed to allocate a new buffer */ |
2500 | if (unlikely(!newskb || !(bdp->status & RXBD_LAST) || | |
2501 | bdp->status & RXBD_ERR)) { | |
2502 | count_errors(bdp->status, dev); | |
2503 | ||
2504 | if (unlikely(!newskb)) | |
2505 | newskb = skb; | |
4e2fd555 LB |
2506 | else if (skb) { |
2507 | /* | |
2508 | * We need to reset ->data to what it | |
2509 | * was before gfar_new_skb() re-aligned | |
2510 | * it to an RXBUF_ALIGNMENT boundary | |
2511 | * before we put the skb back on the | |
2512 | * recycle list. | |
2513 | */ | |
2514 | skb->data = skb->head + NET_SKB_PAD; | |
0fd56bb5 | 2515 | __skb_queue_head(&priv->rx_recycle, skb); |
4e2fd555 | 2516 | } |
815b97c6 | 2517 | } else { |
1da177e4 | 2518 | /* Increment the number of packets */ |
09f75cd7 | 2519 | dev->stats.rx_packets++; |
1da177e4 LT |
2520 | howmany++; |
2521 | ||
2c2db48a DH |
2522 | if (likely(skb)) { |
2523 | pkt_len = bdp->length - ETH_FCS_LEN; | |
2524 | /* Remove the FCS from the packet length */ | |
2525 | skb_put(skb, pkt_len); | |
2526 | dev->stats.rx_bytes += pkt_len; | |
1da177e4 | 2527 | |
2c2db48a DH |
2528 | gfar_process_frame(dev, skb, amount_pull); |
2529 | ||
2530 | } else { | |
2531 | if (netif_msg_rx_err(priv)) | |
2532 | printk(KERN_WARNING | |
2533 | "%s: Missing skb!\n", dev->name); | |
2534 | dev->stats.rx_dropped++; | |
2535 | priv->extra_stats.rx_skbmissing++; | |
2536 | } | |
1da177e4 | 2537 | |
1da177e4 LT |
2538 | } |
2539 | ||
a12f801d | 2540 | rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; |
1da177e4 | 2541 | |
815b97c6 | 2542 | /* Setup the new bdp */ |
a12f801d | 2543 | gfar_new_rxbdp(rx_queue, bdp, newskb); |
1da177e4 LT |
2544 | |
2545 | /* Update to the next pointer */ | |
a12f801d | 2546 | bdp = next_bd(bdp, base, rx_queue->rx_ring_size); |
1da177e4 LT |
2547 | |
2548 | /* update to point at the next skb */ | |
a12f801d SG |
2549 | rx_queue->skb_currx = |
2550 | (rx_queue->skb_currx + 1) & | |
2551 | RX_RING_MOD_MASK(rx_queue->rx_ring_size); | |
1da177e4 LT |
2552 | } |
2553 | ||
2554 | /* Update the current rxbd pointer to be the next one */ | |
a12f801d | 2555 | rx_queue->cur_rx = bdp; |
1da177e4 | 2556 | |
1da177e4 LT |
2557 | return howmany; |
2558 | } | |
2559 | ||
bea3348e | 2560 | static int gfar_poll(struct napi_struct *napi, int budget) |
1da177e4 | 2561 | { |
fba4ed03 SG |
2562 | struct gfar_priv_grp *gfargrp = container_of(napi, |
2563 | struct gfar_priv_grp, napi); | |
2564 | struct gfar_private *priv = gfargrp->priv; | |
46ceb60c | 2565 | struct gfar __iomem *regs = gfargrp->regs; |
a12f801d | 2566 | struct gfar_priv_tx_q *tx_queue = NULL; |
fba4ed03 SG |
2567 | struct gfar_priv_rx_q *rx_queue = NULL; |
2568 | int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0; | |
18294ad1 AV |
2569 | int tx_cleaned = 0, i, left_over_budget = budget; |
2570 | unsigned long serviced_queues = 0; | |
fba4ed03 | 2571 | int num_queues = 0; |
d080cd63 | 2572 | |
fba4ed03 SG |
2573 | num_queues = gfargrp->num_rx_queues; |
2574 | budget_per_queue = budget/num_queues; | |
2575 | ||
8c7396ae DH |
2576 | /* Clear IEVENT, so interrupts aren't called again |
2577 | * because of the packets that have already arrived */ | |
f4983704 | 2578 | gfar_write(®s->ievent, IEVENT_RTX_MASK); |
8c7396ae | 2579 | |
fba4ed03 | 2580 | while (num_queues && left_over_budget) { |
1da177e4 | 2581 | |
fba4ed03 SG |
2582 | budget_per_queue = left_over_budget/num_queues; |
2583 | left_over_budget = 0; | |
2584 | ||
2585 | for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { | |
2586 | if (test_bit(i, &serviced_queues)) | |
2587 | continue; | |
2588 | rx_queue = priv->rx_queue[i]; | |
2589 | tx_queue = priv->tx_queue[rx_queue->qindex]; | |
2590 | ||
a3bc1f11 | 2591 | tx_cleaned += gfar_clean_tx_ring(tx_queue); |
fba4ed03 SG |
2592 | rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue, |
2593 | budget_per_queue); | |
2594 | rx_cleaned += rx_cleaned_per_queue; | |
2595 | if (rx_cleaned_per_queue < budget_per_queue) {
2596 | left_over_budget = left_over_budget + | |
2597 | (budget_per_queue - rx_cleaned_per_queue); | |
2598 | set_bit(i, &serviced_queues); | |
2599 | num_queues--; | |
2600 | } | |
2601 | } | |
2602 | } | |
1da177e4 | 2603 | |
42199884 AF |
2604 | if (tx_cleaned) |
2605 | return budget; | |
2606 | ||
2607 | if (rx_cleaned < budget) { | |
288379f0 | 2608 | napi_complete(napi); |
1da177e4 LT |
2609 | |
2610 | /* Clear the halt bit in RSTAT */ | |
fba4ed03 | 2611 | gfar_write(®s->rstat, gfargrp->rstat); |
1da177e4 | 2612 | |
f4983704 | 2613 | gfar_write(®s->imask, IMASK_DEFAULT); |
1da177e4 LT |
2614 | |
2615 | /* If we are coalescing interrupts, update the timer */ | |
2616 | /* Otherwise, clear it */ | |
46ceb60c SG |
2617 | gfar_configure_coalescing(priv, |
2618 | gfargrp->rx_bit_map, gfargrp->tx_bit_map); | |
1da177e4 LT |
2619 | } |
2620 | ||
42199884 | 2621 | return rx_cleaned; |
1da177e4 | 2622 | } |
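/*
 * Toy walk-through of the leftover-budget loop above, with budget = 16
 * over two rx queues (the per-queue counts are invented):
 *
 *	pass 1: budget_per_queue = 8; queue A cleans 3, queue B cleans 8
 *	        A finished early, so 8 - 3 = 5 goes back into the pool
 *	pass 2: budget_per_queue = 5; only queue B is serviced again
 *
 * The loop exits once every queue under-runs its share or the leftover
 * pool is empty.
 */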
1da177e4 | 2623 | |
f2d71c2d VW |
2624 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2625 | /* | |
2626 | * Polling 'interrupt' - used by things like netconsole to send skbs | |
2627 | * without having to re-enable interrupts. It's not called while | |
2628 | * the interrupt routine is executing. | |
2629 | */ | |
2630 | static void gfar_netpoll(struct net_device *dev) | |
2631 | { | |
2632 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 2633 | int i = 0; |
f2d71c2d VW |
2634 | |
2635 | /* If the device has multiple interrupts, run tx/rx */ | |
b31a1d8b | 2636 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
46ceb60c SG |
2637 | for (i = 0; i < priv->num_grps; i++) { |
2638 | disable_irq(priv->gfargrp[i].interruptTransmit); | |
2639 | disable_irq(priv->gfargrp[i].interruptReceive); | |
2640 | disable_irq(priv->gfargrp[i].interruptError); | |
2641 | gfar_interrupt(priv->gfargrp[i].interruptTransmit, | |
2642 | &priv->gfargrp[i]); | |
2643 | enable_irq(priv->gfargrp[i].interruptError); | |
2644 | enable_irq(priv->gfargrp[i].interruptReceive); | |
2645 | enable_irq(priv->gfargrp[i].interruptTransmit); | |
2646 | } | |
f2d71c2d | 2647 | } else { |
46ceb60c SG |
2648 | for (i = 0; i < priv->num_grps; i++) { |
2649 | disable_irq(priv->gfargrp[i].interruptTransmit); | |
2650 | gfar_interrupt(priv->gfargrp[i].interruptTransmit, | |
2651 | &priv->gfargrp[i]); | |
2652 | enable_irq(priv->gfargrp[i].interruptTransmit); | |
43de004b | 2653 | } |
f2d71c2d VW |
2654 | } |
2655 | } | |
2656 | #endif | |
2657 | ||
1da177e4 | 2658 | /* The interrupt handler for devices with one interrupt */ |
f4983704 | 2659 | static irqreturn_t gfar_interrupt(int irq, void *grp_id) |
1da177e4 | 2660 | { |
f4983704 | 2661 | struct gfar_priv_grp *gfargrp = grp_id; |
1da177e4 LT |
2662 | |
2663 | /* Save ievent for future reference */ | |
f4983704 | 2664 | u32 events = gfar_read(&gfargrp->regs->ievent); |
1da177e4 | 2665 | |
1da177e4 | 2666 | /* Check for reception */ |
538cc7ee | 2667 | if (events & IEVENT_RX_MASK) |
f4983704 | 2668 | gfar_receive(irq, grp_id); |
1da177e4 LT |
2669 | |
2670 | /* Check for transmit completion */ | |
538cc7ee | 2671 | if (events & IEVENT_TX_MASK) |
f4983704 | 2672 | gfar_transmit(irq, grp_id); |
1da177e4 | 2673 | |
538cc7ee SS |
2674 | /* Check for errors */ |
2675 | if (events & IEVENT_ERR_MASK) | |
f4983704 | 2676 | gfar_error(irq, grp_id); |
1da177e4 LT |
2677 | |
2678 | return IRQ_HANDLED; | |
2679 | } | |
2680 | ||
1da177e4 LT |
2681 | /* Called every time the controller might need to be made |
2682 | * aware of new link state. The PHY code conveys this | |
bb40dcbb | 2683 | * information through variables in the phydev structure, and this |
1da177e4 LT |
2684 | * function converts those variables into the appropriate |
2685 | * register values, and can bring down the device if needed. | |
2686 | */ | |
2687 | static void adjust_link(struct net_device *dev) | |
2688 | { | |
2689 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 2690 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
bb40dcbb AF |
2691 | unsigned long flags; |
2692 | struct phy_device *phydev = priv->phydev; | |
2693 | int new_state = 0; | |
2694 | ||
fba4ed03 SG |
2695 | local_irq_save(flags); |
2696 | lock_tx_qs(priv); | |
2697 | ||
bb40dcbb AF |
2698 | if (phydev->link) { |
2699 | u32 tempval = gfar_read(®s->maccfg2); | |
7f7f5316 | 2700 | u32 ecntrl = gfar_read(®s->ecntrl); |
1da177e4 | 2701 | |
1da177e4 LT |
2702 | /* Now we make sure that we can be in full duplex mode. |
2703 | * If not, we operate in half-duplex mode. */ | |
bb40dcbb AF |
2704 | if (phydev->duplex != priv->oldduplex) { |
2705 | new_state = 1; | |
2706 | if (!(phydev->duplex)) | |
1da177e4 | 2707 | tempval &= ~(MACCFG2_FULL_DUPLEX); |
bb40dcbb | 2708 | else |
1da177e4 | 2709 | tempval |= MACCFG2_FULL_DUPLEX; |
1da177e4 | 2710 | |
bb40dcbb | 2711 | priv->oldduplex = phydev->duplex; |
1da177e4 LT |
2712 | } |
2713 | ||
bb40dcbb AF |
2714 | if (phydev->speed != priv->oldspeed) { |
2715 | new_state = 1; | |
2716 | switch (phydev->speed) { | |
1da177e4 | 2717 | case 1000: |
1da177e4 LT |
2718 | tempval = |
2719 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); | |
f430e49e LY |
2720 | |
2721 | ecntrl &= ~(ECNTRL_R100); | |
1da177e4 LT |
2722 | break; |
2723 | case 100: | |
2724 | case 10: | |
1da177e4 LT |
2725 | tempval = |
2726 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); | |
7f7f5316 AF |
2727 | |
2728 | /* Reduced mode distinguishes | |
2729 | * between 10 and 100 */ | |
2730 | if (phydev->speed == SPEED_100) | |
2731 | ecntrl |= ECNTRL_R100; | |
2732 | else | |
2733 | ecntrl &= ~(ECNTRL_R100); | |
1da177e4 LT |
2734 | break; |
2735 | default: | |
0bbaf069 KG |
2736 | if (netif_msg_link(priv)) |
2737 | printk(KERN_WARNING | |
bb40dcbb AF |
2738 | "%s: Ack! Speed (%d) is not 10/100/1000!\n", |
2739 | dev->name, phydev->speed); | |
1da177e4 LT |
2740 | break; |
2741 | } | |
2742 | ||
bb40dcbb | 2743 | priv->oldspeed = phydev->speed; |
1da177e4 LT |
2744 | } |
2745 | ||
bb40dcbb | 2746 | gfar_write(®s->maccfg2, tempval); |
7f7f5316 | 2747 | gfar_write(®s->ecntrl, ecntrl); |
bb40dcbb | 2748 | |
1da177e4 | 2749 | if (!priv->oldlink) { |
bb40dcbb | 2750 | new_state = 1; |
1da177e4 | 2751 | priv->oldlink = 1; |
1da177e4 | 2752 | } |
bb40dcbb AF |
2753 | } else if (priv->oldlink) { |
2754 | new_state = 1; | |
2755 | priv->oldlink = 0; | |
2756 | priv->oldspeed = 0; | |
2757 | priv->oldduplex = -1; | |
1da177e4 | 2758 | } |
1da177e4 | 2759 | |
bb40dcbb AF |
2760 | if (new_state && netif_msg_link(priv)) |
2761 | phy_print_status(phydev); | |
fba4ed03 SG |
2762 | unlock_tx_qs(priv); |
2763 | local_irq_restore(flags); | |
bb40dcbb | 2764 | } |
1da177e4 LT |
2765 | |
2766 | /* Update the hash table based on the current list of multicast | |
2767 | * addresses we subscribe to. Also, change the promiscuity of | |
2768 | * the device based on the flags (this function is called | |
2769 | * whenever dev->flags is changed */ | |
2770 | static void gfar_set_multi(struct net_device *dev) | |
2771 | { | |
2772 | struct dev_mc_list *mc_ptr; | |
2773 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 2774 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
1da177e4 LT |
2775 | u32 tempval; |
2776 | ||
a12f801d | 2777 | if (dev->flags & IFF_PROMISC) { |
1da177e4 LT |
2778 | /* Set RCTRL to PROM */ |
2779 | tempval = gfar_read(®s->rctrl); | |
2780 | tempval |= RCTRL_PROM; | |
2781 | gfar_write(®s->rctrl, tempval); | |
2782 | } else { | |
2783 | /* Set RCTRL to not PROM */ | |
2784 | tempval = gfar_read(®s->rctrl); | |
2785 | tempval &= ~(RCTRL_PROM); | |
2786 | gfar_write(®s->rctrl, tempval); | |
2787 | } | |
6aa20a22 | 2788 | |
a12f801d | 2789 | if (dev->flags & IFF_ALLMULTI) { |
1da177e4 | 2790 | /* Set the hash to rx all multicast frames */ |
0bbaf069 KG |
2791 | gfar_write(®s->igaddr0, 0xffffffff); |
2792 | gfar_write(®s->igaddr1, 0xffffffff); | |
2793 | gfar_write(®s->igaddr2, 0xffffffff); | |
2794 | gfar_write(®s->igaddr3, 0xffffffff); | |
2795 | gfar_write(®s->igaddr4, 0xffffffff); | |
2796 | gfar_write(®s->igaddr5, 0xffffffff); | |
2797 | gfar_write(®s->igaddr6, 0xffffffff); | |
2798 | gfar_write(®s->igaddr7, 0xffffffff); | |
1da177e4 LT |
2799 | gfar_write(®s->gaddr0, 0xffffffff); |
2800 | gfar_write(®s->gaddr1, 0xffffffff); | |
2801 | gfar_write(®s->gaddr2, 0xffffffff); | |
2802 | gfar_write(®s->gaddr3, 0xffffffff); | |
2803 | gfar_write(®s->gaddr4, 0xffffffff); | |
2804 | gfar_write(®s->gaddr5, 0xffffffff); | |
2805 | gfar_write(®s->gaddr6, 0xffffffff); | |
2806 | gfar_write(®s->gaddr7, 0xffffffff); | |
2807 | } else { | |
7f7f5316 AF |
2808 | int em_num; |
2809 | int idx; | |
2810 | ||
1da177e4 | 2811 | /* zero out the hash */ |
0bbaf069 KG |
2812 | gfar_write(®s->igaddr0, 0x0); |
2813 | gfar_write(®s->igaddr1, 0x0); | |
2814 | gfar_write(®s->igaddr2, 0x0); | |
2815 | gfar_write(®s->igaddr3, 0x0); | |
2816 | gfar_write(®s->igaddr4, 0x0); | |
2817 | gfar_write(®s->igaddr5, 0x0); | |
2818 | gfar_write(®s->igaddr6, 0x0); | |
2819 | gfar_write(®s->igaddr7, 0x0); | |
1da177e4 LT |
2820 | gfar_write(®s->gaddr0, 0x0); |
2821 | gfar_write(®s->gaddr1, 0x0); | |
2822 | gfar_write(®s->gaddr2, 0x0); | |
2823 | gfar_write(®s->gaddr3, 0x0); | |
2824 | gfar_write(®s->gaddr4, 0x0); | |
2825 | gfar_write(®s->gaddr5, 0x0); | |
2826 | gfar_write(®s->gaddr6, 0x0); | |
2827 | gfar_write(®s->gaddr7, 0x0); | |
2828 | ||
7f7f5316 AF |
2829 | /* If we have extended hash tables, we need to |
2830 | * clear the exact match registers to prepare for | |
2831 | * setting them */ | |
2832 | if (priv->extended_hash) { | |
2833 | em_num = GFAR_EM_NUM + 1; | |
2834 | gfar_clear_exact_match(dev); | |
2835 | idx = 1; | |
2836 | } else { | |
2837 | idx = 0; | |
2838 | em_num = 0; | |
2839 | } | |
2840 | ||
a12f801d | 2841 | if (dev->mc_count == 0) |
1da177e4 LT |
2842 | return; |
2843 | ||
2844 | /* Parse the list, and set the appropriate bits */ | |
2845 | for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
7f7f5316 AF |
2846 | if (idx < em_num) { |
2847 | gfar_set_mac_for_addr(dev, idx, | |
2848 | mc_ptr->dmi_addr); | |
2849 | idx++; | |
2850 | } else | |
2851 | gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr); | |
1da177e4 LT |
2852 | } |
2853 | } | |
2854 | ||
2855 | return; | |
2856 | } | |
2857 | ||
7f7f5316 AF |
2858 | |
2859 | /* Clears each of the exact match registers to zero, so they | |
2860 | * don't interfere with normal reception */ | |
2861 | static void gfar_clear_exact_match(struct net_device *dev) | |
2862 | { | |
2863 | int idx; | |
2864 | u8 zero_arr[MAC_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
2865 | ||
2866 | for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
2867 | gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr); | |
2868 | } | |
2869 | ||
1da177e4 LT |
2870 | /* Set the appropriate hash bit for the given addr */ |
2871 | /* The algorithm works like so: | |
2872 | * 1) Take the Destination Address (ie the multicast address), and | |
2873 | * do a CRC on it (little endian), and reverse the bits of the | |
2874 | * result. | |
2875 | * 2) Use the 8 most significant bits as a hash into a 256-entry | |
2876 | * table. The table is controlled through 8 32-bit registers: | |
2877 | * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is | |
2878 | * entry 255. This means that the 3 most significant bits of the | |
2879 | * hash index select which gaddr register to use, and the other 5 bits | |
2880 | * indicate which bit (assuming an IBM numbering scheme, which | |
2881 | * for PowerPC (tm) is usually the case) in the register holds | |
2882 | * the entry. */ | |
2883 | static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) | |
2884 | { | |
2885 | u32 tempval; | |
2886 | struct gfar_private *priv = netdev_priv(dev); | |
1da177e4 | 2887 | u32 result = ether_crc(MAC_ADDR_LEN, addr); |
0bbaf069 KG |
2888 | int width = priv->hash_width; |
2889 | u8 whichbit = (result >> (32 - width)) & 0x1f; | |
2890 | u8 whichreg = result >> (32 - width + 5); | |
1da177e4 LT |
2891 | u32 value = (1 << (31 - whichbit)); |
2892 | ||
0bbaf069 | 2893 | tempval = gfar_read(priv->hash_regs[whichreg]); |
1da177e4 | 2894 | tempval |= value; |
0bbaf069 | 2895 | gfar_write(priv->hash_regs[whichreg], tempval); |
1da177e4 LT |
2896 | |
2897 | return; | |
2898 | } | |
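/* Worked example of the index math above (the CRC value is hypothetical,
 * and an 8-bit hash width is assumed):
 *
 *	u32 result = 0xe6000000;		hash index = top 8 bits = 0xe6
 *	u8 whichbit = (result >> 24) & 0x1f;	low 5 bits of the index -> 6
 *	u8 whichreg = result >> 29;		top 3 bits of the index -> 7
 *	u32 value = 1 << (31 - 6);		IBM bit 6 of the register
 *
 * so this address sets bit 6 (IBM numbering) of gaddr7, i.e. entry 0xe6
 * of the 256-entry table. */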
2899 | ||
7f7f5316 AF |
2900 | |
2901 | /* There are multiple MAC address register pairs on some controllers. |
2902 | * This function sets the num'th pair to a given address. | |
2903 | */ | |
2904 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr) | |
2905 | { | |
2906 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 2907 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
7f7f5316 AF |
2908 | int idx; |
2909 | char tmpbuf[MAC_ADDR_LEN]; | |
2910 | u32 tempval; | |
f4983704 | 2911 | u32 __iomem *macptr = &regs->macstnaddr1; |
7f7f5316 AF |
2912 | |
2913 | macptr += num * 2; | |
2914 | ||
2915 | /* Now copy it into the mac registers backwards, because the */ | |
2916 | /* controller expects the address in reversed byte order */ | |
2917 | for (idx = 0; idx < MAC_ADDR_LEN; idx++) | |
2918 | tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx]; | |
2919 | ||
2920 | gfar_write(macptr, *((u32 *) (tmpbuf))); | |
2921 | ||
2922 | tempval = *((u32 *) (tmpbuf + 4)); | |
2923 | ||
2924 | gfar_write(macptr+1, tempval); | |
2925 | } | |
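/* Illustrative example (the address is hypothetical): for
 * addr = 00:04:9f:01:02:03 the loop builds
 * tmpbuf = { 0x03, 0x02, 0x01, 0x9f, 0x04, 0x00 }, so on the big-endian
 * core the first write stores 0x0302019f and the second write places
 * 0x0400 in the upper half of the second register -- the reversed byte
 * order the controller expects. */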
2926 | ||
1da177e4 | 2927 | /* GFAR error interrupt handler */ |
f4983704 | 2928 | static irqreturn_t gfar_error(int irq, void *grp_id) |
1da177e4 | 2929 | { |
f4983704 SG |
2930 | struct gfar_priv_grp *gfargrp = grp_id; |
2931 | struct gfar __iomem *regs = gfargrp->regs; | |
2932 | struct gfar_private *priv = gfargrp->priv; | |
2933 | struct net_device *dev = priv->ndev; | |
1da177e4 LT |
2934 | |
2935 | /* Save ievent for future reference */ | |
f4983704 | 2936 | u32 events = gfar_read(&regs->ievent); |
1da177e4 LT |
2937 | |
2938 | /* Clear IEVENT */ | |
f4983704 | 2939 | gfar_write(&regs->ievent, events & IEVENT_ERR_MASK); |
d87eb127 SW |
2940 | |
2941 | /* Magic Packet is not an error. */ | |
b31a1d8b | 2942 | if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && |
d87eb127 SW |
2943 | (events & IEVENT_MAG)) |
2944 | events &= ~IEVENT_MAG; | |
1da177e4 LT |
2945 | |
2946 | /* Report the interrupt cause if error messaging is enabled */ | |
0bbaf069 KG |
2947 | if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) |
2948 | printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n", | |
f4983704 | 2949 | dev->name, events, gfar_read(&regs->imask)); |
1da177e4 LT |
2950 | |
2951 | /* Update the error counters */ | |
2952 | if (events & IEVENT_TXE) { | |
09f75cd7 | 2953 | dev->stats.tx_errors++; |
1da177e4 LT |
2954 | |
2955 | if (events & IEVENT_LC) | |
09f75cd7 | 2956 | dev->stats.tx_window_errors++; |
1da177e4 | 2957 | if (events & IEVENT_CRL) |
09f75cd7 | 2958 | dev->stats.tx_aborted_errors++; |
1da177e4 | 2959 | if (events & IEVENT_XFUN) { |
836cf7fa AV |
2960 | unsigned long flags; |
2961 | ||
0bbaf069 | 2962 | if (netif_msg_tx_err(priv)) |
538cc7ee SS |
2963 | printk(KERN_DEBUG "%s: TX FIFO underrun, " |
2964 | "packet dropped.\n", dev->name); | |
09f75cd7 | 2965 | dev->stats.tx_dropped++; |
1da177e4 LT |
2966 | priv->extra_stats.tx_underrun++; |
2967 | ||
836cf7fa AV |
2968 | local_irq_save(flags); |
2969 | lock_tx_qs(priv); | |
2970 | ||
1da177e4 | 2971 | /* Reactivate the Tx Queues */ |
fba4ed03 | 2972 | gfar_write(&regs->tstat, gfargrp->tstat); |
836cf7fa AV |
2973 | |
2974 | unlock_tx_qs(priv); | |
2975 | local_irq_restore(flags); | |
1da177e4 | 2976 | } |
0bbaf069 KG |
2977 | if (netif_msg_tx_err(priv)) |
2978 | printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); | |
1da177e4 LT |
2979 | } |
2980 | if (events & IEVENT_BSY) { | |
09f75cd7 | 2981 | dev->stats.rx_errors++; |
1da177e4 LT |
2982 | priv->extra_stats.rx_bsy++; |
2983 | ||
f4983704 | 2984 | gfar_receive(irq, grp_id); |
1da177e4 | 2985 | |
0bbaf069 | 2986 | if (netif_msg_rx_err(priv)) |
538cc7ee | 2987 | printk(KERN_DEBUG "%s: busy error (rstat: %x)\n", |
f4983704 | 2988 | dev->name, gfar_read(&regs->rstat)); |
1da177e4 LT |
2989 | } |
2990 | if (events & IEVENT_BABR) { | |
09f75cd7 | 2991 | dev->stats.rx_errors++; |
1da177e4 LT |
2992 | priv->extra_stats.rx_babr++; |
2993 | ||
0bbaf069 | 2994 | if (netif_msg_rx_err(priv)) |
538cc7ee | 2995 | printk(KERN_DEBUG "%s: babbling RX error\n", dev->name); |
1da177e4 LT |
2996 | } |
2997 | if (events & IEVENT_EBERR) { | |
2998 | priv->extra_stats.eberr++; | |
0bbaf069 | 2999 | if (netif_msg_rx_err(priv)) |
538cc7ee | 3000 | printk(KERN_DEBUG "%s: bus error\n", dev->name); |
1da177e4 | 3001 | } |
0bbaf069 | 3002 | if ((events & IEVENT_RXC) && netif_msg_rx_status(priv)) |
538cc7ee | 3003 | printk(KERN_DEBUG "%s: control frame\n", dev->name); |
1da177e4 LT |
3004 | |
3005 | if (events & IEVENT_BABT) { | |
3006 | priv->extra_stats.tx_babt++; | |
0bbaf069 | 3007 | if (netif_msg_tx_err(priv)) |
538cc7ee | 3008 | printk(KERN_DEBUG "%s: babbling TX error\n", dev->name); |
1da177e4 LT |
3009 | } |
3010 | return IRQ_HANDLED; | |
3011 | } | |
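/* Illustrative reading of the handler above, not from the source: if the
 * controller raises IEVENT_BSY because it ran out of RX buffers, the
 * code counts the event (rx_errors, rx_bsy) and immediately calls
 * gfar_receive() so the stalled ring is drained without waiting for the
 * next RX interrupt. */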
3012 | ||
b31a1d8b AF |
3013 | static struct of_device_id gfar_match[] = |
3014 | { | |
3015 | { | |
3016 | .type = "network", | |
3017 | .compatible = "gianfar", | |
3018 | }, | |
46ceb60c SG |
3019 | { |
3020 | .compatible = "fsl,etsec2", | |
3021 | }, | |
b31a1d8b AF |
3022 | {}, |
3023 | }; | |
e72701ac | 3024 | MODULE_DEVICE_TABLE(of, gfar_match); |
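/* Hedged example of a device tree node that would match the first entry
 * above (unit address, reg window, and interrupt numbers are hypothetical):
 *
 *	ethernet@24000 {
 *		device_type = "network";
 *		compatible = "gianfar";
 *		reg = <0x24000 0x1000>;
 *		local-mac-address = [ 00 00 00 00 00 00 ];
 *		interrupts = <29 2 30 2 34 2>;
 *	};
 */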
b31a1d8b | 3025 | |
1da177e4 | 3026 | /* Structure for a device driver */ |
b31a1d8b AF |
3027 | static struct of_platform_driver gfar_driver = { |
3028 | .name = "fsl-gianfar", | |
3029 | .match_table = gfar_match, | |
3030 | ||
1da177e4 LT |
3031 | .probe = gfar_probe, |
3032 | .remove = gfar_remove, | |
be926fc4 AV |
3033 | .suspend = gfar_legacy_suspend, |
3034 | .resume = gfar_legacy_resume, | |
3035 | .driver.pm = GFAR_PM_OPS, | |
1da177e4 LT |
3036 | }; |
3037 | ||
3038 | static int __init gfar_init(void) | |
3039 | { | |
1577ecef | 3040 | return of_register_platform_driver(&gfar_driver); |
1da177e4 LT |
3041 | } |
3042 | ||
3043 | static void __exit gfar_exit(void) | |
3044 | { | |
b31a1d8b | 3045 | of_unregister_platform_driver(&gfar_driver); |
1da177e4 LT |
3046 | } |
3047 | ||
3048 | module_init(gfar_init); | |
3049 | module_exit(gfar_exit); | |
3050 |