drivers/net/myri_sbus.c
1 /* myri_sbus.c: MyriCOM MyriNET SBUS card driver.
2 *
3 * Copyright (C) 1996, 1999, 2006, 2008 David S. Miller (davem@davemloft.net)
4 */
5
6 static char version[] =
7 "myri_sbus.c:v2.0 June 23, 2006 David S. Miller (davem@davemloft.net)\n";
8
9 #include <linux/module.h>
10 #include <linux/errno.h>
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/fcntl.h>
14 #include <linux/interrupt.h>
15 #include <linux/ioport.h>
16 #include <linux/in.h>
17 #include <linux/string.h>
18 #include <linux/delay.h>
19 #include <linux/init.h>
20 #include <linux/netdevice.h>
21 #include <linux/etherdevice.h>
22 #include <linux/skbuff.h>
23 #include <linux/bitops.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/of.h>
26 #include <linux/of_device.h>
27 #include <linux/firmware.h>
28 #include <linux/gfp.h>
29
30 #include <net/dst.h>
31 #include <net/arp.h>
32 #include <net/sock.h>
33 #include <net/ipv6.h>
34
35 #include <asm/system.h>
36 #include <asm/io.h>
37 #include <asm/dma.h>
38 #include <asm/byteorder.h>
39 #include <asm/idprom.h>
40 #include <asm/openprom.h>
41 #include <asm/oplib.h>
42 #include <asm/auxio.h>
43 #include <asm/pgtable.h>
44 #include <asm/irq.h>
45
46 #include "myri_sbus.h"
47
48 /* #define DEBUG_DETECT */
49 /* #define DEBUG_IRQ */
50 /* #define DEBUG_TRANSMIT */
51 /* #define DEBUG_RECEIVE */
52 /* #define DEBUG_HEADER */
53
54 #ifdef DEBUG_DETECT
55 #define DET(x) printk x
56 #else
57 #define DET(x)
58 #endif
59
60 #ifdef DEBUG_IRQ
61 #define DIRQ(x) printk x
62 #else
63 #define DIRQ(x)
64 #endif
65
66 #ifdef DEBUG_TRANSMIT
67 #define DTX(x) printk x
68 #else
69 #define DTX(x)
70 #endif
71
72 #ifdef DEBUG_RECEIVE
73 #define DRX(x) printk x
74 #else
75 #define DRX(x)
76 #endif
77
78 #ifdef DEBUG_HEADER
79 #define DHDR(x) printk x
80 #else
81 #define DHDR(x)
82 #endif
83
84 /* Firmware name */
85 #define FWNAME "myricom/lanai.bin"
86
87 static void myri_reset_off(void __iomem *lp, void __iomem *cregs)
88 {
89 /* Clear IRQ mask. */
90 sbus_writel(0, lp + LANAI_EIMASK);
91
92 /* Turn RESET function off. */
93 sbus_writel(CONTROL_ROFF, cregs + MYRICTRL_CTRL);
94 }
95
96 static void myri_reset_on(void __iomem *cregs)
97 {
98 /* Enable RESET function. */
99 sbus_writel(CONTROL_RON, cregs + MYRICTRL_CTRL);
100
101 /* Disable IRQs. */
102 sbus_writel(CONTROL_DIRQ, cregs + MYRICTRL_CTRL);
103 }
104
105 static void myri_disable_irq(void __iomem *lp, void __iomem *cregs)
106 {
107 sbus_writel(CONTROL_DIRQ, cregs + MYRICTRL_CTRL);
108 sbus_writel(0, lp + LANAI_EIMASK);
109 sbus_writel(ISTAT_HOST, lp + LANAI_ISTAT);
110 }
111
112 static void myri_enable_irq(void __iomem *lp, void __iomem *cregs)
113 {
114 sbus_writel(CONTROL_EIRQ, cregs + MYRICTRL_CTRL);
115 sbus_writel(ISTAT_HOST, lp + LANAI_EIMASK);
116 }
117
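/* Kick the LANAI: set the shared-memory "send" flag and issue a host
 * wakeup through the control register so the firmware picks up new work.
 */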
118 static inline void bang_the_chip(struct myri_eth *mp)
119 {
120 struct myri_shmem __iomem *shmem = mp->shmem;
121 void __iomem *cregs = mp->cregs;
122
123 sbus_writel(1, &shmem->send);
124 sbus_writel(CONTROL_WON, cregs + MYRICTRL_CTRL);
125 }
126
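/* Wake the firmware and poll (up to 25 tries, 20us apart) until its
 * channel reaches STATE_READY. Returns 0 on success, -1 on failure or
 * if the channel was already READY on entry.
 */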
127 static int myri_do_handshake(struct myri_eth *mp)
128 {
129 struct myri_shmem __iomem *shmem = mp->shmem;
130 void __iomem *cregs = mp->cregs;
131 struct myri_channel __iomem *chan = &shmem->channel;
132 int tick = 0;
133
134 DET(("myri_do_handshake: "));
135 if (sbus_readl(&chan->state) == STATE_READY) {
136 DET(("Already STATE_READY, failed.\n"));
137 return -1; /* We're hosed... */
138 }
139
140 myri_disable_irq(mp->lregs, cregs);
141
142 while (tick++ < 25) {
143 u32 softstate;
144
145 /* Wake it up. */
146 DET(("shakedown, CONTROL_WON, "));
147 sbus_writel(1, &shmem->shakedown);
148 sbus_writel(CONTROL_WON, cregs + MYRICTRL_CTRL);
149
150 softstate = sbus_readl(&chan->state);
151 DET(("chanstate[%08x] ", softstate));
152 if (softstate == STATE_READY) {
153 DET(("wakeup successful, "));
154 break;
155 }
156
157 if (softstate != STATE_WFN) {
158 DET(("not WFN setting that, "));
159 sbus_writel(STATE_WFN, &chan->state);
160 }
161
162 udelay(20);
163 }
164
165 myri_enable_irq(mp->lregs, cregs);
166
167 if (tick > 25) {
168 DET(("25 ticks we lose, failure.\n"));
169 return -1;
170 }
171 DET(("success\n"));
172 return 0;
173 }
174
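/* Download the LANai firmware (FWNAME) into board SRAM, program the MAC
 * address, SBUS burst size and interrupt mask in shared memory, then take
 * the processor out of reset and handshake with it.
 */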
175 static int __devinit myri_load_lanai(struct myri_eth *mp)
176 {
177 const struct firmware *fw;
178 struct net_device *dev = mp->dev;
179 struct myri_shmem __iomem *shmem = mp->shmem;
180 void __iomem *rptr;
181 int i, lanai4_data_size;
182
183 myri_disable_irq(mp->lregs, mp->cregs);
184 myri_reset_on(mp->cregs);
185
186 rptr = mp->lanai;
187 for (i = 0; i < mp->eeprom.ramsz; i++)
188 sbus_writeb(0, rptr + i);
189
190 if (mp->eeprom.cpuvers >= CPUVERS_3_0)
191 sbus_writel(mp->eeprom.cval, mp->lregs + LANAI_CVAL);
192
193 i = request_firmware(&fw, FWNAME, &mp->myri_op->dev);
194 if (i) {
195 printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
196 FWNAME, i);
197 return i;
198 }
199 if (fw->size < 2) {
200 printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
201 fw->size, FWNAME);
202 release_firmware(fw);
203 return -EINVAL;
204 }
205 lanai4_data_size = fw->data[0] << 8 | fw->data[1];
206
207 /* Load executable code. */
208 for (i = 2; i < fw->size; i++)
209 sbus_writeb(fw->data[i], rptr++);
210
211 /* Load data segment. */
212 for (i = 0; i < lanai4_data_size; i++)
213 sbus_writeb(0, rptr++);
214
215 /* Set device address. */
216 sbus_writeb(0, &shmem->addr[0]);
217 sbus_writeb(0, &shmem->addr[1]);
218 for (i = 0; i < 6; i++)
219 sbus_writeb(dev->dev_addr[i],
220 &shmem->addr[i + 2]);
221
222 /* Set SBUS bursts and interrupt mask. */
223 sbus_writel(((mp->myri_bursts & 0xf8) >> 3), &shmem->burst);
224 sbus_writel(SHMEM_IMASK_RX, &shmem->imask);
225
226 /* Release the LANAI. */
227 myri_disable_irq(mp->lregs, mp->cregs);
228 myri_reset_off(mp->lregs, mp->cregs);
229 myri_disable_irq(mp->lregs, mp->cregs);
230
231 /* Wait for the reset to complete. */
232 for (i = 0; i < 5000; i++) {
233 if (sbus_readl(&shmem->channel.state) != STATE_READY)
234 break;
235 else
236 udelay(10);
237 }
238
239 if (i == 5000)
240 printk(KERN_ERR "myricom: Chip would not reset after firmware load.\n");
241
242 i = myri_do_handshake(mp);
243 if (i)
244 printk(KERN_ERR "myricom: Handshake with LANAI failed.\n");
245
246 if (mp->eeprom.cpuvers == CPUVERS_4_0)
247 sbus_writel(0, mp->lregs + LANAI_VERS);
248
249 release_firmware(fw);
250 return i;
251 }
252
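/* Unmap and free any socket buffers still held in the RX and TX rings and
 * reset the ring head/tail pointers in shared memory.
 */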
253 static void myri_clean_rings(struct myri_eth *mp)
254 {
255 struct sendq __iomem *sq = mp->sq;
256 struct recvq __iomem *rq = mp->rq;
257 int i;
258
259 sbus_writel(0, &rq->tail);
260 sbus_writel(0, &rq->head);
261 for (i = 0; i < (RX_RING_SIZE+1); i++) {
262 if (mp->rx_skbs[i] != NULL) {
263 struct myri_rxd __iomem *rxd = &rq->myri_rxd[i];
264 u32 dma_addr;
265
266 dma_addr = sbus_readl(&rxd->myri_scatters[0].addr);
267 dma_unmap_single(&mp->myri_op->dev, dma_addr,
268 RX_ALLOC_SIZE, DMA_FROM_DEVICE);
269 dev_kfree_skb(mp->rx_skbs[i]);
270 mp->rx_skbs[i] = NULL;
271 }
272 }
273
274 mp->tx_old = 0;
275 sbus_writel(0, &sq->tail);
276 sbus_writel(0, &sq->head);
277 for (i = 0; i < TX_RING_SIZE; i++) {
278 if (mp->tx_skbs[i] != NULL) {
279 struct sk_buff *skb = mp->tx_skbs[i];
280 struct myri_txd __iomem *txd = &sq->myri_txd[i];
281 u32 dma_addr;
282
283 dma_addr = sbus_readl(&txd->myri_gathers[0].addr);
284 dma_unmap_single(&mp->myri_op->dev, dma_addr,
285 (skb->len + 3) & ~3,
286 DMA_TO_DEVICE);
287 dev_kfree_skb(mp->tx_skbs[i]);
288 mp->tx_skbs[i] = NULL;
289 }
290 }
291 }
292
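/* Refill the RX ring with freshly allocated, DMA-mapped RX_ALLOC_SIZE
 * buffers; GFP_ATOMIC is used when called from interrupt context.
 */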
293 static void myri_init_rings(struct myri_eth *mp, int from_irq)
294 {
295 struct recvq __iomem *rq = mp->rq;
296 struct myri_rxd __iomem *rxd = &rq->myri_rxd[0];
297 struct net_device *dev = mp->dev;
298 gfp_t gfp_flags = GFP_KERNEL;
299 int i;
300
301 if (from_irq || in_interrupt())
302 gfp_flags = GFP_ATOMIC;
303
304 myri_clean_rings(mp);
305 for (i = 0; i < RX_RING_SIZE; i++) {
306 struct sk_buff *skb = myri_alloc_skb(RX_ALLOC_SIZE, gfp_flags);
307 u32 dma_addr;
308
309 if (!skb)
310 continue;
311 mp->rx_skbs[i] = skb;
312 skb->dev = dev;
313 skb_put(skb, RX_ALLOC_SIZE);
314
315 dma_addr = dma_map_single(&mp->myri_op->dev,
316 skb->data, RX_ALLOC_SIZE,
317 DMA_FROM_DEVICE);
318 sbus_writel(dma_addr, &rxd[i].myri_scatters[0].addr);
319 sbus_writel(RX_ALLOC_SIZE, &rxd[i].myri_scatters[0].len);
320 sbus_writel(i, &rxd[i].ctx);
321 sbus_writel(1, &rxd[i].num_sg);
322 }
323 sbus_writel(0, &rq->head);
324 sbus_writel(RX_RING_SIZE, &rq->tail);
325 }
326
327 static int myri_init(struct myri_eth *mp, int from_irq)
328 {
329 myri_init_rings(mp, from_irq);
330 return 0;
331 }
332
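/* Board error recovery hook; currently a no-op. */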
333 static void myri_is_not_so_happy(struct myri_eth *mp)
334 {
335 }
336
337 #ifdef DEBUG_HEADER
338 static void dump_ehdr(struct ethhdr *ehdr)
339 {
340 printk("ehdr[h_dst(%pM)"
341 "h_source(%pM)"
342 "h_proto(%04x)]\n",
343 ehdr->h_dest, ehdr->h_source, ehdr->h_proto);
344 }
345
346 static void dump_ehdr_and_myripad(unsigned char *stuff)
347 {
348 struct ethhdr *ehdr = (struct ethhdr *) (stuff + 2);
349
350 printk("pad[%02x:%02x]", stuff[0], stuff[1]);
351 dump_ehdr(ehdr);
352 }
353 #endif
354
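/* Reclaim completed transmit slots: unmap each buffer, free the skb and
 * advance tx_old up to the send queue head maintained by the firmware.
 */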
355 static void myri_tx(struct myri_eth *mp, struct net_device *dev)
356 {
357 struct sendq __iomem *sq = mp->sq;
358 int entry = mp->tx_old;
359 int limit = sbus_readl(&sq->head);
360
361 DTX(("entry[%d] limit[%d] ", entry, limit));
362 if (entry == limit)
363 return;
364 while (entry != limit) {
365 struct sk_buff *skb = mp->tx_skbs[entry];
366 u32 dma_addr;
367
368 DTX(("SKB[%d] ", entry));
369 dma_addr = sbus_readl(&sq->myri_txd[entry].myri_gathers[0].addr);
370 dma_unmap_single(&mp->myri_op->dev, dma_addr,
371 skb->len, DMA_TO_DEVICE);
372 dev_kfree_skb(skb);
373 mp->tx_skbs[entry] = NULL;
374 dev->stats.tx_packets++;
375 entry = NEXT_TX(entry);
376 }
377 mp->tx_old = entry;
378 }
379
380 /* Determine the packet's protocol ID. The rule here is that we
381 * assume 802.3 if the type field is short enough to be a length.
382 * This is normal practice and works for any 'now in use' protocol.
383 */
384 static __be16 myri_type_trans(struct sk_buff *skb, struct net_device *dev)
385 {
386 struct ethhdr *eth;
387 unsigned char *rawp;
388
389 skb_set_mac_header(skb, MYRI_PAD_LEN);
390 skb_pull(skb, dev->hard_header_len);
391 eth = eth_hdr(skb);
392
393 #ifdef DEBUG_HEADER
394 DHDR(("myri_type_trans: "));
395 dump_ehdr(eth);
396 #endif
397 if (*eth->h_dest & 1) {
398 if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN)==0)
399 skb->pkt_type = PACKET_BROADCAST;
400 else
401 skb->pkt_type = PACKET_MULTICAST;
402 } else if (dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) {
403 if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
404 skb->pkt_type = PACKET_OTHERHOST;
405 }
406
407 if (ntohs(eth->h_proto) >= 1536)
408 return eth->h_proto;
409
410 rawp = skb->data;
411
412 /* This is a magic hack to spot IPX packets. Older Novell breaks
413 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
414 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
415 * won't work for fault tolerant netware but does for the rest.
416 */
417 if (*(unsigned short *)rawp == 0xFFFF)
418 return htons(ETH_P_802_3);
419
420 /* Real 802.2 LLC */
421 return htons(ETH_P_802_2);
422 }
423
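/* Drain the receive ring: large frames keep their buffer (which is then
 * replaced with a fresh one), small frames are copied into a new skb and
 * the original buffer is recycled back to the LANAI.
 */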
424 static void myri_rx(struct myri_eth *mp, struct net_device *dev)
425 {
426 struct recvq __iomem *rq = mp->rq;
427 struct recvq __iomem *rqa = mp->rqack;
428 int entry = sbus_readl(&rqa->head);
429 int limit = sbus_readl(&rqa->tail);
430 int drops;
431
432 DRX(("entry[%d] limit[%d] ", entry, limit));
433 if (entry == limit)
434 return;
435 drops = 0;
436 DRX(("\n"));
437 while (entry != limit) {
438 struct myri_rxd __iomem *rxdack = &rqa->myri_rxd[entry];
439 u32 csum = sbus_readl(&rxdack->csum);
440 int len = sbus_readl(&rxdack->myri_scatters[0].len);
441 int index = sbus_readl(&rxdack->ctx);
442 struct myri_rxd __iomem *rxd = &rq->myri_rxd[sbus_readl(&rq->tail)];
443 struct sk_buff *skb = mp->rx_skbs[index];
444
445 /* Ack it. */
446 sbus_writel(NEXT_RX(entry), &rqa->head);
447
448 /* Check for errors. */
449 DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum));
450 dma_sync_single_for_cpu(&mp->myri_op->dev,
451 sbus_readl(&rxd->myri_scatters[0].addr),
452 RX_ALLOC_SIZE, DMA_FROM_DEVICE);
453 if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) {
454 DRX(("ERROR["));
455 dev->stats.rx_errors++;
456 if (len < (ETH_HLEN + MYRI_PAD_LEN)) {
457 DRX(("BAD_LENGTH] "));
458 dev->stats.rx_length_errors++;
459 } else {
460 DRX(("NO_PADDING] "));
461 dev->stats.rx_frame_errors++;
462 }
463
464 /* Return it to the LANAI. */
465 drop_it:
466 drops++;
467 DRX(("DROP "));
468 dev->stats.rx_dropped++;
469 dma_sync_single_for_device(&mp->myri_op->dev,
470 sbus_readl(&rxd->myri_scatters[0].addr),
471 RX_ALLOC_SIZE,
472 DMA_FROM_DEVICE);
473 sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
474 sbus_writel(index, &rxd->ctx);
475 sbus_writel(1, &rxd->num_sg);
476 sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
477 goto next;
478 }
479
480 DRX(("len[%d] ", len));
481 if (len > RX_COPY_THRESHOLD) {
482 struct sk_buff *new_skb;
483 u32 dma_addr;
484
485 DRX(("BIGBUFF "));
486 new_skb = myri_alloc_skb(RX_ALLOC_SIZE, GFP_ATOMIC);
487 if (new_skb == NULL) {
488 DRX(("skb_alloc(FAILED) "));
489 goto drop_it;
490 }
491 dma_unmap_single(&mp->myri_op->dev,
492 sbus_readl(&rxd->myri_scatters[0].addr),
493 RX_ALLOC_SIZE,
494 DMA_FROM_DEVICE);
495 mp->rx_skbs[index] = new_skb;
496 new_skb->dev = dev;
497 skb_put(new_skb, RX_ALLOC_SIZE);
498 dma_addr = dma_map_single(&mp->myri_op->dev,
499 new_skb->data,
500 RX_ALLOC_SIZE,
501 DMA_FROM_DEVICE);
502 sbus_writel(dma_addr, &rxd->myri_scatters[0].addr);
503 sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
504 sbus_writel(index, &rxd->ctx);
505 sbus_writel(1, &rxd->num_sg);
506 sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
507
508 /* Trim the original skb for the netif. */
509 DRX(("trim(%d) ", len));
510 skb_trim(skb, len);
511 } else {
512 struct sk_buff *copy_skb = dev_alloc_skb(len);
513
514 DRX(("SMALLBUFF "));
515 if (copy_skb == NULL) {
516 DRX(("dev_alloc_skb(FAILED) "));
517 goto drop_it;
518 }
519 /* DMA sync already done above. */
520 copy_skb->dev = dev;
521 DRX(("resv_and_put "));
522 skb_put(copy_skb, len);
523 skb_copy_from_linear_data(skb, copy_skb->data, len);
524
525 /* Reuse original ring buffer. */
526 DRX(("reuse "));
527 dma_sync_single_for_device(&mp->myri_op->dev,
528 sbus_readl(&rxd->myri_scatters[0].addr),
529 RX_ALLOC_SIZE,
530 DMA_FROM_DEVICE);
531 sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
532 sbus_writel(index, &rxd->ctx);
533 sbus_writel(1, &rxd->num_sg);
534 sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
535
536 skb = copy_skb;
537 }
538
539 /* Just like the Happy Meal (hme) driver, we get checksums from this card. */
540 skb->csum = csum;
541 skb->ip_summed = CHECKSUM_UNNECESSARY; /* XXX */
542
543 skb->protocol = myri_type_trans(skb, dev);
544 DRX(("prot[%04x] netif_rx ", skb->protocol));
545 netif_rx(skb);
546
547 dev->stats.rx_packets++;
548 dev->stats.rx_bytes += len;
549 next:
550 DRX(("NEXT\n"));
551 entry = NEXT_RX(entry);
552 }
553 }
554
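/* Interrupt handler: on ISTAT_HOST, mask the board, drain the receive
 * ring, acknowledge the status word and unmask again, under irq_lock.
 */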
555 static irqreturn_t myri_interrupt(int irq, void *dev_id)
556 {
557 struct net_device *dev = (struct net_device *) dev_id;
558 struct myri_eth *mp = netdev_priv(dev);
559 void __iomem *lregs = mp->lregs;
560 struct myri_channel __iomem *chan = &mp->shmem->channel;
561 unsigned long flags;
562 u32 status;
563 int handled = 0;
564
565 spin_lock_irqsave(&mp->irq_lock, flags);
566
567 status = sbus_readl(lregs + LANAI_ISTAT);
568 DIRQ(("myri_interrupt: status[%08x] ", status));
569 if (status & ISTAT_HOST) {
570 u32 softstate;
571
572 handled = 1;
573 DIRQ(("IRQ_DISAB "));
574 myri_disable_irq(lregs, mp->cregs);
575 softstate = sbus_readl(&chan->state);
576 DIRQ(("state[%08x] ", softstate));
577 if (softstate != STATE_READY) {
578 DIRQ(("myri_not_so_happy "));
579 myri_is_not_so_happy(mp);
580 }
581 DIRQ(("\nmyri_rx: "));
582 myri_rx(mp, dev);
583 DIRQ(("\nistat=ISTAT_HOST "));
584 sbus_writel(ISTAT_HOST, lregs + LANAI_ISTAT);
585 DIRQ(("IRQ_ENAB "));
586 myri_enable_irq(lregs, mp->cregs);
587 }
588 DIRQ(("\n"));
589
590 spin_unlock_irqrestore(&mp->irq_lock, flags);
591
592 return IRQ_RETVAL(handled);
593 }
594
595 static int myri_open(struct net_device *dev)
596 {
597 struct myri_eth *mp = netdev_priv(dev);
598
599 return myri_init(mp, in_interrupt());
600 }
601
602 static int myri_close(struct net_device *dev)
603 {
604 struct myri_eth *mp = netdev_priv(dev);
605
606 myri_clean_rings(mp);
607 return 0;
608 }
609
610 static void myri_tx_timeout(struct net_device *dev)
611 {
612 struct myri_eth *mp = netdev_priv(dev);
613
614 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
615
616 dev->stats.tx_errors++;
617 myri_init(mp, 0);
618 netif_wake_queue(dev);
619 }
620
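/* Queue one frame: reclaim finished slots, build a single-gather TX
 * descriptor (with the destination address words), bump the send queue
 * tail and ring the doorbell.
 */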
621 static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev)
622 {
623 struct myri_eth *mp = netdev_priv(dev);
624 struct sendq __iomem *sq = mp->sq;
625 struct myri_txd __iomem *txd;
626 unsigned long flags;
627 unsigned int head, tail;
628 int len, entry;
629 u32 dma_addr;
630
631 DTX(("myri_start_xmit: "));
632
633 myri_tx(mp, dev);
634
635 netif_stop_queue(dev);
636
637 /* This is just to prevent multiple PIO reads for TX_BUFFS_AVAIL. */
638 head = sbus_readl(&sq->head);
639 tail = sbus_readl(&sq->tail);
640
641 if (!TX_BUFFS_AVAIL(head, tail)) {
642 DTX(("no buffs available, returning 1\n"));
643 return NETDEV_TX_BUSY;
644 }
645
646 spin_lock_irqsave(&mp->irq_lock, flags);
647
648 DHDR(("xmit[skbdata(%p)]\n", skb->data));
649 #ifdef DEBUG_HEADER
650 dump_ehdr_and_myripad(((unsigned char *) skb->data));
651 #endif
652
653 /* XXX Maybe this can go as well. */
654 len = skb->len;
655 if (len & 3) {
656 DTX(("len&3 "));
657 len = (len + 4) & (~3);
658 }
659
660 entry = sbus_readl(&sq->tail);
661
662 txd = &sq->myri_txd[entry];
663 mp->tx_skbs[entry] = skb;
664
665 /* Must do this before we sbus map it. */
666 if (skb->data[MYRI_PAD_LEN] & 0x1) {
667 sbus_writew(0xffff, &txd->addr[0]);
668 sbus_writew(0xffff, &txd->addr[1]);
669 sbus_writew(0xffff, &txd->addr[2]);
670 sbus_writew(0xffff, &txd->addr[3]);
671 } else {
672 sbus_writew(0xffff, &txd->addr[0]);
673 sbus_writew((skb->data[0] << 8) | skb->data[1], &txd->addr[1]);
674 sbus_writew((skb->data[2] << 8) | skb->data[3], &txd->addr[2]);
675 sbus_writew((skb->data[4] << 8) | skb->data[5], &txd->addr[3]);
676 }
677
678 dma_addr = dma_map_single(&mp->myri_op->dev, skb->data,
679 len, DMA_TO_DEVICE);
680 sbus_writel(dma_addr, &txd->myri_gathers[0].addr);
681 sbus_writel(len, &txd->myri_gathers[0].len);
682 sbus_writel(1, &txd->num_sg);
683 sbus_writel(KERNEL_CHANNEL, &txd->chan);
684 sbus_writel(len, &txd->len);
685 sbus_writel((u32)-1, &txd->csum_off);
686 sbus_writel(0, &txd->csum_field);
687
688 sbus_writel(NEXT_TX(entry), &sq->tail);
689 DTX(("BangTheChip "));
690 bang_the_chip(mp);
691
692 DTX(("tbusy=0, returning 0\n"));
693 netif_start_queue(dev);
694 spin_unlock_irqrestore(&mp->irq_lock, flags);
695 return NETDEV_TX_OK;
696 }
697
698 /* Create the MyriNet MAC header for an arbitrary protocol layer
699 *
700 * saddr=NULL means use device source address
701 * daddr=NULL means leave destination address (eg unresolved arp)
702 */
703 static int myri_header(struct sk_buff *skb, struct net_device *dev,
704 unsigned short type, const void *daddr,
705 const void *saddr, unsigned len)
706 {
707 struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
708 unsigned char *pad = (unsigned char *) skb_push(skb, MYRI_PAD_LEN);
709
710 #ifdef DEBUG_HEADER
711 DHDR(("myri_header: pad[%02x,%02x] ", pad[0], pad[1]));
712 dump_ehdr(eth);
713 #endif
714
715 /* Set the MyriNET padding identifier. */
716 pad[0] = MYRI_PAD_LEN;
717 pad[1] = 0xab;
718
719 /* Set the protocol type. For a packet of type ETH_P_802_3/2 we put the
720 * length in here instead.
721 */
722 if (type != ETH_P_802_3 && type != ETH_P_802_2)
723 eth->h_proto = htons(type);
724 else
725 eth->h_proto = htons(len);
726
727 /* Set the source hardware address. */
728 if (saddr)
729 memcpy(eth->h_source, saddr, dev->addr_len);
730 else
731 memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
732
733 /* Anyway, the loopback-device should never use this function... */
734 if (dev->flags & IFF_LOOPBACK) {
735 int i;
736 for (i = 0; i < dev->addr_len; i++)
737 eth->h_dest[i] = 0;
738 return dev->hard_header_len;
739 }
740
741 if (daddr) {
742 memcpy(eth->h_dest, daddr, dev->addr_len);
743 return dev->hard_header_len;
744 }
745 return -dev->hard_header_len;
746 }
747
748 /* Rebuild the MyriNet MAC header. This is called after an ARP
749 * (or in future other address resolution) has completed on this
750 * sk_buff. We now let ARP fill in the other fields.
751 */
752 static int myri_rebuild_header(struct sk_buff *skb)
753 {
754 unsigned char *pad = (unsigned char *) skb->data;
755 struct ethhdr *eth = (struct ethhdr *) (pad + MYRI_PAD_LEN);
756 struct net_device *dev = skb->dev;
757
758 #ifdef DEBUG_HEADER
759 DHDR(("myri_rebuild_header: pad[%02x,%02x] ", pad[0], pad[1]));
760 dump_ehdr(eth);
761 #endif
762
763 /* Refill MyriNet padding identifiers; this is just being anal. */
764 pad[0] = MYRI_PAD_LEN;
765 pad[1] = 0xab;
766
767 switch (eth->h_proto)
768 {
769 #ifdef CONFIG_INET
770 case cpu_to_be16(ETH_P_IP):
771 return arp_find(eth->h_dest, skb);
772 #endif
773
774 default:
775 printk(KERN_DEBUG
776 "%s: unable to resolve type %X addresses.\n",
777 dev->name, (int)eth->h_proto);
778
779 memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
780 return 0;
781 break;
782 }
783
784 return 0;
785 }
786
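/* Prime a hardware header cache entry: the two MyriNet pad bytes plus a
 * prebuilt Ethernet header for this neighbour.
 */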
787 static int myri_header_cache(const struct neighbour *neigh, struct hh_cache *hh)
788 {
789 unsigned short type = hh->hh_type;
790 unsigned char *pad;
791 struct ethhdr *eth;
792 const struct net_device *dev = neigh->dev;
793
794 pad = ((unsigned char *) hh->hh_data) +
795 HH_DATA_OFF(sizeof(*eth) + MYRI_PAD_LEN);
796 eth = (struct ethhdr *) (pad + MYRI_PAD_LEN);
797
798 if (type == htons(ETH_P_802_3))
799 return -1;
800
801 /* Refill MyriNet padding identifiers; this is just being anal. */
802 pad[0] = MYRI_PAD_LEN;
803 pad[1] = 0xab;
804
805 eth->h_proto = type;
806 memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
807 memcpy(eth->h_dest, neigh->ha, dev->addr_len);
808 hh->hh_len = 16;
809 return 0;
810 }
811
812
813 /* Called by Address Resolution module to notify changes in address. */
814 void myri_header_cache_update(struct hh_cache *hh,
815 const struct net_device *dev,
816 const unsigned char * haddr)
817 {
818 memcpy(((u8*)hh->hh_data) + HH_DATA_OFF(sizeof(struct ethhdr)),
819 haddr, dev->addr_len);
820 }
821
822 static int myri_change_mtu(struct net_device *dev, int new_mtu)
823 {
824 if ((new_mtu < (ETH_HLEN + MYRI_PAD_LEN)) || (new_mtu > MYRINET_MTU))
825 return -EINVAL;
826 dev->mtu = new_mtu;
827 return 0;
828 }
829
830 static void myri_set_multicast(struct net_device *dev)
831 {
832 /* Do nothing, all MyriCOM nodes transmit multicast frames
833 * as broadcast packets...
834 */
835 }
836
837 static inline void set_boardid_from_idprom(struct myri_eth *mp, int num)
838 {
839 mp->eeprom.id[0] = 0;
840 mp->eeprom.id[1] = idprom->id_machtype;
841 mp->eeprom.id[2] = (idprom->id_sernum >> 16) & 0xff;
842 mp->eeprom.id[3] = (idprom->id_sernum >> 8) & 0xff;
843 mp->eeprom.id[4] = (idprom->id_sernum >> 0) & 0xff;
844 mp->eeprom.id[5] = num;
845 }
846
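/* Choose the SBUS register mapping size from the LANai CPU version in the
 * EEPROM; unknown versions fall back to the pre-4.0 layout.
 */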
847 static inline void determine_reg_space_size(struct myri_eth *mp)
848 {
849 switch(mp->eeprom.cpuvers) {
850 case CPUVERS_2_3:
851 case CPUVERS_3_0:
852 case CPUVERS_3_1:
853 case CPUVERS_3_2:
854 mp->reg_size = (3 * 128 * 1024) + 4096;
855 break;
856
857 case CPUVERS_4_0:
858 case CPUVERS_4_1:
859 mp->reg_size = ((4096<<1) + mp->eeprom.ramsz);
860 break;
861
862 case CPUVERS_4_2:
863 case CPUVERS_5_0:
864 default:
865 printk("myricom: AIEEE weird cpu version %04x assuming pre4.0\n",
866 mp->eeprom.cpuvers);
867 mp->reg_size = (3 * 128 * 1024) + 4096;
868 }
869 }
870
871 #ifdef DEBUG_DETECT
872 static void dump_eeprom(struct myri_eth *mp)
873 {
874 printk("EEPROM: clockval[%08x] cpuvers[%04x] "
875 "id[%02x,%02x,%02x,%02x,%02x,%02x]\n",
876 mp->eeprom.cval, mp->eeprom.cpuvers,
877 mp->eeprom.id[0], mp->eeprom.id[1], mp->eeprom.id[2],
878 mp->eeprom.id[3], mp->eeprom.id[4], mp->eeprom.id[5]);
879 printk("EEPROM: ramsz[%08x]\n", mp->eeprom.ramsz);
880 printk("EEPROM: fvers[%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
881 mp->eeprom.fvers[0], mp->eeprom.fvers[1], mp->eeprom.fvers[2],
882 mp->eeprom.fvers[3], mp->eeprom.fvers[4], mp->eeprom.fvers[5],
883 mp->eeprom.fvers[6], mp->eeprom.fvers[7]);
884 printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
885 mp->eeprom.fvers[8], mp->eeprom.fvers[9], mp->eeprom.fvers[10],
886 mp->eeprom.fvers[11], mp->eeprom.fvers[12], mp->eeprom.fvers[13],
887 mp->eeprom.fvers[14], mp->eeprom.fvers[15]);
888 printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
889 mp->eeprom.fvers[16], mp->eeprom.fvers[17], mp->eeprom.fvers[18],
890 mp->eeprom.fvers[19], mp->eeprom.fvers[20], mp->eeprom.fvers[21],
891 mp->eeprom.fvers[22], mp->eeprom.fvers[23]);
892 printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x]\n",
893 mp->eeprom.fvers[24], mp->eeprom.fvers[25], mp->eeprom.fvers[26],
894 mp->eeprom.fvers[27], mp->eeprom.fvers[28], mp->eeprom.fvers[29],
895 mp->eeprom.fvers[30], mp->eeprom.fvers[31]);
896 printk("EEPROM: mvers[%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
897 mp->eeprom.mvers[0], mp->eeprom.mvers[1], mp->eeprom.mvers[2],
898 mp->eeprom.mvers[3], mp->eeprom.mvers[4], mp->eeprom.mvers[5],
899 mp->eeprom.mvers[6], mp->eeprom.mvers[7]);
900 printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x]\n",
901 mp->eeprom.mvers[8], mp->eeprom.mvers[9], mp->eeprom.mvers[10],
902 mp->eeprom.mvers[11], mp->eeprom.mvers[12], mp->eeprom.mvers[13],
903 mp->eeprom.mvers[14], mp->eeprom.mvers[15]);
904 printk("EEPROM: dlval[%04x] brd_type[%04x] bus_type[%04x] prod_code[%04x]\n",
905 mp->eeprom.dlval, mp->eeprom.brd_type, mp->eeprom.bus_type,
906 mp->eeprom.prod_code);
907 printk("EEPROM: serial_num[%08x]\n", mp->eeprom.serial_num);
908 }
909 #endif
910
911 static const struct header_ops myri_header_ops = {
912 .create = myri_header,
913 .rebuild = myri_rebuild_header,
914 .cache = myri_header_cache,
915 .cache_update = myri_header_cache_update,
916 };
917
918 static const struct net_device_ops myri_ops = {
919 .ndo_open = myri_open,
920 .ndo_stop = myri_close,
921 .ndo_start_xmit = myri_start_xmit,
922 .ndo_set_multicast_list = myri_set_multicast,
923 .ndo_tx_timeout = myri_tx_timeout,
924 .ndo_change_mtu = myri_change_mtu,
925 .ndo_set_mac_address = eth_mac_addr,
926 .ndo_validate_addr = eth_validate_addr,
927 };
928
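/* Probe one SBUS board: read (or synthesize) the EEPROM contents, map the
 * control/LANai/SRAM register areas, reset the board, hook up the IRQ,
 * load the firmware and register the net_device.
 */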
929 static int __devinit myri_sbus_probe(struct platform_device *op)
930 {
931 struct device_node *dp = op->dev.of_node;
932 static unsigned version_printed;
933 struct net_device *dev;
934 struct myri_eth *mp;
935 const void *prop;
936 static int num;
937 int i, len;
938
939 DET(("myri_ether_init(%p,%d):\n", op, num));
940 dev = alloc_etherdev(sizeof(struct myri_eth));
941 if (!dev)
942 return -ENOMEM;
943
944 if (version_printed++ == 0)
945 printk(version);
946
947 SET_NETDEV_DEV(dev, &op->dev);
948
949 mp = netdev_priv(dev);
950 spin_lock_init(&mp->irq_lock);
951 mp->myri_op = op;
952
953 /* Clean out skb arrays. */
954 for (i = 0; i < (RX_RING_SIZE + 1); i++)
955 mp->rx_skbs[i] = NULL;
956
957 for (i = 0; i < TX_RING_SIZE; i++)
958 mp->tx_skbs[i] = NULL;
959
960 /* First check for EEPROM information. */
961 prop = of_get_property(dp, "myrinet-eeprom-info", &len);
962
963 if (prop)
964 memcpy(&mp->eeprom, prop, sizeof(struct myri_eeprom));
965 if (!prop) {
966 /* No eeprom property, must cook up the values ourselves. */
967 DET(("No EEPROM: "));
968 mp->eeprom.bus_type = BUS_TYPE_SBUS;
969 mp->eeprom.cpuvers =
970 of_getintprop_default(dp, "cpu_version", 0);
971 mp->eeprom.cval =
972 of_getintprop_default(dp, "clock_value", 0);
973 mp->eeprom.ramsz = of_getintprop_default(dp, "sram_size", 0);
974 if (!mp->eeprom.cpuvers)
975 mp->eeprom.cpuvers = CPUVERS_2_3;
976 if (mp->eeprom.cpuvers < CPUVERS_3_0)
977 mp->eeprom.cval = 0;
978 if (!mp->eeprom.ramsz)
979 mp->eeprom.ramsz = (128 * 1024);
980
981 prop = of_get_property(dp, "myrinet-board-id", &len);
982 if (prop)
983 memcpy(&mp->eeprom.id[0], prop, 6);
984 else
985 set_boardid_from_idprom(mp, num);
986
987 prop = of_get_property(dp, "fpga_version", &len);
988 if (prop)
989 memcpy(&mp->eeprom.fvers[0], prop, 32);
990 else
991 memset(&mp->eeprom.fvers[0], 0, 32);
992
993 if (mp->eeprom.cpuvers == CPUVERS_4_1) {
994 if (mp->eeprom.ramsz == (128 * 1024))
995 mp->eeprom.ramsz = (256 * 1024);
996 if ((mp->eeprom.cval == 0x40414041) ||
997 (mp->eeprom.cval == 0x90449044))
998 mp->eeprom.cval = 0x50e450e4;
999 }
1000 }
1001 #ifdef DEBUG_DETECT
1002 dump_eeprom(mp);
1003 #endif
1004
1005 for (i = 0; i < 6; i++)
1006 dev->dev_addr[i] = mp->eeprom.id[i];
1007
1008 determine_reg_space_size(mp);
1009
1010 /* Map in the MyriCOM register/localram set. */
1011 if (mp->eeprom.cpuvers < CPUVERS_4_0) {
1012 /* XXX Makes no sense, if control reg is non-existent this
1013 * XXX driver cannot function at all... maybe pre-4.0 is
1014 * XXX only a valid version for PCI cards? Ask feldy...
1015 */
1016 DET(("Mapping regs for cpuvers < CPUVERS_4_0\n"));
1017 mp->regs = of_ioremap(&op->resource[0], 0,
1018 mp->reg_size, "MyriCOM Regs");
1019 if (!mp->regs) {
1020 printk("MyriCOM: Cannot map MyriCOM registers.\n");
1021 goto err;
1022 }
1023 mp->lanai = mp->regs + (256 * 1024);
1024 mp->lregs = mp->lanai + (0x10000 * 2);
1025 } else {
1026 DET(("Mapping regs for cpuvers >= CPUVERS_4_0\n"));
1027 mp->cregs = of_ioremap(&op->resource[0], 0,
1028 PAGE_SIZE, "MyriCOM Control Regs");
1029 mp->lregs = of_ioremap(&op->resource[0], (256 * 1024),
1030 PAGE_SIZE, "MyriCOM LANAI Regs");
1031 mp->lanai = of_ioremap(&op->resource[0], (512 * 1024),
1032 mp->eeprom.ramsz, "MyriCOM SRAM");
1033 }
1034 DET(("Registers mapped: cregs[%p] lregs[%p] lanai[%p]\n",
1035 mp->cregs, mp->lregs, mp->lanai));
1036
1037 if (mp->eeprom.cpuvers >= CPUVERS_4_0)
1038 mp->shmem_base = 0xf000;
1039 else
1040 mp->shmem_base = 0x8000;
1041
1042 DET(("Shared memory base is %04x, ", mp->shmem_base));
1043
1044 mp->shmem = (struct myri_shmem __iomem *)
1045 (mp->lanai + (mp->shmem_base * 2));
1046 DET(("shmem mapped at %p\n", mp->shmem));
1047
1048 mp->rqack = &mp->shmem->channel.recvqa;
1049 mp->rq = &mp->shmem->channel.recvq;
1050 mp->sq = &mp->shmem->channel.sendq;
1051
1052 /* Reset the board. */
1053 DET(("Resetting LANAI\n"));
1054 myri_reset_off(mp->lregs, mp->cregs);
1055 myri_reset_on(mp->cregs);
1056
1057 /* Turn IRQs off. */
1058 myri_disable_irq(mp->lregs, mp->cregs);
1059
1060 /* Reset once more. */
1061 myri_reset_on(mp->cregs);
1062
1063 /* Get the supported DVMA burst sizes from our SBUS. */
1064 mp->myri_bursts = of_getintprop_default(dp->parent,
1065 "burst-sizes", 0x00);
1066 if (!sbus_can_burst64())
1067 mp->myri_bursts &= ~(DMA_BURST64);
1068
1069 DET(("MYRI bursts %02x\n", mp->myri_bursts));
1070
1071 /* Encode SBUS interrupt level in second control register. */
1072 i = of_getintprop_default(dp, "interrupts", 0);
1073 if (i == 0)
1074 i = 4;
1075 DET(("prom_getint(interrupts)==%d, irqlvl set to %04x\n",
1076 i, (1 << i)));
1077
1078 sbus_writel((1 << i), mp->cregs + MYRICTRL_IRQLVL);
1079
1080 mp->dev = dev;
1081 dev->watchdog_timeo = 5*HZ;
1082 dev->irq = op->archdata.irqs[0];
1083 dev->netdev_ops = &myri_ops;
1084
1085 /* Register interrupt handler now. */
1086 DET(("Requesting MYRIcom IRQ line.\n"));
1087 if (request_irq(dev->irq, myri_interrupt,
1088 IRQF_SHARED, "MyriCOM Ethernet", (void *) dev)) {
1089 printk("MyriCOM: Cannot register interrupt handler.\n");
1090 goto err;
1091 }
1092
1093 dev->mtu = MYRINET_MTU;
1094 dev->header_ops = &myri_header_ops;
1095
1096 dev->hard_header_len = (ETH_HLEN + MYRI_PAD_LEN);
1097
1098 /* Load code onto the LANai. */
1099 DET(("Loading LANAI firmware\n"));
1100 if (myri_load_lanai(mp)) {
1101 printk(KERN_ERR "MyriCOM: Cannot load LANAI firmware.\n");
1102 goto err_free_irq;
1103 }
1104
1105 if (register_netdev(dev)) {
1106 printk("MyriCOM: Cannot register device.\n");
1107 goto err_free_irq;
1108 }
1109
1110 dev_set_drvdata(&op->dev, mp);
1111
1112 num++;
1113
1114 printk("%s: MyriCOM MyriNET Ethernet %pM\n",
1115 dev->name, dev->dev_addr);
1116
1117 return 0;
1118
1119 err_free_irq:
1120 free_irq(dev->irq, dev);
1121 err:
1122 /* This will also free the co-allocated private data. */
1123 free_netdev(dev);
1124 return -ENODEV;
1125 }
1126
1127 static int __devexit myri_sbus_remove(struct platform_device *op)
1128 {
1129 struct myri_eth *mp = dev_get_drvdata(&op->dev);
1130 struct net_device *net_dev = mp->dev;
1131
1132 unregister_netdev(net_dev);
1133
1134 free_irq(net_dev->irq, net_dev);
1135
1136 if (mp->eeprom.cpuvers < CPUVERS_4_0) {
1137 of_iounmap(&op->resource[0], mp->regs, mp->reg_size);
1138 } else {
1139 of_iounmap(&op->resource[0], mp->cregs, PAGE_SIZE);
1140 of_iounmap(&op->resource[0], mp->lregs, (256 * 1024));
1141 of_iounmap(&op->resource[0], mp->lanai, (512 * 1024));
1142 }
1143
1144 free_netdev(net_dev);
1145
1146 dev_set_drvdata(&op->dev, NULL);
1147
1148 return 0;
1149 }
1150
1151 static const struct of_device_id myri_sbus_match[] = {
1152 {
1153 .name = "MYRICOM,mlanai",
1154 },
1155 {
1156 .name = "myri",
1157 },
1158 {},
1159 };
1160
1161 MODULE_DEVICE_TABLE(of, myri_sbus_match);
1162
1163 static struct platform_driver myri_sbus_driver = {
1164 .driver = {
1165 .name = "myri",
1166 .owner = THIS_MODULE,
1167 .of_match_table = myri_sbus_match,
1168 },
1169 .probe = myri_sbus_probe,
1170 .remove = __devexit_p(myri_sbus_remove),
1171 };
1172
1173 static int __init myri_sbus_init(void)
1174 {
1175 return platform_driver_register(&myri_sbus_driver);
1176 }
1177
1178 static void __exit myri_sbus_exit(void)
1179 {
1180 platform_driver_unregister(&myri_sbus_driver);
1181 }
1182
1183 module_init(myri_sbus_init);
1184 module_exit(myri_sbus_exit);
1185
1186 MODULE_LICENSE("GPL");
1187 MODULE_FIRMWARE(FWNAME);