/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_main.c 1377 2004-12-23 19:57:12Z roland $
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

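/*
 * Link-layer broadcast address for IPoIB: the first four bytes are
 * the reserved byte plus the broadcast QPN (0xffffff), followed by
 * the IPv4 broadcast MGID.  Bytes 8 and 9 (the P_Key field of the
 * GID) are left zero here and filled in per port in ipoib_add_port()
 * below.
 */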
static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

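/*
 * All deferred IPoIB work (multicast joins, flushes, restarts) runs
 * on this single-threaded workqueue; see ipoib_init_module() for why
 * the shared system workqueue cannot be used here.
 */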
struct workqueue_struct *ipoib_workqueue;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one
};

int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(dev))
		return 0;

	if (ipoib_ib_dev_open(dev))
		return -EINVAL;

	if (ipoib_ib_dev_up(dev))
		return -EINVAL;

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		down(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		up(&priv->vlan_mutex);
	}

	netif_start_queue(dev);

	return 0;
}

static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	netif_stop_queue(dev);

	ipoib_ib_dev_down(dev);
	ipoib_ib_dev_stop(dev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		down(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		up(&priv->vlan_mutex);
	}

	return 0;
}

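/*
 * The effective MTU is the smaller of what the administrator asked
 * for and what the broadcast group allows; priv->mcast_mtu is only
 * known once the multicast join completes (see the "MTU will be
 * reset when mcast join happens" note in ipoib_setup() below).
 */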
static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	return 0;
}

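/*
 * Path records are cached in an rb-tree keyed by destination GID in
 * memcmp() order.  __path_find() and __path_add() are the unlocked
 * helpers; every caller in this file serializes on priv->lock.
 */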
static struct ipoib_path *__path_find(struct net_device *dev,
				      union ib_gid *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid->raw, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that path->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);
		*to_ipoib_neigh(neigh->neighbour) = NULL;
		neigh->neighbour->ops->destructor = NULL;
		kfree(neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}

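/*
 * Detach every cached path from the tree and list while holding the
 * lock, then cancel any outstanding SA queries and free the paths
 * outside it, since wait_for_completion() may sleep.
 */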
void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_splice(&priv->path_list, &remove_list);
	INIT_LIST_HEAD(&priv->path_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	spin_unlock_irqrestore(&priv->lock, flags);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		wait_for_completion(&path->done);
		path_free(dev, path);
	}
}

static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_neigh *neigh;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (pathrec)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n",
			  be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
	else
		ipoib_dbg(priv, "PathRec status %d for GID " IPOIB_GID_FMT "\n",
			  status, IPOIB_GID_ARG(path->pathrec.dgid));

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av = {
			.dlid	  = be16_to_cpu(pathrec->dlid),
			.sl	  = pathrec->sl,
			.port_num = priv->port
		};
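		/*
		 * static_rate is an inter-packet delay: the HCA sends
		 * at local link rate / (static_rate + 1).  If the
		 * path is slower than our local port, compute the
		 * delay needed to match it (e.g. local 4X over a 1X
		 * path gives static_rate 3, i.e. 1/4 rate).
		 */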
		int path_rate = ib_sa_rate_enum_to_int(pathrec->rate);

		if (path_rate > 0 && priv->local_rate > path_rate)
			av.static_rate = (priv->local_rate - 1) / path_rate;

		ipoib_dbg(priv, "static_rate %d for local port %dX, path %dX\n",
			  av.static_rate, priv->local_rate,
			  ib_sa_rate_enum_to_int(pathrec->rate));

		ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	path->ah = ah;

	if (ah) {
		path->pathrec = *pathrec;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry(neigh, &path->neigh_list, list) {
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
	} else
		path->query = NULL;

	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}

static struct ipoib_path *path_rec_create(struct net_device *dev,
					  union ib_gid *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	path = kmalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev	   = dev;
	path->pathrec.dlid = 0;
	path->ah	   = NULL;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);
	path->query = NULL;
	init_completion(&path->done);

	memcpy(path->pathrec.dgid.raw, gid->raw, sizeof (union ib_gid));
	path->pathrec.sgid	= priv->local_gid;
	path->pathrec.pkey	= cpu_to_be16(priv->pkey);
	path->pathrec.numb_path	= 1;

	return path;
}

static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT "\n",
		  IPOIB_GID_ARG(path->pathrec.dgid));

	path->query_id =
		ib_sa_path_rec_get(priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID		|
				   IB_SA_PATH_REC_SGID		|
				   IB_SA_PATH_REC_NUMB_PATH	|
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed\n");
		path->query = NULL;
		return path->query_id;
	}

	return 0;
}

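/*
 * A 20-byte IPoIB hardware address is a 4-byte QPN (top byte
 * reserved) followed by the 16-byte port GID, so "ha + 4" below is
 * the destination GID and the first __be32 is the destination QPN.
 */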
static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;

	neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh) {
		++priv->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	skb_queue_head_init(&neigh->queue);
	neigh->neighbour = skb->dst->neighbour;
	*to_ipoib_neigh(skb->dst->neighbour) = neigh;

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	path = __path_find(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4));
	if (!path) {
		path = path_rec_create(dev,
				       (union ib_gid *) (skb->dst->neighbour->ha + 4));
		if (!path)
			goto err;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->pathrec.dlid) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;

		ipoib_send(dev, skb, path->ah,
			   be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
	} else {
		neigh->ah = NULL;
		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			__skb_queue_tail(&neigh->queue, skb);
		} else {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		if (!path->query && path_rec_start(dev, path))
			goto err;
	}

	spin_unlock(&priv->lock);
	return;

err:
	*to_ipoib_neigh(skb->dst->neighbour) = NULL;
	list_del(&neigh->list);
	neigh->neighbour->ops->destructor = NULL;
	kfree(neigh);

	++priv->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock(&priv->lock);
}

static void path_lookup(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

	/* Look up path record for unicasts */
	if (skb->dst->neighbour->ha[4] != 0xff) {
		neigh_add_path(skb, dev);
		return;
	}

	/* Add in the P_Key for multicasts */
	skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
	skb->dst->neighbour->ha[9] = priv->pkey & 0xff;
	ipoib_mcast_send(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4), skb);
}

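/*
 * Send a unicast packet that reached ipoib_start_xmit() without a
 * neighbour entry; per the type check there this should only be an
 * ARP or RARP reply, with the destination address carried in the
 * pseudoheader rather than in a neighbour struct.
 */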
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_pseudoheader *phdr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	path = __path_find(dev, (union ib_gid *) (phdr->hwaddr + 4));
	if (!path) {
		path = path_rec_create(dev,
				       (union ib_gid *) (phdr->hwaddr + 4));
		if (path) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof *phdr);
			__skb_queue_tail(&path->queue, skb);

			if (path_rec_start(dev, path)) {
				spin_unlock(&priv->lock);
				path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock(&priv->lock);
		return;
	}

	if (path->pathrec.dlid) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		ipoib_send(dev, skb, path->ah,
			   be32_to_cpup((__be32 *) phdr->hwaddr));
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		/* put pseudoheader back on for next time */
		skb_push(skb, sizeof *phdr);
		__skb_queue_tail(&path->queue, skb);
	} else {
		++priv->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock(&priv->lock);
}

static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	unsigned long flags;

	local_irq_save(flags);
	if (!spin_trylock(&priv->tx_lock)) {
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}

	/*
	 * Check if our queue is stopped.  Since we have the LLTX bit
	 * set, we can't rely on netif_stop_queue() preventing our
	 * xmit function from being called with a full queue.
	 */
	if (unlikely(netif_queue_stopped(dev))) {
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	if (skb->dst && skb->dst->neighbour) {
		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
			path_lookup(skb, dev);
			goto out;
		}

		neigh = *to_ipoib_neigh(skb->dst->neighbour);

		if (likely(neigh->ah)) {
			ipoib_send(dev, skb, neigh->ah,
				   be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
			goto out;
		}

		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			spin_lock(&priv->lock);
			__skb_queue_tail(&neigh->queue, skb);
			spin_unlock(&priv->lock);
		} else {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}
	} else {
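		/*
		 * No neighbour: ipoib_hard_header() stashed the
		 * destination hardware address in a pseudoheader in
		 * front of the payload, so pull it back off here.
		 */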
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb->data;
		skb_pull(skb, sizeof *phdr);

		if (phdr->hwaddr[4] == 0xff) {
			/* Add in the P_Key for multicast */
			phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
			phdr->hwaddr[9] = priv->pkey & 0xff;

			ipoib_mcast_send(dev, (union ib_gid *) (phdr->hwaddr + 4), skb);
		} else {
			/* unicast GID -- should be ARP or RARP reply */

			if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
			    (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
				ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x "
					   IPOIB_GID_FMT "\n",
					   skb->dst ? "neigh" : "dst",
					   be16_to_cpup((__be16 *) skb->data),
					   be32_to_cpup((__be32 *) phdr->hwaddr),
					   IPOIB_GID_ARG(*(union ib_gid *) (phdr->hwaddr + 4)));
				dev_kfree_skb_any(skb);
				++priv->stats.tx_dropped;
				goto out;
			}

			unicast_arp_send(skb, dev, phdr);
		}
	}

out:
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return NETDEV_TX_OK;
}

static struct net_device_stats *ipoib_get_stats(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	return &priv->stats;
}

static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %ld\n",
		   jiffies - dev->trans_start);
	/* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     void *daddr, void *saddr, unsigned len)
{
	struct ipoib_header *header;

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * If we don't have a neighbour structure, stuff the
	 * destination address onto the front of the skb so we can
	 * figure out where to send the packet later.
	 */
	if (!skb->dst || !skb->dst->neighbour) {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
		memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
	}

	return 0;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	queue_work(ipoib_workqueue, &priv->restart_task);
}

static void ipoib_neigh_destructor(struct neighbour *n)
{
	struct ipoib_neigh *neigh;
	struct ipoib_dev_priv *priv = netdev_priv(n->dev);
	unsigned long flags;
	struct ipoib_ah *ah = NULL;

	ipoib_dbg(priv,
		  "neigh_destructor for %06x " IPOIB_GID_FMT "\n",
		  be32_to_cpup((__be32 *) n->ha),
		  IPOIB_GID_ARG(*((union ib_gid *) (n->ha + 4))));

	spin_lock_irqsave(&priv->lock, flags);

	neigh = *to_ipoib_neigh(n);
	if (neigh) {
		if (neigh->ah)
			ah = neigh->ah;
		list_del(&neigh->list);
		*to_ipoib_neigh(n) = NULL;
		kfree(neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ah)
		ipoib_put_ah(ah);
}

static int ipoib_neigh_setup(struct neighbour *neigh)
{
	/*
	 * Is this kosher? I can't find anybody in the kernel that
	 * sets neigh->destructor, so we should be able to set it here
	 * without trouble.
	 */
	neigh->ops->destructor = ipoib_neigh_destructor;

	return 0;
}

static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
	parms->neigh_setup = ipoib_neigh_setup;

	return 0;
}

int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */

	priv->rx_ring = kmalloc(IPOIB_RX_RING_SIZE * sizeof (struct ipoib_buf),
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, IPOIB_RX_RING_SIZE);
		goto out;
	}
	memset(priv->rx_ring, 0,
	       IPOIB_RX_RING_SIZE * sizeof (struct ipoib_buf));

	priv->tx_ring = kmalloc(IPOIB_TX_RING_SIZE * sizeof (struct ipoib_buf),
				GFP_KERNEL);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, IPOIB_TX_RING_SIZE);
		goto out_rx_ring_cleanup;
	}
	memset(priv->tx_ring, 0,
	       IPOIB_TX_RING_SIZE * sizeof (struct ipoib_buf));

	/* priv->tx_head & tx_tail are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	kfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

	ipoib_delete_debug_file(dev);

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		unregister_netdev(cpriv->dev);
		ipoib_dev_cleanup(cpriv->dev);
		free_netdev(cpriv->dev);
	}

	ipoib_ib_dev_cleanup(dev);

	kfree(priv->rx_ring);
	kfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

static void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	dev->open		 = ipoib_open;
	dev->stop		 = ipoib_stop;
	dev->change_mtu		 = ipoib_change_mtu;
	dev->hard_start_xmit	 = ipoib_start_xmit;
	dev->get_stats		 = ipoib_get_stats;
	dev->tx_timeout		 = ipoib_timeout;
	dev->hard_header	 = ipoib_hard_header;
	dev->set_multicast_list	 = ipoib_set_mcast_list;
	dev->neigh_setup	 = ipoib_neigh_setup_dev;

	dev->watchdog_timeo	 = HZ;

	dev->rebuild_header	 = NULL;
	dev->set_mac_address	 = NULL;
	dev->header_cache_update = NULL;

	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;

	/*
	 * We add in INFINIBAND_ALEN to allow for the destination
	 * address "pseudoheader" for skbs without neighbour struct.
	 */
	dev->hard_header_len	 = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
	dev->addr_len		 = INFINIBAND_ALEN;
	dev->type		 = ARPHRD_INFINIBAND;
	dev->tx_queue_len	 = IPOIB_TX_RING_SIZE * 2;
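
	/*
	 * NETIF_F_LLTX: we do our own TX locking with priv->tx_lock
	 * in ipoib_start_xmit() instead of relying on the core
	 * xmit_lock.
	 */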
	dev->features		 = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;

	/* MTU will be reset when mcast join happens */
	dev->mtu		 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
	priv->mcast_mtu		 = priv->admin_mtu = dev->mtu;

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	netif_carrier_off(dev);

	SET_MODULE_OWNER(dev);

	priv->dev = dev;

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->tx_lock);

	init_MUTEX(&priv->mcast_mutex);
	init_MUTEX(&priv->vlan_mutex);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_WORK(&priv->pkey_task,    ipoib_pkey_poll,          priv->dev);
	INIT_WORK(&priv->mcast_task,   ipoib_mcast_join_task,    priv->dev);
	INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush,       priv->dev);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev);
	INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah,            priv->dev);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
			   ipoib_setup);
	if (!dev)
		return NULL;

	return netdev_priv(dev);
}

static ssize_t show_pkey(struct class_device *cdev, char *buf)
{
	struct ipoib_dev_priv *priv =
		netdev_priv(container_of(cdev, struct net_device, class_dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t create_child(struct class_device *cdev,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(container_of(cdev, struct net_device, class_dev),
			     pkey);

	return ret ? ret : count;
}
static CLASS_DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);

static ssize_t delete_child(struct class_device *cdev,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(container_of(cdev, struct net_device, class_dev),
				pkey);

	return ret ? ret : count;
}
static CLASS_DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
	return class_device_create_file(&dev->class_dev,
					&class_device_attr_pkey);
}

static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dma_device);

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto alloc_mem_failed;
	}

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto alloc_mem_failed;
	} else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		       "port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}

	result = register_netdev(priv->dev);
	if (result) {
		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
		       hca->name, port, result);
		goto register_failed;
	}

	if (ipoib_create_debug_file(priv->dev))
		goto debug_failed;

	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (class_device_create_file(&priv->dev->class_dev,
				     &class_device_attr_create_child))
		goto sysfs_failed;
	if (class_device_create_file(&priv->dev->class_dev,
				     &class_device_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	ipoib_delete_debug_file(priv->dev);

debug_failed:
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);
	flush_scheduled_work();

event_failed:
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	free_netdev(priv->dev);

alloc_mem_failed:
	return ERR_PTR(result);
}

static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

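	/*
	 * A switch exposes a single management port numbered 0; a CA
	 * numbers its physical ports 1..phys_port_cnt.
	 */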
	if (device->node_type == IB_NODE_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = netdev_priv(dev);
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list;

	dev_list = ib_get_client_data(device, &ipoib_client);

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		ib_unregister_event_handler(&priv->event_handler);
		flush_scheduled_work();

		unregister_netdev(priv->dev);
		ipoib_dev_cleanup(priv->dev);
		free_netdev(priv->dev);
	}

	kfree(dev_list);
}

static int __init ipoib_init_module(void)
{
	int ret;

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed.  We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_wq;

	return 0;

err_wq:
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}

static void __exit ipoib_cleanup_module(void)
{
	ib_unregister_client(&ipoib_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);