infiniband: ipoib replace IPOIB_GID_FMT with %p6
drivers/infiniband/ulp/ipoib/ipoib_main.c
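
This change drops the driver-private IPOIB_GID_FMT/IPOIB_GID_ARG printf
macros in favor of the kernel's %p6 extension, which prints a raw
16-byte IPv6-style address (here, an IB GID) straight from a pointer.
As a hedged sketch of the conversion (the old macros lived in ipoib.h
and expanded to sixteen "%2.2x" conversions plus the matching argument
list; later mainline kernels renamed this specifier to %pI6):

	/* before: format macro plus a 16-argument expansion */
	ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT "\n",
		  IPOIB_GID_ARG(path->pathrec.dgid));

	/* after: printk formats the GID from the raw byte pointer */
	ipoib_dbg(priv, "Start path record lookup for %p6\n",
		  path->pathrec.dgid.raw);
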
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <net/dst.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

static int lro;
module_param(lro, bool, 0444);
MODULE_PARM_DESC(lro, "Enable LRO (Large Receive Offload)");

static int lro_max_aggr = IPOIB_LRO_MAX_AGGR;
module_param(lro_max_aggr, int, 0644);
MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated "
		"(default = 64)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one
};

int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	napi_enable(&priv->napi);
	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(dev))
		return 0;

	if (ipoib_ib_dev_open(dev)) {
		napi_disable(&priv->napi);
		return -EINVAL;
	}

	if (ipoib_ib_dev_up(dev)) {
		ipoib_ib_dev_stop(dev, 1);
		napi_disable(&priv->napi);
		return -EINVAL;
	}

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	netif_start_queue(dev);

	return 0;
}

static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
	napi_disable(&priv->napi);

	netif_stop_queue(dev);

	ipoib_ib_dev_down(dev, 0);
	ipoib_ib_dev_stop(dev, 0);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	return 0;
}
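
/*
 * In datagram mode the MTU is capped by the port's IB MTU less the
 * IPoIB encapsulation header; in connected mode it may be much larger,
 * bounded by what ipoib_cm_max_mtu() reports.
 */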
static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* dev->mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(dev)) {
		if (new_mtu > ipoib_cm_max_mtu(dev))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
				   priv->mcast_mtu);

		dev->mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	return 0;
}
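
/*
 * Paths are kept in an rb-tree keyed on the 16-byte destination GID
 * (compared with memcmp()), plus a list used for iteration and for
 * bulk teardown in ipoib_flush_paths().
 */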
static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that path->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);

		ipoib_neigh_free(dev, neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_mark_paths_invalid(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%04x GID %p6 invalid\n",
			  be16_to_cpu(path->pathrec.dlid),
			  path->pathrec.dgid.raw);
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}

void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		wait_for_completion(&path->done);
		path_free(dev, path);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}
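
/*
 * SA path record query callback: on success, build an address handle
 * from the returned path, hand it to every neighbour waiting on this
 * path, and retransmit any packets that were queued while the lookup
 * was in flight.
 */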
static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %p6\n",
			  be16_to_cpu(pathrec->dlid), pathrec->dgid.raw);
	else
		ipoib_dbg(priv, "PathRec status %d for GID %p6\n",
			  status, path->pathrec.dgid.raw);

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (ah) {
		path->pathrec = *pathrec;

		old_ah   = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
			if (neigh->ah) {
				WARN_ON(neigh->ah != old_ah);
				/*
				 * Dropping the ah reference inside
				 * priv->lock is safe here, because we
				 * will hold one more reference from
				 * the original value of path->ah (ie
				 * old_ah).
				 */
				ipoib_put_ah(neigh->ah);
			}
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;
			memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
			       sizeof(union ib_gid));

			if (ipoib_cm_enabled(dev, neigh->neighbour)) {
				if (!ipoib_cm_get(neigh))
					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
									       path,
									       neigh));
				if (!ipoib_cm_get(neigh)) {
					list_del(&neigh->list);
					if (neigh->ah)
						ipoib_put_ah(neigh->ah);
					ipoib_neigh_free(dev, neigh);
					continue;
				}
			}

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
		path->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (old_ah)
		ipoib_put_ah(old_ah);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}

static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev = dev;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);

	memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
	path->pathrec.sgid	    = priv->local_gid;
	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path     = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}

static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for %p6\n",
		  path->pathrec.dgid.raw);

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID		|
				   IB_SA_PATH_REC_SGID		|
				   IB_SA_PATH_REC_NUMB_PATH	|
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		return path->query_id;
	}

	return 0;
}
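
/*
 * First packet to a new unicast neighbour: allocate an ipoib_neigh,
 * attach it to the (possibly freshly created) path for the destination
 * GID, and either send immediately or queue the skb until the path
 * record lookup completes.
 */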
static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;
	unsigned long flags;

	neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
	if (!neigh) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	spin_lock_irqsave(&priv->lock, flags);

	path = __path_find(dev, skb->dst->neighbour->ha + 4);
	if (!path) {
		path = path_rec_create(dev, skb->dst->neighbour->ha + 4);
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;
		memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
		       sizeof(union ib_gid));

		if (ipoib_cm_enabled(dev, neigh->neighbour)) {
			if (!ipoib_cm_get(neigh))
				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
			if (!ipoib_cm_get(neigh)) {
				list_del(&neigh->list);
				if (neigh->ah)
					ipoib_put_ah(neigh->ah);
				ipoib_neigh_free(dev, neigh);
				goto err_drop;
			}
			if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
				__skb_queue_tail(&neigh->queue, skb);
			else {
				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
					   skb_queue_len(&neigh->queue));
				goto err_drop;
			}
		} else
			ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb->dst->neighbour->ha));
	} else {
		neigh->ah  = NULL;

		if (!path->query && path_rec_start(dev, path))
			goto err_list;

		__skb_queue_tail(&neigh->queue, skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	return;

err_list:
	list_del(&neigh->list);

err_path:
	ipoib_neigh_free(dev, neigh);
err_drop:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

	/* Look up path record for unicasts */
	if (skb->dst->neighbour->ha[4] != 0xff) {
		neigh_add_path(skb, dev);
		return;
	}

	/* Add in the P_Key for multicasts */
	skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
	skb->dst->neighbour->ha[9] = priv->pkey & 0xff;
	ipoib_mcast_send(dev, skb->dst->neighbour->ha + 4, skb);
}

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_pseudoheader *phdr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	path = __path_find(dev, phdr->hwaddr + 4);
	if (!path || !path->valid) {
		if (!path)
			path = path_rec_create(dev, phdr->hwaddr + 4);
		if (path) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof *phdr);
			__skb_queue_tail(&path->queue, skb);

			if (path_rec_start(dev, path)) {
				spin_unlock_irqrestore(&priv->lock, flags);
				path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	if (path->ah) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		/* put pseudoheader back on for next time */
		skb_push(skb, sizeof *phdr);
		__skb_queue_tail(&path->queue, skb);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
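
/*
 * Transmit entry point.  With a neighbour attached we send over the
 * connected-mode QP if one is up, over the UD QP if an address handle
 * exists, or queue briefly while the path is resolved; without a
 * neighbour we fall back to the pseudoheader pushed by
 * ipoib_hard_header() below.
 */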
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	unsigned long flags;

	if (likely(skb->dst && skb->dst->neighbour)) {
		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
			ipoib_path_lookup(skb, dev);
			return NETDEV_TX_OK;
		}

		neigh = *to_ipoib_neigh(skb->dst->neighbour);

		if (neigh->ah)
			if (unlikely((memcmp(&neigh->dgid.raw,
					     skb->dst->neighbour->ha + 4,
					     sizeof(union ib_gid))) ||
				     (neigh->dev != dev))) {
				spin_lock_irqsave(&priv->lock, flags);
				/*
				 * It's safe to call ipoib_put_ah() inside
				 * priv->lock here, because we know that
				 * path->ah will always hold one more reference,
				 * so ipoib_put_ah() will never do more than
				 * decrement the ref count.
				 */
				ipoib_put_ah(neigh->ah);
				list_del(&neigh->list);
				ipoib_neigh_free(dev, neigh);
				spin_unlock_irqrestore(&priv->lock, flags);
				ipoib_path_lookup(skb, dev);
				return NETDEV_TX_OK;
			}

		if (ipoib_cm_get(neigh)) {
			if (ipoib_cm_up(neigh)) {
				ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
				return NETDEV_TX_OK;
			}
		} else if (neigh->ah) {
			ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha));
			return NETDEV_TX_OK;
		}

		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			spin_lock_irqsave(&priv->lock, flags);
			__skb_queue_tail(&neigh->queue, skb);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}
	} else {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb->data;
		skb_pull(skb, sizeof *phdr);

		if (phdr->hwaddr[4] == 0xff) {
			/* Add in the P_Key for multicast*/
			phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
			phdr->hwaddr[9] = priv->pkey & 0xff;

			ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
		} else {
			/* unicast GID -- should be ARP or RARP reply */

			if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
			    (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
				ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %p6\n",
					   skb->dst ? "neigh" : "dst",
					   be16_to_cpup((__be16 *) skb->data),
					   IPOIB_QPN(phdr->hwaddr),
					   phdr->hwaddr + 4);
				dev_kfree_skb_any(skb);
				++dev->stats.tx_dropped;
				return NETDEV_TX_OK;
			}

			unicast_arp_send(skb, dev, phdr);
		}
	}

	return NETDEV_TX_OK;
}

static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev->trans_start));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     const void *daddr, const void *saddr, unsigned len)
{
	struct ipoib_header *header;

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * If we don't have a neighbour structure, stuff the
	 * destination address onto the front of the skb so we can
	 * figure out where to send the packet later.
	 */
	if ((!skb->dst || !skb->dst->neighbour) && daddr) {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
		memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
	}

	return 0;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(ipoib_workqueue, &priv->restart_task);
}

static void ipoib_neigh_cleanup(struct neighbour *n)
{
	struct ipoib_neigh *neigh;
	struct ipoib_dev_priv *priv = netdev_priv(n->dev);
	unsigned long flags;
	struct ipoib_ah *ah = NULL;

	neigh = *to_ipoib_neigh(n);
	if (neigh)
		priv = netdev_priv(neigh->dev);
	else
		return;
	ipoib_dbg(priv,
		  "neigh_cleanup for %06x %p6\n",
		  IPOIB_QPN(n->ha),
		  n->ha + 4);

	spin_lock_irqsave(&priv->lock, flags);

	if (neigh->ah)
		ah = neigh->ah;
	list_del(&neigh->list);
	ipoib_neigh_free(n->dev, neigh);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ah)
		ipoib_put_ah(ah);
}

struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour,
				      struct net_device *dev)
{
	struct ipoib_neigh *neigh;

	neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->neighbour = neighbour;
	neigh->dev = dev;
	*to_ipoib_neigh(neighbour) = neigh;
	skb_queue_head_init(&neigh->queue);
	ipoib_cm_set(neigh, NULL);

	return neigh;
}

void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
{
	struct sk_buff *skb;
	*to_ipoib_neigh(neigh->neighbour) = NULL;
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	kfree(neigh);
}

static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
	parms->neigh_cleanup = ipoib_neigh_cleanup;

	return 0;
}

int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring =	kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, ipoib_recvq_size);
		goto out;
	}

	priv->tx_ring = vmalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}
	memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	vfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

	ipoib_delete_debug_files(dev);

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		unregister_netdev(cpriv->dev);
		ipoib_dev_cleanup(cpriv->dev);
		free_netdev(cpriv->dev);
	}

	ipoib_ib_dev_cleanup(dev);

	kfree(priv->rx_ring);
	vfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

static const struct header_ops ipoib_header_ops = {
	.create	= ipoib_hard_header,
};
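
/*
 * inet_lro callback: locate the IP and TCP headers of a received skb
 * and confirm it is an LRO candidate (IPv4 TCP with a verified
 * checksum and complete headers).
 */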
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *priv)
{
	unsigned int ip_len;
	struct iphdr *iph;

	if (unlikely(skb->protocol != htons(ETH_P_IP)))
		return -1;

	/*
	 * In the future we may add an else clause that verifies the
	 * checksum and allows devices which do not calculate checksum
	 * to use LRO.
	 */
	if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY))
		return -1;

	/* Check for non-TCP packet */
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if IP header and TCP header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*iphdr = iph;

	return 0;
}

static void ipoib_lro_setup(struct ipoib_dev_priv *priv)
{
	priv->lro.lro_mgr.max_aggr	 = lro_max_aggr;
	priv->lro.lro_mgr.max_desc	 = IPOIB_MAX_LRO_DESCRIPTORS;
	priv->lro.lro_mgr.lro_arr	 = priv->lro.lro_desc;
	priv->lro.lro_mgr.get_skb_header = get_skb_hdr;
	priv->lro.lro_mgr.features	 = LRO_F_NAPI;
	priv->lro.lro_mgr.dev		 = priv->dev;
	priv->lro.lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
}

static void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	dev->open		 = ipoib_open;
	dev->stop		 = ipoib_stop;
	dev->change_mtu		 = ipoib_change_mtu;
	dev->hard_start_xmit	 = ipoib_start_xmit;
	dev->tx_timeout		 = ipoib_timeout;
	dev->header_ops		 = &ipoib_header_ops;
	dev->set_multicast_list	 = ipoib_set_mcast_list;
	dev->neigh_setup	 = ipoib_neigh_setup_dev;

	ipoib_set_ethtool_ops(dev);

	netif_napi_add(dev, &priv->napi, ipoib_poll, 100);

	dev->watchdog_timeo	 = HZ;

	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;

	/*
	 * We add in INFINIBAND_ALEN to allow for the destination
	 * address "pseudoheader" for skbs without neighbour struct.
	 */
	dev->hard_header_len	 = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
	dev->addr_len		 = INFINIBAND_ALEN;
	dev->type		 = ARPHRD_INFINIBAND;
	dev->tx_queue_len	 = ipoib_sendq_size * 2;
	dev->features		 = (NETIF_F_VLAN_CHALLENGED |
				    NETIF_F_HIGHDMA);

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	netif_carrier_off(dev);

	priv->dev = dev;

	ipoib_lro_setup(priv);

	spin_lock_init(&priv->lock);

	mutex_init(&priv->vlan_mutex);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
	INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
			   ipoib_setup);
	if (!dev)
		return NULL;

	return netdev_priv(dev);
}

static ssize_t show_pkey(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t show_umcast(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}

static ssize_t set_umcast(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

	if (umcast_val > 0) {
		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
		ipoib_warn(priv, "ignoring multicast groups joined directly "
			   "by userspace\n");
	} else
		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);

	return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);

int ipoib_add_umcast_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_umcast);
}

static ssize_t create_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);

static ssize_t delete_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

	return ret ? ret : count;

}
static DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_pkey);
}

int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	struct ib_device_attr *device_attr;
	int result = -ENOMEM;

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr) {
		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
		       hca->name, sizeof *device_attr);
		return result;
	}

	result = ib_query_device(hca, device_attr);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
		       hca->name, result);
		kfree(device_attr);
		return result;
	}
	priv->hca_caps = device_attr->device_cap_flags;

	kfree(device_attr);

	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	}

	if (lro)
		priv->dev->features |= NETIF_F_LRO;

	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
		priv->dev->features |= NETIF_F_TSO;

	return 0;
}


static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dma_device);

	if (!ib_query_port(hca, port, &attr))
		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	else {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		       hca->name, port);
		goto device_init_failed;
	}

	/* MTU will be reset when mcast join happens */
	priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	if (ipoib_set_dev_features(priv, hca))
		goto device_init_failed;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	} else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		       "port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}

	result = register_netdev(priv->dev);
	if (result) {
		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
		       hca->name, port, result);
		goto register_failed;
	}

	ipoib_create_debug_files(priv->dev);

	if (ipoib_cm_add_mode_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_umcast_attr(priv->dev))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	ipoib_delete_debug_files(priv->dev);
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);
	flush_workqueue(ipoib_workqueue);

event_failed:
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	free_netdev(priv->dev);

alloc_mem_failed:
	return ERR_PTR(result);
}
1323{
1324 struct list_head *dev_list;
1325 struct net_device *dev;
1326 struct ipoib_dev_priv *priv;
1327 int s, e, p;
1328
07ebafba
TT
1329 if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
1330 return;
1331
1da177e4
LT
1332 dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
1333 if (!dev_list)
1334 return;
1335
1336 INIT_LIST_HEAD(dev_list);
1337
07ebafba 1338 if (device->node_type == RDMA_NODE_IB_SWITCH) {
1da177e4
LT
1339 s = 0;
1340 e = 0;
1341 } else {
1342 s = 1;
1343 e = device->phys_port_cnt;
1344 }
1345
1346 for (p = s; p <= e; ++p) {
1347 dev = ipoib_add_port("ib%d", device, p);
1348 if (!IS_ERR(dev)) {
1349 priv = netdev_priv(dev);
1350 list_add_tail(&priv->list, dev_list);
1351 }
1352 }
1353
1354 ib_set_client_data(device, &ipoib_client, dev_list);
1355}
1356
1357static void ipoib_remove_one(struct ib_device *device)
1358{
1359 struct ipoib_dev_priv *priv, *tmp;
1360 struct list_head *dev_list;
1361
07ebafba
TT
1362 if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
1363 return;
1364
1da177e4
LT
1365 dev_list = ib_get_client_data(device, &ipoib_client);
1366
1367 list_for_each_entry_safe(priv, tmp, dev_list, list) {
1368 ib_unregister_event_handler(&priv->event_handler);
a77a57a1
RD
1369
1370 rtnl_lock();
1371 dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
1372 rtnl_unlock();
1373
1374 flush_workqueue(ipoib_workqueue);
1da177e4
LT
1375
1376 unregister_netdev(priv->dev);
1377 ipoib_dev_cleanup(priv->dev);
1378 free_netdev(priv->dev);
1379 }
06c56e44
MT
1380
1381 kfree(dev_list);
1da177e4
LT
1382}
1383
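
/*
 * Module init: clamp the queue-size parameters to sane powers of two
 * before any device is probed, then register debugfs, the private
 * workqueue, the SA client and finally the ib_client itself.
 */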
static int __init ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE,
						     IPOIB_MIN_QUEUE_SIZE));
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	/*
	 * When copying small received packets, we only copy from the
	 * linear data part of the SKB, so we rely on this condition.
	 */
	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed. We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	return 0;

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}

static void __exit ipoib_cleanup_module(void)
{
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);