IB/core: Add netdev and gid attributes parameters to cache
deliverable/linux.git: drivers/infiniband/core/cma.c
1 /*
2 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
3 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
4 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
5 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36 #include <linux/completion.h>
37 #include <linux/in.h>
38 #include <linux/in6.h>
39 #include <linux/mutex.h>
40 #include <linux/random.h>
41 #include <linux/idr.h>
42 #include <linux/inetdevice.h>
43 #include <linux/slab.h>
44 #include <linux/module.h>
45 #include <net/route.h>
46
47 #include <net/tcp.h>
48 #include <net/ipv6.h>
49 #include <net/ip_fib.h>
50 #include <net/ip6_route.h>
51
52 #include <rdma/rdma_cm.h>
53 #include <rdma/rdma_cm_ib.h>
54 #include <rdma/rdma_netlink.h>
55 #include <rdma/ib.h>
56 #include <rdma/ib_cache.h>
57 #include <rdma/ib_cm.h>
58 #include <rdma/ib_sa.h>
59 #include <rdma/iw_cm.h>
60
61 MODULE_AUTHOR("Sean Hefty");
62 MODULE_DESCRIPTION("Generic RDMA CM Agent");
63 MODULE_LICENSE("Dual BSD/GPL");
64
65 #define CMA_CM_RESPONSE_TIMEOUT 20
66 #define CMA_MAX_CM_RETRIES 15
67 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
68 #define CMA_IBOE_PACKET_LIFETIME 18
69
70 static const char * const cma_events[] = {
71 [RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved",
72 [RDMA_CM_EVENT_ADDR_ERROR] = "address error",
73 [RDMA_CM_EVENT_ROUTE_RESOLVED] = "route resolved",
74 [RDMA_CM_EVENT_ROUTE_ERROR] = "route error",
75 [RDMA_CM_EVENT_CONNECT_REQUEST] = "connect request",
76 [RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
77 [RDMA_CM_EVENT_CONNECT_ERROR] = "connect error",
78 [RDMA_CM_EVENT_UNREACHABLE] = "unreachable",
79 [RDMA_CM_EVENT_REJECTED] = "rejected",
80 [RDMA_CM_EVENT_ESTABLISHED] = "established",
81 [RDMA_CM_EVENT_DISCONNECTED] = "disconnected",
82 [RDMA_CM_EVENT_DEVICE_REMOVAL] = "device removal",
83 [RDMA_CM_EVENT_MULTICAST_JOIN] = "multicast join",
84 [RDMA_CM_EVENT_MULTICAST_ERROR] = "multicast error",
85 [RDMA_CM_EVENT_ADDR_CHANGE] = "address change",
86 [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",
87 };
88
89 const char *rdma_event_msg(enum rdma_cm_event_type event)
90 {
91 size_t index = event;
92
93 return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
94 cma_events[index] : "unrecognized event";
95 }
96 EXPORT_SYMBOL(rdma_event_msg);
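/*
 * Illustrative use of rdma_event_msg() from a consumer's event handler.
 * This is only a sketch; "my_cm_handler" is a placeholder name and not
 * part of this file:
 *
 *	static int my_cm_handler(struct rdma_cm_id *id,
 *				 struct rdma_cm_event *event)
 *	{
 *		pr_info("cma event %s, status %d\n",
 *			rdma_event_msg(event->event), event->status);
 *		return 0;
 *	}
 */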
97
98 static void cma_add_one(struct ib_device *device);
99 static void cma_remove_one(struct ib_device *device, void *client_data);
100
101 static struct ib_client cma_client = {
102 .name = "cma",
103 .add = cma_add_one,
104 .remove = cma_remove_one
105 };
106
107 static struct ib_sa_client sa_client;
108 static struct rdma_addr_client addr_client;
109 static LIST_HEAD(dev_list);
110 static LIST_HEAD(listen_any_list);
111 static DEFINE_MUTEX(lock);
112 static struct workqueue_struct *cma_wq;
113 static DEFINE_IDR(tcp_ps);
114 static DEFINE_IDR(udp_ps);
115 static DEFINE_IDR(ipoib_ps);
116 static DEFINE_IDR(ib_ps);
117
118 static struct idr *cma_idr(enum rdma_port_space ps)
119 {
120 switch (ps) {
121 case RDMA_PS_TCP:
122 return &tcp_ps;
123 case RDMA_PS_UDP:
124 return &udp_ps;
125 case RDMA_PS_IPOIB:
126 return &ipoib_ps;
127 case RDMA_PS_IB:
128 return &ib_ps;
129 default:
130 return NULL;
131 }
132 }
133
134 struct cma_device {
135 struct list_head list;
136 struct ib_device *device;
137 struct completion comp;
138 atomic_t refcount;
139 struct list_head id_list;
140 };
141
142 struct rdma_bind_list {
143 enum rdma_port_space ps;
144 struct hlist_head owners;
145 unsigned short port;
146 };
147
148 static int cma_ps_alloc(enum rdma_port_space ps,
149 struct rdma_bind_list *bind_list, int snum)
150 {
151 struct idr *idr = cma_idr(ps);
152
153 return idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL);
154 }
155
156 static struct rdma_bind_list *cma_ps_find(enum rdma_port_space ps, int snum)
157 {
158 struct idr *idr = cma_idr(ps);
159
160 return idr_find(idr, snum);
161 }
162
163 static void cma_ps_remove(enum rdma_port_space ps, int snum)
164 {
165 struct idr *idr = cma_idr(ps);
166
167 idr_remove(idr, snum);
168 }
169
170 enum {
171 CMA_OPTION_AFONLY,
172 };
173
174 /*
175 * Device removal can occur at any time, so we need extra handling to
176 * serialize notifying the user of device removal with other callbacks.
177 * We do this by disabling removal notification while a callback is in process,
178 * and reporting it after the callback completes.
179 */
180 struct rdma_id_private {
181 struct rdma_cm_id id;
182
183 struct rdma_bind_list *bind_list;
184 struct hlist_node node;
185 struct list_head list; /* listen_any_list or cma_device.list */
186 struct list_head listen_list; /* per device listens */
187 struct cma_device *cma_dev;
188 struct list_head mc_list;
189
190 int internal_id;
191 enum rdma_cm_state state;
192 spinlock_t lock;
193 struct mutex qp_mutex;
194
195 struct completion comp;
196 atomic_t refcount;
197 struct mutex handler_mutex;
198
199 int backlog;
200 int timeout_ms;
201 struct ib_sa_query *query;
202 int query_id;
203 union {
204 struct ib_cm_id *ib;
205 struct iw_cm_id *iw;
206 } cm_id;
207
208 u32 seq_num;
209 u32 qkey;
210 u32 qp_num;
211 pid_t owner;
212 u32 options;
213 u8 srq;
214 u8 tos;
215 u8 reuseaddr;
216 u8 afonly;
217 };
218
219 struct cma_multicast {
220 struct rdma_id_private *id_priv;
221 union {
222 struct ib_sa_multicast *ib;
223 } multicast;
224 struct list_head list;
225 void *context;
226 struct sockaddr_storage addr;
227 struct kref mcref;
228 };
229
230 struct cma_work {
231 struct work_struct work;
232 struct rdma_id_private *id;
233 enum rdma_cm_state old_state;
234 enum rdma_cm_state new_state;
235 struct rdma_cm_event event;
236 };
237
238 struct cma_ndev_work {
239 struct work_struct work;
240 struct rdma_id_private *id;
241 struct rdma_cm_event event;
242 };
243
244 struct iboe_mcast_work {
245 struct work_struct work;
246 struct rdma_id_private *id;
247 struct cma_multicast *mc;
248 };
249
250 union cma_ip_addr {
251 struct in6_addr ip6;
252 struct {
253 __be32 pad[3];
254 __be32 addr;
255 } ip4;
256 };
257
258 struct cma_hdr {
259 u8 cma_version;
260 u8 ip_version; /* IP version: 7:4 */
261 __be16 port;
262 union cma_ip_addr src_addr;
263 union cma_ip_addr dst_addr;
264 };
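/*
 * Note on the header above: for the IP-based port spaces it is carried at
 * the start of the CM private data (see cma_save_ip_info() below, which
 * reads it from ib_event->private_data). The version nibble lives in bits
 * 7:4 of ip_version, and the structure packs to 1 + 1 + 2 + 16 + 16 = 36
 * bytes, matching the offset cma_user_data_offset() skips for non-AF_IB
 * families. AF_IB consumers carry no such header.
 */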
265
266 #define CMA_VERSION 0x00
267
268 struct cma_req_info {
269 struct ib_device *device;
270 int port;
271 union ib_gid local_gid;
272 __be64 service_id;
273 u16 pkey;
274 bool has_gid:1;
275 };
276
277 static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
278 {
279 unsigned long flags;
280 int ret;
281
282 spin_lock_irqsave(&id_priv->lock, flags);
283 ret = (id_priv->state == comp);
284 spin_unlock_irqrestore(&id_priv->lock, flags);
285 return ret;
286 }
287
288 static int cma_comp_exch(struct rdma_id_private *id_priv,
289 enum rdma_cm_state comp, enum rdma_cm_state exch)
290 {
291 unsigned long flags;
292 int ret;
293
294 spin_lock_irqsave(&id_priv->lock, flags);
295 if ((ret = (id_priv->state == comp)))
296 id_priv->state = exch;
297 spin_unlock_irqrestore(&id_priv->lock, flags);
298 return ret;
299 }
300
301 static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
302 enum rdma_cm_state exch)
303 {
304 unsigned long flags;
305 enum rdma_cm_state old;
306
307 spin_lock_irqsave(&id_priv->lock, flags);
308 old = id_priv->state;
309 id_priv->state = exch;
310 spin_unlock_irqrestore(&id_priv->lock, flags);
311 return old;
312 }
313
314 static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
315 {
316 return hdr->ip_version >> 4;
317 }
318
319 static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
320 {
321 hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
322 }
323
324 static void cma_attach_to_dev(struct rdma_id_private *id_priv,
325 struct cma_device *cma_dev)
326 {
327 atomic_inc(&cma_dev->refcount);
328 id_priv->cma_dev = cma_dev;
329 id_priv->id.device = cma_dev->device;
330 id_priv->id.route.addr.dev_addr.transport =
331 rdma_node_get_transport(cma_dev->device->node_type);
332 list_add_tail(&id_priv->list, &cma_dev->id_list);
333 }
334
335 static inline void cma_deref_dev(struct cma_device *cma_dev)
336 {
337 if (atomic_dec_and_test(&cma_dev->refcount))
338 complete(&cma_dev->comp);
339 }
340
341 static inline void release_mc(struct kref *kref)
342 {
343 struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);
344
345 kfree(mc->multicast.ib);
346 kfree(mc);
347 }
348
349 static void cma_release_dev(struct rdma_id_private *id_priv)
350 {
351 mutex_lock(&lock);
352 list_del(&id_priv->list);
353 cma_deref_dev(id_priv->cma_dev);
354 id_priv->cma_dev = NULL;
355 mutex_unlock(&lock);
356 }
357
358 static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
359 {
360 return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
361 }
362
363 static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
364 {
365 return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
366 }
367
368 static inline unsigned short cma_family(struct rdma_id_private *id_priv)
369 {
370 return id_priv->id.route.addr.src_addr.ss_family;
371 }
372
373 static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
374 {
375 struct ib_sa_mcmember_rec rec;
376 int ret = 0;
377
378 if (id_priv->qkey) {
379 if (qkey && id_priv->qkey != qkey)
380 return -EINVAL;
381 return 0;
382 }
383
384 if (qkey) {
385 id_priv->qkey = qkey;
386 return 0;
387 }
388
389 switch (id_priv->id.ps) {
390 case RDMA_PS_UDP:
391 case RDMA_PS_IB:
392 id_priv->qkey = RDMA_UDP_QKEY;
393 break;
394 case RDMA_PS_IPOIB:
395 ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
396 ret = ib_sa_get_mcmember_rec(id_priv->id.device,
397 id_priv->id.port_num, &rec.mgid,
398 &rec);
399 if (!ret)
400 id_priv->qkey = be32_to_cpu(rec.qkey);
401 break;
402 default:
403 break;
404 }
405 return ret;
406 }
407
408 static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
409 {
410 dev_addr->dev_type = ARPHRD_INFINIBAND;
411 rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
412 ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
413 }
414
415 static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
416 {
417 int ret;
418
419 if (addr->sa_family != AF_IB) {
420 ret = rdma_translate_ip(addr, dev_addr, NULL);
421 } else {
422 cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
423 ret = 0;
424 }
425
426 return ret;
427 }
428
429 static inline int cma_validate_port(struct ib_device *device, u8 port,
430 union ib_gid *gid, int dev_type)
431 {
432 u8 found_port;
433 int ret = -ENODEV;
434
435 if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
436 return ret;
437
438 if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
439 return ret;
440
441 ret = ib_find_cached_gid(device, gid, NULL, &found_port, NULL);
442 if (port != found_port)
443 return -ENODEV;
444
445 return ret;
446 }
447
448 static int cma_acquire_dev(struct rdma_id_private *id_priv,
449 struct rdma_id_private *listen_id_priv)
450 {
451 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
452 struct cma_device *cma_dev;
453 union ib_gid gid, iboe_gid, *gidp;
454 int ret = -ENODEV;
455 u8 port;
456
457 if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
458 id_priv->id.ps == RDMA_PS_IPOIB)
459 return -EINVAL;
460
461 mutex_lock(&lock);
462 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
463 &iboe_gid);
464
465 memcpy(&gid, dev_addr->src_dev_addr +
466 rdma_addr_gid_offset(dev_addr), sizeof gid);
467
468 if (listen_id_priv) {
469 cma_dev = listen_id_priv->cma_dev;
470 port = listen_id_priv->id.port_num;
471 gidp = rdma_protocol_roce(cma_dev->device, port) ?
472 &iboe_gid : &gid;
473
474 ret = cma_validate_port(cma_dev->device, port, gidp,
475 dev_addr->dev_type);
476 if (!ret) {
477 id_priv->id.port_num = port;
478 goto out;
479 }
480 }
481
482 list_for_each_entry(cma_dev, &dev_list, list) {
483 for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
484 if (listen_id_priv &&
485 listen_id_priv->cma_dev == cma_dev &&
486 listen_id_priv->id.port_num == port)
487 continue;
488
489 gidp = rdma_protocol_roce(cma_dev->device, port) ?
490 &iboe_gid : &gid;
491
492 ret = cma_validate_port(cma_dev->device, port, gidp,
493 dev_addr->dev_type);
494 if (!ret) {
495 id_priv->id.port_num = port;
496 goto out;
497 }
498 }
499 }
500
501 out:
502 if (!ret)
503 cma_attach_to_dev(id_priv, cma_dev);
504
505 mutex_unlock(&lock);
506 return ret;
507 }
508
509 /*
510 * Select the source IB device and address to reach the destination IB address.
511 */
512 static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
513 {
514 struct cma_device *cma_dev, *cur_dev;
515 struct sockaddr_ib *addr;
516 union ib_gid gid, sgid, *dgid;
517 u16 pkey, index;
518 u8 p;
519 int i;
520
521 cma_dev = NULL;
522 addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
523 dgid = (union ib_gid *) &addr->sib_addr;
524 pkey = ntohs(addr->sib_pkey);
525
526 list_for_each_entry(cur_dev, &dev_list, list) {
527 for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
528 if (!rdma_cap_af_ib(cur_dev->device, p))
529 continue;
530
531 if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
532 continue;
533
534 for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i,
535 &gid, NULL);
536 i++) {
537 if (!memcmp(&gid, dgid, sizeof(gid))) {
538 cma_dev = cur_dev;
539 sgid = gid;
540 id_priv->id.port_num = p;
541 goto found;
542 }
543
544 if (!cma_dev && (gid.global.subnet_prefix ==
545 dgid->global.subnet_prefix)) {
546 cma_dev = cur_dev;
547 sgid = gid;
548 id_priv->id.port_num = p;
549 }
550 }
551 }
552 }
553
554 if (!cma_dev)
555 return -ENODEV;
556
557 found:
558 cma_attach_to_dev(id_priv, cma_dev);
559 addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
560 memcpy(&addr->sib_addr, &sgid, sizeof sgid);
561 cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
562 return 0;
563 }
564
565 static void cma_deref_id(struct rdma_id_private *id_priv)
566 {
567 if (atomic_dec_and_test(&id_priv->refcount))
568 complete(&id_priv->comp);
569 }
570
571 static int cma_disable_callback(struct rdma_id_private *id_priv,
572 enum rdma_cm_state state)
573 {
574 mutex_lock(&id_priv->handler_mutex);
575 if (id_priv->state != state) {
576 mutex_unlock(&id_priv->handler_mutex);
577 return -EINVAL;
578 }
579 return 0;
580 }
581
582 struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
583 void *context, enum rdma_port_space ps,
584 enum ib_qp_type qp_type)
585 {
586 struct rdma_id_private *id_priv;
587
588 id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
589 if (!id_priv)
590 return ERR_PTR(-ENOMEM);
591
592 id_priv->owner = task_pid_nr(current);
593 id_priv->state = RDMA_CM_IDLE;
594 id_priv->id.context = context;
595 id_priv->id.event_handler = event_handler;
596 id_priv->id.ps = ps;
597 id_priv->id.qp_type = qp_type;
598 spin_lock_init(&id_priv->lock);
599 mutex_init(&id_priv->qp_mutex);
600 init_completion(&id_priv->comp);
601 atomic_set(&id_priv->refcount, 1);
602 mutex_init(&id_priv->handler_mutex);
603 INIT_LIST_HEAD(&id_priv->listen_list);
604 INIT_LIST_HEAD(&id_priv->mc_list);
605 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
606
607 return &id_priv->id;
608 }
609 EXPORT_SYMBOL(rdma_create_id);
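/*
 * Caller-side sketch (names such as my_cm_handler, my_ctx and dst are
 * placeholders): a consumer creates an id in the IDLE state, then binds or
 * resolves an address on it before creating a QP:
 *
 *	struct rdma_cm_id *id;
 *
 *	id = rdma_create_id(my_cm_handler, my_ctx, RDMA_PS_TCP, IB_QPT_RC);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	ret = rdma_resolve_addr(id, NULL, (struct sockaddr *)&dst, 2000);
 */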
610
611 static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
612 {
613 struct ib_qp_attr qp_attr;
614 int qp_attr_mask, ret;
615
616 qp_attr.qp_state = IB_QPS_INIT;
617 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
618 if (ret)
619 return ret;
620
621 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
622 if (ret)
623 return ret;
624
625 qp_attr.qp_state = IB_QPS_RTR;
626 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
627 if (ret)
628 return ret;
629
630 qp_attr.qp_state = IB_QPS_RTS;
631 qp_attr.sq_psn = 0;
632 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
633
634 return ret;
635 }
636
637 static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
638 {
639 struct ib_qp_attr qp_attr;
640 int qp_attr_mask, ret;
641
642 qp_attr.qp_state = IB_QPS_INIT;
643 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
644 if (ret)
645 return ret;
646
647 return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
648 }
649
650 int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
651 struct ib_qp_init_attr *qp_init_attr)
652 {
653 struct rdma_id_private *id_priv;
654 struct ib_qp *qp;
655 int ret;
656
657 id_priv = container_of(id, struct rdma_id_private, id);
658 if (id->device != pd->device)
659 return -EINVAL;
660
661 qp = ib_create_qp(pd, qp_init_attr);
662 if (IS_ERR(qp))
663 return PTR_ERR(qp);
664
665 if (id->qp_type == IB_QPT_UD)
666 ret = cma_init_ud_qp(id_priv, qp);
667 else
668 ret = cma_init_conn_qp(id_priv, qp);
669 if (ret)
670 goto err;
671
672 id->qp = qp;
673 id_priv->qp_num = qp->qp_num;
674 id_priv->srq = (qp->srq != NULL);
675 return 0;
676 err:
677 ib_destroy_qp(qp);
678 return ret;
679 }
680 EXPORT_SYMBOL(rdma_create_qp);
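/*
 * Sketch of a caller creating a QP on a resolved id (cq, pd and the sizing
 * values are placeholders). Note that rdma_create_qp() requires the PD to
 * come from id->device, as checked above:
 *
 *	struct ib_qp_init_attr attr = {
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.qp_type = IB_QPT_RC,
 *		.cap = { .max_send_wr = 16, .max_recv_wr = 16,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *	};
 *
 *	ret = rdma_create_qp(id, pd, &attr);
 */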
681
682 void rdma_destroy_qp(struct rdma_cm_id *id)
683 {
684 struct rdma_id_private *id_priv;
685
686 id_priv = container_of(id, struct rdma_id_private, id);
687 mutex_lock(&id_priv->qp_mutex);
688 ib_destroy_qp(id_priv->id.qp);
689 id_priv->id.qp = NULL;
690 mutex_unlock(&id_priv->qp_mutex);
691 }
692 EXPORT_SYMBOL(rdma_destroy_qp);
693
694 static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
695 struct rdma_conn_param *conn_param)
696 {
697 struct ib_qp_attr qp_attr;
698 int qp_attr_mask, ret;
699 union ib_gid sgid;
700
701 mutex_lock(&id_priv->qp_mutex);
702 if (!id_priv->id.qp) {
703 ret = 0;
704 goto out;
705 }
706
707 /* Need to update QP attributes from default values. */
708 qp_attr.qp_state = IB_QPS_INIT;
709 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
710 if (ret)
711 goto out;
712
713 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
714 if (ret)
715 goto out;
716
717 qp_attr.qp_state = IB_QPS_RTR;
718 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
719 if (ret)
720 goto out;
721
722 ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
723 qp_attr.ah_attr.grh.sgid_index, &sgid, NULL);
724 if (ret)
725 goto out;
726
727 BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
728
729 if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
730 ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
731
732 if (ret)
733 goto out;
734 }
735 if (conn_param)
736 qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
737 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
738 out:
739 mutex_unlock(&id_priv->qp_mutex);
740 return ret;
741 }
742
743 static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
744 struct rdma_conn_param *conn_param)
745 {
746 struct ib_qp_attr qp_attr;
747 int qp_attr_mask, ret;
748
749 mutex_lock(&id_priv->qp_mutex);
750 if (!id_priv->id.qp) {
751 ret = 0;
752 goto out;
753 }
754
755 qp_attr.qp_state = IB_QPS_RTS;
756 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
757 if (ret)
758 goto out;
759
760 if (conn_param)
761 qp_attr.max_rd_atomic = conn_param->initiator_depth;
762 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
763 out:
764 mutex_unlock(&id_priv->qp_mutex);
765 return ret;
766 }
767
768 static int cma_modify_qp_err(struct rdma_id_private *id_priv)
769 {
770 struct ib_qp_attr qp_attr;
771 int ret;
772
773 mutex_lock(&id_priv->qp_mutex);
774 if (!id_priv->id.qp) {
775 ret = 0;
776 goto out;
777 }
778
779 qp_attr.qp_state = IB_QPS_ERR;
780 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
781 out:
782 mutex_unlock(&id_priv->qp_mutex);
783 return ret;
784 }
785
786 static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
787 struct ib_qp_attr *qp_attr, int *qp_attr_mask)
788 {
789 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
790 int ret;
791 u16 pkey;
792
793 if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
794 pkey = 0xffff;
795 else
796 pkey = ib_addr_get_pkey(dev_addr);
797
798 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
799 pkey, &qp_attr->pkey_index);
800 if (ret)
801 return ret;
802
803 qp_attr->port_num = id_priv->id.port_num;
804 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
805
806 if (id_priv->id.qp_type == IB_QPT_UD) {
807 ret = cma_set_qkey(id_priv, 0);
808 if (ret)
809 return ret;
810
811 qp_attr->qkey = id_priv->qkey;
812 *qp_attr_mask |= IB_QP_QKEY;
813 } else {
814 qp_attr->qp_access_flags = 0;
815 *qp_attr_mask |= IB_QP_ACCESS_FLAGS;
816 }
817 return 0;
818 }
819
820 int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
821 int *qp_attr_mask)
822 {
823 struct rdma_id_private *id_priv;
824 int ret = 0;
825
826 id_priv = container_of(id, struct rdma_id_private, id);
827 if (rdma_cap_ib_cm(id->device, id->port_num)) {
828 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
829 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
830 else
831 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
832 qp_attr_mask);
833
834 if (qp_attr->qp_state == IB_QPS_RTR)
835 qp_attr->rq_psn = id_priv->seq_num;
836 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
837 if (!id_priv->cm_id.iw) {
838 qp_attr->qp_access_flags = 0;
839 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
840 } else
841 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
842 qp_attr_mask);
843 } else
844 ret = -ENOSYS;
845
846 return ret;
847 }
848 EXPORT_SYMBOL(rdma_init_qp_attr);
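/*
 * Consumers that manage their own QP (instead of using rdma_create_qp())
 * drive the state transitions themselves. A sketch of the usual pattern,
 * mirroring what cma_modify_qp_rtr() does internally:
 *
 *	struct ib_qp_attr qp_attr;
 *	int qp_attr_mask;
 *
 *	qp_attr.qp_state = IB_QPS_RTR;
 *	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
 *	if (!ret)
 *		ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 */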
849
850 static inline int cma_zero_addr(struct sockaddr *addr)
851 {
852 switch (addr->sa_family) {
853 case AF_INET:
854 return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
855 case AF_INET6:
856 return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr);
857 case AF_IB:
858 return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr);
859 default:
860 return 0;
861 }
862 }
863
864 static inline int cma_loopback_addr(struct sockaddr *addr)
865 {
866 switch (addr->sa_family) {
867 case AF_INET:
868 return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
869 case AF_INET6:
870 return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr);
871 case AF_IB:
872 return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr);
873 default:
874 return 0;
875 }
876 }
877
878 static inline int cma_any_addr(struct sockaddr *addr)
879 {
880 return cma_zero_addr(addr) || cma_loopback_addr(addr);
881 }
882
883 static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
884 {
885 if (src->sa_family != dst->sa_family)
886 return -1;
887
888 switch (src->sa_family) {
889 case AF_INET:
890 return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
891 ((struct sockaddr_in *) dst)->sin_addr.s_addr;
892 case AF_INET6:
893 return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
894 &((struct sockaddr_in6 *) dst)->sin6_addr);
895 default:
896 return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
897 &((struct sockaddr_ib *) dst)->sib_addr);
898 }
899 }
900
901 static __be16 cma_port(struct sockaddr *addr)
902 {
903 struct sockaddr_ib *sib;
904
905 switch (addr->sa_family) {
906 case AF_INET:
907 return ((struct sockaddr_in *) addr)->sin_port;
908 case AF_INET6:
909 return ((struct sockaddr_in6 *) addr)->sin6_port;
910 case AF_IB:
911 sib = (struct sockaddr_ib *) addr;
912 return htons((u16) (be64_to_cpu(sib->sib_sid) &
913 be64_to_cpu(sib->sib_sid_mask)));
914 default:
915 return 0;
916 }
917 }
918
919 static inline int cma_any_port(struct sockaddr *addr)
920 {
921 return !cma_port(addr);
922 }
923
924 static void cma_save_ib_info(struct sockaddr *src_addr,
925 struct sockaddr *dst_addr,
926 struct rdma_cm_id *listen_id,
927 struct ib_sa_path_rec *path)
928 {
929 struct sockaddr_ib *listen_ib, *ib;
930
931 listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
932 if (src_addr) {
933 ib = (struct sockaddr_ib *)src_addr;
934 ib->sib_family = AF_IB;
935 if (path) {
936 ib->sib_pkey = path->pkey;
937 ib->sib_flowinfo = path->flow_label;
938 memcpy(&ib->sib_addr, &path->sgid, 16);
939 ib->sib_sid = path->service_id;
940 ib->sib_scope_id = 0;
941 } else {
942 ib->sib_pkey = listen_ib->sib_pkey;
943 ib->sib_flowinfo = listen_ib->sib_flowinfo;
944 ib->sib_addr = listen_ib->sib_addr;
945 ib->sib_sid = listen_ib->sib_sid;
946 ib->sib_scope_id = listen_ib->sib_scope_id;
947 }
948 ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
949 }
950 if (dst_addr) {
951 ib = (struct sockaddr_ib *)dst_addr;
952 ib->sib_family = AF_IB;
953 if (path) {
954 ib->sib_pkey = path->pkey;
955 ib->sib_flowinfo = path->flow_label;
956 memcpy(&ib->sib_addr, &path->dgid, 16);
957 }
958 }
959 }
960
961 static void cma_save_ip4_info(struct sockaddr *src_addr,
962 struct sockaddr *dst_addr,
963 struct cma_hdr *hdr,
964 __be16 local_port)
965 {
966 struct sockaddr_in *ip4;
967
968 if (src_addr) {
969 ip4 = (struct sockaddr_in *)src_addr;
970 ip4->sin_family = AF_INET;
971 ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
972 ip4->sin_port = local_port;
973 }
974
975 if (dst_addr) {
976 ip4 = (struct sockaddr_in *)dst_addr;
977 ip4->sin_family = AF_INET;
978 ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
979 ip4->sin_port = hdr->port;
980 }
981 }
982
983 static void cma_save_ip6_info(struct sockaddr *src_addr,
984 struct sockaddr *dst_addr,
985 struct cma_hdr *hdr,
986 __be16 local_port)
987 {
988 struct sockaddr_in6 *ip6;
989
990 if (src_addr) {
991 ip6 = (struct sockaddr_in6 *)src_addr;
992 ip6->sin6_family = AF_INET6;
993 ip6->sin6_addr = hdr->dst_addr.ip6;
994 ip6->sin6_port = local_port;
995 }
996
997 if (dst_addr) {
998 ip6 = (struct sockaddr_in6 *)dst_addr;
999 ip6->sin6_family = AF_INET6;
1000 ip6->sin6_addr = hdr->src_addr.ip6;
1001 ip6->sin6_port = hdr->port;
1002 }
1003 }
1004
1005 static u16 cma_port_from_service_id(__be64 service_id)
1006 {
1007 return (u16)be64_to_cpu(service_id);
1008 }
1009
1010 static int cma_save_ip_info(struct sockaddr *src_addr,
1011 struct sockaddr *dst_addr,
1012 struct ib_cm_event *ib_event,
1013 __be64 service_id)
1014 {
1015 struct cma_hdr *hdr;
1016 __be16 port;
1017
1018 hdr = ib_event->private_data;
1019 if (hdr->cma_version != CMA_VERSION)
1020 return -EINVAL;
1021
1022 port = htons(cma_port_from_service_id(service_id));
1023
1024 switch (cma_get_ip_ver(hdr)) {
1025 case 4:
1026 cma_save_ip4_info(src_addr, dst_addr, hdr, port);
1027 break;
1028 case 6:
1029 cma_save_ip6_info(src_addr, dst_addr, hdr, port);
1030 break;
1031 default:
1032 return -EAFNOSUPPORT;
1033 }
1034
1035 return 0;
1036 }
1037
1038 static int cma_save_net_info(struct sockaddr *src_addr,
1039 struct sockaddr *dst_addr,
1040 struct rdma_cm_id *listen_id,
1041 struct ib_cm_event *ib_event,
1042 sa_family_t sa_family, __be64 service_id)
1043 {
1044 if (sa_family == AF_IB) {
1045 if (ib_event->event == IB_CM_REQ_RECEIVED)
1046 cma_save_ib_info(src_addr, dst_addr, listen_id,
1047 ib_event->param.req_rcvd.primary_path);
1048 else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
1049 cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
1050 return 0;
1051 }
1052
1053 return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
1054 }
1055
1056 static int cma_save_req_info(const struct ib_cm_event *ib_event,
1057 struct cma_req_info *req)
1058 {
1059 const struct ib_cm_req_event_param *req_param =
1060 &ib_event->param.req_rcvd;
1061 const struct ib_cm_sidr_req_event_param *sidr_param =
1062 &ib_event->param.sidr_req_rcvd;
1063
1064 switch (ib_event->event) {
1065 case IB_CM_REQ_RECEIVED:
1066 req->device = req_param->listen_id->device;
1067 req->port = req_param->port;
1068 memcpy(&req->local_gid, &req_param->primary_path->sgid,
1069 sizeof(req->local_gid));
1070 req->has_gid = true;
1071 req->service_id = req_param->primary_path->service_id;
1072 req->pkey = be16_to_cpu(req_param->primary_path->pkey);
1073 break;
1074 case IB_CM_SIDR_REQ_RECEIVED:
1075 req->device = sidr_param->listen_id->device;
1076 req->port = sidr_param->port;
1077 req->has_gid = false;
1078 req->service_id = sidr_param->service_id;
1079 req->pkey = sidr_param->pkey;
1080 break;
1081 default:
1082 return -EINVAL;
1083 }
1084
1085 return 0;
1086 }
1087
1088 static bool validate_ipv4_net_dev(struct net_device *net_dev,
1089 const struct sockaddr_in *dst_addr,
1090 const struct sockaddr_in *src_addr)
1091 {
1092 __be32 daddr = dst_addr->sin_addr.s_addr,
1093 saddr = src_addr->sin_addr.s_addr;
1094 struct fib_result res;
1095 struct flowi4 fl4;
1096 int err;
1097 bool ret;
1098
1099 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1100 ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
1101 ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
1102 ipv4_is_loopback(saddr))
1103 return false;
1104
1105 memset(&fl4, 0, sizeof(fl4));
1106 fl4.flowi4_iif = net_dev->ifindex;
1107 fl4.daddr = daddr;
1108 fl4.saddr = saddr;
1109
1110 rcu_read_lock();
1111 err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
1112 if (err) {
1113 rcu_read_unlock();
1114 return false;
1115 }
1116 ret = FIB_RES_DEV(res) == net_dev;
1117 rcu_read_unlock();
1118 return ret;
1119 }
1120
1121 static bool validate_ipv6_net_dev(struct net_device *net_dev,
1122 const struct sockaddr_in6 *dst_addr,
1123 const struct sockaddr_in6 *src_addr)
1124 {
1125 #if IS_ENABLED(CONFIG_IPV6)
1126 const int strict = ipv6_addr_type(&dst_addr->sin6_addr) &
1127 IPV6_ADDR_LINKLOCAL;
1128 struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
1129 &src_addr->sin6_addr, net_dev->ifindex,
1130 strict);
1131 bool ret;
1132
1133 if (!rt)
1134 return false;
1135
1136 ret = rt->rt6i_idev->dev == net_dev;
1137 ip6_rt_put(rt);
1138
1139 return ret;
1140 #else
1141 return false;
1142 #endif
1143 }
1144
1145 static bool validate_net_dev(struct net_device *net_dev,
1146 const struct sockaddr *daddr,
1147 const struct sockaddr *saddr)
1148 {
1149 const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr;
1150 const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr;
1151 const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
1152 const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr;
1153
1154 switch (daddr->sa_family) {
1155 case AF_INET:
1156 return saddr->sa_family == AF_INET &&
1157 validate_ipv4_net_dev(net_dev, daddr4, saddr4);
1158
1159 case AF_INET6:
1160 return saddr->sa_family == AF_INET6 &&
1161 validate_ipv6_net_dev(net_dev, daddr6, saddr6);
1162
1163 default:
1164 return false;
1165 }
1166 }
1167
1168 static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
1169 const struct cma_req_info *req)
1170 {
1171 struct sockaddr_storage listen_addr_storage, src_addr_storage;
1172 struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage,
1173 *src_addr = (struct sockaddr *)&src_addr_storage;
1174 struct net_device *net_dev;
1175 const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
1176 int err;
1177
1178 err = cma_save_ip_info(listen_addr, src_addr, ib_event,
1179 req->service_id);
1180 if (err)
1181 return ERR_PTR(err);
1182
1183 net_dev = ib_get_net_dev_by_params(req->device, req->port, req->pkey,
1184 gid, listen_addr);
1185 if (!net_dev)
1186 return ERR_PTR(-ENODEV);
1187
1188 if (!validate_net_dev(net_dev, listen_addr, src_addr)) {
1189 dev_put(net_dev);
1190 return ERR_PTR(-EHOSTUNREACH);
1191 }
1192
1193 return net_dev;
1194 }
1195
1196 static enum rdma_port_space rdma_ps_from_service_id(__be64 service_id)
1197 {
1198 return (be64_to_cpu(service_id) >> 16) & 0xffff;
1199 }
1200
1201 static bool cma_match_private_data(struct rdma_id_private *id_priv,
1202 const struct cma_hdr *hdr)
1203 {
1204 struct sockaddr *addr = cma_src_addr(id_priv);
1205 __be32 ip4_addr;
1206 struct in6_addr ip6_addr;
1207
1208 if (cma_any_addr(addr) && !id_priv->afonly)
1209 return true;
1210
1211 switch (addr->sa_family) {
1212 case AF_INET:
1213 ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
1214 if (cma_get_ip_ver(hdr) != 4)
1215 return false;
1216 if (!cma_any_addr(addr) &&
1217 hdr->dst_addr.ip4.addr != ip4_addr)
1218 return false;
1219 break;
1220 case AF_INET6:
1221 ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
1222 if (cma_get_ip_ver(hdr) != 6)
1223 return false;
1224 if (!cma_any_addr(addr) &&
1225 memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
1226 return false;
1227 break;
1228 case AF_IB:
1229 return true;
1230 default:
1231 return false;
1232 }
1233
1234 return true;
1235 }
1236
1237 static bool cma_protocol_roce_dev_port(struct ib_device *device, int port_num)
1238 {
1239 enum rdma_link_layer ll = rdma_port_get_link_layer(device, port_num);
1240 enum rdma_transport_type transport =
1241 rdma_node_get_transport(device->node_type);
1242
1243 return ll == IB_LINK_LAYER_ETHERNET && transport == RDMA_TRANSPORT_IB;
1244 }
1245
1246 static bool cma_protocol_roce(const struct rdma_cm_id *id)
1247 {
1248 struct ib_device *device = id->device;
1249 const int port_num = id->port_num ?: rdma_start_port(device);
1250
1251 return cma_protocol_roce_dev_port(device, port_num);
1252 }
1253
1254 static bool cma_match_net_dev(const struct rdma_id_private *id_priv,
1255 const struct net_device *net_dev)
1256 {
1257 const struct rdma_addr *addr = &id_priv->id.route.addr;
1258
1259 if (!net_dev)
1260 /* This request is an AF_IB request or a RoCE request */
1261 return addr->src_addr.ss_family == AF_IB ||
1262 cma_protocol_roce(&id_priv->id);
1263
1264 return !addr->dev_addr.bound_dev_if ||
1265 (net_eq(dev_net(net_dev), &init_net) &&
1266 addr->dev_addr.bound_dev_if == net_dev->ifindex);
1267 }
1268
1269 static struct rdma_id_private *cma_find_listener(
1270 const struct rdma_bind_list *bind_list,
1271 const struct ib_cm_id *cm_id,
1272 const struct ib_cm_event *ib_event,
1273 const struct cma_req_info *req,
1274 const struct net_device *net_dev)
1275 {
1276 struct rdma_id_private *id_priv, *id_priv_dev;
1277
1278 if (!bind_list)
1279 return ERR_PTR(-EINVAL);
1280
1281 hlist_for_each_entry(id_priv, &bind_list->owners, node) {
1282 if (cma_match_private_data(id_priv, ib_event->private_data)) {
1283 if (id_priv->id.device == cm_id->device &&
1284 cma_match_net_dev(id_priv, net_dev))
1285 return id_priv;
1286 list_for_each_entry(id_priv_dev,
1287 &id_priv->listen_list,
1288 listen_list) {
1289 if (id_priv_dev->id.device == cm_id->device &&
1290 cma_match_net_dev(id_priv_dev, net_dev))
1291 return id_priv_dev;
1292 }
1293 }
1294 }
1295
1296 return ERR_PTR(-EINVAL);
1297 }
1298
1299 static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
1300 struct ib_cm_event *ib_event,
1301 struct net_device **net_dev)
1302 {
1303 struct cma_req_info req;
1304 struct rdma_bind_list *bind_list;
1305 struct rdma_id_private *id_priv;
1306 int err;
1307
1308 err = cma_save_req_info(ib_event, &req);
1309 if (err)
1310 return ERR_PTR(err);
1311
1312 *net_dev = cma_get_net_dev(ib_event, &req);
1313 if (IS_ERR(*net_dev)) {
1314 if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
1315 /* Assuming the protocol is AF_IB */
1316 *net_dev = NULL;
1317 } else if (cma_protocol_roce_dev_port(req.device, req.port)) {
1318 /* TODO find the net dev matching the request parameters
1319 * through the RoCE GID table */
1320 *net_dev = NULL;
1321 } else {
1322 return ERR_CAST(*net_dev);
1323 }
1324 }
1325
1326 bind_list = cma_ps_find(rdma_ps_from_service_id(req.service_id),
1327 cma_port_from_service_id(req.service_id));
1328 id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
1329 if (IS_ERR(id_priv) && *net_dev) {
1330 dev_put(*net_dev);
1331 *net_dev = NULL;
1332 }
1333
1334 return id_priv;
1335 }
1336
1337 static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
1338 {
1339 return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
1340 }
1341
1342 static void cma_cancel_route(struct rdma_id_private *id_priv)
1343 {
1344 if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
1345 if (id_priv->query)
1346 ib_sa_cancel_query(id_priv->query_id, id_priv->query);
1347 }
1348 }
1349
1350 static void cma_cancel_listens(struct rdma_id_private *id_priv)
1351 {
1352 struct rdma_id_private *dev_id_priv;
1353
1354 /*
1355 * Remove from listen_any_list to prevent added devices from spawning
1356 * additional listen requests.
1357 */
1358 mutex_lock(&lock);
1359 list_del(&id_priv->list);
1360
1361 while (!list_empty(&id_priv->listen_list)) {
1362 dev_id_priv = list_entry(id_priv->listen_list.next,
1363 struct rdma_id_private, listen_list);
1364 /* sync with device removal to avoid duplicate destruction */
1365 list_del_init(&dev_id_priv->list);
1366 list_del(&dev_id_priv->listen_list);
1367 mutex_unlock(&lock);
1368
1369 rdma_destroy_id(&dev_id_priv->id);
1370 mutex_lock(&lock);
1371 }
1372 mutex_unlock(&lock);
1373 }
1374
1375 static void cma_cancel_operation(struct rdma_id_private *id_priv,
1376 enum rdma_cm_state state)
1377 {
1378 switch (state) {
1379 case RDMA_CM_ADDR_QUERY:
1380 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
1381 break;
1382 case RDMA_CM_ROUTE_QUERY:
1383 cma_cancel_route(id_priv);
1384 break;
1385 case RDMA_CM_LISTEN:
1386 if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
1387 cma_cancel_listens(id_priv);
1388 break;
1389 default:
1390 break;
1391 }
1392 }
1393
1394 static void cma_release_port(struct rdma_id_private *id_priv)
1395 {
1396 struct rdma_bind_list *bind_list = id_priv->bind_list;
1397
1398 if (!bind_list)
1399 return;
1400
1401 mutex_lock(&lock);
1402 hlist_del(&id_priv->node);
1403 if (hlist_empty(&bind_list->owners)) {
1404 cma_ps_remove(bind_list->ps, bind_list->port);
1405 kfree(bind_list);
1406 }
1407 mutex_unlock(&lock);
1408 }
1409
1410 static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
1411 {
1412 struct cma_multicast *mc;
1413
1414 while (!list_empty(&id_priv->mc_list)) {
1415 mc = container_of(id_priv->mc_list.next,
1416 struct cma_multicast, list);
1417 list_del(&mc->list);
1418 if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
1419 id_priv->id.port_num)) {
1420 ib_sa_free_multicast(mc->multicast.ib);
1421 kfree(mc);
1422 } else
1423 kref_put(&mc->mcref, release_mc);
1424 }
1425 }
1426
1427 void rdma_destroy_id(struct rdma_cm_id *id)
1428 {
1429 struct rdma_id_private *id_priv;
1430 enum rdma_cm_state state;
1431
1432 id_priv = container_of(id, struct rdma_id_private, id);
1433 state = cma_exch(id_priv, RDMA_CM_DESTROYING);
1434 cma_cancel_operation(id_priv, state);
1435
1436 /*
1437 * Wait for any active callback to finish. New callbacks will find
1438 * the id_priv state set to destroying and abort.
1439 */
1440 mutex_lock(&id_priv->handler_mutex);
1441 mutex_unlock(&id_priv->handler_mutex);
1442
1443 if (id_priv->cma_dev) {
1444 if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
1445 if (id_priv->cm_id.ib)
1446 ib_destroy_cm_id(id_priv->cm_id.ib);
1447 } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
1448 if (id_priv->cm_id.iw)
1449 iw_destroy_cm_id(id_priv->cm_id.iw);
1450 }
1451 cma_leave_mc_groups(id_priv);
1452 cma_release_dev(id_priv);
1453 }
1454
1455 cma_release_port(id_priv);
1456 cma_deref_id(id_priv);
1457 wait_for_completion(&id_priv->comp);
1458
1459 if (id_priv->internal_id)
1460 cma_deref_id(id_priv->id.context);
1461
1462 kfree(id_priv->id.route.path_rec);
1463 kfree(id_priv);
1464 }
1465 EXPORT_SYMBOL(rdma_destroy_id);
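/*
 * Teardown-order sketch for a consumer: rdma_destroy_id() does not free a
 * QP created with rdma_create_qp(), so that is released first:
 *
 *	rdma_destroy_qp(id);
 *	rdma_destroy_id(id);
 */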
1466
1467 static int cma_rep_recv(struct rdma_id_private *id_priv)
1468 {
1469 int ret;
1470
1471 ret = cma_modify_qp_rtr(id_priv, NULL);
1472 if (ret)
1473 goto reject;
1474
1475 ret = cma_modify_qp_rts(id_priv, NULL);
1476 if (ret)
1477 goto reject;
1478
1479 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
1480 if (ret)
1481 goto reject;
1482
1483 return 0;
1484 reject:
1485 cma_modify_qp_err(id_priv);
1486 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
1487 NULL, 0, NULL, 0);
1488 return ret;
1489 }
1490
1491 static void cma_set_rep_event_data(struct rdma_cm_event *event,
1492 struct ib_cm_rep_event_param *rep_data,
1493 void *private_data)
1494 {
1495 event->param.conn.private_data = private_data;
1496 event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
1497 event->param.conn.responder_resources = rep_data->responder_resources;
1498 event->param.conn.initiator_depth = rep_data->initiator_depth;
1499 event->param.conn.flow_control = rep_data->flow_control;
1500 event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
1501 event->param.conn.srq = rep_data->srq;
1502 event->param.conn.qp_num = rep_data->remote_qpn;
1503 }
1504
1505 static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1506 {
1507 struct rdma_id_private *id_priv = cm_id->context;
1508 struct rdma_cm_event event;
1509 int ret = 0;
1510
1511 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
1512 cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
1513 (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
1514 cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
1515 return 0;
1516
1517 memset(&event, 0, sizeof event);
1518 switch (ib_event->event) {
1519 case IB_CM_REQ_ERROR:
1520 case IB_CM_REP_ERROR:
1521 event.event = RDMA_CM_EVENT_UNREACHABLE;
1522 event.status = -ETIMEDOUT;
1523 break;
1524 case IB_CM_REP_RECEIVED:
1525 if (id_priv->id.qp) {
1526 event.status = cma_rep_recv(id_priv);
1527 event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
1528 RDMA_CM_EVENT_ESTABLISHED;
1529 } else {
1530 event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
1531 }
1532 cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
1533 ib_event->private_data);
1534 break;
1535 case IB_CM_RTU_RECEIVED:
1536 case IB_CM_USER_ESTABLISHED:
1537 event.event = RDMA_CM_EVENT_ESTABLISHED;
1538 break;
1539 case IB_CM_DREQ_ERROR:
1540 event.status = -ETIMEDOUT; /* fall through */
1541 case IB_CM_DREQ_RECEIVED:
1542 case IB_CM_DREP_RECEIVED:
1543 if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
1544 RDMA_CM_DISCONNECT))
1545 goto out;
1546 event.event = RDMA_CM_EVENT_DISCONNECTED;
1547 break;
1548 case IB_CM_TIMEWAIT_EXIT:
1549 event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
1550 break;
1551 case IB_CM_MRA_RECEIVED:
1552 /* ignore event */
1553 goto out;
1554 case IB_CM_REJ_RECEIVED:
1555 cma_modify_qp_err(id_priv);
1556 event.status = ib_event->param.rej_rcvd.reason;
1557 event.event = RDMA_CM_EVENT_REJECTED;
1558 event.param.conn.private_data = ib_event->private_data;
1559 event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
1560 break;
1561 default:
1562 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
1563 ib_event->event);
1564 goto out;
1565 }
1566
1567 ret = id_priv->id.event_handler(&id_priv->id, &event);
1568 if (ret) {
1569 /* Destroy the CM ID by returning a non-zero value. */
1570 id_priv->cm_id.ib = NULL;
1571 cma_exch(id_priv, RDMA_CM_DESTROYING);
1572 mutex_unlock(&id_priv->handler_mutex);
1573 rdma_destroy_id(&id_priv->id);
1574 return ret;
1575 }
1576 out:
1577 mutex_unlock(&id_priv->handler_mutex);
1578 return ret;
1579 }
1580
1581 static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
1582 struct ib_cm_event *ib_event,
1583 struct net_device *net_dev)
1584 {
1585 struct rdma_id_private *id_priv;
1586 struct rdma_cm_id *id;
1587 struct rdma_route *rt;
1588 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
1589 const __be64 service_id =
1590 ib_event->param.req_rcvd.primary_path->service_id;
1591 int ret;
1592
1593 id = rdma_create_id(listen_id->event_handler, listen_id->context,
1594 listen_id->ps, ib_event->param.req_rcvd.qp_type);
1595 if (IS_ERR(id))
1596 return NULL;
1597
1598 id_priv = container_of(id, struct rdma_id_private, id);
1599 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
1600 (struct sockaddr *)&id->route.addr.dst_addr,
1601 listen_id, ib_event, ss_family, service_id))
1602 goto err;
1603
1604 rt = &id->route;
1605 rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
1606 rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
1607 GFP_KERNEL);
1608 if (!rt->path_rec)
1609 goto err;
1610
1611 rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
1612 if (rt->num_paths == 2)
1613 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
1614
1615 if (net_dev) {
1616 ret = rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL);
1617 if (ret)
1618 goto err;
1619 } else {
1620 if (!cma_protocol_roce(listen_id) &&
1621 cma_any_addr(cma_src_addr(id_priv))) {
1622 rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
1623 rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
1624 ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
1625 } else if (!cma_any_addr(cma_src_addr(id_priv))) {
1626 ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
1627 if (ret)
1628 goto err;
1629 }
1630 }
1631 rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
1632
1633 id_priv->state = RDMA_CM_CONNECT;
1634 return id_priv;
1635
1636 err:
1637 rdma_destroy_id(id);
1638 return NULL;
1639 }
1640
1641 static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
1642 struct ib_cm_event *ib_event,
1643 struct net_device *net_dev)
1644 {
1645 struct rdma_id_private *id_priv;
1646 struct rdma_cm_id *id;
1647 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
1648 int ret;
1649
1650 id = rdma_create_id(listen_id->event_handler, listen_id->context,
1651 listen_id->ps, IB_QPT_UD);
1652 if (IS_ERR(id))
1653 return NULL;
1654
1655 id_priv = container_of(id, struct rdma_id_private, id);
1656 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
1657 (struct sockaddr *)&id->route.addr.dst_addr,
1658 listen_id, ib_event, ss_family,
1659 ib_event->param.sidr_req_rcvd.service_id))
1660 goto err;
1661
1662 if (net_dev) {
1663 ret = rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL);
1664 if (ret)
1665 goto err;
1666 } else {
1667 if (!cma_any_addr(cma_src_addr(id_priv))) {
1668 ret = cma_translate_addr(cma_src_addr(id_priv),
1669 &id->route.addr.dev_addr);
1670 if (ret)
1671 goto err;
1672 }
1673 }
1674
1675 id_priv->state = RDMA_CM_CONNECT;
1676 return id_priv;
1677 err:
1678 rdma_destroy_id(id);
1679 return NULL;
1680 }
1681
1682 static void cma_set_req_event_data(struct rdma_cm_event *event,
1683 struct ib_cm_req_event_param *req_data,
1684 void *private_data, int offset)
1685 {
1686 event->param.conn.private_data = private_data + offset;
1687 event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
1688 event->param.conn.responder_resources = req_data->responder_resources;
1689 event->param.conn.initiator_depth = req_data->initiator_depth;
1690 event->param.conn.flow_control = req_data->flow_control;
1691 event->param.conn.retry_count = req_data->retry_count;
1692 event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
1693 event->param.conn.srq = req_data->srq;
1694 event->param.conn.qp_num = req_data->remote_qpn;
1695 }
1696
1697 static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
1698 {
1699 return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
1700 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
1701 ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
1702 (id->qp_type == IB_QPT_UD)) ||
1703 (!id->qp_type));
1704 }
1705
1706 static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1707 {
1708 struct rdma_id_private *listen_id, *conn_id;
1709 struct rdma_cm_event event;
1710 struct net_device *net_dev;
1711 int offset, ret;
1712
1713 listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
1714 if (IS_ERR(listen_id))
1715 return PTR_ERR(listen_id);
1716
1717 if (!cma_check_req_qp_type(&listen_id->id, ib_event)) {
1718 ret = -EINVAL;
1719 goto net_dev_put;
1720 }
1721
1722 if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) {
1723 ret = -ECONNABORTED;
1724 goto net_dev_put;
1725 }
1726
1727 memset(&event, 0, sizeof event);
1728 offset = cma_user_data_offset(listen_id);
1729 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
1730 if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
1731 conn_id = cma_new_udp_id(&listen_id->id, ib_event, net_dev);
1732 event.param.ud.private_data = ib_event->private_data + offset;
1733 event.param.ud.private_data_len =
1734 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
1735 } else {
1736 conn_id = cma_new_conn_id(&listen_id->id, ib_event, net_dev);
1737 cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
1738 ib_event->private_data, offset);
1739 }
1740 if (!conn_id) {
1741 ret = -ENOMEM;
1742 goto err1;
1743 }
1744
1745 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
1746 ret = cma_acquire_dev(conn_id, listen_id);
1747 if (ret)
1748 goto err2;
1749
1750 conn_id->cm_id.ib = cm_id;
1751 cm_id->context = conn_id;
1752 cm_id->cm_handler = cma_ib_handler;
1753
1754 /*
1755 * Protect against the user destroying conn_id from another thread
1756 * until we're done accessing it.
1757 */
1758 atomic_inc(&conn_id->refcount);
1759 ret = conn_id->id.event_handler(&conn_id->id, &event);
1760 if (ret)
1761 goto err3;
1762 /*
1763 * Acquire mutex to prevent user executing rdma_destroy_id()
1764 * while we're accessing the cm_id.
1765 */
1766 mutex_lock(&lock);
1767 if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
1768 (conn_id->id.qp_type != IB_QPT_UD))
1769 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
1770 mutex_unlock(&lock);
1771 mutex_unlock(&conn_id->handler_mutex);
1772 mutex_unlock(&listen_id->handler_mutex);
1773 cma_deref_id(conn_id);
1774 if (net_dev)
1775 dev_put(net_dev);
1776 return 0;
1777
1778 err3:
1779 cma_deref_id(conn_id);
1780 /* Destroy the CM ID by returning a non-zero value. */
1781 conn_id->cm_id.ib = NULL;
1782 err2:
1783 cma_exch(conn_id, RDMA_CM_DESTROYING);
1784 mutex_unlock(&conn_id->handler_mutex);
1785 err1:
1786 mutex_unlock(&listen_id->handler_mutex);
1787 if (conn_id)
1788 rdma_destroy_id(&conn_id->id);
1789
1790 net_dev_put:
1791 if (net_dev)
1792 dev_put(net_dev);
1793
1794 return ret;
1795 }
1796
1797 __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
1798 {
1799 if (addr->sa_family == AF_IB)
1800 return ((struct sockaddr_ib *) addr)->sib_sid;
1801
1802 return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
1803 }
1804 EXPORT_SYMBOL(rdma_get_service_id);
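/*
 * For the IP-based port spaces the service ID therefore carries the port
 * space in bits 31:16 and the port number in bits 15:0 (in host order,
 * before the cpu_to_be64() conversion). For example, RDMA_PS_TCP with
 * port 5000 yields cpu_to_be64(((u64)RDMA_PS_TCP << 16) + 5000), and
 * rdma_ps_from_service_id()/cma_port_from_service_id() above invert
 * exactly this encoding.
 */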
1805
1806 static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
1807 {
1808 struct rdma_id_private *id_priv = iw_id->context;
1809 struct rdma_cm_event event;
1810 int ret = 0;
1811 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
1812 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
1813
1814 if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
1815 return 0;
1816
1817 memset(&event, 0, sizeof event);
1818 switch (iw_event->event) {
1819 case IW_CM_EVENT_CLOSE:
1820 event.event = RDMA_CM_EVENT_DISCONNECTED;
1821 break;
1822 case IW_CM_EVENT_CONNECT_REPLY:
1823 memcpy(cma_src_addr(id_priv), laddr,
1824 rdma_addr_size(laddr));
1825 memcpy(cma_dst_addr(id_priv), raddr,
1826 rdma_addr_size(raddr));
1827 switch (iw_event->status) {
1828 case 0:
1829 event.event = RDMA_CM_EVENT_ESTABLISHED;
1830 event.param.conn.initiator_depth = iw_event->ird;
1831 event.param.conn.responder_resources = iw_event->ord;
1832 break;
1833 case -ECONNRESET:
1834 case -ECONNREFUSED:
1835 event.event = RDMA_CM_EVENT_REJECTED;
1836 break;
1837 case -ETIMEDOUT:
1838 event.event = RDMA_CM_EVENT_UNREACHABLE;
1839 break;
1840 default:
1841 event.event = RDMA_CM_EVENT_CONNECT_ERROR;
1842 break;
1843 }
1844 break;
1845 case IW_CM_EVENT_ESTABLISHED:
1846 event.event = RDMA_CM_EVENT_ESTABLISHED;
1847 event.param.conn.initiator_depth = iw_event->ird;
1848 event.param.conn.responder_resources = iw_event->ord;
1849 break;
1850 default:
1851 BUG_ON(1);
1852 }
1853
1854 event.status = iw_event->status;
1855 event.param.conn.private_data = iw_event->private_data;
1856 event.param.conn.private_data_len = iw_event->private_data_len;
1857 ret = id_priv->id.event_handler(&id_priv->id, &event);
1858 if (ret) {
1859 /* Destroy the CM ID by returning a non-zero value. */
1860 id_priv->cm_id.iw = NULL;
1861 cma_exch(id_priv, RDMA_CM_DESTROYING);
1862 mutex_unlock(&id_priv->handler_mutex);
1863 rdma_destroy_id(&id_priv->id);
1864 return ret;
1865 }
1866
1867 mutex_unlock(&id_priv->handler_mutex);
1868 return ret;
1869 }
1870
1871 static int iw_conn_req_handler(struct iw_cm_id *cm_id,
1872 struct iw_cm_event *iw_event)
1873 {
1874 struct rdma_cm_id *new_cm_id;
1875 struct rdma_id_private *listen_id, *conn_id;
1876 struct rdma_cm_event event;
1877 int ret;
1878 struct ib_device_attr attr;
1879 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
1880 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
1881
1882 listen_id = cm_id->context;
1883 if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
1884 return -ECONNABORTED;
1885
1886 /* Create a new RDMA id for the new IW CM ID */
1887 new_cm_id = rdma_create_id(listen_id->id.event_handler,
1888 listen_id->id.context,
1889 RDMA_PS_TCP, IB_QPT_RC);
1890 if (IS_ERR(new_cm_id)) {
1891 ret = -ENOMEM;
1892 goto out;
1893 }
1894 conn_id = container_of(new_cm_id, struct rdma_id_private, id);
1895 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
1896 conn_id->state = RDMA_CM_CONNECT;
1897
1898 ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
1899 if (ret) {
1900 mutex_unlock(&conn_id->handler_mutex);
1901 rdma_destroy_id(new_cm_id);
1902 goto out;
1903 }
1904
1905 ret = cma_acquire_dev(conn_id, listen_id);
1906 if (ret) {
1907 mutex_unlock(&conn_id->handler_mutex);
1908 rdma_destroy_id(new_cm_id);
1909 goto out;
1910 }
1911
1912 conn_id->cm_id.iw = cm_id;
1913 cm_id->context = conn_id;
1914 cm_id->cm_handler = cma_iw_handler;
1915
1916 memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
1917 memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
1918
1919 ret = ib_query_device(conn_id->id.device, &attr);
1920 if (ret) {
1921 mutex_unlock(&conn_id->handler_mutex);
1922 rdma_destroy_id(new_cm_id);
1923 goto out;
1924 }
1925
1926 memset(&event, 0, sizeof event);
1927 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
1928 event.param.conn.private_data = iw_event->private_data;
1929 event.param.conn.private_data_len = iw_event->private_data_len;
1930 event.param.conn.initiator_depth = iw_event->ird;
1931 event.param.conn.responder_resources = iw_event->ord;
1932
1933 /*
1934 * Protect against the user destroying conn_id from another thread
1935 * until we're done accessing it.
1936 */
1937 atomic_inc(&conn_id->refcount);
1938 ret = conn_id->id.event_handler(&conn_id->id, &event);
1939 if (ret) {
1940 /* User wants to destroy the CM ID */
1941 conn_id->cm_id.iw = NULL;
1942 cma_exch(conn_id, RDMA_CM_DESTROYING);
1943 mutex_unlock(&conn_id->handler_mutex);
1944 cma_deref_id(conn_id);
1945 rdma_destroy_id(&conn_id->id);
1946 goto out;
1947 }
1948
1949 mutex_unlock(&conn_id->handler_mutex);
1950 cma_deref_id(conn_id);
1951
1952 out:
1953 mutex_unlock(&listen_id->handler_mutex);
1954 return ret;
1955 }
1956
1957 static int cma_ib_listen(struct rdma_id_private *id_priv)
1958 {
1959 struct sockaddr *addr;
1960 struct ib_cm_id *id;
1961 __be64 svc_id;
1962
1963 addr = cma_src_addr(id_priv);
1964 svc_id = rdma_get_service_id(&id_priv->id, addr);
1965 id = ib_cm_insert_listen(id_priv->id.device, cma_req_handler, svc_id);
1966 if (IS_ERR(id))
1967 return PTR_ERR(id);
1968 id_priv->cm_id.ib = id;
1969
1970 return 0;
1971 }
1972
1973 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
1974 {
1975 int ret;
1976 struct iw_cm_id *id;
1977
1978 id = iw_create_cm_id(id_priv->id.device,
1979 iw_conn_req_handler,
1980 id_priv);
1981 if (IS_ERR(id))
1982 return PTR_ERR(id);
1983
1984 id->tos = id_priv->tos;
1985 id_priv->cm_id.iw = id;
1986
1987 memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
1988 rdma_addr_size(cma_src_addr(id_priv)));
1989
1990 ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
1991
1992 if (ret) {
1993 iw_destroy_cm_id(id_priv->cm_id.iw);
1994 id_priv->cm_id.iw = NULL;
1995 }
1996
1997 return ret;
1998 }
1999
2000 static int cma_listen_handler(struct rdma_cm_id *id,
2001 struct rdma_cm_event *event)
2002 {
2003 struct rdma_id_private *id_priv = id->context;
2004
2005 id->context = id_priv->id.context;
2006 id->event_handler = id_priv->id.event_handler;
2007 return id_priv->id.event_handler(id, event);
2008 }
2009
2010 static void cma_listen_on_dev(struct rdma_id_private *id_priv,
2011 struct cma_device *cma_dev)
2012 {
2013 struct rdma_id_private *dev_id_priv;
2014 struct rdma_cm_id *id;
2015 int ret;
2016
2017 if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
2018 return;
2019
2020 id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
2021 id_priv->id.qp_type);
2022 if (IS_ERR(id))
2023 return;
2024
2025 dev_id_priv = container_of(id, struct rdma_id_private, id);
2026
2027 dev_id_priv->state = RDMA_CM_ADDR_BOUND;
2028 memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
2029 rdma_addr_size(cma_src_addr(id_priv)));
2030
2031 cma_attach_to_dev(dev_id_priv, cma_dev);
2032 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
2033 atomic_inc(&id_priv->refcount);
2034 dev_id_priv->internal_id = 1;
2035 dev_id_priv->afonly = id_priv->afonly;
2036
2037 ret = rdma_listen(id, id_priv->backlog);
2038 if (ret)
2039 printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
2040 "listening on device %s\n", ret, cma_dev->device->name);
2041 }
2042
2043 static void cma_listen_on_all(struct rdma_id_private *id_priv)
2044 {
2045 struct cma_device *cma_dev;
2046
2047 mutex_lock(&lock);
2048 list_add_tail(&id_priv->list, &listen_any_list);
2049 list_for_each_entry(cma_dev, &dev_list, list)
2050 cma_listen_on_dev(id_priv, cma_dev);
2051 mutex_unlock(&lock);
2052 }
2053
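/*
 * rdma_set_service_type() - set the type of service for an rdma_cm_id.
 * The value is stored as an 8-bit TOS and is later used for the IB
 * QoS class, the RoCE service level and the iWARP TOS.
 */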
2054 void rdma_set_service_type(struct rdma_cm_id *id, int tos)
2055 {
2056 struct rdma_id_private *id_priv;
2057
2058 id_priv = container_of(id, struct rdma_id_private, id);
2059 id_priv->tos = (u8) tos;
2060 }
2061 EXPORT_SYMBOL(rdma_set_service_type);
2062
2063 static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
2064 void *context)
2065 {
2066 struct cma_work *work = context;
2067 struct rdma_route *route;
2068
2069 route = &work->id->id.route;
2070
2071 if (!status) {
2072 route->num_paths = 1;
2073 *route->path_rec = *path_rec;
2074 } else {
2075 work->old_state = RDMA_CM_ROUTE_QUERY;
2076 work->new_state = RDMA_CM_ADDR_RESOLVED;
2077 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
2078 work->event.status = status;
2079 }
2080
2081 queue_work(cma_wq, &work->work);
2082 }
2083
2084 static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
2085 struct cma_work *work)
2086 {
2087 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
2088 struct ib_sa_path_rec path_rec;
2089 ib_sa_comp_mask comp_mask;
2090 struct sockaddr_in6 *sin6;
2091 struct sockaddr_ib *sib;
2092
2093 memset(&path_rec, 0, sizeof path_rec);
2094 rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
2095 rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
2096 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
2097 path_rec.numb_path = 1;
2098 path_rec.reversible = 1;
2099 path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
2100
2101 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
2102 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
2103 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
2104
2105 switch (cma_family(id_priv)) {
2106 case AF_INET:
2107 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
2108 comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
2109 break;
2110 case AF_INET6:
2111 sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
2112 path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
2113 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
2114 break;
2115 case AF_IB:
2116 sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
2117 path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
2118 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
2119 break;
2120 }
2121
2122 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
2123 id_priv->id.port_num, &path_rec,
2124 comp_mask, timeout_ms,
2125 GFP_KERNEL, cma_query_handler,
2126 work, &id_priv->query);
2127
2128 return (id_priv->query_id < 0) ? id_priv->query_id : 0;
2129 }
2130
2131 static void cma_work_handler(struct work_struct *_work)
2132 {
2133 struct cma_work *work = container_of(_work, struct cma_work, work);
2134 struct rdma_id_private *id_priv = work->id;
2135 int destroy = 0;
2136
2137 mutex_lock(&id_priv->handler_mutex);
2138 if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
2139 goto out;
2140
2141 if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
2142 cma_exch(id_priv, RDMA_CM_DESTROYING);
2143 destroy = 1;
2144 }
2145 out:
2146 mutex_unlock(&id_priv->handler_mutex);
2147 cma_deref_id(id_priv);
2148 if (destroy)
2149 rdma_destroy_id(&id_priv->id);
2150 kfree(work);
2151 }
2152
2153 static void cma_ndev_work_handler(struct work_struct *_work)
2154 {
2155 struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
2156 struct rdma_id_private *id_priv = work->id;
2157 int destroy = 0;
2158
2159 mutex_lock(&id_priv->handler_mutex);
2160 if (id_priv->state == RDMA_CM_DESTROYING ||
2161 id_priv->state == RDMA_CM_DEVICE_REMOVAL)
2162 goto out;
2163
2164 if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
2165 cma_exch(id_priv, RDMA_CM_DESTROYING);
2166 destroy = 1;
2167 }
2168
2169 out:
2170 mutex_unlock(&id_priv->handler_mutex);
2171 cma_deref_id(id_priv);
2172 if (destroy)
2173 rdma_destroy_id(&id_priv->id);
2174 kfree(work);
2175 }
2176
2177 static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
2178 {
2179 struct rdma_route *route = &id_priv->id.route;
2180 struct cma_work *work;
2181 int ret;
2182
2183 work = kzalloc(sizeof *work, GFP_KERNEL);
2184 if (!work)
2185 return -ENOMEM;
2186
2187 work->id = id_priv;
2188 INIT_WORK(&work->work, cma_work_handler);
2189 work->old_state = RDMA_CM_ROUTE_QUERY;
2190 work->new_state = RDMA_CM_ROUTE_RESOLVED;
2191 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
2192
2193 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
2194 if (!route->path_rec) {
2195 ret = -ENOMEM;
2196 goto err1;
2197 }
2198
2199 ret = cma_query_ib_route(id_priv, timeout_ms, work);
2200 if (ret)
2201 goto err2;
2202
2203 return 0;
2204 err2:
2205 kfree(route->path_rec);
2206 route->path_rec = NULL;
2207 err1:
2208 kfree(work);
2209 return ret;
2210 }
2211
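/*
 * rdma_set_ib_paths() - hand the rdma_cm pre-resolved IB path records
 * instead of having it query the SA.  The id must have completed
 * address resolution; on success it moves directly to the
 * route-resolved state.
 */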
2212 int rdma_set_ib_paths(struct rdma_cm_id *id,
2213 struct ib_sa_path_rec *path_rec, int num_paths)
2214 {
2215 struct rdma_id_private *id_priv;
2216 int ret;
2217
2218 id_priv = container_of(id, struct rdma_id_private, id);
2219 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
2220 RDMA_CM_ROUTE_RESOLVED))
2221 return -EINVAL;
2222
2223 id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
2224 GFP_KERNEL);
2225 if (!id->route.path_rec) {
2226 ret = -ENOMEM;
2227 goto err;
2228 }
2229
2230 id->route.num_paths = num_paths;
2231 return 0;
2232 err:
2233 cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
2234 return ret;
2235 }
2236 EXPORT_SYMBOL(rdma_set_ib_paths);
2237
2238 static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
2239 {
2240 struct cma_work *work;
2241
2242 work = kzalloc(sizeof *work, GFP_KERNEL);
2243 if (!work)
2244 return -ENOMEM;
2245
2246 work->id = id_priv;
2247 INIT_WORK(&work->work, cma_work_handler);
2248 work->old_state = RDMA_CM_ROUTE_QUERY;
2249 work->new_state = RDMA_CM_ROUTE_RESOLVED;
2250 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
2251 queue_work(cma_wq, &work->work);
2252 return 0;
2253 }
2254
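/*
 * Map an IP TOS value to a RoCE service level: use the net device's
 * priority-to-traffic-class map when one is configured, fall back to
 * the VLAN egress priority mapping, and default to 0 otherwise.
 */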
2255 static int iboe_tos_to_sl(struct net_device *ndev, int tos)
2256 {
2257 int prio;
2258 struct net_device *dev;
2259
2260 prio = rt_tos2priority(tos);
2261 dev = ndev->priv_flags & IFF_802_1Q_VLAN ?
2262 vlan_dev_real_dev(ndev) : ndev;
2263
2264 if (dev->num_tc)
2265 return netdev_get_prio_tc_map(dev, prio);
2266
2267 #if IS_ENABLED(CONFIG_VLAN_8021Q)
2268 if (ndev->priv_flags & IFF_802_1Q_VLAN)
2269 return (vlan_dev_get_egress_qos_mask(ndev, prio) &
2270 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
2271 #endif
2272 return 0;
2273 }
2274
2275 static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
2276 {
2277 struct rdma_route *route = &id_priv->id.route;
2278 struct rdma_addr *addr = &route->addr;
2279 struct cma_work *work;
2280 int ret;
2281 struct net_device *ndev = NULL;
2282
2284 work = kzalloc(sizeof *work, GFP_KERNEL);
2285 if (!work)
2286 return -ENOMEM;
2287
2288 work->id = id_priv;
2289 INIT_WORK(&work->work, cma_work_handler);
2290
2291 route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
2292 if (!route->path_rec) {
2293 ret = -ENOMEM;
2294 goto err1;
2295 }
2296
2297 route->num_paths = 1;
2298
2299 if (addr->dev_addr.bound_dev_if)
2300 ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
2301 if (!ndev) {
2302 ret = -ENODEV;
2303 goto err2;
2304 }
2305
2306 route->path_rec->vlan_id = rdma_vlan_dev_vlan_id(ndev);
2307 memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN);
2308 memcpy(route->path_rec->smac, ndev->dev_addr, ndev->addr_len);
2309
2310 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
2311 &route->path_rec->sgid);
2312 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
2313 &route->path_rec->dgid);
2314
2315 route->path_rec->hop_limit = 1;
2316 route->path_rec->reversible = 1;
2317 route->path_rec->pkey = cpu_to_be16(0xffff);
2318 route->path_rec->mtu_selector = IB_SA_EQ;
2319 route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos);
2320 route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
2321 route->path_rec->rate_selector = IB_SA_EQ;
2322 route->path_rec->rate = iboe_get_rate(ndev);
2323 dev_put(ndev);
2324 route->path_rec->packet_life_time_selector = IB_SA_EQ;
2325 route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
2326 if (!route->path_rec->mtu) {
2327 ret = -EINVAL;
2328 goto err2;
2329 }
2330
2331 work->old_state = RDMA_CM_ROUTE_QUERY;
2332 work->new_state = RDMA_CM_ROUTE_RESOLVED;
2333 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
2334 work->event.status = 0;
2335
2336 queue_work(cma_wq, &work->work);
2337
2338 return 0;
2339
2340 err2:
2341 kfree(route->path_rec);
2342 route->path_rec = NULL;
2343 err1:
2344 kfree(work);
2345 return ret;
2346 }
2347
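/*
 * rdma_resolve_route() - resolve the route to the destination once the
 * address has been resolved.  IB ports query the SA for a path record,
 * RoCE ports build the path locally from the bound net device, and
 * iWARP ports complete immediately.  The result is reported through
 * RDMA_CM_EVENT_ROUTE_RESOLVED or RDMA_CM_EVENT_ROUTE_ERROR.
 */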
2348 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
2349 {
2350 struct rdma_id_private *id_priv;
2351 int ret;
2352
2353 id_priv = container_of(id, struct rdma_id_private, id);
2354 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
2355 return -EINVAL;
2356
2357 atomic_inc(&id_priv->refcount);
2358 if (rdma_cap_ib_sa(id->device, id->port_num))
2359 ret = cma_resolve_ib_route(id_priv, timeout_ms);
2360 else if (rdma_protocol_roce(id->device, id->port_num))
2361 ret = cma_resolve_iboe_route(id_priv);
2362 else if (rdma_protocol_iwarp(id->device, id->port_num))
2363 ret = cma_resolve_iw_route(id_priv, timeout_ms);
2364 else
2365 ret = -ENOSYS;
2366
2367 if (ret)
2368 goto err;
2369
2370 return 0;
2371 err:
2372 cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
2373 cma_deref_id(id_priv);
2374 return ret;
2375 }
2376 EXPORT_SYMBOL(rdma_resolve_route);
2377
2378 static void cma_set_loopback(struct sockaddr *addr)
2379 {
2380 switch (addr->sa_family) {
2381 case AF_INET:
2382 ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
2383 break;
2384 case AF_INET6:
2385 ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
2386 0, 0, 0, htonl(1));
2387 break;
2388 default:
2389 ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
2390 0, 0, 0, htonl(1));
2391 break;
2392 }
2393 }
2394
2395 static int cma_bind_loopback(struct rdma_id_private *id_priv)
2396 {
2397 struct cma_device *cma_dev, *cur_dev;
2398 struct ib_port_attr port_attr;
2399 union ib_gid gid;
2400 u16 pkey;
2401 int ret;
2402 u8 p;
2403
2404 cma_dev = NULL;
2405 mutex_lock(&lock);
2406 list_for_each_entry(cur_dev, &dev_list, list) {
2407 if (cma_family(id_priv) == AF_IB &&
2408 !rdma_cap_ib_cm(cur_dev->device, 1))
2409 continue;
2410
2411 if (!cma_dev)
2412 cma_dev = cur_dev;
2413
2414 for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
2415 if (!ib_query_port(cur_dev->device, p, &port_attr) &&
2416 port_attr.state == IB_PORT_ACTIVE) {
2417 cma_dev = cur_dev;
2418 goto port_found;
2419 }
2420 }
2421 }
2422
2423 if (!cma_dev) {
2424 ret = -ENODEV;
2425 goto out;
2426 }
2427
2428 p = 1;
2429
2430 port_found:
2431 ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid, NULL);
2432 if (ret)
2433 goto out;
2434
2435 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
2436 if (ret)
2437 goto out;
2438
2439 id_priv->id.route.addr.dev_addr.dev_type =
2440 (rdma_protocol_ib(cma_dev->device, p)) ?
2441 ARPHRD_INFINIBAND : ARPHRD_ETHER;
2442
2443 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
2444 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
2445 id_priv->id.port_num = p;
2446 cma_attach_to_dev(id_priv, cma_dev);
2447 cma_set_loopback(cma_src_addr(id_priv));
2448 out:
2449 mutex_unlock(&lock);
2450 return ret;
2451 }
2452
2453 static void addr_handler(int status, struct sockaddr *src_addr,
2454 struct rdma_dev_addr *dev_addr, void *context)
2455 {
2456 struct rdma_id_private *id_priv = context;
2457 struct rdma_cm_event event;
2458
2459 memset(&event, 0, sizeof event);
2460 mutex_lock(&id_priv->handler_mutex);
2461 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
2462 RDMA_CM_ADDR_RESOLVED))
2463 goto out;
2464
2465 memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
2466 if (!status && !id_priv->cma_dev)
2467 status = cma_acquire_dev(id_priv, NULL);
2468
2469 if (status) {
2470 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
2471 RDMA_CM_ADDR_BOUND))
2472 goto out;
2473 event.event = RDMA_CM_EVENT_ADDR_ERROR;
2474 event.status = status;
2475 } else
2476 event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
2477
2478 if (id_priv->id.event_handler(&id_priv->id, &event)) {
2479 cma_exch(id_priv, RDMA_CM_DESTROYING);
2480 mutex_unlock(&id_priv->handler_mutex);
2481 cma_deref_id(id_priv);
2482 rdma_destroy_id(&id_priv->id);
2483 return;
2484 }
2485 out:
2486 mutex_unlock(&id_priv->handler_mutex);
2487 cma_deref_id(id_priv);
2488 }
2489
2490 static int cma_resolve_loopback(struct rdma_id_private *id_priv)
2491 {
2492 struct cma_work *work;
2493 union ib_gid gid;
2494 int ret;
2495
2496 work = kzalloc(sizeof *work, GFP_KERNEL);
2497 if (!work)
2498 return -ENOMEM;
2499
2500 if (!id_priv->cma_dev) {
2501 ret = cma_bind_loopback(id_priv);
2502 if (ret)
2503 goto err;
2504 }
2505
2506 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
2507 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
2508
2509 work->id = id_priv;
2510 INIT_WORK(&work->work, cma_work_handler);
2511 work->old_state = RDMA_CM_ADDR_QUERY;
2512 work->new_state = RDMA_CM_ADDR_RESOLVED;
2513 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
2514 queue_work(cma_wq, &work->work);
2515 return 0;
2516 err:
2517 kfree(work);
2518 return ret;
2519 }
2520
2521 static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
2522 {
2523 struct cma_work *work;
2524 int ret;
2525
2526 work = kzalloc(sizeof *work, GFP_KERNEL);
2527 if (!work)
2528 return -ENOMEM;
2529
2530 if (!id_priv->cma_dev) {
2531 ret = cma_resolve_ib_dev(id_priv);
2532 if (ret)
2533 goto err;
2534 }
2535
2536 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
2537 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
2538
2539 work->id = id_priv;
2540 INIT_WORK(&work->work, cma_work_handler);
2541 work->old_state = RDMA_CM_ADDR_QUERY;
2542 work->new_state = RDMA_CM_ADDR_RESOLVED;
2543 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
2544 queue_work(cma_wq, &work->work);
2545 return 0;
2546 err:
2547 kfree(work);
2548 return ret;
2549 }
2550
2551 static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2552 struct sockaddr *dst_addr)
2553 {
2554 if (!src_addr || !src_addr->sa_family) {
2555 src_addr = (struct sockaddr *) &id->route.addr.src_addr;
2556 src_addr->sa_family = dst_addr->sa_family;
2557 if (dst_addr->sa_family == AF_INET6) {
2558 struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
2559 struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
2560 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
2561 if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
2562 id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id;
2563 } else if (dst_addr->sa_family == AF_IB) {
2564 ((struct sockaddr_ib *) src_addr)->sib_pkey =
2565 ((struct sockaddr_ib *) dst_addr)->sib_pkey;
2566 }
2567 }
2568 return rdma_bind_addr(id, src_addr);
2569 }
2570
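/*
 * rdma_resolve_addr() - map the destination (and, if needed, source) IP
 * or AF_IB address to an RDMA device and port.  A wildcard destination
 * resolves to the loopback device.  Completion is reported through
 * RDMA_CM_EVENT_ADDR_RESOLVED or RDMA_CM_EVENT_ADDR_ERROR.
 *
 * Illustrative active-side call sequence (a sketch only, not taken from
 * this file; error handling and event dispatch are omitted):
 *
 *	id = rdma_create_id(event_handler, ctx, RDMA_PS_TCP, IB_QPT_RC);
 *	rdma_resolve_addr(id, NULL, dst_addr, 2000);
 *	... wait for RDMA_CM_EVENT_ADDR_RESOLVED ...
 *	rdma_resolve_route(id, 2000);
 *	... wait for RDMA_CM_EVENT_ROUTE_RESOLVED ...
 *	rdma_create_qp(id, pd, &qp_init_attr);
 *	rdma_connect(id, &conn_param);
 */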
2571 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2572 struct sockaddr *dst_addr, int timeout_ms)
2573 {
2574 struct rdma_id_private *id_priv;
2575 int ret;
2576
2577 id_priv = container_of(id, struct rdma_id_private, id);
2578 if (id_priv->state == RDMA_CM_IDLE) {
2579 ret = cma_bind_addr(id, src_addr, dst_addr);
2580 if (ret)
2581 return ret;
2582 }
2583
2584 if (cma_family(id_priv) != dst_addr->sa_family)
2585 return -EINVAL;
2586
2587 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
2588 return -EINVAL;
2589
2590 atomic_inc(&id_priv->refcount);
2591 memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
2592 if (cma_any_addr(dst_addr)) {
2593 ret = cma_resolve_loopback(id_priv);
2594 } else {
2595 if (dst_addr->sa_family == AF_IB) {
2596 ret = cma_resolve_ib_addr(id_priv);
2597 } else {
2598 ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv),
2599 dst_addr, &id->route.addr.dev_addr,
2600 timeout_ms, addr_handler, id_priv);
2601 }
2602 }
2603 if (ret)
2604 goto err;
2605
2606 return 0;
2607 err:
2608 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
2609 cma_deref_id(id_priv);
2610 return ret;
2611 }
2612 EXPORT_SYMBOL(rdma_resolve_addr);
2613
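/*
 * rdma_set_reuseaddr() - allow the id's port to be shared with other
 * listeners, similar to SO_REUSEADDR.  Reuse can be enabled at any
 * time, but can only be cleared while the id is still idle.
 */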
2614 int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
2615 {
2616 struct rdma_id_private *id_priv;
2617 unsigned long flags;
2618 int ret;
2619
2620 id_priv = container_of(id, struct rdma_id_private, id);
2621 spin_lock_irqsave(&id_priv->lock, flags);
2622 if (reuse || id_priv->state == RDMA_CM_IDLE) {
2623 id_priv->reuseaddr = reuse;
2624 ret = 0;
2625 } else {
2626 ret = -EINVAL;
2627 }
2628 spin_unlock_irqrestore(&id_priv->lock, flags);
2629 return ret;
2630 }
2631 EXPORT_SYMBOL(rdma_set_reuseaddr);
2632
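/*
 * rdma_set_afonly() - restrict the id to its own address family when
 * sharing a port, analogous to the IPV6_V6ONLY socket option.  Only
 * valid while the id is idle or merely address-bound.
 */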
2633 int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
2634 {
2635 struct rdma_id_private *id_priv;
2636 unsigned long flags;
2637 int ret;
2638
2639 id_priv = container_of(id, struct rdma_id_private, id);
2640 spin_lock_irqsave(&id_priv->lock, flags);
2641 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
2642 id_priv->options |= (1 << CMA_OPTION_AFONLY);
2643 id_priv->afonly = afonly;
2644 ret = 0;
2645 } else {
2646 ret = -EINVAL;
2647 }
2648 spin_unlock_irqrestore(&id_priv->lock, flags);
2649 return ret;
2650 }
2651 EXPORT_SYMBOL(rdma_set_afonly);
2652
2653 static void cma_bind_port(struct rdma_bind_list *bind_list,
2654 struct rdma_id_private *id_priv)
2655 {
2656 struct sockaddr *addr;
2657 struct sockaddr_ib *sib;
2658 u64 sid, mask;
2659 __be16 port;
2660
2661 addr = cma_src_addr(id_priv);
2662 port = htons(bind_list->port);
2663
2664 switch (addr->sa_family) {
2665 case AF_INET:
2666 ((struct sockaddr_in *) addr)->sin_port = port;
2667 break;
2668 case AF_INET6:
2669 ((struct sockaddr_in6 *) addr)->sin6_port = port;
2670 break;
2671 case AF_IB:
2672 sib = (struct sockaddr_ib *) addr;
2673 sid = be64_to_cpu(sib->sib_sid);
2674 mask = be64_to_cpu(sib->sib_sid_mask);
2675 sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
2676 sib->sib_sid_mask = cpu_to_be64(~0ULL);
2677 break;
2678 }
2679 id_priv->bind_list = bind_list;
2680 hlist_add_head(&id_priv->node, &bind_list->owners);
2681 }
2682
2683 static int cma_alloc_port(enum rdma_port_space ps,
2684 struct rdma_id_private *id_priv, unsigned short snum)
2685 {
2686 struct rdma_bind_list *bind_list;
2687 int ret;
2688
2689 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
2690 if (!bind_list)
2691 return -ENOMEM;
2692
2693 ret = cma_ps_alloc(ps, bind_list, snum);
2694 if (ret < 0)
2695 goto err;
2696
2697 bind_list->ps = ps;
2698 bind_list->port = (unsigned short)ret;
2699 cma_bind_port(bind_list, id_priv);
2700 return 0;
2701 err:
2702 kfree(bind_list);
2703 return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
2704 }
2705
2706 static int cma_alloc_any_port(enum rdma_port_space ps,
2707 struct rdma_id_private *id_priv)
2708 {
2709 static unsigned int last_used_port;
2710 int low, high, remaining;
2711 unsigned int rover;
2712
2713 inet_get_local_port_range(&init_net, &low, &high);
2714 remaining = (high - low) + 1;
2715 rover = prandom_u32() % remaining + low;
2716 retry:
2717 if (last_used_port != rover &&
2718 !cma_ps_find(ps, (unsigned short)rover)) {
2719 int ret = cma_alloc_port(ps, id_priv, rover);
2720 /*
2721 		 * Remember the previously used port number in order to avoid
2722 		 * re-using the same port immediately after it is closed.
2723 */
2724 if (!ret)
2725 last_used_port = rover;
2726 if (ret != -EADDRNOTAVAIL)
2727 return ret;
2728 }
2729 if (--remaining) {
2730 rover++;
2731 if ((rover < low) || (rover > high))
2732 rover = low;
2733 goto retry;
2734 }
2735 return -EADDRNOTAVAIL;
2736 }
2737
2738 /*
2739 * Check that the requested port is available. This is called when trying to
2740 * bind to a specific port, or when trying to listen on a bound port. In
2741 * the latter case, the provided id_priv may already be on the bind_list, but
2742 * we still need to check that it's okay to start listening.
2743 */
2744 static int cma_check_port(struct rdma_bind_list *bind_list,
2745 struct rdma_id_private *id_priv, uint8_t reuseaddr)
2746 {
2747 struct rdma_id_private *cur_id;
2748 struct sockaddr *addr, *cur_addr;
2749
2750 addr = cma_src_addr(id_priv);
2751 hlist_for_each_entry(cur_id, &bind_list->owners, node) {
2752 if (id_priv == cur_id)
2753 continue;
2754
2755 if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
2756 cur_id->reuseaddr)
2757 continue;
2758
2759 cur_addr = cma_src_addr(cur_id);
2760 if (id_priv->afonly && cur_id->afonly &&
2761 (addr->sa_family != cur_addr->sa_family))
2762 continue;
2763
2764 if (cma_any_addr(addr) || cma_any_addr(cur_addr))
2765 return -EADDRNOTAVAIL;
2766
2767 if (!cma_addr_cmp(addr, cur_addr))
2768 return -EADDRINUSE;
2769 }
2770 return 0;
2771 }
2772
2773 static int cma_use_port(enum rdma_port_space ps,
2774 struct rdma_id_private *id_priv)
2775 {
2776 struct rdma_bind_list *bind_list;
2777 unsigned short snum;
2778 int ret;
2779
2780 snum = ntohs(cma_port(cma_src_addr(id_priv)));
2781 if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
2782 return -EACCES;
2783
2784 bind_list = cma_ps_find(ps, snum);
2785 if (!bind_list) {
2786 ret = cma_alloc_port(ps, id_priv, snum);
2787 } else {
2788 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
2789 if (!ret)
2790 cma_bind_port(bind_list, id_priv);
2791 }
2792 return ret;
2793 }
2794
2795 static int cma_bind_listen(struct rdma_id_private *id_priv)
2796 {
2797 struct rdma_bind_list *bind_list = id_priv->bind_list;
2798 int ret = 0;
2799
2800 mutex_lock(&lock);
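	/* Only re-validate the port if other ids share this bind_list. */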
2801 if (bind_list->owners.first->next)
2802 ret = cma_check_port(bind_list, id_priv, 0);
2803 mutex_unlock(&lock);
2804 return ret;
2805 }
2806
2807 static enum rdma_port_space cma_select_inet_ps(
2808 struct rdma_id_private *id_priv)
2809 {
2810 switch (id_priv->id.ps) {
2811 case RDMA_PS_TCP:
2812 case RDMA_PS_UDP:
2813 case RDMA_PS_IPOIB:
2814 case RDMA_PS_IB:
2815 return id_priv->id.ps;
2816 	default:
2818 		return 0;
2819 }
2820 }
2821
2822 static enum rdma_port_space cma_select_ib_ps(struct rdma_id_private *id_priv)
2823 {
2824 enum rdma_port_space ps = 0;
2825 struct sockaddr_ib *sib;
2826 u64 sid_ps, mask, sid;
2827
2828 sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
2829 mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
2830 sid = be64_to_cpu(sib->sib_sid) & mask;
2831
2832 if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
2833 sid_ps = RDMA_IB_IP_PS_IB;
2834 ps = RDMA_PS_IB;
2835 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
2836 (sid == (RDMA_IB_IP_PS_TCP & mask))) {
2837 sid_ps = RDMA_IB_IP_PS_TCP;
2838 ps = RDMA_PS_TCP;
2839 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
2840 (sid == (RDMA_IB_IP_PS_UDP & mask))) {
2841 sid_ps = RDMA_IB_IP_PS_UDP;
2842 ps = RDMA_PS_UDP;
2843 }
2844
2845 if (ps) {
2846 sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
2847 sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
2848 be64_to_cpu(sib->sib_sid_mask));
2849 }
2850 return ps;
2851 }
2852
2853 static int cma_get_port(struct rdma_id_private *id_priv)
2854 {
2855 enum rdma_port_space ps;
2856 int ret;
2857
2858 if (cma_family(id_priv) != AF_IB)
2859 ps = cma_select_inet_ps(id_priv);
2860 else
2861 ps = cma_select_ib_ps(id_priv);
2862 if (!ps)
2863 return -EPROTONOSUPPORT;
2864
2865 mutex_lock(&lock);
2866 if (cma_any_port(cma_src_addr(id_priv)))
2867 ret = cma_alloc_any_port(ps, id_priv);
2868 else
2869 ret = cma_use_port(ps, id_priv);
2870 mutex_unlock(&lock);
2871
2872 return ret;
2873 }
2874
2875 static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
2876 struct sockaddr *addr)
2877 {
2878 #if IS_ENABLED(CONFIG_IPV6)
2879 struct sockaddr_in6 *sin6;
2880
2881 if (addr->sa_family != AF_INET6)
2882 return 0;
2883
2884 sin6 = (struct sockaddr_in6 *) addr;
2885
2886 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
2887 return 0;
2888
2889 if (!sin6->sin6_scope_id)
2890 return -EINVAL;
2891
2892 dev_addr->bound_dev_if = sin6->sin6_scope_id;
2893 #endif
2894 return 0;
2895 }
2896
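/*
 * rdma_listen() - move a bound id to the listening state.  An idle id
 * is first bound to the IPv4 wildcard address.  With a device already
 * attached, the listen goes through the IB or iWARP CM; otherwise the
 * id listens on every RDMA device, present and future.
 */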
2897 int rdma_listen(struct rdma_cm_id *id, int backlog)
2898 {
2899 struct rdma_id_private *id_priv;
2900 int ret;
2901
2902 id_priv = container_of(id, struct rdma_id_private, id);
2903 if (id_priv->state == RDMA_CM_IDLE) {
2904 id->route.addr.src_addr.ss_family = AF_INET;
2905 ret = rdma_bind_addr(id, cma_src_addr(id_priv));
2906 if (ret)
2907 return ret;
2908 }
2909
2910 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
2911 return -EINVAL;
2912
2913 if (id_priv->reuseaddr) {
2914 ret = cma_bind_listen(id_priv);
2915 if (ret)
2916 goto err;
2917 }
2918
2919 id_priv->backlog = backlog;
2920 if (id->device) {
2921 if (rdma_cap_ib_cm(id->device, 1)) {
2922 ret = cma_ib_listen(id_priv);
2923 if (ret)
2924 goto err;
2925 } else if (rdma_cap_iw_cm(id->device, 1)) {
2926 ret = cma_iw_listen(id_priv, backlog);
2927 if (ret)
2928 goto err;
2929 } else {
2930 ret = -ENOSYS;
2931 goto err;
2932 }
2933 } else
2934 cma_listen_on_all(id_priv);
2935
2936 return 0;
2937 err:
2938 id_priv->backlog = 0;
2939 cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
2940 return ret;
2941 }
2942 EXPORT_SYMBOL(rdma_listen);
2943
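/*
 * rdma_bind_addr() - bind the id to a local IPv4, IPv6 or AF_IB address.
 * For a non-wildcard address the matching RDMA device is acquired, and
 * in all cases a port is reserved in the id's port space.
 */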
2944 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
2945 {
2946 struct rdma_id_private *id_priv;
2947 int ret;
2948
2949 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
2950 addr->sa_family != AF_IB)
2951 return -EAFNOSUPPORT;
2952
2953 id_priv = container_of(id, struct rdma_id_private, id);
2954 if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
2955 return -EINVAL;
2956
2957 ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
2958 if (ret)
2959 goto err1;
2960
2961 memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
2962 if (!cma_any_addr(addr)) {
2963 ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
2964 if (ret)
2965 goto err1;
2966
2967 ret = cma_acquire_dev(id_priv, NULL);
2968 if (ret)
2969 goto err1;
2970 }
2971
2972 if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
2973 if (addr->sa_family == AF_INET)
2974 id_priv->afonly = 1;
2975 #if IS_ENABLED(CONFIG_IPV6)
2976 else if (addr->sa_family == AF_INET6)
2977 id_priv->afonly = init_net.ipv6.sysctl.bindv6only;
2978 #endif
2979 }
2980 ret = cma_get_port(id_priv);
2981 if (ret)
2982 goto err2;
2983
2984 return 0;
2985 err2:
2986 if (id_priv->cma_dev)
2987 cma_release_dev(id_priv);
2988 err1:
2989 cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
2990 return ret;
2991 }
2992 EXPORT_SYMBOL(rdma_bind_addr);
2993
2994 static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
2995 {
2996 struct cma_hdr *cma_hdr;
2997
2998 cma_hdr = hdr;
2999 cma_hdr->cma_version = CMA_VERSION;
3000 if (cma_family(id_priv) == AF_INET) {
3001 struct sockaddr_in *src4, *dst4;
3002
3003 src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
3004 dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);
3005
3006 cma_set_ip_ver(cma_hdr, 4);
3007 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
3008 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
3009 cma_hdr->port = src4->sin_port;
3010 } else if (cma_family(id_priv) == AF_INET6) {
3011 struct sockaddr_in6 *src6, *dst6;
3012
3013 src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
3014 dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);
3015
3016 cma_set_ip_ver(cma_hdr, 6);
3017 cma_hdr->src_addr.ip6 = src6->sin6_addr;
3018 cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
3019 cma_hdr->port = src6->sin6_port;
3020 }
3021 return 0;
3022 }
3023
3024 static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
3025 struct ib_cm_event *ib_event)
3026 {
3027 struct rdma_id_private *id_priv = cm_id->context;
3028 struct rdma_cm_event event;
3029 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
3030 int ret = 0;
3031
3032 if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
3033 return 0;
3034
3035 memset(&event, 0, sizeof event);
3036 switch (ib_event->event) {
3037 case IB_CM_SIDR_REQ_ERROR:
3038 event.event = RDMA_CM_EVENT_UNREACHABLE;
3039 event.status = -ETIMEDOUT;
3040 break;
3041 case IB_CM_SIDR_REP_RECEIVED:
3042 event.param.ud.private_data = ib_event->private_data;
3043 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
3044 if (rep->status != IB_SIDR_SUCCESS) {
3045 event.event = RDMA_CM_EVENT_UNREACHABLE;
3046 event.status = ib_event->param.sidr_rep_rcvd.status;
3047 break;
3048 }
3049 ret = cma_set_qkey(id_priv, rep->qkey);
3050 if (ret) {
3051 event.event = RDMA_CM_EVENT_ADDR_ERROR;
3052 event.status = ret;
3053 break;
3054 }
3055 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
3056 id_priv->id.route.path_rec,
3057 &event.param.ud.ah_attr);
3058 event.param.ud.qp_num = rep->qpn;
3059 event.param.ud.qkey = rep->qkey;
3060 event.event = RDMA_CM_EVENT_ESTABLISHED;
3061 event.status = 0;
3062 break;
3063 default:
3064 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
3065 ib_event->event);
3066 goto out;
3067 }
3068
3069 ret = id_priv->id.event_handler(&id_priv->id, &event);
3070 if (ret) {
3071 /* Destroy the CM ID by returning a non-zero value. */
3072 id_priv->cm_id.ib = NULL;
3073 cma_exch(id_priv, RDMA_CM_DESTROYING);
3074 mutex_unlock(&id_priv->handler_mutex);
3075 rdma_destroy_id(&id_priv->id);
3076 return ret;
3077 }
3078 out:
3079 mutex_unlock(&id_priv->handler_mutex);
3080 return ret;
3081 }
3082
3083 static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
3084 struct rdma_conn_param *conn_param)
3085 {
3086 struct ib_cm_sidr_req_param req;
3087 struct ib_cm_id *id;
3088 void *private_data;
3089 int offset, ret;
3090
3091 memset(&req, 0, sizeof req);
3092 offset = cma_user_data_offset(id_priv);
3093 req.private_data_len = offset + conn_param->private_data_len;
3094 if (req.private_data_len < conn_param->private_data_len)
3095 return -EINVAL;
3096
3097 if (req.private_data_len) {
3098 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
3099 if (!private_data)
3100 return -ENOMEM;
3101 } else {
3102 private_data = NULL;
3103 }
3104
3105 if (conn_param->private_data && conn_param->private_data_len)
3106 memcpy(private_data + offset, conn_param->private_data,
3107 conn_param->private_data_len);
3108
3109 if (private_data) {
3110 ret = cma_format_hdr(private_data, id_priv);
3111 if (ret)
3112 goto out;
3113 req.private_data = private_data;
3114 }
3115
3116 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
3117 id_priv);
3118 if (IS_ERR(id)) {
3119 ret = PTR_ERR(id);
3120 goto out;
3121 }
3122 id_priv->cm_id.ib = id;
3123
3124 req.path = id_priv->id.route.path_rec;
3125 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
3126 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
3127 req.max_cm_retries = CMA_MAX_CM_RETRIES;
3128
3129 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
3130 if (ret) {
3131 ib_destroy_cm_id(id_priv->cm_id.ib);
3132 id_priv->cm_id.ib = NULL;
3133 }
3134 out:
3135 kfree(private_data);
3136 return ret;
3137 }
3138
3139 static int cma_connect_ib(struct rdma_id_private *id_priv,
3140 struct rdma_conn_param *conn_param)
3141 {
3142 struct ib_cm_req_param req;
3143 struct rdma_route *route;
3144 void *private_data;
3145 struct ib_cm_id *id;
3146 int offset, ret;
3147
3148 memset(&req, 0, sizeof req);
3149 offset = cma_user_data_offset(id_priv);
3150 req.private_data_len = offset + conn_param->private_data_len;
3151 if (req.private_data_len < conn_param->private_data_len)
3152 return -EINVAL;
3153
3154 if (req.private_data_len) {
3155 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
3156 if (!private_data)
3157 return -ENOMEM;
3158 } else {
3159 private_data = NULL;
3160 }
3161
3162 if (conn_param->private_data && conn_param->private_data_len)
3163 memcpy(private_data + offset, conn_param->private_data,
3164 conn_param->private_data_len);
3165
3166 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
3167 if (IS_ERR(id)) {
3168 ret = PTR_ERR(id);
3169 goto out;
3170 }
3171 id_priv->cm_id.ib = id;
3172
3173 route = &id_priv->id.route;
3174 if (private_data) {
3175 ret = cma_format_hdr(private_data, id_priv);
3176 if (ret)
3177 goto out;
3178 req.private_data = private_data;
3179 }
3180
3181 req.primary_path = &route->path_rec[0];
3182 if (route->num_paths == 2)
3183 req.alternate_path = &route->path_rec[1];
3184
3185 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
3186 req.qp_num = id_priv->qp_num;
3187 req.qp_type = id_priv->id.qp_type;
3188 req.starting_psn = id_priv->seq_num;
3189 req.responder_resources = conn_param->responder_resources;
3190 req.initiator_depth = conn_param->initiator_depth;
3191 req.flow_control = conn_param->flow_control;
3192 req.retry_count = min_t(u8, 7, conn_param->retry_count);
3193 req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
3194 req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
3195 req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
3196 req.max_cm_retries = CMA_MAX_CM_RETRIES;
3197 req.srq = id_priv->srq ? 1 : 0;
3198
3199 ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
3200 out:
3201 if (ret && !IS_ERR(id)) {
3202 ib_destroy_cm_id(id);
3203 id_priv->cm_id.ib = NULL;
3204 }
3205
3206 kfree(private_data);
3207 return ret;
3208 }
3209
3210 static int cma_connect_iw(struct rdma_id_private *id_priv,
3211 struct rdma_conn_param *conn_param)
3212 {
3213 struct iw_cm_id *cm_id;
3214 int ret;
3215 struct iw_cm_conn_param iw_param;
3216
3217 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
3218 if (IS_ERR(cm_id))
3219 return PTR_ERR(cm_id);
3220
3221 cm_id->tos = id_priv->tos;
3222 id_priv->cm_id.iw = cm_id;
3223
3224 memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
3225 rdma_addr_size(cma_src_addr(id_priv)));
3226 memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
3227 rdma_addr_size(cma_dst_addr(id_priv)));
3228
3229 ret = cma_modify_qp_rtr(id_priv, conn_param);
3230 if (ret)
3231 goto out;
3232
3233 if (conn_param) {
3234 iw_param.ord = conn_param->initiator_depth;
3235 iw_param.ird = conn_param->responder_resources;
3236 iw_param.private_data = conn_param->private_data;
3237 iw_param.private_data_len = conn_param->private_data_len;
3238 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
3239 } else {
3240 memset(&iw_param, 0, sizeof iw_param);
3241 iw_param.qpn = id_priv->qp_num;
3242 }
3243 ret = iw_cm_connect(cm_id, &iw_param);
3244 out:
3245 if (ret) {
3246 iw_destroy_cm_id(cm_id);
3247 id_priv->cm_id.iw = NULL;
3248 }
3249 return ret;
3250 }
3251
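/*
 * rdma_connect() - initiate an active connection from the route-resolved
 * state.  UD QPs on IB ports use a SIDR request, connected QPs on IB
 * ports send a CM REQ, and iWARP ports go through iw_cm_connect().  When
 * no QP is attached to the id, conn_param supplies the QP number and SRQ
 * setting to advertise.
 */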
3252 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
3253 {
3254 struct rdma_id_private *id_priv;
3255 int ret;
3256
3257 id_priv = container_of(id, struct rdma_id_private, id);
3258 if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
3259 return -EINVAL;
3260
3261 if (!id->qp) {
3262 id_priv->qp_num = conn_param->qp_num;
3263 id_priv->srq = conn_param->srq;
3264 }
3265
3266 if (rdma_cap_ib_cm(id->device, id->port_num)) {
3267 if (id->qp_type == IB_QPT_UD)
3268 ret = cma_resolve_ib_udp(id_priv, conn_param);
3269 else
3270 ret = cma_connect_ib(id_priv, conn_param);
3271 } else if (rdma_cap_iw_cm(id->device, id->port_num))
3272 ret = cma_connect_iw(id_priv, conn_param);
3273 else
3274 ret = -ENOSYS;
3275 if (ret)
3276 goto err;
3277
3278 return 0;
3279 err:
3280 cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
3281 return ret;
3282 }
3283 EXPORT_SYMBOL(rdma_connect);
3284
3285 static int cma_accept_ib(struct rdma_id_private *id_priv,
3286 struct rdma_conn_param *conn_param)
3287 {
3288 struct ib_cm_rep_param rep;
3289 int ret;
3290
3291 ret = cma_modify_qp_rtr(id_priv, conn_param);
3292 if (ret)
3293 goto out;
3294
3295 ret = cma_modify_qp_rts(id_priv, conn_param);
3296 if (ret)
3297 goto out;
3298
3299 memset(&rep, 0, sizeof rep);
3300 rep.qp_num = id_priv->qp_num;
3301 rep.starting_psn = id_priv->seq_num;
3302 rep.private_data = conn_param->private_data;
3303 rep.private_data_len = conn_param->private_data_len;
3304 rep.responder_resources = conn_param->responder_resources;
3305 rep.initiator_depth = conn_param->initiator_depth;
3306 rep.failover_accepted = 0;
3307 rep.flow_control = conn_param->flow_control;
3308 rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
3309 rep.srq = id_priv->srq ? 1 : 0;
3310
3311 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
3312 out:
3313 return ret;
3314 }
3315
3316 static int cma_accept_iw(struct rdma_id_private *id_priv,
3317 struct rdma_conn_param *conn_param)
3318 {
3319 struct iw_cm_conn_param iw_param;
3320 int ret;
3321
3322 ret = cma_modify_qp_rtr(id_priv, conn_param);
3323 if (ret)
3324 return ret;
3325
3326 iw_param.ord = conn_param->initiator_depth;
3327 iw_param.ird = conn_param->responder_resources;
3328 iw_param.private_data = conn_param->private_data;
3329 iw_param.private_data_len = conn_param->private_data_len;
3330 	if (id_priv->id.qp)
3331 		iw_param.qpn = id_priv->qp_num;
3332 	else
3333 		iw_param.qpn = conn_param->qp_num;
3334
3335 return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
3336 }
3337
3338 static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
3339 enum ib_cm_sidr_status status, u32 qkey,
3340 const void *private_data, int private_data_len)
3341 {
3342 struct ib_cm_sidr_rep_param rep;
3343 int ret;
3344
3345 memset(&rep, 0, sizeof rep);
3346 rep.status = status;
3347 if (status == IB_SIDR_SUCCESS) {
3348 ret = cma_set_qkey(id_priv, qkey);
3349 if (ret)
3350 return ret;
3351 rep.qp_num = id_priv->qp_num;
3352 rep.qkey = id_priv->qkey;
3353 }
3354 rep.private_data = private_data;
3355 rep.private_data_len = private_data_len;
3356
3357 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
3358 }
3359
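/*
 * rdma_accept() - accept a connection request on the passive side, called
 * from the RDMA_CM_EVENT_CONNECT_REQUEST handler or afterwards while the
 * id is still in the connect state.  On failure the request is
 * automatically rejected.
 */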
3360 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
3361 {
3362 struct rdma_id_private *id_priv;
3363 int ret;
3364
3365 id_priv = container_of(id, struct rdma_id_private, id);
3366
3367 id_priv->owner = task_pid_nr(current);
3368
3369 if (!cma_comp(id_priv, RDMA_CM_CONNECT))
3370 return -EINVAL;
3371
3372 if (!id->qp && conn_param) {
3373 id_priv->qp_num = conn_param->qp_num;
3374 id_priv->srq = conn_param->srq;
3375 }
3376
3377 if (rdma_cap_ib_cm(id->device, id->port_num)) {
3378 if (id->qp_type == IB_QPT_UD) {
3379 if (conn_param)
3380 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
3381 conn_param->qkey,
3382 conn_param->private_data,
3383 conn_param->private_data_len);
3384 else
3385 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
3386 0, NULL, 0);
3387 } else {
3388 if (conn_param)
3389 ret = cma_accept_ib(id_priv, conn_param);
3390 else
3391 ret = cma_rep_recv(id_priv);
3392 }
3393 } else if (rdma_cap_iw_cm(id->device, id->port_num))
3394 ret = cma_accept_iw(id_priv, conn_param);
3395 else
3396 ret = -ENOSYS;
3397
3398 if (ret)
3399 goto reject;
3400
3401 return 0;
3402 reject:
3403 cma_modify_qp_err(id_priv);
3404 rdma_reject(id, NULL, 0);
3405 return ret;
3406 }
3407 EXPORT_SYMBOL(rdma_accept);
3408
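/*
 * rdma_notify() - forward an asynchronous QP event to the CM, typically
 * IB_EVENT_COMM_EST when data arrives before connection establishment
 * has completed.  Only IB CAs need the notification; other node types
 * ignore it.
 */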
3409 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
3410 {
3411 struct rdma_id_private *id_priv;
3412 int ret;
3413
3414 id_priv = container_of(id, struct rdma_id_private, id);
3415 if (!id_priv->cm_id.ib)
3416 return -EINVAL;
3417
3418 switch (id->device->node_type) {
3419 case RDMA_NODE_IB_CA:
3420 ret = ib_cm_notify(id_priv->cm_id.ib, event);
3421 break;
3422 default:
3423 ret = 0;
3424 break;
3425 }
3426 return ret;
3427 }
3428 EXPORT_SYMBOL(rdma_notify);
3429
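/*
 * rdma_reject() - reject a connection request.  UD QPs on IB ports get a
 * SIDR reject, connected QPs a CM REJ with a consumer-defined reason, and
 * iWARP ports an iw_cm reject, optionally carrying private data.
 */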
3430 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
3431 u8 private_data_len)
3432 {
3433 struct rdma_id_private *id_priv;
3434 int ret;
3435
3436 id_priv = container_of(id, struct rdma_id_private, id);
3437 if (!id_priv->cm_id.ib)
3438 return -EINVAL;
3439
3440 if (rdma_cap_ib_cm(id->device, id->port_num)) {
3441 if (id->qp_type == IB_QPT_UD)
3442 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
3443 private_data, private_data_len);
3444 else
3445 ret = ib_send_cm_rej(id_priv->cm_id.ib,
3446 IB_CM_REJ_CONSUMER_DEFINED, NULL,
3447 0, private_data, private_data_len);
3448 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
3449 ret = iw_cm_reject(id_priv->cm_id.iw,
3450 private_data, private_data_len);
3451 } else
3452 ret = -ENOSYS;
3453
3454 return ret;
3455 }
3456 EXPORT_SYMBOL(rdma_reject);
3457
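/*
 * rdma_disconnect() - tear down an established connection.  For IB the
 * QP is moved to the error state and a DREQ/DREP exchange is initiated
 * or answered; for iWARP the disconnect is delegated to iw_cm.
 */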
3458 int rdma_disconnect(struct rdma_cm_id *id)
3459 {
3460 struct rdma_id_private *id_priv;
3461 int ret;
3462
3463 id_priv = container_of(id, struct rdma_id_private, id);
3464 if (!id_priv->cm_id.ib)
3465 return -EINVAL;
3466
3467 if (rdma_cap_ib_cm(id->device, id->port_num)) {
3468 ret = cma_modify_qp_err(id_priv);
3469 if (ret)
3470 goto out;
3471 /* Initiate or respond to a disconnect. */
3472 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
3473 ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
3474 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
3475 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
3476 } else
3477 ret = -EINVAL;
3478
3479 out:
3480 return ret;
3481 }
3482 EXPORT_SYMBOL(rdma_disconnect);
3483
3484 static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
3485 {
3486 struct rdma_id_private *id_priv;
3487 struct cma_multicast *mc = multicast->context;
3488 struct rdma_cm_event event;
3489 int ret;
3490
3491 id_priv = mc->id_priv;
3492 if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
3493 cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
3494 return 0;
3495
3496 if (!status)
3497 status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
3498 mutex_lock(&id_priv->qp_mutex);
3499 if (!status && id_priv->id.qp)
3500 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
3501 be16_to_cpu(multicast->rec.mlid));
3502 mutex_unlock(&id_priv->qp_mutex);
3503
3504 memset(&event, 0, sizeof event);
3505 event.status = status;
3506 event.param.ud.private_data = mc->context;
3507 if (!status) {
3508 event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
3509 ib_init_ah_from_mcmember(id_priv->id.device,
3510 id_priv->id.port_num, &multicast->rec,
3511 &event.param.ud.ah_attr);
3512 event.param.ud.qp_num = 0xFFFFFF;
3513 event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
3514 } else
3515 event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
3516
3517 ret = id_priv->id.event_handler(&id_priv->id, &event);
3518 if (ret) {
3519 cma_exch(id_priv, RDMA_CM_DESTROYING);
3520 mutex_unlock(&id_priv->handler_mutex);
3521 rdma_destroy_id(&id_priv->id);
3522 return 0;
3523 }
3524
3525 mutex_unlock(&id_priv->handler_mutex);
3526 return 0;
3527 }
3528
3529 static void cma_set_mgid(struct rdma_id_private *id_priv,
3530 struct sockaddr *addr, union ib_gid *mgid)
3531 {
3532 unsigned char mc_map[MAX_ADDR_LEN];
3533 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
3534 struct sockaddr_in *sin = (struct sockaddr_in *) addr;
3535 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;
3536
3537 if (cma_any_addr(addr)) {
3538 memset(mgid, 0, sizeof *mgid);
3539 } else if ((addr->sa_family == AF_INET6) &&
3540 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
3541 0xFF10A01B)) {
3542 /* IPv6 address is an SA assigned MGID. */
3543 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
3544 } else if (addr->sa_family == AF_IB) {
3545 memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
3546 	} else if (addr->sa_family == AF_INET6) {
3547 ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
3548 if (id_priv->id.ps == RDMA_PS_UDP)
3549 mc_map[7] = 0x01; /* Use RDMA CM signature */
3550 *mgid = *(union ib_gid *) (mc_map + 4);
3551 } else {
3552 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
3553 if (id_priv->id.ps == RDMA_PS_UDP)
3554 mc_map[7] = 0x01; /* Use RDMA CM signature */
3555 *mgid = *(union ib_gid *) (mc_map + 4);
3556 }
3557 }
3558
3559 static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
3560 struct cma_multicast *mc)
3561 {
3562 struct ib_sa_mcmember_rec rec;
3563 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
3564 ib_sa_comp_mask comp_mask;
3565 int ret;
3566
3567 ib_addr_get_mgid(dev_addr, &rec.mgid);
3568 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
3569 &rec.mgid, &rec);
3570 if (ret)
3571 return ret;
3572
3573 ret = cma_set_qkey(id_priv, 0);
3574 if (ret)
3575 return ret;
3576
3577 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
3578 rec.qkey = cpu_to_be32(id_priv->qkey);
3579 rdma_addr_get_sgid(dev_addr, &rec.port_gid);
3580 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
3581 rec.join_state = 1;
3582
3583 comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
3584 IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
3585 IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
3586 IB_SA_MCMEMBER_REC_FLOW_LABEL |
3587 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
3588
3589 if (id_priv->id.ps == RDMA_PS_IPOIB)
3590 comp_mask |= IB_SA_MCMEMBER_REC_RATE |
3591 IB_SA_MCMEMBER_REC_RATE_SELECTOR |
3592 IB_SA_MCMEMBER_REC_MTU_SELECTOR |
3593 IB_SA_MCMEMBER_REC_MTU |
3594 IB_SA_MCMEMBER_REC_HOP_LIMIT;
3595
3596 mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
3597 id_priv->id.port_num, &rec,
3598 comp_mask, GFP_KERNEL,
3599 cma_ib_mc_handler, mc);
3600 return PTR_ERR_OR_ZERO(mc->multicast.ib);
3601 }
3602
3603 static void iboe_mcast_work_handler(struct work_struct *work)
3604 {
3605 struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
3606 struct cma_multicast *mc = mw->mc;
3607 struct ib_sa_multicast *m = mc->multicast.ib;
3608
3609 mc->multicast.ib->context = mc;
3610 cma_ib_mc_handler(0, m);
3611 kref_put(&mc->mcref, release_mc);
3612 kfree(mw);
3613 }
3614
3615 static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
3616 {
3617 struct sockaddr_in *sin = (struct sockaddr_in *)addr;
3618 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
3619
3620 if (cma_any_addr(addr)) {
3621 memset(mgid, 0, sizeof *mgid);
3622 } else if (addr->sa_family == AF_INET6) {
3623 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
3624 } else {
3625 mgid->raw[0] = 0xff;
3626 mgid->raw[1] = 0x0e;
3627 mgid->raw[2] = 0;
3628 mgid->raw[3] = 0;
3629 mgid->raw[4] = 0;
3630 mgid->raw[5] = 0;
3631 mgid->raw[6] = 0;
3632 mgid->raw[7] = 0;
3633 mgid->raw[8] = 0;
3634 mgid->raw[9] = 0;
3635 mgid->raw[10] = 0xff;
3636 mgid->raw[11] = 0xff;
3637 *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
3638 }
3639 }
3640
3641 static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
3642 struct cma_multicast *mc)
3643 {
3644 struct iboe_mcast_work *work;
3645 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
3646 int err;
3647 struct sockaddr *addr = (struct sockaddr *)&mc->addr;
3648 struct net_device *ndev = NULL;
3649
3650 if (cma_zero_addr((struct sockaddr *)&mc->addr))
3651 return -EINVAL;
3652
3653 work = kzalloc(sizeof *work, GFP_KERNEL);
3654 if (!work)
3655 return -ENOMEM;
3656
3657 mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
3658 if (!mc->multicast.ib) {
3659 err = -ENOMEM;
3660 goto out1;
3661 }
3662
3663 cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);
3664
3665 mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
3666 if (id_priv->id.ps == RDMA_PS_UDP)
3667 mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
3668
3669 if (dev_addr->bound_dev_if)
3670 ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
3671 if (!ndev) {
3672 err = -ENODEV;
3673 goto out2;
3674 }
3675 mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
3676 mc->multicast.ib->rec.hop_limit = 1;
3677 mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
3678 dev_put(ndev);
3679 if (!mc->multicast.ib->rec.mtu) {
3680 err = -EINVAL;
3681 goto out2;
3682 }
3683 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
3684 &mc->multicast.ib->rec.port_gid);
3685 work->id = id_priv;
3686 work->mc = mc;
3687 INIT_WORK(&work->work, iboe_mcast_work_handler);
3688 kref_get(&mc->mcref);
3689 queue_work(cma_wq, &work->work);
3690
3691 return 0;
3692
3693 out2:
3694 kfree(mc->multicast.ib);
3695 out1:
3696 kfree(work);
3697 return err;
3698 }
3699
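/*
 * rdma_join_multicast() - join the multicast group identified by addr
 * and report the outcome through RDMA_CM_EVENT_MULTICAST_JOIN or
 * RDMA_CM_EVENT_MULTICAST_ERROR.  IB ports join through the SA; RoCE
 * ports construct the membership locally from the bound net device.
 */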
3700 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
3701 void *context)
3702 {
3703 struct rdma_id_private *id_priv;
3704 struct cma_multicast *mc;
3705 int ret;
3706
3707 id_priv = container_of(id, struct rdma_id_private, id);
3708 if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
3709 !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
3710 return -EINVAL;
3711
3712 mc = kmalloc(sizeof *mc, GFP_KERNEL);
3713 if (!mc)
3714 return -ENOMEM;
3715
3716 memcpy(&mc->addr, addr, rdma_addr_size(addr));
3717 mc->context = context;
3718 mc->id_priv = id_priv;
3719
3720 spin_lock(&id_priv->lock);
3721 list_add(&mc->list, &id_priv->mc_list);
3722 spin_unlock(&id_priv->lock);
3723
3724 if (rdma_protocol_roce(id->device, id->port_num)) {
3725 kref_init(&mc->mcref);
3726 ret = cma_iboe_join_multicast(id_priv, mc);
3727 } else if (rdma_cap_ib_mcast(id->device, id->port_num))
3728 ret = cma_join_ib_multicast(id_priv, mc);
3729 else
3730 ret = -ENOSYS;
3731
3732 if (ret) {
3733 spin_lock_irq(&id_priv->lock);
3734 list_del(&mc->list);
3735 spin_unlock_irq(&id_priv->lock);
3736 kfree(mc);
3737 }
3738 return ret;
3739 }
3740 EXPORT_SYMBOL(rdma_join_multicast);
3741
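/*
 * rdma_leave_multicast() - leave a previously joined group, detaching the
 * id's QP from it if one is attached.
 */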
3742 void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
3743 {
3744 struct rdma_id_private *id_priv;
3745 struct cma_multicast *mc;
3746
3747 id_priv = container_of(id, struct rdma_id_private, id);
3748 spin_lock_irq(&id_priv->lock);
3749 list_for_each_entry(mc, &id_priv->mc_list, list) {
3750 if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
3751 list_del(&mc->list);
3752 spin_unlock_irq(&id_priv->lock);
3753
3754 if (id->qp)
3755 ib_detach_mcast(id->qp,
3756 &mc->multicast.ib->rec.mgid,
3757 be16_to_cpu(mc->multicast.ib->rec.mlid));
3758
3759 BUG_ON(id_priv->cma_dev->device != id->device);
3760
3761 if (rdma_cap_ib_mcast(id->device, id->port_num)) {
3762 ib_sa_free_multicast(mc->multicast.ib);
3763 kfree(mc);
3764 } else if (rdma_protocol_roce(id->device, id->port_num))
3765 kref_put(&mc->mcref, release_mc);
3766
3767 return;
3768 }
3769 }
3770 spin_unlock_irq(&id_priv->lock);
3771 }
3772 EXPORT_SYMBOL(rdma_leave_multicast);
3773
3774 static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
3775 {
3776 struct rdma_dev_addr *dev_addr;
3777 struct cma_ndev_work *work;
3778
3779 dev_addr = &id_priv->id.route.addr.dev_addr;
3780
3781 if ((dev_addr->bound_dev_if == ndev->ifindex) &&
3782 memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
3783 printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
3784 ndev->name, &id_priv->id);
3785 work = kzalloc(sizeof *work, GFP_KERNEL);
3786 if (!work)
3787 return -ENOMEM;
3788
3789 INIT_WORK(&work->work, cma_ndev_work_handler);
3790 work->id = id_priv;
3791 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
3792 atomic_inc(&id_priv->refcount);
3793 queue_work(cma_wq, &work->work);
3794 }
3795
3796 return 0;
3797 }
3798
3799 static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
3800 void *ptr)
3801 {
3802 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
3803 struct cma_device *cma_dev;
3804 struct rdma_id_private *id_priv;
3805 int ret = NOTIFY_DONE;
3806
3807 if (dev_net(ndev) != &init_net)
3808 return NOTIFY_DONE;
3809
3810 if (event != NETDEV_BONDING_FAILOVER)
3811 return NOTIFY_DONE;
3812
3813 if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
3814 return NOTIFY_DONE;
3815
3816 mutex_lock(&lock);
3817 list_for_each_entry(cma_dev, &dev_list, list)
3818 list_for_each_entry(id_priv, &cma_dev->id_list, list) {
3819 ret = cma_netdev_change(ndev, id_priv);
3820 if (ret)
3821 goto out;
3822 }
3823
3824 out:
3825 mutex_unlock(&lock);
3826 return ret;
3827 }
3828
3829 static struct notifier_block cma_nb = {
3830 .notifier_call = cma_netdev_callback
3831 };
3832
3833 static void cma_add_one(struct ib_device *device)
3834 {
3835 struct cma_device *cma_dev;
3836 struct rdma_id_private *id_priv;
3837
3838 cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
3839 if (!cma_dev)
3840 return;
3841
3842 cma_dev->device = device;
3843
3844 init_completion(&cma_dev->comp);
3845 atomic_set(&cma_dev->refcount, 1);
3846 INIT_LIST_HEAD(&cma_dev->id_list);
3847 ib_set_client_data(device, &cma_client, cma_dev);
3848
3849 mutex_lock(&lock);
3850 list_add_tail(&cma_dev->list, &dev_list);
3851 list_for_each_entry(id_priv, &listen_any_list, list)
3852 cma_listen_on_dev(id_priv, cma_dev);
3853 mutex_unlock(&lock);
3854 }
3855
3856 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
3857 {
3858 struct rdma_cm_event event;
3859 enum rdma_cm_state state;
3860 int ret = 0;
3861
3862 /* Record that we want to remove the device */
3863 state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
3864 if (state == RDMA_CM_DESTROYING)
3865 return 0;
3866
3867 cma_cancel_operation(id_priv, state);
3868 mutex_lock(&id_priv->handler_mutex);
3869
3870 /* Check for destruction from another callback. */
3871 if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
3872 goto out;
3873
3874 memset(&event, 0, sizeof event);
3875 event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
3876 ret = id_priv->id.event_handler(&id_priv->id, &event);
3877 out:
3878 mutex_unlock(&id_priv->handler_mutex);
3879 return ret;
3880 }
3881
static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}

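/* ib_client remove callback: detach the device and tear down its cm_ids. */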
static void cma_remove_one(struct ib_device *device, void *client_data)
{
	struct cma_device *cma_dev = client_data;

	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}

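/*
 * Netlink dump callback for RDMA_NL_RDMA_CM_ID_STATS.  cb->args[0] and
 * cb->args[1] record the device and id indexes already emitted, so the dump
 * can resume where it left off when the skb fills up and the core calls
 * back for another batch.
 */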
static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct rdma_cm_id_stats *id_stats;
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id = NULL;
	struct cma_device *cma_dev;
	int i_dev = 0, i_id = 0;

	/*
	 * We export all of the IDs as a sequence of messages.  Each
	 * ID gets its own netlink message.
	 */
	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list) {
		if (i_dev < cb->args[0]) {
			i_dev++;
			continue;
		}

		i_id = 0;
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			if (i_id < cb->args[1]) {
				i_id++;
				continue;
			}

			id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
						sizeof *id_stats, RDMA_NL_RDMA_CM,
						RDMA_NL_RDMA_CM_ID_STATS,
						NLM_F_MULTI);
			if (!id_stats)
				goto out;

			memset(id_stats, 0, sizeof *id_stats);
			id = &id_priv->id;
			id_stats->node_type = id->route.addr.dev_addr.dev_type;
			id_stats->port_num = id->port_num;
			id_stats->bound_dev_if =
				id->route.addr.dev_addr.bound_dev_if;

			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_src_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
				goto out;
			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_dst_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
				goto out;

			id_stats->pid = id_priv->owner;
			id_stats->port_space = id->ps;
			id_stats->cm_state = id_priv->state;
			id_stats->qp_num = id_priv->qp_num;
			id_stats->qp_type = id->qp_type;

			i_id++;
		}

		cb->args[1] = 0;
		i_dev++;
	}

out:
	mutex_unlock(&lock);
	cb->args[0] = i_dev;
	cb->args[1] = i_id;

	return skb->len;
}

static const struct ibnl_client_cbs cma_cb_table[] = {
	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
				       .module = THIS_MODULE },
};

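/*
 * Module init: create the single-threaded rdma_cm workqueue, then register
 * with the SA, address-resolution, netdevice notifier, IB client and RDMA
 * netlink subsystems.  A netlink registration failure is only warned about;
 * everything else is unwound on error.
 */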
static int __init cma_init(void)
{
	int ret;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
		printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");

	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}

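/*
 * Module exit: unwind cma_init() registrations in reverse order and release
 * the idr tables used for port-space bookkeeping.
 */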
static void __exit cma_cleanup(void)
{
	ibnl_remove_client(RDMA_NL_RDMA_CM);
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
	idr_destroy(&ib_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);