2 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
3 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
4 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
5 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/completion.h>
38 #include <linux/in6.h>
39 #include <linux/mutex.h>
40 #include <linux/random.h>
41 #include <linux/idr.h>
42 #include <linux/inetdevice.h>
43 #include <linux/slab.h>
44 #include <linux/module.h>
45 #include <net/route.h>
49 #include <net/ip_fib.h>
50 #include <net/ip6_route.h>
52 #include <rdma/rdma_cm.h>
53 #include <rdma/rdma_cm_ib.h>
54 #include <rdma/rdma_netlink.h>
56 #include <rdma/ib_cache.h>
57 #include <rdma/ib_cm.h>
58 #include <rdma/ib_sa.h>
59 #include <rdma/iw_cm.h>
61 MODULE_AUTHOR("Sean Hefty");
62 MODULE_DESCRIPTION("Generic RDMA CM Agent");
63 MODULE_LICENSE("Dual BSD/GPL");
65 #define CMA_CM_RESPONSE_TIMEOUT 20
66 #define CMA_MAX_CM_RETRIES 15
67 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
68 #define CMA_IBOE_PACKET_LIFETIME 18
70 static const char * const cma_events
[] = {
71 [RDMA_CM_EVENT_ADDR_RESOLVED
] = "address resolved",
72 [RDMA_CM_EVENT_ADDR_ERROR
] = "address error",
73 [RDMA_CM_EVENT_ROUTE_RESOLVED
] = "route resolved ",
74 [RDMA_CM_EVENT_ROUTE_ERROR
] = "route error",
75 [RDMA_CM_EVENT_CONNECT_REQUEST
] = "connect request",
76 [RDMA_CM_EVENT_CONNECT_RESPONSE
] = "connect response",
77 [RDMA_CM_EVENT_CONNECT_ERROR
] = "connect error",
78 [RDMA_CM_EVENT_UNREACHABLE
] = "unreachable",
79 [RDMA_CM_EVENT_REJECTED
] = "rejected",
80 [RDMA_CM_EVENT_ESTABLISHED
] = "established",
81 [RDMA_CM_EVENT_DISCONNECTED
] = "disconnected",
82 [RDMA_CM_EVENT_DEVICE_REMOVAL
] = "device removal",
83 [RDMA_CM_EVENT_MULTICAST_JOIN
] = "multicast join",
84 [RDMA_CM_EVENT_MULTICAST_ERROR
] = "multicast error",
85 [RDMA_CM_EVENT_ADDR_CHANGE
] = "address change",
86 [RDMA_CM_EVENT_TIMEWAIT_EXIT
] = "timewait exit",
89 const char *rdma_event_msg(enum rdma_cm_event_type event
)
93 return (index
< ARRAY_SIZE(cma_events
) && cma_events
[index
]) ?
94 cma_events
[index
] : "unrecognized event";
96 EXPORT_SYMBOL(rdma_event_msg
);
98 static void cma_add_one(struct ib_device
*device
);
99 static void cma_remove_one(struct ib_device
*device
, void *client_data
);
101 static struct ib_client cma_client
= {
104 .remove
= cma_remove_one
107 static struct ib_sa_client sa_client
;
108 static struct rdma_addr_client addr_client
;
109 static LIST_HEAD(dev_list
);
110 static LIST_HEAD(listen_any_list
);
111 static DEFINE_MUTEX(lock
);
112 static struct workqueue_struct
*cma_wq
;
113 static DEFINE_IDR(tcp_ps
);
114 static DEFINE_IDR(udp_ps
);
115 static DEFINE_IDR(ipoib_ps
);
116 static DEFINE_IDR(ib_ps
);
118 static struct idr
*cma_idr(enum rdma_port_space ps
)
135 struct list_head list
;
136 struct ib_device
*device
;
137 struct completion comp
;
139 struct list_head id_list
;
142 struct rdma_bind_list
{
143 enum rdma_port_space ps
;
144 struct hlist_head owners
;
148 static int cma_ps_alloc(enum rdma_port_space ps
,
149 struct rdma_bind_list
*bind_list
, int snum
)
151 struct idr
*idr
= cma_idr(ps
);
153 return idr_alloc(idr
, bind_list
, snum
, snum
+ 1, GFP_KERNEL
);
156 static struct rdma_bind_list
*cma_ps_find(enum rdma_port_space ps
, int snum
)
158 struct idr
*idr
= cma_idr(ps
);
160 return idr_find(idr
, snum
);
163 static void cma_ps_remove(enum rdma_port_space ps
, int snum
)
165 struct idr
*idr
= cma_idr(ps
);
167 idr_remove(idr
, snum
);
175 * Device removal can occur at anytime, so we need extra handling to
176 * serialize notifying the user of device removal with other callbacks.
177 * We do this by disabling removal notification while a callback is in process,
178 * and reporting it after the callback completes.
180 struct rdma_id_private
{
181 struct rdma_cm_id id
;
183 struct rdma_bind_list
*bind_list
;
184 struct hlist_node node
;
185 struct list_head list
; /* listen_any_list or cma_device.list */
186 struct list_head listen_list
; /* per device listens */
187 struct cma_device
*cma_dev
;
188 struct list_head mc_list
;
191 enum rdma_cm_state state
;
193 struct mutex qp_mutex
;
195 struct completion comp
;
197 struct mutex handler_mutex
;
201 struct ib_sa_query
*query
;
219 struct cma_multicast
{
220 struct rdma_id_private
*id_priv
;
222 struct ib_sa_multicast
*ib
;
224 struct list_head list
;
226 struct sockaddr_storage addr
;
231 struct work_struct work
;
232 struct rdma_id_private
*id
;
233 enum rdma_cm_state old_state
;
234 enum rdma_cm_state new_state
;
235 struct rdma_cm_event event
;
238 struct cma_ndev_work
{
239 struct work_struct work
;
240 struct rdma_id_private
*id
;
241 struct rdma_cm_event event
;
244 struct iboe_mcast_work
{
245 struct work_struct work
;
246 struct rdma_id_private
*id
;
247 struct cma_multicast
*mc
;
260 u8 ip_version
; /* IP version: 7:4 */
262 union cma_ip_addr src_addr
;
263 union cma_ip_addr dst_addr
;
266 #define CMA_VERSION 0x00
268 struct cma_req_info
{
269 struct ib_device
*device
;
271 union ib_gid local_gid
;
277 static int cma_comp(struct rdma_id_private
*id_priv
, enum rdma_cm_state comp
)
282 spin_lock_irqsave(&id_priv
->lock
, flags
);
283 ret
= (id_priv
->state
== comp
);
284 spin_unlock_irqrestore(&id_priv
->lock
, flags
);
288 static int cma_comp_exch(struct rdma_id_private
*id_priv
,
289 enum rdma_cm_state comp
, enum rdma_cm_state exch
)
294 spin_lock_irqsave(&id_priv
->lock
, flags
);
295 if ((ret
= (id_priv
->state
== comp
)))
296 id_priv
->state
= exch
;
297 spin_unlock_irqrestore(&id_priv
->lock
, flags
);
301 static enum rdma_cm_state
cma_exch(struct rdma_id_private
*id_priv
,
302 enum rdma_cm_state exch
)
305 enum rdma_cm_state old
;
307 spin_lock_irqsave(&id_priv
->lock
, flags
);
308 old
= id_priv
->state
;
309 id_priv
->state
= exch
;
310 spin_unlock_irqrestore(&id_priv
->lock
, flags
);
314 static inline u8
cma_get_ip_ver(const struct cma_hdr
*hdr
)
316 return hdr
->ip_version
>> 4;
319 static inline void cma_set_ip_ver(struct cma_hdr
*hdr
, u8 ip_ver
)
321 hdr
->ip_version
= (ip_ver
<< 4) | (hdr
->ip_version
& 0xF);
324 static void cma_attach_to_dev(struct rdma_id_private
*id_priv
,
325 struct cma_device
*cma_dev
)
327 atomic_inc(&cma_dev
->refcount
);
328 id_priv
->cma_dev
= cma_dev
;
329 id_priv
->id
.device
= cma_dev
->device
;
330 id_priv
->id
.route
.addr
.dev_addr
.transport
=
331 rdma_node_get_transport(cma_dev
->device
->node_type
);
332 list_add_tail(&id_priv
->list
, &cma_dev
->id_list
);
335 static inline void cma_deref_dev(struct cma_device
*cma_dev
)
337 if (atomic_dec_and_test(&cma_dev
->refcount
))
338 complete(&cma_dev
->comp
);
341 static inline void release_mc(struct kref
*kref
)
343 struct cma_multicast
*mc
= container_of(kref
, struct cma_multicast
, mcref
);
345 kfree(mc
->multicast
.ib
);
349 static void cma_release_dev(struct rdma_id_private
*id_priv
)
352 list_del(&id_priv
->list
);
353 cma_deref_dev(id_priv
->cma_dev
);
354 id_priv
->cma_dev
= NULL
;
358 static inline struct sockaddr
*cma_src_addr(struct rdma_id_private
*id_priv
)
360 return (struct sockaddr
*) &id_priv
->id
.route
.addr
.src_addr
;
363 static inline struct sockaddr
*cma_dst_addr(struct rdma_id_private
*id_priv
)
365 return (struct sockaddr
*) &id_priv
->id
.route
.addr
.dst_addr
;
368 static inline unsigned short cma_family(struct rdma_id_private
*id_priv
)
370 return id_priv
->id
.route
.addr
.src_addr
.ss_family
;
373 static int cma_set_qkey(struct rdma_id_private
*id_priv
, u32 qkey
)
375 struct ib_sa_mcmember_rec rec
;
379 if (qkey
&& id_priv
->qkey
!= qkey
)
385 id_priv
->qkey
= qkey
;
389 switch (id_priv
->id
.ps
) {
392 id_priv
->qkey
= RDMA_UDP_QKEY
;
395 ib_addr_get_mgid(&id_priv
->id
.route
.addr
.dev_addr
, &rec
.mgid
);
396 ret
= ib_sa_get_mcmember_rec(id_priv
->id
.device
,
397 id_priv
->id
.port_num
, &rec
.mgid
,
400 id_priv
->qkey
= be32_to_cpu(rec
.qkey
);
408 static void cma_translate_ib(struct sockaddr_ib
*sib
, struct rdma_dev_addr
*dev_addr
)
410 dev_addr
->dev_type
= ARPHRD_INFINIBAND
;
411 rdma_addr_set_sgid(dev_addr
, (union ib_gid
*) &sib
->sib_addr
);
412 ib_addr_set_pkey(dev_addr
, ntohs(sib
->sib_pkey
));
415 static int cma_translate_addr(struct sockaddr
*addr
, struct rdma_dev_addr
*dev_addr
)
419 if (addr
->sa_family
!= AF_IB
) {
420 ret
= rdma_translate_ip(addr
, dev_addr
, NULL
);
422 cma_translate_ib((struct sockaddr_ib
*) addr
, dev_addr
);
429 static inline int cma_validate_port(struct ib_device
*device
, u8 port
,
430 union ib_gid
*gid
, int dev_type
)
435 if ((dev_type
== ARPHRD_INFINIBAND
) && !rdma_protocol_ib(device
, port
))
438 if ((dev_type
!= ARPHRD_INFINIBAND
) && rdma_protocol_ib(device
, port
))
441 ret
= ib_find_cached_gid(device
, gid
, &found_port
, NULL
);
442 if (port
!= found_port
)
448 static int cma_acquire_dev(struct rdma_id_private
*id_priv
,
449 struct rdma_id_private
*listen_id_priv
)
451 struct rdma_dev_addr
*dev_addr
= &id_priv
->id
.route
.addr
.dev_addr
;
452 struct cma_device
*cma_dev
;
453 union ib_gid gid
, iboe_gid
, *gidp
;
457 if (dev_addr
->dev_type
!= ARPHRD_INFINIBAND
&&
458 id_priv
->id
.ps
== RDMA_PS_IPOIB
)
462 rdma_ip2gid((struct sockaddr
*)&id_priv
->id
.route
.addr
.src_addr
,
465 memcpy(&gid
, dev_addr
->src_dev_addr
+
466 rdma_addr_gid_offset(dev_addr
), sizeof gid
);
468 if (listen_id_priv
) {
469 cma_dev
= listen_id_priv
->cma_dev
;
470 port
= listen_id_priv
->id
.port_num
;
471 gidp
= rdma_protocol_roce(cma_dev
->device
, port
) ?
474 ret
= cma_validate_port(cma_dev
->device
, port
, gidp
,
477 id_priv
->id
.port_num
= port
;
482 list_for_each_entry(cma_dev
, &dev_list
, list
) {
483 for (port
= 1; port
<= cma_dev
->device
->phys_port_cnt
; ++port
) {
484 if (listen_id_priv
&&
485 listen_id_priv
->cma_dev
== cma_dev
&&
486 listen_id_priv
->id
.port_num
== port
)
489 gidp
= rdma_protocol_roce(cma_dev
->device
, port
) ?
492 ret
= cma_validate_port(cma_dev
->device
, port
, gidp
,
495 id_priv
->id
.port_num
= port
;
503 cma_attach_to_dev(id_priv
, cma_dev
);
510 * Select the source IB device and address to reach the destination IB address.
512 static int cma_resolve_ib_dev(struct rdma_id_private
*id_priv
)
514 struct cma_device
*cma_dev
, *cur_dev
;
515 struct sockaddr_ib
*addr
;
516 union ib_gid gid
, sgid
, *dgid
;
522 addr
= (struct sockaddr_ib
*) cma_dst_addr(id_priv
);
523 dgid
= (union ib_gid
*) &addr
->sib_addr
;
524 pkey
= ntohs(addr
->sib_pkey
);
526 list_for_each_entry(cur_dev
, &dev_list
, list
) {
527 for (p
= 1; p
<= cur_dev
->device
->phys_port_cnt
; ++p
) {
528 if (!rdma_cap_af_ib(cur_dev
->device
, p
))
531 if (ib_find_cached_pkey(cur_dev
->device
, p
, pkey
, &index
))
534 for (i
= 0; !ib_get_cached_gid(cur_dev
->device
, p
, i
, &gid
); i
++) {
535 if (!memcmp(&gid
, dgid
, sizeof(gid
))) {
538 id_priv
->id
.port_num
= p
;
542 if (!cma_dev
&& (gid
.global
.subnet_prefix
==
543 dgid
->global
.subnet_prefix
)) {
546 id_priv
->id
.port_num
= p
;
556 cma_attach_to_dev(id_priv
, cma_dev
);
557 addr
= (struct sockaddr_ib
*) cma_src_addr(id_priv
);
558 memcpy(&addr
->sib_addr
, &sgid
, sizeof sgid
);
559 cma_translate_ib(addr
, &id_priv
->id
.route
.addr
.dev_addr
);
563 static void cma_deref_id(struct rdma_id_private
*id_priv
)
565 if (atomic_dec_and_test(&id_priv
->refcount
))
566 complete(&id_priv
->comp
);
569 static int cma_disable_callback(struct rdma_id_private
*id_priv
,
570 enum rdma_cm_state state
)
572 mutex_lock(&id_priv
->handler_mutex
);
573 if (id_priv
->state
!= state
) {
574 mutex_unlock(&id_priv
->handler_mutex
);
580 struct rdma_cm_id
*rdma_create_id(rdma_cm_event_handler event_handler
,
581 void *context
, enum rdma_port_space ps
,
582 enum ib_qp_type qp_type
)
584 struct rdma_id_private
*id_priv
;
586 id_priv
= kzalloc(sizeof *id_priv
, GFP_KERNEL
);
588 return ERR_PTR(-ENOMEM
);
590 id_priv
->owner
= task_pid_nr(current
);
591 id_priv
->state
= RDMA_CM_IDLE
;
592 id_priv
->id
.context
= context
;
593 id_priv
->id
.event_handler
= event_handler
;
595 id_priv
->id
.qp_type
= qp_type
;
596 spin_lock_init(&id_priv
->lock
);
597 mutex_init(&id_priv
->qp_mutex
);
598 init_completion(&id_priv
->comp
);
599 atomic_set(&id_priv
->refcount
, 1);
600 mutex_init(&id_priv
->handler_mutex
);
601 INIT_LIST_HEAD(&id_priv
->listen_list
);
602 INIT_LIST_HEAD(&id_priv
->mc_list
);
603 get_random_bytes(&id_priv
->seq_num
, sizeof id_priv
->seq_num
);
607 EXPORT_SYMBOL(rdma_create_id
);
609 static int cma_init_ud_qp(struct rdma_id_private
*id_priv
, struct ib_qp
*qp
)
611 struct ib_qp_attr qp_attr
;
612 int qp_attr_mask
, ret
;
614 qp_attr
.qp_state
= IB_QPS_INIT
;
615 ret
= rdma_init_qp_attr(&id_priv
->id
, &qp_attr
, &qp_attr_mask
);
619 ret
= ib_modify_qp(qp
, &qp_attr
, qp_attr_mask
);
623 qp_attr
.qp_state
= IB_QPS_RTR
;
624 ret
= ib_modify_qp(qp
, &qp_attr
, IB_QP_STATE
);
628 qp_attr
.qp_state
= IB_QPS_RTS
;
630 ret
= ib_modify_qp(qp
, &qp_attr
, IB_QP_STATE
| IB_QP_SQ_PSN
);
635 static int cma_init_conn_qp(struct rdma_id_private
*id_priv
, struct ib_qp
*qp
)
637 struct ib_qp_attr qp_attr
;
638 int qp_attr_mask
, ret
;
640 qp_attr
.qp_state
= IB_QPS_INIT
;
641 ret
= rdma_init_qp_attr(&id_priv
->id
, &qp_attr
, &qp_attr_mask
);
645 return ib_modify_qp(qp
, &qp_attr
, qp_attr_mask
);
648 int rdma_create_qp(struct rdma_cm_id
*id
, struct ib_pd
*pd
,
649 struct ib_qp_init_attr
*qp_init_attr
)
651 struct rdma_id_private
*id_priv
;
655 id_priv
= container_of(id
, struct rdma_id_private
, id
);
656 if (id
->device
!= pd
->device
)
659 qp
= ib_create_qp(pd
, qp_init_attr
);
663 if (id
->qp_type
== IB_QPT_UD
)
664 ret
= cma_init_ud_qp(id_priv
, qp
);
666 ret
= cma_init_conn_qp(id_priv
, qp
);
671 id_priv
->qp_num
= qp
->qp_num
;
672 id_priv
->srq
= (qp
->srq
!= NULL
);
678 EXPORT_SYMBOL(rdma_create_qp
);
680 void rdma_destroy_qp(struct rdma_cm_id
*id
)
682 struct rdma_id_private
*id_priv
;
684 id_priv
= container_of(id
, struct rdma_id_private
, id
);
685 mutex_lock(&id_priv
->qp_mutex
);
686 ib_destroy_qp(id_priv
->id
.qp
);
687 id_priv
->id
.qp
= NULL
;
688 mutex_unlock(&id_priv
->qp_mutex
);
690 EXPORT_SYMBOL(rdma_destroy_qp
);
692 static int cma_modify_qp_rtr(struct rdma_id_private
*id_priv
,
693 struct rdma_conn_param
*conn_param
)
695 struct ib_qp_attr qp_attr
;
696 int qp_attr_mask
, ret
;
699 mutex_lock(&id_priv
->qp_mutex
);
700 if (!id_priv
->id
.qp
) {
705 /* Need to update QP attributes from default values. */
706 qp_attr
.qp_state
= IB_QPS_INIT
;
707 ret
= rdma_init_qp_attr(&id_priv
->id
, &qp_attr
, &qp_attr_mask
);
711 ret
= ib_modify_qp(id_priv
->id
.qp
, &qp_attr
, qp_attr_mask
);
715 qp_attr
.qp_state
= IB_QPS_RTR
;
716 ret
= rdma_init_qp_attr(&id_priv
->id
, &qp_attr
, &qp_attr_mask
);
720 ret
= ib_query_gid(id_priv
->id
.device
, id_priv
->id
.port_num
,
721 qp_attr
.ah_attr
.grh
.sgid_index
, &sgid
);
725 BUG_ON(id_priv
->cma_dev
->device
!= id_priv
->id
.device
);
727 if (rdma_protocol_roce(id_priv
->id
.device
, id_priv
->id
.port_num
)) {
728 ret
= rdma_addr_find_smac_by_sgid(&sgid
, qp_attr
.smac
, NULL
);
734 qp_attr
.max_dest_rd_atomic
= conn_param
->responder_resources
;
735 ret
= ib_modify_qp(id_priv
->id
.qp
, &qp_attr
, qp_attr_mask
);
737 mutex_unlock(&id_priv
->qp_mutex
);
741 static int cma_modify_qp_rts(struct rdma_id_private
*id_priv
,
742 struct rdma_conn_param
*conn_param
)
744 struct ib_qp_attr qp_attr
;
745 int qp_attr_mask
, ret
;
747 mutex_lock(&id_priv
->qp_mutex
);
748 if (!id_priv
->id
.qp
) {
753 qp_attr
.qp_state
= IB_QPS_RTS
;
754 ret
= rdma_init_qp_attr(&id_priv
->id
, &qp_attr
, &qp_attr_mask
);
759 qp_attr
.max_rd_atomic
= conn_param
->initiator_depth
;
760 ret
= ib_modify_qp(id_priv
->id
.qp
, &qp_attr
, qp_attr_mask
);
762 mutex_unlock(&id_priv
->qp_mutex
);
766 static int cma_modify_qp_err(struct rdma_id_private
*id_priv
)
768 struct ib_qp_attr qp_attr
;
771 mutex_lock(&id_priv
->qp_mutex
);
772 if (!id_priv
->id
.qp
) {
777 qp_attr
.qp_state
= IB_QPS_ERR
;
778 ret
= ib_modify_qp(id_priv
->id
.qp
, &qp_attr
, IB_QP_STATE
);
780 mutex_unlock(&id_priv
->qp_mutex
);
784 static int cma_ib_init_qp_attr(struct rdma_id_private
*id_priv
,
785 struct ib_qp_attr
*qp_attr
, int *qp_attr_mask
)
787 struct rdma_dev_addr
*dev_addr
= &id_priv
->id
.route
.addr
.dev_addr
;
791 if (rdma_cap_eth_ah(id_priv
->id
.device
, id_priv
->id
.port_num
))
794 pkey
= ib_addr_get_pkey(dev_addr
);
796 ret
= ib_find_cached_pkey(id_priv
->id
.device
, id_priv
->id
.port_num
,
797 pkey
, &qp_attr
->pkey_index
);
801 qp_attr
->port_num
= id_priv
->id
.port_num
;
802 *qp_attr_mask
= IB_QP_STATE
| IB_QP_PKEY_INDEX
| IB_QP_PORT
;
804 if (id_priv
->id
.qp_type
== IB_QPT_UD
) {
805 ret
= cma_set_qkey(id_priv
, 0);
809 qp_attr
->qkey
= id_priv
->qkey
;
810 *qp_attr_mask
|= IB_QP_QKEY
;
812 qp_attr
->qp_access_flags
= 0;
813 *qp_attr_mask
|= IB_QP_ACCESS_FLAGS
;
818 int rdma_init_qp_attr(struct rdma_cm_id
*id
, struct ib_qp_attr
*qp_attr
,
821 struct rdma_id_private
*id_priv
;
824 id_priv
= container_of(id
, struct rdma_id_private
, id
);
825 if (rdma_cap_ib_cm(id
->device
, id
->port_num
)) {
826 if (!id_priv
->cm_id
.ib
|| (id_priv
->id
.qp_type
== IB_QPT_UD
))
827 ret
= cma_ib_init_qp_attr(id_priv
, qp_attr
, qp_attr_mask
);
829 ret
= ib_cm_init_qp_attr(id_priv
->cm_id
.ib
, qp_attr
,
832 if (qp_attr
->qp_state
== IB_QPS_RTR
)
833 qp_attr
->rq_psn
= id_priv
->seq_num
;
834 } else if (rdma_cap_iw_cm(id
->device
, id
->port_num
)) {
835 if (!id_priv
->cm_id
.iw
) {
836 qp_attr
->qp_access_flags
= 0;
837 *qp_attr_mask
= IB_QP_STATE
| IB_QP_ACCESS_FLAGS
;
839 ret
= iw_cm_init_qp_attr(id_priv
->cm_id
.iw
, qp_attr
,
846 EXPORT_SYMBOL(rdma_init_qp_attr
);
848 static inline int cma_zero_addr(struct sockaddr
*addr
)
850 switch (addr
->sa_family
) {
852 return ipv4_is_zeronet(((struct sockaddr_in
*)addr
)->sin_addr
.s_addr
);
854 return ipv6_addr_any(&((struct sockaddr_in6
*) addr
)->sin6_addr
);
856 return ib_addr_any(&((struct sockaddr_ib
*) addr
)->sib_addr
);
862 static inline int cma_loopback_addr(struct sockaddr
*addr
)
864 switch (addr
->sa_family
) {
866 return ipv4_is_loopback(((struct sockaddr_in
*) addr
)->sin_addr
.s_addr
);
868 return ipv6_addr_loopback(&((struct sockaddr_in6
*) addr
)->sin6_addr
);
870 return ib_addr_loopback(&((struct sockaddr_ib
*) addr
)->sib_addr
);
876 static inline int cma_any_addr(struct sockaddr
*addr
)
878 return cma_zero_addr(addr
) || cma_loopback_addr(addr
);
881 static int cma_addr_cmp(struct sockaddr
*src
, struct sockaddr
*dst
)
883 if (src
->sa_family
!= dst
->sa_family
)
886 switch (src
->sa_family
) {
888 return ((struct sockaddr_in
*) src
)->sin_addr
.s_addr
!=
889 ((struct sockaddr_in
*) dst
)->sin_addr
.s_addr
;
891 return ipv6_addr_cmp(&((struct sockaddr_in6
*) src
)->sin6_addr
,
892 &((struct sockaddr_in6
*) dst
)->sin6_addr
);
894 return ib_addr_cmp(&((struct sockaddr_ib
*) src
)->sib_addr
,
895 &((struct sockaddr_ib
*) dst
)->sib_addr
);
899 static __be16
cma_port(struct sockaddr
*addr
)
901 struct sockaddr_ib
*sib
;
903 switch (addr
->sa_family
) {
905 return ((struct sockaddr_in
*) addr
)->sin_port
;
907 return ((struct sockaddr_in6
*) addr
)->sin6_port
;
909 sib
= (struct sockaddr_ib
*) addr
;
910 return htons((u16
) (be64_to_cpu(sib
->sib_sid
) &
911 be64_to_cpu(sib
->sib_sid_mask
)));
917 static inline int cma_any_port(struct sockaddr
*addr
)
919 return !cma_port(addr
);
922 static void cma_save_ib_info(struct sockaddr
*src_addr
,
923 struct sockaddr
*dst_addr
,
924 struct rdma_cm_id
*listen_id
,
925 struct ib_sa_path_rec
*path
)
927 struct sockaddr_ib
*listen_ib
, *ib
;
929 listen_ib
= (struct sockaddr_ib
*) &listen_id
->route
.addr
.src_addr
;
931 ib
= (struct sockaddr_ib
*)src_addr
;
932 ib
->sib_family
= AF_IB
;
934 ib
->sib_pkey
= path
->pkey
;
935 ib
->sib_flowinfo
= path
->flow_label
;
936 memcpy(&ib
->sib_addr
, &path
->sgid
, 16);
937 ib
->sib_sid
= path
->service_id
;
938 ib
->sib_scope_id
= 0;
940 ib
->sib_pkey
= listen_ib
->sib_pkey
;
941 ib
->sib_flowinfo
= listen_ib
->sib_flowinfo
;
942 ib
->sib_addr
= listen_ib
->sib_addr
;
943 ib
->sib_sid
= listen_ib
->sib_sid
;
944 ib
->sib_scope_id
= listen_ib
->sib_scope_id
;
946 ib
->sib_sid_mask
= cpu_to_be64(0xffffffffffffffffULL
);
949 ib
= (struct sockaddr_ib
*)dst_addr
;
950 ib
->sib_family
= AF_IB
;
952 ib
->sib_pkey
= path
->pkey
;
953 ib
->sib_flowinfo
= path
->flow_label
;
954 memcpy(&ib
->sib_addr
, &path
->dgid
, 16);
959 static void cma_save_ip4_info(struct sockaddr
*src_addr
,
960 struct sockaddr
*dst_addr
,
964 struct sockaddr_in
*ip4
;
967 ip4
= (struct sockaddr_in
*)src_addr
;
968 ip4
->sin_family
= AF_INET
;
969 ip4
->sin_addr
.s_addr
= hdr
->dst_addr
.ip4
.addr
;
970 ip4
->sin_port
= local_port
;
974 ip4
= (struct sockaddr_in
*)dst_addr
;
975 ip4
->sin_family
= AF_INET
;
976 ip4
->sin_addr
.s_addr
= hdr
->src_addr
.ip4
.addr
;
977 ip4
->sin_port
= hdr
->port
;
981 static void cma_save_ip6_info(struct sockaddr
*src_addr
,
982 struct sockaddr
*dst_addr
,
986 struct sockaddr_in6
*ip6
;
989 ip6
= (struct sockaddr_in6
*)src_addr
;
990 ip6
->sin6_family
= AF_INET6
;
991 ip6
->sin6_addr
= hdr
->dst_addr
.ip6
;
992 ip6
->sin6_port
= local_port
;
996 ip6
= (struct sockaddr_in6
*)dst_addr
;
997 ip6
->sin6_family
= AF_INET6
;
998 ip6
->sin6_addr
= hdr
->src_addr
.ip6
;
999 ip6
->sin6_port
= hdr
->port
;
1003 static u16
cma_port_from_service_id(__be64 service_id
)
1005 return (u16
)be64_to_cpu(service_id
);
1008 static int cma_save_ip_info(struct sockaddr
*src_addr
,
1009 struct sockaddr
*dst_addr
,
1010 struct ib_cm_event
*ib_event
,
1013 struct cma_hdr
*hdr
;
1016 hdr
= ib_event
->private_data
;
1017 if (hdr
->cma_version
!= CMA_VERSION
)
1020 port
= htons(cma_port_from_service_id(service_id
));
1022 switch (cma_get_ip_ver(hdr
)) {
1024 cma_save_ip4_info(src_addr
, dst_addr
, hdr
, port
);
1027 cma_save_ip6_info(src_addr
, dst_addr
, hdr
, port
);
1030 return -EAFNOSUPPORT
;
1036 static int cma_save_net_info(struct sockaddr
*src_addr
,
1037 struct sockaddr
*dst_addr
,
1038 struct rdma_cm_id
*listen_id
,
1039 struct ib_cm_event
*ib_event
,
1040 sa_family_t sa_family
, __be64 service_id
)
1042 if (sa_family
== AF_IB
) {
1043 if (ib_event
->event
== IB_CM_REQ_RECEIVED
)
1044 cma_save_ib_info(src_addr
, dst_addr
, listen_id
,
1045 ib_event
->param
.req_rcvd
.primary_path
);
1046 else if (ib_event
->event
== IB_CM_SIDR_REQ_RECEIVED
)
1047 cma_save_ib_info(src_addr
, dst_addr
, listen_id
, NULL
);
1051 return cma_save_ip_info(src_addr
, dst_addr
, ib_event
, service_id
);
1054 static int cma_save_req_info(const struct ib_cm_event
*ib_event
,
1055 struct cma_req_info
*req
)
1057 const struct ib_cm_req_event_param
*req_param
=
1058 &ib_event
->param
.req_rcvd
;
1059 const struct ib_cm_sidr_req_event_param
*sidr_param
=
1060 &ib_event
->param
.sidr_req_rcvd
;
1062 switch (ib_event
->event
) {
1063 case IB_CM_REQ_RECEIVED
:
1064 req
->device
= req_param
->listen_id
->device
;
1065 req
->port
= req_param
->port
;
1066 memcpy(&req
->local_gid
, &req_param
->primary_path
->sgid
,
1067 sizeof(req
->local_gid
));
1068 req
->has_gid
= true;
1069 req
->service_id
= req_param
->primary_path
->service_id
;
1070 req
->pkey
= req_param
->bth_pkey
;
1072 case IB_CM_SIDR_REQ_RECEIVED
:
1073 req
->device
= sidr_param
->listen_id
->device
;
1074 req
->port
= sidr_param
->port
;
1075 req
->has_gid
= false;
1076 req
->service_id
= sidr_param
->service_id
;
1077 req
->pkey
= sidr_param
->bth_pkey
;
1086 static bool validate_ipv4_net_dev(struct net_device
*net_dev
,
1087 const struct sockaddr_in
*dst_addr
,
1088 const struct sockaddr_in
*src_addr
)
1090 __be32 daddr
= dst_addr
->sin_addr
.s_addr
,
1091 saddr
= src_addr
->sin_addr
.s_addr
;
1092 struct fib_result res
;
1097 if (ipv4_is_multicast(saddr
) || ipv4_is_lbcast(saddr
) ||
1098 ipv4_is_lbcast(daddr
) || ipv4_is_zeronet(saddr
) ||
1099 ipv4_is_zeronet(daddr
) || ipv4_is_loopback(daddr
) ||
1100 ipv4_is_loopback(saddr
))
1103 memset(&fl4
, 0, sizeof(fl4
));
1104 fl4
.flowi4_iif
= net_dev
->ifindex
;
1109 err
= fib_lookup(dev_net(net_dev
), &fl4
, &res
, 0);
1113 ret
= FIB_RES_DEV(res
) == net_dev
;
1119 static bool validate_ipv6_net_dev(struct net_device
*net_dev
,
1120 const struct sockaddr_in6
*dst_addr
,
1121 const struct sockaddr_in6
*src_addr
)
1123 #if IS_ENABLED(CONFIG_IPV6)
1124 const int strict
= ipv6_addr_type(&dst_addr
->sin6_addr
) &
1125 IPV6_ADDR_LINKLOCAL
;
1126 struct rt6_info
*rt
= rt6_lookup(dev_net(net_dev
), &dst_addr
->sin6_addr
,
1127 &src_addr
->sin6_addr
, net_dev
->ifindex
,
1134 ret
= rt
->rt6i_idev
->dev
== net_dev
;
1143 static bool validate_net_dev(struct net_device
*net_dev
,
1144 const struct sockaddr
*daddr
,
1145 const struct sockaddr
*saddr
)
1147 const struct sockaddr_in
*daddr4
= (const struct sockaddr_in
*)daddr
;
1148 const struct sockaddr_in
*saddr4
= (const struct sockaddr_in
*)saddr
;
1149 const struct sockaddr_in6
*daddr6
= (const struct sockaddr_in6
*)daddr
;
1150 const struct sockaddr_in6
*saddr6
= (const struct sockaddr_in6
*)saddr
;
1152 switch (daddr
->sa_family
) {
1154 return saddr
->sa_family
== AF_INET
&&
1155 validate_ipv4_net_dev(net_dev
, daddr4
, saddr4
);
1158 return saddr
->sa_family
== AF_INET6
&&
1159 validate_ipv6_net_dev(net_dev
, daddr6
, saddr6
);
1166 static struct net_device
*cma_get_net_dev(struct ib_cm_event
*ib_event
,
1167 const struct cma_req_info
*req
)
1169 struct sockaddr_storage listen_addr_storage
, src_addr_storage
;
1170 struct sockaddr
*listen_addr
= (struct sockaddr
*)&listen_addr_storage
,
1171 *src_addr
= (struct sockaddr
*)&src_addr_storage
;
1172 struct net_device
*net_dev
;
1173 const union ib_gid
*gid
= req
->has_gid
? &req
->local_gid
: NULL
;
1176 err
= cma_save_ip_info(listen_addr
, src_addr
, ib_event
,
1179 return ERR_PTR(err
);
1181 net_dev
= ib_get_net_dev_by_params(req
->device
, req
->port
, req
->pkey
,
1184 return ERR_PTR(-ENODEV
);
1186 if (!validate_net_dev(net_dev
, listen_addr
, src_addr
)) {
1188 return ERR_PTR(-EHOSTUNREACH
);
1194 static enum rdma_port_space
rdma_ps_from_service_id(__be64 service_id
)
1196 return (be64_to_cpu(service_id
) >> 16) & 0xffff;
1199 static bool cma_match_private_data(struct rdma_id_private
*id_priv
,
1200 const struct cma_hdr
*hdr
)
1202 struct sockaddr
*addr
= cma_src_addr(id_priv
);
1204 struct in6_addr ip6_addr
;
1206 if (cma_any_addr(addr
) && !id_priv
->afonly
)
1209 switch (addr
->sa_family
) {
1211 ip4_addr
= ((struct sockaddr_in
*)addr
)->sin_addr
.s_addr
;
1212 if (cma_get_ip_ver(hdr
) != 4)
1214 if (!cma_any_addr(addr
) &&
1215 hdr
->dst_addr
.ip4
.addr
!= ip4_addr
)
1219 ip6_addr
= ((struct sockaddr_in6
*)addr
)->sin6_addr
;
1220 if (cma_get_ip_ver(hdr
) != 6)
1222 if (!cma_any_addr(addr
) &&
1223 memcmp(&hdr
->dst_addr
.ip6
, &ip6_addr
, sizeof(ip6_addr
)))
1235 static bool cma_match_net_dev(const struct rdma_id_private
*id_priv
,
1236 const struct net_device
*net_dev
)
1238 const struct rdma_addr
*addr
= &id_priv
->id
.route
.addr
;
1241 /* This request is an AF_IB request */
1242 return addr
->src_addr
.ss_family
== AF_IB
;
1244 return !addr
->dev_addr
.bound_dev_if
||
1245 (net_eq(dev_net(net_dev
), &init_net
) &&
1246 addr
->dev_addr
.bound_dev_if
== net_dev
->ifindex
);
1249 static struct rdma_id_private
*cma_find_listener(
1250 const struct rdma_bind_list
*bind_list
,
1251 const struct ib_cm_id
*cm_id
,
1252 const struct ib_cm_event
*ib_event
,
1253 const struct cma_req_info
*req
,
1254 const struct net_device
*net_dev
)
1256 struct rdma_id_private
*id_priv
, *id_priv_dev
;
1259 return ERR_PTR(-EINVAL
);
1261 hlist_for_each_entry(id_priv
, &bind_list
->owners
, node
) {
1262 if (cma_match_private_data(id_priv
, ib_event
->private_data
)) {
1263 if (id_priv
->id
.device
== cm_id
->device
&&
1264 cma_match_net_dev(id_priv
, net_dev
))
1266 list_for_each_entry(id_priv_dev
,
1267 &id_priv
->listen_list
,
1269 if (id_priv_dev
->id
.device
== cm_id
->device
&&
1270 cma_match_net_dev(id_priv_dev
, net_dev
))
1276 return ERR_PTR(-EINVAL
);
1279 static struct rdma_id_private
*cma_id_from_event(struct ib_cm_id
*cm_id
,
1280 struct ib_cm_event
*ib_event
,
1281 struct net_device
**net_dev
)
1283 struct cma_req_info req
;
1284 struct rdma_bind_list
*bind_list
;
1285 struct rdma_id_private
*id_priv
;
1288 err
= cma_save_req_info(ib_event
, &req
);
1290 return ERR_PTR(err
);
1292 *net_dev
= cma_get_net_dev(ib_event
, &req
);
1293 if (IS_ERR(*net_dev
)) {
1294 if (PTR_ERR(*net_dev
) == -EAFNOSUPPORT
) {
1295 /* Assuming the protocol is AF_IB */
1298 return ERR_CAST(*net_dev
);
1302 bind_list
= cma_ps_find(rdma_ps_from_service_id(req
.service_id
),
1303 cma_port_from_service_id(req
.service_id
));
1304 id_priv
= cma_find_listener(bind_list
, cm_id
, ib_event
, &req
, *net_dev
);
1305 if (IS_ERR(id_priv
)) {
1313 static inline int cma_user_data_offset(struct rdma_id_private
*id_priv
)
1315 return cma_family(id_priv
) == AF_IB
? 0 : sizeof(struct cma_hdr
);
1318 static void cma_cancel_route(struct rdma_id_private
*id_priv
)
1320 if (rdma_cap_ib_sa(id_priv
->id
.device
, id_priv
->id
.port_num
)) {
1322 ib_sa_cancel_query(id_priv
->query_id
, id_priv
->query
);
1326 static void cma_cancel_listens(struct rdma_id_private
*id_priv
)
1328 struct rdma_id_private
*dev_id_priv
;
1331 * Remove from listen_any_list to prevent added devices from spawning
1332 * additional listen requests.
1335 list_del(&id_priv
->list
);
1337 while (!list_empty(&id_priv
->listen_list
)) {
1338 dev_id_priv
= list_entry(id_priv
->listen_list
.next
,
1339 struct rdma_id_private
, listen_list
);
1340 /* sync with device removal to avoid duplicate destruction */
1341 list_del_init(&dev_id_priv
->list
);
1342 list_del(&dev_id_priv
->listen_list
);
1343 mutex_unlock(&lock
);
1345 rdma_destroy_id(&dev_id_priv
->id
);
1348 mutex_unlock(&lock
);
1351 static void cma_cancel_operation(struct rdma_id_private
*id_priv
,
1352 enum rdma_cm_state state
)
1355 case RDMA_CM_ADDR_QUERY
:
1356 rdma_addr_cancel(&id_priv
->id
.route
.addr
.dev_addr
);
1358 case RDMA_CM_ROUTE_QUERY
:
1359 cma_cancel_route(id_priv
);
1361 case RDMA_CM_LISTEN
:
1362 if (cma_any_addr(cma_src_addr(id_priv
)) && !id_priv
->cma_dev
)
1363 cma_cancel_listens(id_priv
);
1370 static void cma_release_port(struct rdma_id_private
*id_priv
)
1372 struct rdma_bind_list
*bind_list
= id_priv
->bind_list
;
1378 hlist_del(&id_priv
->node
);
1379 if (hlist_empty(&bind_list
->owners
)) {
1380 cma_ps_remove(bind_list
->ps
, bind_list
->port
);
1383 mutex_unlock(&lock
);
1386 static void cma_leave_mc_groups(struct rdma_id_private
*id_priv
)
1388 struct cma_multicast
*mc
;
1390 while (!list_empty(&id_priv
->mc_list
)) {
1391 mc
= container_of(id_priv
->mc_list
.next
,
1392 struct cma_multicast
, list
);
1393 list_del(&mc
->list
);
1394 if (rdma_cap_ib_mcast(id_priv
->cma_dev
->device
,
1395 id_priv
->id
.port_num
)) {
1396 ib_sa_free_multicast(mc
->multicast
.ib
);
1399 kref_put(&mc
->mcref
, release_mc
);
1403 void rdma_destroy_id(struct rdma_cm_id
*id
)
1405 struct rdma_id_private
*id_priv
;
1406 enum rdma_cm_state state
;
1408 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1409 state
= cma_exch(id_priv
, RDMA_CM_DESTROYING
);
1410 cma_cancel_operation(id_priv
, state
);
1413 * Wait for any active callback to finish. New callbacks will find
1414 * the id_priv state set to destroying and abort.
1416 mutex_lock(&id_priv
->handler_mutex
);
1417 mutex_unlock(&id_priv
->handler_mutex
);
1419 if (id_priv
->cma_dev
) {
1420 if (rdma_cap_ib_cm(id_priv
->id
.device
, 1)) {
1421 if (id_priv
->cm_id
.ib
)
1422 ib_destroy_cm_id(id_priv
->cm_id
.ib
);
1423 } else if (rdma_cap_iw_cm(id_priv
->id
.device
, 1)) {
1424 if (id_priv
->cm_id
.iw
)
1425 iw_destroy_cm_id(id_priv
->cm_id
.iw
);
1427 cma_leave_mc_groups(id_priv
);
1428 cma_release_dev(id_priv
);
1431 cma_release_port(id_priv
);
1432 cma_deref_id(id_priv
);
1433 wait_for_completion(&id_priv
->comp
);
1435 if (id_priv
->internal_id
)
1436 cma_deref_id(id_priv
->id
.context
);
1438 kfree(id_priv
->id
.route
.path_rec
);
1441 EXPORT_SYMBOL(rdma_destroy_id
);
1443 static int cma_rep_recv(struct rdma_id_private
*id_priv
)
1447 ret
= cma_modify_qp_rtr(id_priv
, NULL
);
1451 ret
= cma_modify_qp_rts(id_priv
, NULL
);
1455 ret
= ib_send_cm_rtu(id_priv
->cm_id
.ib
, NULL
, 0);
1461 cma_modify_qp_err(id_priv
);
1462 ib_send_cm_rej(id_priv
->cm_id
.ib
, IB_CM_REJ_CONSUMER_DEFINED
,
1467 static void cma_set_rep_event_data(struct rdma_cm_event
*event
,
1468 struct ib_cm_rep_event_param
*rep_data
,
1471 event
->param
.conn
.private_data
= private_data
;
1472 event
->param
.conn
.private_data_len
= IB_CM_REP_PRIVATE_DATA_SIZE
;
1473 event
->param
.conn
.responder_resources
= rep_data
->responder_resources
;
1474 event
->param
.conn
.initiator_depth
= rep_data
->initiator_depth
;
1475 event
->param
.conn
.flow_control
= rep_data
->flow_control
;
1476 event
->param
.conn
.rnr_retry_count
= rep_data
->rnr_retry_count
;
1477 event
->param
.conn
.srq
= rep_data
->srq
;
1478 event
->param
.conn
.qp_num
= rep_data
->remote_qpn
;
1481 static int cma_ib_handler(struct ib_cm_id
*cm_id
, struct ib_cm_event
*ib_event
)
1483 struct rdma_id_private
*id_priv
= cm_id
->context
;
1484 struct rdma_cm_event event
;
1487 if ((ib_event
->event
!= IB_CM_TIMEWAIT_EXIT
&&
1488 cma_disable_callback(id_priv
, RDMA_CM_CONNECT
)) ||
1489 (ib_event
->event
== IB_CM_TIMEWAIT_EXIT
&&
1490 cma_disable_callback(id_priv
, RDMA_CM_DISCONNECT
)))
1493 memset(&event
, 0, sizeof event
);
1494 switch (ib_event
->event
) {
1495 case IB_CM_REQ_ERROR
:
1496 case IB_CM_REP_ERROR
:
1497 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
1498 event
.status
= -ETIMEDOUT
;
1500 case IB_CM_REP_RECEIVED
:
1501 if (id_priv
->id
.qp
) {
1502 event
.status
= cma_rep_recv(id_priv
);
1503 event
.event
= event
.status
? RDMA_CM_EVENT_CONNECT_ERROR
:
1504 RDMA_CM_EVENT_ESTABLISHED
;
1506 event
.event
= RDMA_CM_EVENT_CONNECT_RESPONSE
;
1508 cma_set_rep_event_data(&event
, &ib_event
->param
.rep_rcvd
,
1509 ib_event
->private_data
);
1511 case IB_CM_RTU_RECEIVED
:
1512 case IB_CM_USER_ESTABLISHED
:
1513 event
.event
= RDMA_CM_EVENT_ESTABLISHED
;
1515 case IB_CM_DREQ_ERROR
:
1516 event
.status
= -ETIMEDOUT
; /* fall through */
1517 case IB_CM_DREQ_RECEIVED
:
1518 case IB_CM_DREP_RECEIVED
:
1519 if (!cma_comp_exch(id_priv
, RDMA_CM_CONNECT
,
1520 RDMA_CM_DISCONNECT
))
1522 event
.event
= RDMA_CM_EVENT_DISCONNECTED
;
1524 case IB_CM_TIMEWAIT_EXIT
:
1525 event
.event
= RDMA_CM_EVENT_TIMEWAIT_EXIT
;
1527 case IB_CM_MRA_RECEIVED
:
1530 case IB_CM_REJ_RECEIVED
:
1531 cma_modify_qp_err(id_priv
);
1532 event
.status
= ib_event
->param
.rej_rcvd
.reason
;
1533 event
.event
= RDMA_CM_EVENT_REJECTED
;
1534 event
.param
.conn
.private_data
= ib_event
->private_data
;
1535 event
.param
.conn
.private_data_len
= IB_CM_REJ_PRIVATE_DATA_SIZE
;
1538 printk(KERN_ERR
"RDMA CMA: unexpected IB CM event: %d\n",
1543 ret
= id_priv
->id
.event_handler(&id_priv
->id
, &event
);
1545 /* Destroy the CM ID by returning a non-zero value. */
1546 id_priv
->cm_id
.ib
= NULL
;
1547 cma_exch(id_priv
, RDMA_CM_DESTROYING
);
1548 mutex_unlock(&id_priv
->handler_mutex
);
1549 rdma_destroy_id(&id_priv
->id
);
1553 mutex_unlock(&id_priv
->handler_mutex
);
1557 static struct rdma_id_private
*cma_new_conn_id(struct rdma_cm_id
*listen_id
,
1558 struct ib_cm_event
*ib_event
,
1559 struct net_device
*net_dev
)
1561 struct rdma_id_private
*id_priv
;
1562 struct rdma_cm_id
*id
;
1563 struct rdma_route
*rt
;
1564 const sa_family_t ss_family
= listen_id
->route
.addr
.src_addr
.ss_family
;
1565 const __be64 service_id
=
1566 ib_event
->param
.req_rcvd
.primary_path
->service_id
;
1569 id
= rdma_create_id(listen_id
->event_handler
, listen_id
->context
,
1570 listen_id
->ps
, ib_event
->param
.req_rcvd
.qp_type
);
1574 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1575 if (cma_save_net_info((struct sockaddr
*)&id
->route
.addr
.src_addr
,
1576 (struct sockaddr
*)&id
->route
.addr
.dst_addr
,
1577 listen_id
, ib_event
, ss_family
, service_id
))
1581 rt
->num_paths
= ib_event
->param
.req_rcvd
.alternate_path
? 2 : 1;
1582 rt
->path_rec
= kmalloc(sizeof *rt
->path_rec
* rt
->num_paths
,
1587 rt
->path_rec
[0] = *ib_event
->param
.req_rcvd
.primary_path
;
1588 if (rt
->num_paths
== 2)
1589 rt
->path_rec
[1] = *ib_event
->param
.req_rcvd
.alternate_path
;
1592 ret
= rdma_copy_addr(&rt
->addr
.dev_addr
, net_dev
, NULL
);
1596 /* An AF_IB connection */
1597 WARN_ON_ONCE(ss_family
!= AF_IB
);
1599 cma_translate_ib((struct sockaddr_ib
*)cma_src_addr(id_priv
),
1600 &rt
->addr
.dev_addr
);
1602 rdma_addr_set_dgid(&rt
->addr
.dev_addr
, &rt
->path_rec
[0].dgid
);
1604 id_priv
->state
= RDMA_CM_CONNECT
;
1608 rdma_destroy_id(id
);
1612 static struct rdma_id_private
*cma_new_udp_id(struct rdma_cm_id
*listen_id
,
1613 struct ib_cm_event
*ib_event
,
1614 struct net_device
*net_dev
)
1616 struct rdma_id_private
*id_priv
;
1617 struct rdma_cm_id
*id
;
1618 const sa_family_t ss_family
= listen_id
->route
.addr
.src_addr
.ss_family
;
1621 id
= rdma_create_id(listen_id
->event_handler
, listen_id
->context
,
1622 listen_id
->ps
, IB_QPT_UD
);
1626 id_priv
= container_of(id
, struct rdma_id_private
, id
);
1627 if (cma_save_net_info((struct sockaddr
*)&id
->route
.addr
.src_addr
,
1628 (struct sockaddr
*)&id
->route
.addr
.dst_addr
,
1629 listen_id
, ib_event
, ss_family
,
1630 ib_event
->param
.sidr_req_rcvd
.service_id
))
1634 ret
= rdma_copy_addr(&id
->route
.addr
.dev_addr
, net_dev
, NULL
);
1638 /* An AF_IB connection */
1639 WARN_ON_ONCE(ss_family
!= AF_IB
);
1641 if (!cma_any_addr(cma_src_addr(id_priv
)))
1642 cma_translate_ib((struct sockaddr_ib
*)
1643 cma_src_addr(id_priv
),
1644 &id
->route
.addr
.dev_addr
);
1647 id_priv
->state
= RDMA_CM_CONNECT
;
1650 rdma_destroy_id(id
);
1654 static void cma_set_req_event_data(struct rdma_cm_event
*event
,
1655 struct ib_cm_req_event_param
*req_data
,
1656 void *private_data
, int offset
)
1658 event
->param
.conn
.private_data
= private_data
+ offset
;
1659 event
->param
.conn
.private_data_len
= IB_CM_REQ_PRIVATE_DATA_SIZE
- offset
;
1660 event
->param
.conn
.responder_resources
= req_data
->responder_resources
;
1661 event
->param
.conn
.initiator_depth
= req_data
->initiator_depth
;
1662 event
->param
.conn
.flow_control
= req_data
->flow_control
;
1663 event
->param
.conn
.retry_count
= req_data
->retry_count
;
1664 event
->param
.conn
.rnr_retry_count
= req_data
->rnr_retry_count
;
1665 event
->param
.conn
.srq
= req_data
->srq
;
1666 event
->param
.conn
.qp_num
= req_data
->remote_qpn
;
1669 static int cma_check_req_qp_type(struct rdma_cm_id
*id
, struct ib_cm_event
*ib_event
)
1671 return (((ib_event
->event
== IB_CM_REQ_RECEIVED
) &&
1672 (ib_event
->param
.req_rcvd
.qp_type
== id
->qp_type
)) ||
1673 ((ib_event
->event
== IB_CM_SIDR_REQ_RECEIVED
) &&
1674 (id
->qp_type
== IB_QPT_UD
)) ||
1678 static int cma_req_handler(struct ib_cm_id
*cm_id
, struct ib_cm_event
*ib_event
)
1680 struct rdma_id_private
*listen_id
, *conn_id
;
1681 struct rdma_cm_event event
;
1682 struct net_device
*net_dev
;
1685 listen_id
= cma_id_from_event(cm_id
, ib_event
, &net_dev
);
1686 if (IS_ERR(listen_id
))
1687 return PTR_ERR(listen_id
);
1689 if (!cma_check_req_qp_type(&listen_id
->id
, ib_event
)) {
1694 if (cma_disable_callback(listen_id
, RDMA_CM_LISTEN
)) {
1695 ret
= -ECONNABORTED
;
1699 memset(&event
, 0, sizeof event
);
1700 offset
= cma_user_data_offset(listen_id
);
1701 event
.event
= RDMA_CM_EVENT_CONNECT_REQUEST
;
1702 if (ib_event
->event
== IB_CM_SIDR_REQ_RECEIVED
) {
1703 conn_id
= cma_new_udp_id(&listen_id
->id
, ib_event
, net_dev
);
1704 event
.param
.ud
.private_data
= ib_event
->private_data
+ offset
;
1705 event
.param
.ud
.private_data_len
=
1706 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE
- offset
;
1708 conn_id
= cma_new_conn_id(&listen_id
->id
, ib_event
, net_dev
);
1709 cma_set_req_event_data(&event
, &ib_event
->param
.req_rcvd
,
1710 ib_event
->private_data
, offset
);
1717 mutex_lock_nested(&conn_id
->handler_mutex
, SINGLE_DEPTH_NESTING
);
1718 ret
= cma_acquire_dev(conn_id
, listen_id
);
1722 conn_id
->cm_id
.ib
= cm_id
;
1723 cm_id
->context
= conn_id
;
1724 cm_id
->cm_handler
= cma_ib_handler
;
1727 * Protect against the user destroying conn_id from another thread
1728 * until we're done accessing it.
1730 atomic_inc(&conn_id
->refcount
);
1731 ret
= conn_id
->id
.event_handler(&conn_id
->id
, &event
);
1735 * Acquire mutex to prevent user executing rdma_destroy_id()
1736 * while we're accessing the cm_id.
1739 if (cma_comp(conn_id
, RDMA_CM_CONNECT
) &&
1740 (conn_id
->id
.qp_type
!= IB_QPT_UD
))
1741 ib_send_cm_mra(cm_id
, CMA_CM_MRA_SETTING
, NULL
, 0);
1742 mutex_unlock(&lock
);
1743 mutex_unlock(&conn_id
->handler_mutex
);
1744 mutex_unlock(&listen_id
->handler_mutex
);
1745 cma_deref_id(conn_id
);
1751 cma_deref_id(conn_id
);
1752 /* Destroy the CM ID by returning a non-zero value. */
1753 conn_id
->cm_id
.ib
= NULL
;
1755 cma_exch(conn_id
, RDMA_CM_DESTROYING
);
1756 mutex_unlock(&conn_id
->handler_mutex
);
1758 mutex_unlock(&listen_id
->handler_mutex
);
1760 rdma_destroy_id(&conn_id
->id
);
1769 __be64
rdma_get_service_id(struct rdma_cm_id
*id
, struct sockaddr
*addr
)
1771 if (addr
->sa_family
== AF_IB
)
1772 return ((struct sockaddr_ib
*) addr
)->sib_sid
;
1774 return cpu_to_be64(((u64
)id
->ps
<< 16) + be16_to_cpu(cma_port(addr
)));
1776 EXPORT_SYMBOL(rdma_get_service_id
);
1778 static int cma_iw_handler(struct iw_cm_id
*iw_id
, struct iw_cm_event
*iw_event
)
1780 struct rdma_id_private
*id_priv
= iw_id
->context
;
1781 struct rdma_cm_event event
;
1783 struct sockaddr
*laddr
= (struct sockaddr
*)&iw_event
->local_addr
;
1784 struct sockaddr
*raddr
= (struct sockaddr
*)&iw_event
->remote_addr
;
1786 if (cma_disable_callback(id_priv
, RDMA_CM_CONNECT
))
1789 memset(&event
, 0, sizeof event
);
1790 switch (iw_event
->event
) {
1791 case IW_CM_EVENT_CLOSE
:
1792 event
.event
= RDMA_CM_EVENT_DISCONNECTED
;
1794 case IW_CM_EVENT_CONNECT_REPLY
:
1795 memcpy(cma_src_addr(id_priv
), laddr
,
1796 rdma_addr_size(laddr
));
1797 memcpy(cma_dst_addr(id_priv
), raddr
,
1798 rdma_addr_size(raddr
));
1799 switch (iw_event
->status
) {
1801 event
.event
= RDMA_CM_EVENT_ESTABLISHED
;
1802 event
.param
.conn
.initiator_depth
= iw_event
->ird
;
1803 event
.param
.conn
.responder_resources
= iw_event
->ord
;
1807 event
.event
= RDMA_CM_EVENT_REJECTED
;
1810 event
.event
= RDMA_CM_EVENT_UNREACHABLE
;
1813 event
.event
= RDMA_CM_EVENT_CONNECT_ERROR
;
1817 case IW_CM_EVENT_ESTABLISHED
:
1818 event
.event
= RDMA_CM_EVENT_ESTABLISHED
;
1819 event
.param
.conn
.initiator_depth
= iw_event
->ird
;
1820 event
.param
.conn
.responder_resources
= iw_event
->ord
;
1826 event
.status
= iw_event
->status
;
1827 event
.param
.conn
.private_data
= iw_event
->private_data
;
1828 event
.param
.conn
.private_data_len
= iw_event
->private_data_len
;
1829 ret
= id_priv
->id
.event_handler(&id_priv
->id
, &event
);
1831 /* Destroy the CM ID by returning a non-zero value. */
1832 id_priv
->cm_id
.iw
= NULL
;
1833 cma_exch(id_priv
, RDMA_CM_DESTROYING
);
1834 mutex_unlock(&id_priv
->handler_mutex
);
1835 rdma_destroy_id(&id_priv
->id
);
1839 mutex_unlock(&id_priv
->handler_mutex
);
1843 static int iw_conn_req_handler(struct iw_cm_id
*cm_id
,
1844 struct iw_cm_event
*iw_event
)
1846 struct rdma_cm_id
*new_cm_id
;
1847 struct rdma_id_private
*listen_id
, *conn_id
;
1848 struct rdma_cm_event event
;
1850 struct ib_device_attr attr
;
1851 struct sockaddr
*laddr
= (struct sockaddr
*)&iw_event
->local_addr
;
1852 struct sockaddr
*raddr
= (struct sockaddr
*)&iw_event
->remote_addr
;
1854 listen_id
= cm_id
->context
;
1855 if (cma_disable_callback(listen_id
, RDMA_CM_LISTEN
))
1856 return -ECONNABORTED
;
1858 /* Create a new RDMA id for the new IW CM ID */
1859 new_cm_id
= rdma_create_id(listen_id
->id
.event_handler
,
1860 listen_id
->id
.context
,
1861 RDMA_PS_TCP
, IB_QPT_RC
);
1862 if (IS_ERR(new_cm_id
)) {
1866 conn_id
= container_of(new_cm_id
, struct rdma_id_private
, id
);
1867 mutex_lock_nested(&conn_id
->handler_mutex
, SINGLE_DEPTH_NESTING
);
1868 conn_id
->state
= RDMA_CM_CONNECT
;
1870 ret
= rdma_translate_ip(laddr
, &conn_id
->id
.route
.addr
.dev_addr
, NULL
);
1872 mutex_unlock(&conn_id
->handler_mutex
);
1873 rdma_destroy_id(new_cm_id
);
1877 ret
= cma_acquire_dev(conn_id
, listen_id
);
1879 mutex_unlock(&conn_id
->handler_mutex
);
1880 rdma_destroy_id(new_cm_id
);
1884 conn_id
->cm_id
.iw
= cm_id
;
1885 cm_id
->context
= conn_id
;
1886 cm_id
->cm_handler
= cma_iw_handler
;
1888 memcpy(cma_src_addr(conn_id
), laddr
, rdma_addr_size(laddr
));
1889 memcpy(cma_dst_addr(conn_id
), raddr
, rdma_addr_size(raddr
));
1891 ret
= ib_query_device(conn_id
->id
.device
, &attr
);
1893 mutex_unlock(&conn_id
->handler_mutex
);
1894 rdma_destroy_id(new_cm_id
);
1898 memset(&event
, 0, sizeof event
);
1899 event
.event
= RDMA_CM_EVENT_CONNECT_REQUEST
;
1900 event
.param
.conn
.private_data
= iw_event
->private_data
;
1901 event
.param
.conn
.private_data_len
= iw_event
->private_data_len
;
1902 event
.param
.conn
.initiator_depth
= iw_event
->ird
;
1903 event
.param
.conn
.responder_resources
= iw_event
->ord
;
1906 * Protect against the user destroying conn_id from another thread
1907 * until we're done accessing it.
1909 atomic_inc(&conn_id
->refcount
);
1910 ret
= conn_id
->id
.event_handler(&conn_id
->id
, &event
);
1912 /* User wants to destroy the CM ID */
1913 conn_id
->cm_id
.iw
= NULL
;
1914 cma_exch(conn_id
, RDMA_CM_DESTROYING
);
1915 mutex_unlock(&conn_id
->handler_mutex
);
1916 cma_deref_id(conn_id
);
1917 rdma_destroy_id(&conn_id
->id
);
1921 mutex_unlock(&conn_id
->handler_mutex
);
1922 cma_deref_id(conn_id
);
1925 mutex_unlock(&listen_id
->handler_mutex
);
1929 static int cma_ib_listen(struct rdma_id_private
*id_priv
)
1931 struct sockaddr
*addr
;
1932 struct ib_cm_id
*id
;
1935 addr
= cma_src_addr(id_priv
);
1936 svc_id
= rdma_get_service_id(&id_priv
->id
, addr
);
1937 id
= ib_cm_insert_listen(id_priv
->id
.device
, cma_req_handler
, svc_id
);
1940 id_priv
->cm_id
.ib
= id
;
1945 static int cma_iw_listen(struct rdma_id_private
*id_priv
, int backlog
)
1948 struct iw_cm_id
*id
;
1950 id
= iw_create_cm_id(id_priv
->id
.device
,
1951 iw_conn_req_handler
,
1956 id
->tos
= id_priv
->tos
;
1957 id_priv
->cm_id
.iw
= id
;
1959 memcpy(&id_priv
->cm_id
.iw
->local_addr
, cma_src_addr(id_priv
),
1960 rdma_addr_size(cma_src_addr(id_priv
)));
1962 ret
= iw_cm_listen(id_priv
->cm_id
.iw
, backlog
);
1965 iw_destroy_cm_id(id_priv
->cm_id
.iw
);
1966 id_priv
->cm_id
.iw
= NULL
;
1972 static int cma_listen_handler(struct rdma_cm_id
*id
,
1973 struct rdma_cm_event
*event
)
1975 struct rdma_id_private
*id_priv
= id
->context
;
1977 id
->context
= id_priv
->id
.context
;
1978 id
->event_handler
= id_priv
->id
.event_handler
;
1979 return id_priv
->id
.event_handler(id
, event
);
1982 static void cma_listen_on_dev(struct rdma_id_private
*id_priv
,
1983 struct cma_device
*cma_dev
)
1985 struct rdma_id_private
*dev_id_priv
;
1986 struct rdma_cm_id
*id
;
1989 if (cma_family(id_priv
) == AF_IB
&& !rdma_cap_ib_cm(cma_dev
->device
, 1))
1992 id
= rdma_create_id(cma_listen_handler
, id_priv
, id_priv
->id
.ps
,
1993 id_priv
->id
.qp_type
);
1997 dev_id_priv
= container_of(id
, struct rdma_id_private
, id
);
1999 dev_id_priv
->state
= RDMA_CM_ADDR_BOUND
;
2000 memcpy(cma_src_addr(dev_id_priv
), cma_src_addr(id_priv
),
2001 rdma_addr_size(cma_src_addr(id_priv
)));
2003 cma_attach_to_dev(dev_id_priv
, cma_dev
);
2004 list_add_tail(&dev_id_priv
->listen_list
, &id_priv
->listen_list
);
2005 atomic_inc(&id_priv
->refcount
);
2006 dev_id_priv
->internal_id
= 1;
2007 dev_id_priv
->afonly
= id_priv
->afonly
;
2009 ret
= rdma_listen(id
, id_priv
->backlog
);
2011 printk(KERN_WARNING
"RDMA CMA: cma_listen_on_dev, error %d, "
2012 "listening on device %s\n", ret
, cma_dev
->device
->name
);
2015 static void cma_listen_on_all(struct rdma_id_private
*id_priv
)
2017 struct cma_device
*cma_dev
;
2020 list_add_tail(&id_priv
->list
, &listen_any_list
);
2021 list_for_each_entry(cma_dev
, &dev_list
, list
)
2022 cma_listen_on_dev(id_priv
, cma_dev
);
2023 mutex_unlock(&lock
);
2026 void rdma_set_service_type(struct rdma_cm_id
*id
, int tos
)
2028 struct rdma_id_private
*id_priv
;
2030 id_priv
= container_of(id
, struct rdma_id_private
, id
);
2031 id_priv
->tos
= (u8
) tos
;
2033 EXPORT_SYMBOL(rdma_set_service_type
);
2035 static void cma_query_handler(int status
, struct ib_sa_path_rec
*path_rec
,
2038 struct cma_work
*work
= context
;
2039 struct rdma_route
*route
;
2041 route
= &work
->id
->id
.route
;
2044 route
->num_paths
= 1;
2045 *route
->path_rec
= *path_rec
;
2047 work
->old_state
= RDMA_CM_ROUTE_QUERY
;
2048 work
->new_state
= RDMA_CM_ADDR_RESOLVED
;
2049 work
->event
.event
= RDMA_CM_EVENT_ROUTE_ERROR
;
2050 work
->event
.status
= status
;
2053 queue_work(cma_wq
, &work
->work
);
2056 static int cma_query_ib_route(struct rdma_id_private
*id_priv
, int timeout_ms
,
2057 struct cma_work
*work
)
2059 struct rdma_dev_addr
*dev_addr
= &id_priv
->id
.route
.addr
.dev_addr
;
2060 struct ib_sa_path_rec path_rec
;
2061 ib_sa_comp_mask comp_mask
;
2062 struct sockaddr_in6
*sin6
;
2063 struct sockaddr_ib
*sib
;
2065 memset(&path_rec
, 0, sizeof path_rec
);
2066 rdma_addr_get_sgid(dev_addr
, &path_rec
.sgid
);
2067 rdma_addr_get_dgid(dev_addr
, &path_rec
.dgid
);
2068 path_rec
.pkey
= cpu_to_be16(ib_addr_get_pkey(dev_addr
));
2069 path_rec
.numb_path
= 1;
2070 path_rec
.reversible
= 1;
2071 path_rec
.service_id
= rdma_get_service_id(&id_priv
->id
, cma_dst_addr(id_priv
));
2073 comp_mask
= IB_SA_PATH_REC_DGID
| IB_SA_PATH_REC_SGID
|
2074 IB_SA_PATH_REC_PKEY
| IB_SA_PATH_REC_NUMB_PATH
|
2075 IB_SA_PATH_REC_REVERSIBLE
| IB_SA_PATH_REC_SERVICE_ID
;
2077 switch (cma_family(id_priv
)) {
2079 path_rec
.qos_class
= cpu_to_be16((u16
) id_priv
->tos
);
2080 comp_mask
|= IB_SA_PATH_REC_QOS_CLASS
;
2083 sin6
= (struct sockaddr_in6
*) cma_src_addr(id_priv
);
2084 path_rec
.traffic_class
= (u8
) (be32_to_cpu(sin6
->sin6_flowinfo
) >> 20);
2085 comp_mask
|= IB_SA_PATH_REC_TRAFFIC_CLASS
;
2088 sib
= (struct sockaddr_ib
*) cma_src_addr(id_priv
);
2089 path_rec
.traffic_class
= (u8
) (be32_to_cpu(sib
->sib_flowinfo
) >> 20);
2090 comp_mask
|= IB_SA_PATH_REC_TRAFFIC_CLASS
;
2094 id_priv
->query_id
= ib_sa_path_rec_get(&sa_client
, id_priv
->id
.device
,
2095 id_priv
->id
.port_num
, &path_rec
,
2096 comp_mask
, timeout_ms
,
2097 GFP_KERNEL
, cma_query_handler
,
2098 work
, &id_priv
->query
);
2100 return (id_priv
->query_id
< 0) ? id_priv
->query_id
: 0;
2103 static void cma_work_handler(struct work_struct
*_work
)
2105 struct cma_work
*work
= container_of(_work
, struct cma_work
, work
);
2106 struct rdma_id_private
*id_priv
= work
->id
;
2109 mutex_lock(&id_priv
->handler_mutex
);
2110 if (!cma_comp_exch(id_priv
, work
->old_state
, work
->new_state
))
2113 if (id_priv
->id
.event_handler(&id_priv
->id
, &work
->event
)) {
2114 cma_exch(id_priv
, RDMA_CM_DESTROYING
);
2118 mutex_unlock(&id_priv
->handler_mutex
);
2119 cma_deref_id(id_priv
);
2121 rdma_destroy_id(&id_priv
->id
);
2125 static void cma_ndev_work_handler(struct work_struct
*_work
)
2127 struct cma_ndev_work
*work
= container_of(_work
, struct cma_ndev_work
, work
);
2128 struct rdma_id_private
*id_priv
= work
->id
;
2131 mutex_lock(&id_priv
->handler_mutex
);
2132 if (id_priv
->state
== RDMA_CM_DESTROYING
||
2133 id_priv
->state
== RDMA_CM_DEVICE_REMOVAL
)
2136 if (id_priv
->id
.event_handler(&id_priv
->id
, &work
->event
)) {
2137 cma_exch(id_priv
, RDMA_CM_DESTROYING
);
2142 mutex_unlock(&id_priv
->handler_mutex
);
2143 cma_deref_id(id_priv
);
2145 rdma_destroy_id(&id_priv
->id
);
2149 static int cma_resolve_ib_route(struct rdma_id_private
*id_priv
, int timeout_ms
)
2151 struct rdma_route
*route
= &id_priv
->id
.route
;
2152 struct cma_work
*work
;
2155 work
= kzalloc(sizeof *work
, GFP_KERNEL
);
2160 INIT_WORK(&work
->work
, cma_work_handler
);
2161 work
->old_state
= RDMA_CM_ROUTE_QUERY
;
2162 work
->new_state
= RDMA_CM_ROUTE_RESOLVED
;
2163 work
->event
.event
= RDMA_CM_EVENT_ROUTE_RESOLVED
;
2165 route
->path_rec
= kmalloc(sizeof *route
->path_rec
, GFP_KERNEL
);
2166 if (!route
->path_rec
) {
2171 ret
= cma_query_ib_route(id_priv
, timeout_ms
, work
);
2177 kfree(route
->path_rec
);
2178 route
->path_rec
= NULL
;
2184 int rdma_set_ib_paths(struct rdma_cm_id
*id
,
2185 struct ib_sa_path_rec
*path_rec
, int num_paths
)
2187 struct rdma_id_private
*id_priv
;
2190 id_priv
= container_of(id
, struct rdma_id_private
, id
);
2191 if (!cma_comp_exch(id_priv
, RDMA_CM_ADDR_RESOLVED
,
2192 RDMA_CM_ROUTE_RESOLVED
))
2195 id
->route
.path_rec
= kmemdup(path_rec
, sizeof *path_rec
* num_paths
,
2197 if (!id
->route
.path_rec
) {
2202 id
->route
.num_paths
= num_paths
;
2205 cma_comp_exch(id_priv
, RDMA_CM_ROUTE_RESOLVED
, RDMA_CM_ADDR_RESOLVED
);
2208 EXPORT_SYMBOL(rdma_set_ib_paths
);
2210 static int cma_resolve_iw_route(struct rdma_id_private
*id_priv
, int timeout_ms
)
2212 struct cma_work
*work
;
2214 work
= kzalloc(sizeof *work
, GFP_KERNEL
);
2219 INIT_WORK(&work
->work
, cma_work_handler
);
2220 work
->old_state
= RDMA_CM_ROUTE_QUERY
;
2221 work
->new_state
= RDMA_CM_ROUTE_RESOLVED
;
2222 work
->event
.event
= RDMA_CM_EVENT_ROUTE_RESOLVED
;
2223 queue_work(cma_wq
, &work
->work
);
2227 static int iboe_tos_to_sl(struct net_device
*ndev
, int tos
)
2230 struct net_device
*dev
;
2232 prio
= rt_tos2priority(tos
);
2233 dev
= ndev
->priv_flags
& IFF_802_1Q_VLAN
?
2234 vlan_dev_real_dev(ndev
) : ndev
;
2237 return netdev_get_prio_tc_map(dev
, prio
);
2239 #if IS_ENABLED(CONFIG_VLAN_8021Q)
2240 if (ndev
->priv_flags
& IFF_802_1Q_VLAN
)
2241 return (vlan_dev_get_egress_qos_mask(ndev
, prio
) &
2242 VLAN_PRIO_MASK
) >> VLAN_PRIO_SHIFT
;
2247 static int cma_resolve_iboe_route(struct rdma_id_private
*id_priv
)
2249 struct rdma_route
*route
= &id_priv
->id
.route
;
2250 struct rdma_addr
*addr
= &route
->addr
;
2251 struct cma_work
*work
;
2253 struct net_device
*ndev
= NULL
;
2256 work
= kzalloc(sizeof *work
, GFP_KERNEL
);
2261 INIT_WORK(&work
->work
, cma_work_handler
);
2263 route
->path_rec
= kzalloc(sizeof *route
->path_rec
, GFP_KERNEL
);
2264 if (!route
->path_rec
) {
2269 route
->num_paths
= 1;
2271 if (addr
->dev_addr
.bound_dev_if
)
2272 ndev
= dev_get_by_index(&init_net
, addr
->dev_addr
.bound_dev_if
);
2278 route
->path_rec
->vlan_id
= rdma_vlan_dev_vlan_id(ndev
);
2279 memcpy(route
->path_rec
->dmac
, addr
->dev_addr
.dst_dev_addr
, ETH_ALEN
);
2280 memcpy(route
->path_rec
->smac
, ndev
->dev_addr
, ndev
->addr_len
);
2282 rdma_ip2gid((struct sockaddr
*)&id_priv
->id
.route
.addr
.src_addr
,
2283 &route
->path_rec
->sgid
);
2284 rdma_ip2gid((struct sockaddr
*)&id_priv
->id
.route
.addr
.dst_addr
,
2285 &route
->path_rec
->dgid
);
2287 route
->path_rec
->hop_limit
= 1;
2288 route
->path_rec
->reversible
= 1;
2289 route
->path_rec
->pkey
= cpu_to_be16(0xffff);
2290 route
->path_rec
->mtu_selector
= IB_SA_EQ
;
2291 route
->path_rec
->sl
= iboe_tos_to_sl(ndev
, id_priv
->tos
);
2292 route
->path_rec
->mtu
= iboe_get_mtu(ndev
->mtu
);
2293 route
->path_rec
->rate_selector
= IB_SA_EQ
;
2294 route
->path_rec
->rate
= iboe_get_rate(ndev
);
2296 route
->path_rec
->packet_life_time_selector
= IB_SA_EQ
;
2297 route
->path_rec
->packet_life_time
= CMA_IBOE_PACKET_LIFETIME
;
2298 if (!route
->path_rec
->mtu
) {
2303 work
->old_state
= RDMA_CM_ROUTE_QUERY
;
2304 work
->new_state
= RDMA_CM_ROUTE_RESOLVED
;
2305 work
->event
.event
= RDMA_CM_EVENT_ROUTE_RESOLVED
;
2306 work
->event
.status
= 0;
2308 queue_work(cma_wq
, &work
->work
);
2313 kfree(route
->path_rec
);
2314 route
->path_rec
= NULL
;
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	if (rdma_cap_ib_sa(id->device, id->port_num))
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
	else if (rdma_protocol_roce(id->device, id->port_num))
		ret = cma_resolve_iboe_route(id_priv);
	else if (rdma_protocol_iwarp(id->device, id->port_num))
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
	else
		ret = -ENOSYS;

	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
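/*
 * Usage sketch (added commentary; the handler name is hypothetical):
 * rdma_resolve_route() returns immediately and the result arrives in the
 * id's event handler, e.g.:
 *
 *	static int my_handler(struct rdma_cm_id *id, struct rdma_cm_event *ev)
 *	{
 *		if (ev->event == RDMA_CM_EVENT_ROUTE_RESOLVED)
 *			...;	// id->route.path_rec is now valid
 *		else if (ev->event == RDMA_CM_EVENT_ROUTE_ERROR)
 *			...;	// ev->status carries the failure code
 *		return 0;
 *	}
 */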
static void cma_set_loopback(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		break;
	case AF_INET6:
		ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
			      0, 0, 0, htonl(1));
		break;
	default:
		ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
			    0, 0, 0, htonl(1));
		break;
	}
}
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;
	u8 p;

	cma_dev = NULL;
	mutex_lock(&lock);
	list_for_each_entry(cur_dev, &dev_list, list) {
		if (cma_family(id_priv) == AF_IB &&
		    !rdma_cap_ib_cm(cur_dev->device, 1))
			continue;

		if (!cma_dev)
			cma_dev = cur_dev;

		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			if (!ib_query_port(cur_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE) {
				cma_dev = cur_dev;
				goto port_found;
			}
		}
	}

	if (!cma_dev) {
		ret = -ENODEV;
		goto out;
	}

	p = 1;

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	id_priv->id.route.addr.dev_addr.dev_type =
		(rdma_protocol_ib(cma_dev->device, p)) ?
		ARPHRD_INFINIBAND : ARPHRD_ETHER;

	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
	cma_set_loopback(cma_src_addr(id_priv));
out:
	mutex_unlock(&lock);
	return ret;
}
static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
			   RDMA_CM_ADDR_RESOLVED))
		goto out;

	memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv, NULL);

	if (status) {
		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
				   RDMA_CM_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
}
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}
static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_resolve_ib_dev(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
		&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (!src_addr || !src_addr->sa_family) {
		src_addr = (struct sockaddr *) &id->route.addr.src_addr;
		src_addr->sa_family = dst_addr->sa_family;
		if (dst_addr->sa_family == AF_INET6) {
			struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
			struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
			src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
			if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
				id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id;
		} else if (dst_addr->sa_family == AF_IB) {
			((struct sockaddr_ib *) src_addr)->sib_pkey =
				((struct sockaddr_ib *) dst_addr)->sib_pkey;
		}
	}
	return rdma_bind_addr(id, src_addr);
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == RDMA_CM_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	if (cma_family(id_priv) != dst_addr->sa_family)
		return -EINVAL;

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
	if (cma_any_addr(dst_addr)) {
		ret = cma_resolve_loopback(id_priv);
	} else {
		if (dst_addr->sa_family == AF_IB) {
			ret = cma_resolve_ib_addr(id_priv);
		} else {
			ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv),
					      dst_addr, &id->route.addr.dev_addr,
					      timeout_ms, addr_handler, id_priv);
		}
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
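/*
 * Active-side flow, as a minimal sketch (added commentary; error handling is
 * omitted, "my_handler" is hypothetical, and the rdma_create_id() signature
 * varies across kernel versions):
 *
 *	id = rdma_create_id(my_handler, ctx, RDMA_PS_TCP, IB_QPT_RC);
 *	rdma_resolve_addr(id, NULL, dst_addr, 2000);
 *	// handler sees RDMA_CM_EVENT_ADDR_RESOLVED, then calls:
 *	rdma_resolve_route(id, 2000);
 *	// handler sees RDMA_CM_EVENT_ROUTE_RESOLVED, then calls:
 *	rdma_connect(id, &conn_param);
 */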
int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if (reuse || id_priv->state == RDMA_CM_IDLE) {
		id_priv->reuseaddr = reuse;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_reuseaddr);
int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
		id_priv->options |= (1 << CMA_OPTION_AFONLY);
		id_priv->afonly = afonly;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_afonly);
static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr *addr;
	struct sockaddr_ib *sib;
	u64 sid, mask;
	__be16 port;

	addr = cma_src_addr(id_priv);
	port = htons(bind_list->port);

	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_port = port;
		break;
	case AF_INET6:
		((struct sockaddr_in6 *) addr)->sin6_port = port;
		break;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		sid = be64_to_cpu(sib->sib_sid);
		mask = be64_to_cpu(sib->sib_sid_mask);
		sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
		sib->sib_sid_mask = cpu_to_be64(~0ULL);
		break;
	}
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}
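/*
 * Worked example for the AF_IB case above (added commentary; the SID value
 * is illustrative only): with a mask that covers everything except the low
 * 16 bits, binding to port 0x1234 folds the port into the service ID:
 *
 *	sid     = 0x0123456789AB0000	(caller's masked service ID)
 *	sib_sid = (sid & mask) | 0x1234 = 0x0123456789AB1234
 *
 * after which sib_sid_mask is widened to ~0ULL, making the bound SID exact.
 */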
static int cma_alloc_port(enum rdma_port_space ps,
			  struct rdma_id_private *id_priv, unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	ret = cma_ps_alloc(ps, bind_list, snum);
	if (ret < 0)
		goto err;

	bind_list->ps = ps;
	bind_list->port = (unsigned short)ret;
	cma_bind_port(bind_list, id_priv);
	return 0;
err:
	kfree(bind_list);
	return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
}
static int cma_alloc_any_port(enum rdma_port_space ps,
			      struct rdma_id_private *id_priv)
{
	static unsigned int last_used_port;
	int low, high, remaining;
	unsigned int rover;

	inet_get_local_port_range(&init_net, &low, &high);
	remaining = (high - low) + 1;
	rover = prandom_u32() % remaining + low;
retry:
	if (last_used_port != rover &&
	    !cma_ps_find(ps, (unsigned short)rover)) {
		int ret = cma_alloc_port(ps, id_priv, rover);
		/*
		 * Remember previously used port number in order to avoid
		 * re-using same port immediately after it is closed.
		 */
		if (!ret)
			last_used_port = rover;
		if (ret != -EADDRNOTAVAIL)
			return ret;
	}
	if (--remaining) {
		rover++;
		if ((rover < low) || (rover > high))
			rover = low;
		goto retry;
	}
	return -EADDRNOTAVAIL;
}
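/*
 * Added commentary: the search above is a classic randomized rover.  With a
 * typical ip_local_port_range of [32768, 60999], rover starts at a random
 * point in the range and each failed probe advances it by one, wrapping from
 * high back to low, until every port has been tried once or an allocation
 * sticks; last_used_port keeps a just-closed port from being handed straight
 * back out.
 */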
/*
 * Check that the requested port is available. This is called when trying to
 * bind to a specific port, or when trying to listen on a bound port. In
 * the latter case, the provided id_priv may already be on the bind_list, but
 * we still need to check that it's okay to start listening.
 */
static int cma_check_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv, uint8_t reuseaddr)
{
	struct rdma_id_private *cur_id;
	struct sockaddr *addr, *cur_addr;

	addr = cma_src_addr(id_priv);
	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
		if (id_priv == cur_id)
			continue;

		if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
		    cur_id->reuseaddr)
			continue;

		cur_addr = cma_src_addr(cur_id);
		if (id_priv->afonly && cur_id->afonly &&
		    (addr->sa_family != cur_addr->sa_family))
			continue;

		if (cma_any_addr(addr) || cma_any_addr(cur_addr))
			return -EADDRNOTAVAIL;

		if (!cma_addr_cmp(addr, cur_addr))
			return -EADDRINUSE;
	}
	return 0;
}
static int cma_use_port(enum rdma_port_space ps,
			struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	unsigned short snum;
	int ret;

	snum = ntohs(cma_port(cma_src_addr(id_priv)));
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = cma_ps_find(ps, snum);
	if (!bind_list) {
		ret = cma_alloc_port(ps, id_priv, snum);
	} else {
		ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
		if (!ret)
			cma_bind_port(bind_list, id_priv);
	}
	return ret;
}
static int cma_bind_listen(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;
	int ret = 0;

	mutex_lock(&lock);
	if (bind_list->owners.first->next)
		ret = cma_check_port(bind_list, id_priv, 0);
	mutex_unlock(&lock);
	return ret;
}
static enum rdma_port_space cma_select_inet_ps(
		struct rdma_id_private *id_priv)
{
	switch (id_priv->id.ps) {
	case RDMA_PS_TCP:
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
	case RDMA_PS_IB:
		return id_priv->id.ps;
	default:
		return 0;
	}
}
static enum rdma_port_space cma_select_ib_ps(struct rdma_id_private *id_priv)
{
	enum rdma_port_space ps = 0;
	struct sockaddr_ib *sib;
	u64 sid_ps, mask, sid;

	sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
	mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
	sid = be64_to_cpu(sib->sib_sid) & mask;

	if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
		sid_ps = RDMA_IB_IP_PS_IB;
		ps = RDMA_PS_IB;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
		   (sid == (RDMA_IB_IP_PS_TCP & mask))) {
		sid_ps = RDMA_IB_IP_PS_TCP;
		ps = RDMA_PS_TCP;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
		   (sid == (RDMA_IB_IP_PS_UDP & mask))) {
		sid_ps = RDMA_IB_IP_PS_UDP;
		ps = RDMA_PS_UDP;
	}

	if (ps) {
		sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
		sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
						be64_to_cpu(sib->sib_sid_mask));
	}
	return ps;
}
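/*
 * Added commentary: for AF_IB the port space is carried in the service ID
 * itself.  Conceptually the 64-bit SID is split as
 *
 *	[ port-space prefix (upper 48 bits) | 16-bit port ]
 *
 * so cma_select_ib_ps() matches the masked prefix against the
 * RDMA_IB_IP_PS_* values and, on a match, rewrites sib_sid to the canonical
 * prefix plus the requested port, widening the mask to cover the prefix.
 */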
static int cma_get_port(struct rdma_id_private *id_priv)
{
	enum rdma_port_space ps;
	int ret;

	if (cma_family(id_priv) != AF_IB)
		ps = cma_select_inet_ps(id_priv);
	else
		ps = cma_select_ib_ps(id_priv);
	if (!ps)
		return -EPROTONOSUPPORT;

	mutex_lock(&lock);
	if (cma_any_port(cma_src_addr(id_priv)))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}
static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
			       struct sockaddr *addr)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 *sin6;

	if (addr->sa_family != AF_INET6)
		return 0;

	sin6 = (struct sockaddr_in6 *) addr;

	if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
		return 0;

	if (!sin6->sin6_scope_id)
		return -EINVAL;

	dev_addr->bound_dev_if = sin6->sin6_scope_id;
#endif
	return 0;
}
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == RDMA_CM_IDLE) {
		id->route.addr.src_addr.ss_family = AF_INET;
		ret = rdma_bind_addr(id, cma_src_addr(id_priv));
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
		return -EINVAL;

	if (id_priv->reuseaddr) {
		ret = cma_bind_listen(id_priv);
		if (ret)
			goto err;
	}

	id_priv->backlog = backlog;
	if (id->device) {
		if (rdma_cap_ib_cm(id->device, 1)) {
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
		} else if (rdma_cap_iw_cm(id->device, 1)) {
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
		} else {
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);
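/*
 * Passive-side flow, as a minimal sketch (added commentary; error handling
 * omitted):
 *
 *	rdma_bind_addr(id, (struct sockaddr *)&sin);	// or leave the id
 *	rdma_listen(id, 10);				// IDLE and let
 *							// rdma_listen() bind
 *
 * Each incoming request then surfaces as RDMA_CM_EVENT_CONNECT_REQUEST on a
 * freshly created child id, which the event handler answers with
 * rdma_accept() or rdma_reject().
 */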
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
	    addr->sa_family != AF_IB)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
		return -EINVAL;

	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
	if (ret)
		goto err1;

	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
	if (!cma_any_addr(addr)) {
		ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		ret = cma_acquire_dev(id_priv, NULL);
		if (ret)
			goto err1;
	}

	if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
		if (addr->sa_family == AF_INET)
			id_priv->afonly = 1;
#if IS_ENABLED(CONFIG_IPV6)
		else if (addr->sa_family == AF_INET6)
			id_priv->afonly = init_net.ipv6.sysctl.bindv6only;
#endif
	}
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (id_priv->cma_dev)
		cma_release_dev(id_priv);
err1:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
{
	struct cma_hdr *cma_hdr;

	cma_hdr = hdr;
	cma_hdr->cma_version = CMA_VERSION;
	if (cma_family(id_priv) == AF_INET) {
		struct sockaddr_in *src4, *dst4;

		src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
		dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 4);
		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		cma_hdr->port = src4->sin_port;
	} else if (cma_family(id_priv) == AF_INET6) {
		struct sockaddr_in6 *src6, *dst6;

		src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 6);
		cma_hdr->src_addr.ip6 = src6->sin6_addr;
		cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
		cma_hdr->port = src6->sin6_port;
	}
	return 0;
}
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	int ret = 0;

	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			break;
		}
		ret = cma_set_qkey(id_priv, rep->qkey);
		if (ret) {
			event.event = RDMA_CM_EVENT_ADDR_ERROR;
			event.status = ret;
			break;
		}
		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
				     id_priv->id.route.path_rec,
				     &event.param.ud.ah_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.status = 0;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct ib_cm_id *id;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	req.private_data_len = offset + conn_param->private_data_len;
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
			     id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	req.path = id_priv->id.route.path_rec;
	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(private_data);
	return ret;
}
static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	struct ib_cm_id *id;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	req.private_data_len = offset + conn_param->private_data_len;
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	route = &id_priv->id.route;
	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.qp_num = id_priv->qp_num;
	req.qp_type = id_priv->id.qp_type;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = min_t(u8, 7, conn_param->retry_count);
	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id)) {
		ib_destroy_cm_id(id);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}
static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id->tos = id_priv->tos;
	id_priv->cm_id.iw = cm_id;

	memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));
	memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
	       rdma_addr_size(cma_dst_addr(id_priv)));

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	if (conn_param) {
		iw_param.ord = conn_param->initiator_depth;
		iw_param.ird = conn_param->responder_resources;
		iw_param.private_data = conn_param->private_data;
		iw_param.private_data_len = conn_param->private_data_len;
		iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
	} else {
		memset(&iw_param, 0, sizeof iw_param);
		iw_param.qpn = id_priv->qp_num;
	}
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD)
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
	} else if (rdma_cap_iw_cm(id->device, id->port_num))
		ret = cma_connect_iw(id_priv, conn_param);
	else
		ret = -ENOSYS;
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
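/*
 * Added commentary, a minimal sketch of filling the connection parameters
 * (the values are illustrative only):
 *
 *	struct rdma_conn_param conn_param;
 *
 *	memset(&conn_param, 0, sizeof conn_param);
 *	conn_param.responder_resources = 1;
 *	conn_param.initiator_depth = 1;
 *	conn_param.retry_count = 7;	// clamped to 7 by cma_connect_ib()
 *	conn_param.rnr_retry_count = 7;
 *	rdma_connect(id, &conn_param);
 */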
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}
static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp) {
		iw_param.qpn = id_priv->qp_num;
	} else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}
static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status, u32 qkey,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;
	int ret;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		ret = cma_set_qkey(id_priv, qkey);
		if (ret)
			return ret;
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);

	id_priv->owner = task_pid_nr(current);

	if (!cma_comp(id_priv, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD) {
			if (conn_param)
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							conn_param->qkey,
							conn_param->private_data,
							conn_param->private_data_len);
			else
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							0, NULL, 0);
		} else {
			if (conn_param)
				ret = cma_accept_ib(id_priv, conn_param);
			else
				ret = cma_rep_recv(id_priv);
		}
	} else if (rdma_cap_iw_cm(id->device, id->port_num))
		ret = cma_accept_iw(id_priv, conn_param);
	else
		ret = -ENOSYS;

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);
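/*
 * Added commentary: rdma_accept() is commonly invoked straight from the
 * CONNECT_REQUEST event handler, e.g. (sketch only, QP setup omitted):
 *
 *	case RDMA_CM_EVENT_CONNECT_REQUEST:
 *		memset(&conn_param, 0, sizeof conn_param);
 *		conn_param.responder_resources = 1;
 *		conn_param.initiator_depth = 1;
 *		if (rdma_accept(id, &conn_param))
 *			rdma_reject(id, NULL, 0);
 *		break;
 */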
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD)
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
	} else
		ret = -ENOSYS;

	return ret;
}
EXPORT_SYMBOL(rdma_reject);
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
	} else
		ret = -EINVAL;

out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc = multicast->context;
	struct rdma_cm_event event;
	int ret;

	id_priv = mc->id_priv;
	if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
	    cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
		return 0;

	if (!status)
		status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
	mutex_lock(&id_priv->qp_mutex);
	if (!status && id_priv->id.qp)
		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
					 be16_to_cpu(multicast->rec.mlid));
	mutex_unlock(&id_priv->qp_mutex);

	memset(&event, 0, sizeof event);
	event.status = status;
	event.param.ud.private_data = mc->context;
	if (!status) {
		event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
		ib_init_ah_from_mcmember(id_priv->id.device,
					 id_priv->id.port_num, &multicast->rec,
					 &event.param.ud.ah_attr);
		event.param.ud.qp_num = 0xFFFFFF;
		event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
	} else
		event.event = RDMA_CM_EVENT_MULTICAST_ERROR;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return 0;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return 0;
}
static void cma_set_mgid(struct rdma_id_private *id_priv,
			 struct sockaddr *addr, union ib_gid *mgid)
{
	unsigned char mc_map[MAX_ADDR_LEN];
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6) &&
		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
								 0xFF10A01B)) {
		/* IPv6 address is an SA assigned MGID. */
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else if (addr->sa_family == AF_IB) {
		memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
	} else if ((addr->sa_family == AF_INET6)) {
		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	} else {
		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
		if (id_priv->id.ps == RDMA_PS_UDP)
			mc_map[7] = 0x01;	/* Use RDMA CM signature */
		*mgid = *(union ib_gid *) (mc_map + 4);
	}
}
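/*
 * Added commentary: in the plain IPv4 case above, ip_ib_mc_map() derives an
 * IPoIB-style MGID from the port's broadcast GID, embedding the low 28 bits
 * of the group address in the final bytes of the mapped hardware address;
 * mc_map[7] (the low byte of the IPoIB signature within that MGID) is then
 * overwritten with 0x01 so RDMA CM joins do not collide with IPoIB's own
 * groups for the same IP multicast address.
 */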
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
				 struct cma_multicast *mc)
{
	struct ib_sa_mcmember_rec rec;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	ib_sa_comp_mask comp_mask;
	int ret;

	ib_addr_get_mgid(dev_addr, &rec.mgid);
	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
				     &rec.mgid, &rec);
	if (ret)
		return ret;

	ret = cma_set_qkey(id_priv, 0);
	if (ret)
		return ret;

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	rec.qkey = cpu_to_be32(id_priv->qkey);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = 1;

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

	if (id_priv->id.ps == RDMA_PS_IPOIB)
		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
			     IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			     IB_SA_MCMEMBER_REC_MTU |
			     IB_SA_MCMEMBER_REC_HOP_LIMIT;

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	return PTR_ERR_OR_ZERO(mc->multicast.ib);
}
static void iboe_mcast_work_handler(struct work_struct *work)
{
	struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
	struct cma_multicast *mc = mw->mc;
	struct ib_sa_multicast *m = mc->multicast.ib;

	mc->multicast.ib->context = mc;
	cma_ib_mc_handler(0, m);
	kref_put(&mc->mcref, release_mc);
	kfree(mw);
}
static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
	} else if (addr->sa_family == AF_INET6) {
		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
	} else {
		mgid->raw[0] = 0xff;
		mgid->raw[1] = 0x0e;
		mgid->raw[2] = 0;
		mgid->raw[3] = 0;
		mgid->raw[4] = 0;
		mgid->raw[5] = 0;
		mgid->raw[6] = 0;
		mgid->raw[7] = 0;
		mgid->raw[8] = 0;
		mgid->raw[9] = 0;
		mgid->raw[10] = 0xff;
		mgid->raw[11] = 0xff;
		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
	}
}
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct iboe_mcast_work *work;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
	struct net_device *ndev = NULL;

	if (cma_zero_addr((struct sockaddr *)&mc->addr))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
	if (!mc->multicast.ib) {
		err = -ENOMEM;
		goto out1;
	}

	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);

	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
	if (id_priv->id.ps == RDMA_PS_UDP)
		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);

	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
	if (!ndev) {
		err = -ENODEV;
		goto out2;
	}
	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
	mc->multicast.ib->rec.hop_limit = 1;
	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
	dev_put(ndev);
	if (!mc->multicast.ib->rec.mtu) {
		err = -EINVAL;
		goto out2;
	}
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &mc->multicast.ib->rec.port_gid);
	work->id = id_priv;
	work->mc = mc;
	INIT_WORK(&work->work, iboe_mcast_work_handler);
	kref_get(&mc->mcref);
	queue_work(cma_wq, &work->work);

	return 0;

out2:
	kfree(mc->multicast.ib);
out1:
	kfree(work);
	return err;
}
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			void *context)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
		return -EINVAL;

	mc = kmalloc(sizeof *mc, GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, rdma_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	if (rdma_protocol_roce(id->device, id->port_num)) {
		kref_init(&mc->mcref);
		ret = cma_iboe_join_multicast(id_priv, mc);
	} else if (rdma_cap_ib_mcast(id->device, id->port_num))
		ret = cma_join_ib_multicast(id_priv, mc);
	else
		ret = -ENOSYS;

	if (ret) {
		spin_lock_irq(&id_priv->lock);
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);
		kfree(mc);
	}
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
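/*
 * Usage sketch (added commentary; error handling omitted): multicast is only
 * meaningful for UD-style IDs, and the join result arrives asynchronously:
 *
 *	rdma_join_multicast(id, (struct sockaddr *)&grp, my_ctx);
 *	// the handler later sees RDMA_CM_EVENT_MULTICAST_JOIN with
 *	// event->param.ud.{ah_attr,qp_num,qkey} ready for UD sends,
 *	// or RDMA_CM_EVENT_MULTICAST_ERROR on failure.
 */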
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
			list_del(&mc->list);
			spin_unlock_irq(&id_priv->lock);

			if (id->qp)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						be16_to_cpu(mc->multicast.ib->rec.mlid));

			BUG_ON(id_priv->cma_dev->device != id->device);

			if (rdma_cap_ib_mcast(id->device, id->port_num)) {
				ib_sa_free_multicast(mc->multicast.ib);
				kfree(mc);
			} else if (rdma_protocol_roce(id->device, id->port_num))
				kref_put(&mc->mcref, release_mc);

			return;
		}
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_ndev_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
		       ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_ndev_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		atomic_inc(&id_priv->refcount);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}
static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (dev_net(ndev) != &init_net)
		return NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}
static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};
static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum rdma_cm_state state;
	int ret = 0;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
	if (state == RDMA_CM_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	mutex_lock(&id_priv->handler_mutex);

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
		goto out;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}
static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}
static void cma_remove_one(struct ib_device *device, void *client_data)
{
	struct cma_device *cma_dev = client_data;

	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}
static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct rdma_cm_id_stats *id_stats;
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id = NULL;
	struct cma_device *cma_dev;
	int i_dev = 0, i_id = 0;

	/*
	 * We export all of the IDs as a sequence of messages.  Each
	 * ID gets its own netlink message.
	 */
	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list) {
		if (i_dev < cb->args[0]) {
			i_dev++;
			continue;
		}

		i_id = 0;
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			if (i_id < cb->args[1]) {
				i_id++;
				continue;
			}

			id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
						sizeof *id_stats, RDMA_NL_RDMA_CM,
						RDMA_NL_RDMA_CM_ID_STATS,
						NLM_F_MULTI);
			if (!id_stats)
				goto out;

			memset(id_stats, 0, sizeof *id_stats);
			id = &id_priv->id;
			id_stats->node_type = id->route.addr.dev_addr.dev_type;
			id_stats->port_num = id->port_num;
			id_stats->bound_dev_if =
				id->route.addr.dev_addr.bound_dev_if;

			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_src_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
				goto out;
			if (ibnl_put_attr(skb, nlh,
					  rdma_addr_size(cma_src_addr(id_priv)),
					  cma_dst_addr(id_priv),
					  RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
				goto out;

			id_stats->pid		= id_priv->owner;
			id_stats->port_space	= id->ps;
			id_stats->cm_state	= id_priv->state;
			id_stats->qp_num	= id_priv->qp_num;
			id_stats->qp_type	= id->qp_type;

			i_id++;
		}

		cb->args[1] = 0;
		i_dev++;
	}

out:
	mutex_unlock(&lock);
	cb->args[0] = i_dev;
	cb->args[1] = i_id;

	return skb->len;
}
static const struct ibnl_client_cbs cma_cb_table[] = {
	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
				       .module = THIS_MODULE },
};
static int __init cma_init(void)
{
	int ret;

	cma_wq = create_singlethread_workqueue("rdma_cm");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
		printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");

	return 0;

err:
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}
static void __exit cma_cleanup(void)
{
	ibnl_remove_client(RDMA_NL_RDMA_CM);
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&tcp_ps);
	idr_destroy(&udp_ps);
	idr_destroy(&ipoib_ps);
	idr_destroy(&ib_ps);
}

module_init(cma_init);
module_exit(cma_cleanup);