/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
        .name   = "cm",
        .add    = cm_add_one,
        .remove = cm_remove_one
};

static struct ib_cm {
        spinlock_t lock;
        struct list_head device_list;
        rwlock_t device_lock;
        struct rb_root listen_service_table;
        u64 listen_service_id;
        /* struct rb_root peer_service_table; todo: fix peer to peer */
        struct rb_root remote_qp_table;
        struct rb_root remote_id_table;
        struct rb_root remote_sidr_table;
        struct idr local_id_table;
        struct workqueue_struct *wq;
} cm;

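/*
 * All CM state hangs off the single "cm" instance above: device_list
 * tracks the devices we have MAD agents on, listen_service_table maps
 * (device, service id) to listening cm_ids, the remote id/qp tables catch
 * duplicate REQs and stale connections, local_id_table maps local
 * communication IDs back to their cm_id_private, and wq serializes
 * deferred event delivery.
 */
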
struct cm_port {
        struct cm_device *cm_dev;
        struct ib_mad_agent *mad_agent;
        u8 port_num;
};

struct cm_device {
        struct list_head list;
        struct ib_device *device;
        __be64 ca_guid;
        struct cm_port port[0];
};

struct cm_av {
        struct cm_port *port;
        union ib_gid dgid;
        struct ib_ah_attr ah_attr;
        u16 pkey_index;
        u8 packet_life_time;
};

struct cm_work {
        struct work_struct work;
        struct list_head list;
        struct cm_port *port;
        struct ib_mad_recv_wc *mad_recv_wc;     /* Received MADs */
        __be32 local_id;                        /* Established / timewait */
        __be32 remote_id;
        struct ib_cm_event cm_event;
        struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
        struct cm_work work;                    /* Must be first. */
        struct rb_node remote_qp_node;
        struct rb_node remote_id_node;
        __be64 remote_ca_guid;
        __be32 remote_qpn;
        u8 inserted_remote_qp;
        u8 inserted_remote_id;
};

struct cm_id_private {
        struct ib_cm_id id;

        struct rb_node service_node;
        struct rb_node sidr_id_node;
        spinlock_t lock;
        wait_queue_head_t wait;
        atomic_t refcount;

        struct ib_mad_send_buf *msg;
        struct cm_timewait_info *timewait_info;
        /* todo: use alternate port on send failure */
        struct cm_av av;
        struct cm_av alt_av;

        void *private_data;
        __be64 tid;
        __be32 local_qpn;
        __be32 remote_qpn;
        enum ib_qp_type qp_type;
        __be32 sq_psn;
        __be32 rq_psn;
        int timeout_ms;
        enum ib_mtu path_mtu;
        u8 private_data_len;
        u8 max_cm_retries;
        u8 peer_to_peer;
        u8 responder_resources;
        u8 initiator_depth;
        u8 local_ack_timeout;
        u8 retry_count;
        u8 rnr_retry_count;
        u8 service_timeout;

        struct list_head work_list;
        atomic_t work_count;
};

static void cm_work_handler(void *data);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
        if (atomic_dec_and_test(&cm_id_priv->refcount))
                wake_up(&cm_id_priv->wait);
}

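/*
 * Lifetime rule: every lookup that returns a cm_id_private takes a
 * reference, cm_deref_id drops it, and ib_destroy_cm_id sleeps on the
 * wait queue until the count reaches zero before freeing the structure.
 */
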
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
                        struct ib_mad_send_buf **msg)
{
        struct ib_mad_agent *mad_agent;
        struct ib_mad_send_buf *m;
        struct ib_ah *ah;

        mad_agent = cm_id_priv->av.port->mad_agent;
        ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
                               cm_id_priv->av.pkey_index,
                               0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                               GFP_ATOMIC);
        if (IS_ERR(m)) {
                ib_destroy_ah(ah);
                return PTR_ERR(m);
        }

        /* Timeout set by caller if response is expected. */
        m->ah = ah;
        m->retries = cm_id_priv->max_cm_retries;

        atomic_inc(&cm_id_priv->refcount);
        m->context[0] = cm_id_priv;
        *msg = m;
        return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
                                 struct ib_mad_recv_wc *mad_recv_wc,
                                 struct ib_mad_send_buf **msg)
{
        struct ib_mad_send_buf *m;
        struct ib_ah *ah;

        ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
                                  mad_recv_wc->recv_buf.grh, port->port_num);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
                               0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                               GFP_ATOMIC);
        if (IS_ERR(m)) {
                ib_destroy_ah(ah);
                return PTR_ERR(m);
        }
        m->ah = ah;
        *msg = m;
        return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
        ib_destroy_ah(msg->ah);
        if (msg->context[0])
                cm_deref_id(msg->context[0]);
        ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
                                   u8 private_data_len)
{
        void *data;

        if (!private_data || !private_data_len)
                return NULL;

        data = kmalloc(private_data_len, GFP_KERNEL);
        if (!data)
                return ERR_PTR(-ENOMEM);

        memcpy(data, private_data, private_data_len);
        return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
                                void *private_data, u8 private_data_len)
{
        if (cm_id_priv->private_data && cm_id_priv->private_data_len)
                kfree(cm_id_priv->private_data);

        cm_id_priv->private_data = private_data;
        cm_id_priv->private_data_len = private_data_len;
}

static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num,
                           u16 dlid, u8 sl, u16 src_path_bits)
{
        memset(ah_attr, 0, sizeof *ah_attr);
        ah_attr->dlid = dlid;
        ah_attr->sl = sl;
        ah_attr->src_path_bits = src_path_bits;
        ah_attr->port_num = port_num;
}

static void cm_init_av_for_response(struct cm_port *port,
                                    struct ib_wc *wc, struct cm_av *av)
{
        av->port = port;
        av->pkey_index = wc->pkey_index;
        cm_set_ah_attr(&av->ah_attr, port->port_num, wc->slid,
                       wc->sl, wc->dlid_path_bits);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
        struct cm_device *cm_dev;
        struct cm_port *port = NULL;
        unsigned long flags;
        int ret;
        u8 p;

        read_lock_irqsave(&cm.device_lock, flags);
        list_for_each_entry(cm_dev, &cm.device_list, list) {
                if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
                                        &p, NULL)) {
                        port = &cm_dev->port[p-1];
                        break;
                }
        }
        read_unlock_irqrestore(&cm.device_lock, flags);

        if (!port)
                return -EINVAL;

        ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
                                  be16_to_cpu(path->pkey), &av->pkey_index);
        if (ret)
                return ret;

        av->port = port;
        cm_set_ah_attr(&av->ah_attr, av->port->port_num,
                       be16_to_cpu(path->dlid), path->sl,
                       be16_to_cpu(path->slid) & 0x7F);
        av->packet_life_time = path->packet_life_time;
        return 0;
}

static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
        unsigned long flags;
        int ret;
        static int next_id;

        do {
                spin_lock_irqsave(&cm.lock, flags);
                ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, next_id++,
                                        (__force int *) &cm_id_priv->id.local_id);
                spin_unlock_irqrestore(&cm.lock, flags);
        } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
        return ret;
}

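/*
 * idr_get_new_above() fails with -EAGAIN when the idr's internal layer
 * cache is empty; idr_pre_get() refills that cache (it may sleep, hence
 * GFP_KERNEL outside the lock) and the loop retries, so allocation only
 * fails permanently when memory is exhausted.
 */
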
static void cm_free_id(__be32 local_id)
{
        unsigned long flags;

        spin_lock_irqsave(&cm.lock, flags);
        idr_remove(&cm.local_id_table, (__force int) local_id);
        spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;

        cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
        if (cm_id_priv) {
                if (cm_id_priv->id.remote_id == remote_id)
                        atomic_inc(&cm_id_priv->refcount);
                else
                        cm_id_priv = NULL;
        }

        return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;
        unsigned long flags;

        spin_lock_irqsave(&cm.lock, flags);
        cm_id_priv = cm_get_id(local_id, remote_id);
        spin_unlock_irqrestore(&cm.lock, flags);

        return cm_id_priv;
}

static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
        struct rb_node **link = &cm.listen_service_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        __be64 service_id = cm_id_priv->id.service_id;
        __be64 service_mask = cm_id_priv->id.service_mask;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          service_node);
                if ((cur_cm_id_priv->id.service_mask & service_id) ==
                    (service_mask & cur_cm_id_priv->id.service_id) &&
                    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
                        return cur_cm_id_priv;

                if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
                        link = &(*link)->rb_left;
                else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
                        link = &(*link)->rb_right;
                else if (service_id < cur_cm_id_priv->id.service_id)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }
        rb_link_node(&cm_id_priv->service_node, parent, link);
        rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
        return NULL;
}

static struct cm_id_private * cm_find_listen(struct ib_device *device,
                                             __be64 service_id)
{
        struct rb_node *node = cm.listen_service_table.rb_node;
        struct cm_id_private *cm_id_priv;

        while (node) {
                cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
                if ((cm_id_priv->id.service_mask & service_id) ==
                     cm_id_priv->id.service_id &&
                    (cm_id_priv->id.device == device))
                        return cm_id_priv;

                if (device < cm_id_priv->id.device)
                        node = node->rb_left;
                else if (device > cm_id_priv->id.device)
                        node = node->rb_right;
                else if (service_id < cm_id_priv->id.service_id)
                        node = node->rb_left;
                else
                        node = node->rb_right;
        }
        return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
                                                     *timewait_info)
{
        struct rb_node **link = &cm.remote_id_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_id = timewait_info->work.remote_id;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_id_node);
                if (remote_id < cur_timewait_info->work.remote_id)
                        link = &(*link)->rb_left;
                else if (remote_id > cur_timewait_info->work.remote_id)
                        link = &(*link)->rb_right;
                else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_left;
                else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_id = 1;
        rb_link_node(&timewait_info->remote_id_node, parent, link);
        rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
        return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
                                                   __be32 remote_id)
{
        struct rb_node *node = cm.remote_id_table.rb_node;
        struct cm_timewait_info *timewait_info;

        while (node) {
                timewait_info = rb_entry(node, struct cm_timewait_info,
                                         remote_id_node);
                if (remote_id < timewait_info->work.remote_id)
                        node = node->rb_left;
                else if (remote_id > timewait_info->work.remote_id)
                        node = node->rb_right;
                else if (remote_ca_guid < timewait_info->remote_ca_guid)
                        node = node->rb_left;
                else if (remote_ca_guid > timewait_info->remote_ca_guid)
                        node = node->rb_right;
                else
                        return timewait_info;
        }
        return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
                                                      *timewait_info)
{
        struct rb_node **link = &cm.remote_qp_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_qpn = timewait_info->remote_qpn;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_qp_node);
                if (remote_qpn < cur_timewait_info->remote_qpn)
                        link = &(*link)->rb_left;
                else if (remote_qpn > cur_timewait_info->remote_qpn)
                        link = &(*link)->rb_right;
                else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_left;
                else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_qp = 1;
        rb_link_node(&timewait_info->remote_qp_node, parent, link);
        rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
        return NULL;
}

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
                                                    *cm_id_priv)
{
        struct rb_node **link = &cm.remote_sidr_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        union ib_gid *port_gid = &cm_id_priv->av.dgid;
        __be32 remote_id = cm_id_priv->id.remote_id;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          sidr_id_node);
                if (remote_id < cur_cm_id_priv->id.remote_id)
                        link = &(*link)->rb_left;
                else if (remote_id > cur_cm_id_priv->id.remote_id)
                        link = &(*link)->rb_right;
                else {
                        int cmp;
                        cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
                                     sizeof *port_gid);
                        if (cmp < 0)
                                link = &(*link)->rb_left;
                        else if (cmp > 0)
                                link = &(*link)->rb_right;
                        else
                                return cur_cm_id_priv;
                }
        }
        rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
        rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
        return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
                               enum ib_cm_sidr_status status)
{
        struct ib_cm_sidr_rep_param param;

        memset(&param, 0, sizeof param);
        param.status = status;
        ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
                                 ib_cm_handler cm_handler,
                                 void *context)
{
        struct cm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
        if (!cm_id_priv)
                return ERR_PTR(-ENOMEM);

        cm_id_priv->id.state = IB_CM_IDLE;
        cm_id_priv->id.device = device;
        cm_id_priv->id.cm_handler = cm_handler;
        cm_id_priv->id.context = context;
        cm_id_priv->id.remote_cm_qpn = 1;
        ret = cm_alloc_id(cm_id_priv);
        if (ret)
                goto error;

        spin_lock_init(&cm_id_priv->lock);
        init_waitqueue_head(&cm_id_priv->wait);
        INIT_LIST_HEAD(&cm_id_priv->work_list);
        atomic_set(&cm_id_priv->work_count, -1);
        atomic_set(&cm_id_priv->refcount, 1);
        return &cm_id_priv->id;

error:  kfree(cm_id_priv);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
        struct cm_work *work;

        if (list_empty(&cm_id_priv->work_list))
                return NULL;

        work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
        list_del(&work->list);
        return work;
}

static void cm_free_work(struct cm_work *work)
{
        if (work->mad_recv_wc)
                ib_free_recv_mad(work->mad_recv_wc);
        kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
        /* approximate conversion to ms from 4.096us x 2^iba_time */
        return 1 << max(iba_time - 8, 0);
}

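/*
 * Why 2^(t - 8): an IBA timeout value t encodes 4.096us * 2^t, and
 * 4.096us = 2^12 ns while 1ms is roughly 2^20 ns, so the result is
 * about 2^(t + 12 - 20) ms, clamped to a minimum of 1ms for small t.
 */
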
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
        unsigned long flags;

        if (!timewait_info->inserted_remote_id &&
            !timewait_info->inserted_remote_qp)
                return;

        spin_lock_irqsave(&cm.lock, flags);
        if (timewait_info->inserted_remote_id) {
                rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
                timewait_info->inserted_remote_id = 0;
        }

        if (timewait_info->inserted_remote_qp) {
                rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
                timewait_info->inserted_remote_qp = 0;
        }
        spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
        struct cm_timewait_info *timewait_info;

        timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
        if (!timewait_info)
                return ERR_PTR(-ENOMEM);

        timewait_info->work.local_id = local_id;
        INIT_WORK(&timewait_info->work.work, cm_work_handler,
                  &timewait_info->work);
        timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
        return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
        int wait_time;

        /*
         * The cm_id could be destroyed by the user before we exit timewait.
         * To protect against this, we search for the cm_id after exiting
         * timewait before notifying the user that we've exited timewait.
         */
        cm_id_priv->id.state = IB_CM_TIMEWAIT;
        wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
        queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
                           msecs_to_jiffies(wait_time));
        cm_id_priv->timewait_info = NULL;
}

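/*
 * Entering timewait hands the timewait_info over to the delayed work
 * item: once the ack-timeout-derived wait expires, cm_work_handler
 * delivers IB_CM_TIMEWAIT_EXIT.  The cm_id_priv pointer is cleared so
 * no other path frees the structure while it is queued.
 */
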
static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
        cm_id_priv->id.state = IB_CM_IDLE;
        if (cm_id_priv->timewait_info) {
                cm_cleanup_timewait(cm_id_priv->timewait_info);
                kfree(cm_id_priv->timewait_info);
                cm_id_priv->timewait_info = NULL;
        }
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
        struct cm_id_private *cm_id_priv;
        struct cm_work *work;
        unsigned long flags;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id->state) {
        case IB_CM_LISTEN:
                cm_id->state = IB_CM_IDLE;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                spin_lock_irqsave(&cm.lock, flags);
                rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
                spin_unlock_irqrestore(&cm.lock, flags);
                break;
        case IB_CM_SIDR_REQ_SENT:
                cm_id->state = IB_CM_IDLE;
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                break;
        case IB_CM_SIDR_REQ_RCVD:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
                break;
        case IB_CM_REQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
                               &cm_id_priv->av.port->cm_dev->ca_guid,
                               sizeof cm_id_priv->av.port->cm_dev->ca_guid,
                               NULL, 0);
                break;
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                /* Fall through */
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                               NULL, 0, NULL, 0);
                break;
        case IB_CM_ESTABLISHED:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_dreq(cm_id, NULL, 0);
                goto retest;
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                cm_enter_timewait(cm_id_priv);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                break;
        case IB_CM_DREQ_RCVD:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_drep(cm_id, NULL, 0);
                break;
        default:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                break;
        }

        cm_free_id(cm_id->local_id);
        atomic_dec(&cm_id_priv->refcount);
        wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount));
        while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
                cm_free_work(work);
        if (cm_id_priv->private_data && cm_id_priv->private_data_len)
                kfree(cm_id_priv->private_data);
        kfree(cm_id_priv);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

int ib_cm_listen(struct ib_cm_id *cm_id,
                 __be64 service_id,
                 __be64 service_mask)
{
        struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
        unsigned long flags;
        int ret = 0;

        service_mask = service_mask ? service_mask :
                       __constant_cpu_to_be64(~0ULL);
        service_id &= service_mask;
        if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
            (service_id != IB_CM_ASSIGN_SERVICE_ID))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        BUG_ON(cm_id->state != IB_CM_IDLE);

        cm_id->state = IB_CM_LISTEN;

        spin_lock_irqsave(&cm.lock, flags);
        if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
                cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
                cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
        } else {
                cm_id->service_id = service_id;
                cm_id->service_mask = service_mask;
        }
        cur_cm_id_priv = cm_insert_listen(cm_id_priv);
        spin_unlock_irqrestore(&cm.lock, flags);

        if (cur_cm_id_priv) {
                cm_id->state = IB_CM_IDLE;
                ret = -EBUSY;
        }
        return ret;
}
EXPORT_SYMBOL(ib_cm_listen);

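/*
 * Minimal listen-side usage sketch (illustrative only; "my_handler",
 * "my_context" and MY_SERVICE_ID are placeholders, not part of this
 * file):
 *
 *      struct ib_cm_id *id;
 *      int ret;
 *
 *      id = ib_create_cm_id(device, my_handler, my_context);
 *      if (IS_ERR(id))
 *              return PTR_ERR(id);
 *      ret = ib_cm_listen(id, MY_SERVICE_ID, 0);
 *      if (ret)
 *              ib_destroy_cm_id(id);
 */
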
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
                          enum cm_msg_sequence msg_seq)
{
        u64 hi_tid, low_tid;

        hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
        low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
                          (msg_seq << 30));
        return cpu_to_be64(hi_tid | low_tid);
}

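/*
 * TID layout: the upper 32 bits carry the MAD agent's hi_tid so replies
 * route back to this agent; the lower 32 bits combine the local
 * communication ID with the message sequence in bits 30-31, keeping
 * concurrent exchanges on one cm_id distinct.
 */
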
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
                              __be16 attr_id, __be64 tid)
{
        hdr->base_version  = IB_MGMT_BASE_VERSION;
        hdr->mgmt_class    = IB_MGMT_CLASS_CM;
        hdr->class_version = IB_CM_CLASS_VERSION;
        hdr->method        = IB_MGMT_METHOD_SEND;
        hdr->attr_id       = attr_id;
        hdr->tid           = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_cm_req_param *param)
{
        cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

        req_msg->local_comm_id = cm_id_priv->id.local_id;
        req_msg->service_id = param->service_id;
        req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
        cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
        cm_req_set_resp_res(req_msg, param->responder_resources);
        cm_req_set_init_depth(req_msg, param->initiator_depth);
        cm_req_set_remote_resp_timeout(req_msg,
                                       param->remote_cm_response_timeout);
        cm_req_set_qp_type(req_msg, param->qp_type);
        cm_req_set_flow_ctrl(req_msg, param->flow_control);
        cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
        cm_req_set_local_resp_timeout(req_msg,
                                      param->local_cm_response_timeout);
        cm_req_set_retry_count(req_msg, param->retry_count);
        req_msg->pkey = param->primary_path->pkey;
        cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
        cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
        cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
        cm_req_set_srq(req_msg, param->srq);

        req_msg->primary_local_lid = param->primary_path->slid;
        req_msg->primary_remote_lid = param->primary_path->dlid;
        req_msg->primary_local_gid = param->primary_path->sgid;
        req_msg->primary_remote_gid = param->primary_path->dgid;
        cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
        cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
        req_msg->primary_traffic_class = param->primary_path->traffic_class;
        req_msg->primary_hop_limit = param->primary_path->hop_limit;
        cm_req_set_primary_sl(req_msg, param->primary_path->sl);
        cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
        cm_req_set_primary_local_ack_timeout(req_msg,
                min(31, param->primary_path->packet_life_time + 1));

        if (param->alternate_path) {
                req_msg->alt_local_lid = param->alternate_path->slid;
                req_msg->alt_remote_lid = param->alternate_path->dlid;
                req_msg->alt_local_gid = param->alternate_path->sgid;
                req_msg->alt_remote_gid = param->alternate_path->dgid;
                cm_req_set_alt_flow_label(req_msg,
                                          param->alternate_path->flow_label);
                cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
                req_msg->alt_traffic_class = param->alternate_path->traffic_class;
                req_msg->alt_hop_limit = param->alternate_path->hop_limit;
                cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
                cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
                cm_req_set_alt_local_ack_timeout(req_msg,
                        min(31, param->alternate_path->packet_life_time + 1));
        }

        if (param->private_data && param->private_data_len)
                memcpy(req_msg->private_data, param->private_data,
                       param->private_data_len);
}

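/*
 * Note on the local ack timeout above: packet_life_time + 1 doubles the
 * one-way packet lifetime to cover a round trip, and min(31, ...) clamps
 * the result to the 5-bit field defined for it in the REQ.
 */
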
static inline int cm_validate_req_param(struct ib_cm_req_param *param)
{
        /* peer-to-peer not supported */
        if (param->peer_to_peer)
                return -EINVAL;

        if (!param->primary_path)
                return -EINVAL;

        if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
                return -EINVAL;

        if (param->private_data &&
            param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
                return -EINVAL;

        if (param->alternate_path &&
            (param->alternate_path->pkey != param->primary_path->pkey ||
             param->alternate_path->mtu != param->primary_path->mtu))
                return -EINVAL;

        return 0;
}

*cm_id
,
884 struct ib_cm_req_param
*param
)
886 struct cm_id_private
*cm_id_priv
;
887 struct cm_req_msg
*req_msg
;
891 ret
= cm_validate_req_param(param
);
895 /* Verify that we're not in timewait. */
896 cm_id_priv
= container_of(cm_id
, struct cm_id_private
, id
);
897 spin_lock_irqsave(&cm_id_priv
->lock
, flags
);
898 if (cm_id
->state
!= IB_CM_IDLE
) {
899 spin_unlock_irqrestore(&cm_id_priv
->lock
, flags
);
903 spin_unlock_irqrestore(&cm_id_priv
->lock
, flags
);
905 cm_id_priv
->timewait_info
= cm_create_timewait_info(cm_id_priv
->
907 if (IS_ERR(cm_id_priv
->timewait_info
))
910 ret
= cm_init_av_by_path(param
->primary_path
, &cm_id_priv
->av
);
913 if (param
->alternate_path
) {
914 ret
= cm_init_av_by_path(param
->alternate_path
,
915 &cm_id_priv
->alt_av
);
919 cm_id
->service_id
= param
->service_id
;
920 cm_id
->service_mask
= __constant_cpu_to_be64(~0ULL);
921 cm_id_priv
->timeout_ms
= cm_convert_to_ms(
922 param
->primary_path
->packet_life_time
) * 2 +
924 param
->remote_cm_response_timeout
);
925 cm_id_priv
->max_cm_retries
= param
->max_cm_retries
;
926 cm_id_priv
->initiator_depth
= param
->initiator_depth
;
927 cm_id_priv
->responder_resources
= param
->responder_resources
;
928 cm_id_priv
->retry_count
= param
->retry_count
;
929 cm_id_priv
->path_mtu
= param
->primary_path
->mtu
;
930 cm_id_priv
->qp_type
= param
->qp_type
;
932 ret
= cm_alloc_msg(cm_id_priv
, &cm_id_priv
->msg
);
936 req_msg
= (struct cm_req_msg
*) cm_id_priv
->msg
->mad
;
937 cm_format_req(req_msg
, cm_id_priv
, param
);
938 cm_id_priv
->tid
= req_msg
->hdr
.tid
;
939 cm_id_priv
->msg
->timeout_ms
= cm_id_priv
->timeout_ms
;
940 cm_id_priv
->msg
->context
[1] = (void *) (unsigned long) IB_CM_REQ_SENT
;
942 cm_id_priv
->local_qpn
= cm_req_get_local_qpn(req_msg
);
943 cm_id_priv
->rq_psn
= cm_req_get_starting_psn(req_msg
);
944 cm_id_priv
->local_ack_timeout
=
945 cm_req_get_primary_local_ack_timeout(req_msg
);
947 spin_lock_irqsave(&cm_id_priv
->lock
, flags
);
948 ret
= ib_post_send_mad(cm_id_priv
->msg
, NULL
);
950 spin_unlock_irqrestore(&cm_id_priv
->lock
, flags
);
953 BUG_ON(cm_id
->state
!= IB_CM_IDLE
);
954 cm_id
->state
= IB_CM_REQ_SENT
;
955 spin_unlock_irqrestore(&cm_id_priv
->lock
, flags
);
958 error2
: cm_free_msg(cm_id_priv
->msg
);
959 error1
: kfree(cm_id_priv
->timewait_info
);
962 EXPORT_SYMBOL(ib_send_cm_req
);
static int cm_issue_rej(struct cm_port *port,
                        struct ib_mad_recv_wc *mad_recv_wc,
                        enum ib_cm_rej_reason reason,
                        enum cm_msg_response msg_rejected,
                        void *ari, u8 ari_length)
{
        struct ib_mad_send_buf *msg = NULL;
        struct cm_rej_msg *rej_msg, *rcv_msg;
        int ret;

        ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
        if (ret)
                return ret;

        /* We just need common CM header information.  Cast to any message. */
        rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
        rej_msg = (struct cm_rej_msg *) msg->mad;

        cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
        rej_msg->remote_comm_id = rcv_msg->local_comm_id;
        rej_msg->local_comm_id = rcv_msg->remote_comm_id;
        cm_rej_set_msg_rejected(rej_msg, msg_rejected);
        rej_msg->reason = cpu_to_be16(reason);

        if (ari && ari_length) {
                cm_rej_set_reject_info_len(rej_msg, ari_length);
                memcpy(rej_msg->ari, ari, ari_length);
        }

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                cm_free_msg(msg);

        return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
                                    __be32 local_qpn, __be32 remote_qpn)
{
        return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
                ((local_ca_guid == remote_ca_guid) &&
                 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

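/*
 * Would arbitrate peer-to-peer connections: if both sides issue a REQ,
 * the side with the numerically larger CA GUID (or larger QPN on a GUID
 * tie) acts as the active peer.  Peer-to-peer is still todo; see
 * cm_validate_req_param().
 */
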
static inline void cm_format_paths_from_req(struct cm_req_msg *req_msg,
                                            struct ib_sa_path_rec *primary_path,
                                            struct ib_sa_path_rec *alt_path)
{
        memset(primary_path, 0, sizeof *primary_path);
        primary_path->dgid = req_msg->primary_local_gid;
        primary_path->sgid = req_msg->primary_remote_gid;
        primary_path->dlid = req_msg->primary_local_lid;
        primary_path->slid = req_msg->primary_remote_lid;
        primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
        primary_path->hop_limit = req_msg->primary_hop_limit;
        primary_path->traffic_class = req_msg->primary_traffic_class;
        primary_path->reversible = 1;
        primary_path->pkey = req_msg->pkey;
        primary_path->sl = cm_req_get_primary_sl(req_msg);
        primary_path->mtu_selector = IB_SA_EQ;
        primary_path->mtu = cm_req_get_path_mtu(req_msg);
        primary_path->rate_selector = IB_SA_EQ;
        primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
        primary_path->packet_life_time_selector = IB_SA_EQ;
        primary_path->packet_life_time =
                cm_req_get_primary_local_ack_timeout(req_msg);
        primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

        if (req_msg->alt_local_lid) {
                memset(alt_path, 0, sizeof *alt_path);
                alt_path->dgid = req_msg->alt_local_gid;
                alt_path->sgid = req_msg->alt_remote_gid;
                alt_path->dlid = req_msg->alt_local_lid;
                alt_path->slid = req_msg->alt_remote_lid;
                alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
                alt_path->hop_limit = req_msg->alt_hop_limit;
                alt_path->traffic_class = req_msg->alt_traffic_class;
                alt_path->reversible = 1;
                alt_path->pkey = req_msg->pkey;
                alt_path->sl = cm_req_get_alt_sl(req_msg);
                alt_path->mtu_selector = IB_SA_EQ;
                alt_path->mtu = cm_req_get_path_mtu(req_msg);
                alt_path->rate_selector = IB_SA_EQ;
                alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
                alt_path->packet_life_time_selector = IB_SA_EQ;
                alt_path->packet_life_time =
                        cm_req_get_alt_local_ack_timeout(req_msg);
                alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
        }
}

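/*
 * The paths above are rebuilt from the passive side's point of view, so
 * every local/remote and source/destination field from the REQ is
 * swapped.  The trailing decrement undoes the "+1 for a round trip"
 * that cm_format_req applied to packet_life_time.
 */
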
static void cm_format_req_event(struct cm_work *work,
                                struct cm_id_private *cm_id_priv,
                                struct ib_cm_id *listen_id)
{
        struct cm_req_msg *req_msg;
        struct ib_cm_req_event_param *param;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.req_rcvd;
        param->listen_id = listen_id;
        param->port = cm_id_priv->av.port->port_num;
        param->primary_path = &work->path[0];
        if (req_msg->alt_local_lid)
                param->alternate_path = &work->path[1];
        else
                param->alternate_path = NULL;
        param->remote_ca_guid = req_msg->local_ca_guid;
        param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
        param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
        param->qp_type = cm_req_get_qp_type(req_msg);
        param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
        param->responder_resources = cm_req_get_init_depth(req_msg);
        param->initiator_depth = cm_req_get_resp_res(req_msg);
        param->local_cm_response_timeout =
                                        cm_req_get_remote_resp_timeout(req_msg);
        param->flow_control = cm_req_get_flow_ctrl(req_msg);
        param->remote_cm_response_timeout =
                                        cm_req_get_local_resp_timeout(req_msg);
        param->retry_count = cm_req_get_retry_count(req_msg);
        param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
        param->srq = cm_req_get_srq(req_msg);
        work->cm_event.private_data = &req_msg->private_data;
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
                            struct cm_work *work)
{
        unsigned long flags;
        int ret;

        /* We will typically only have the current event to report. */
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
        cm_free_work(work);

        while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                work = cm_dequeue_work(cm_id_priv);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                BUG_ON(!work);
                ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
                                                &work->cm_event);
                cm_free_work(work);
        }
        cm_deref_id(cm_id_priv);
        if (ret)
                ib_destroy_cm_id(&cm_id_priv->id);
}

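/*
 * work_count starts at -1.  A MAD handler that finds the cm_id busy
 * increments the count and queues the work; cm_process_work then keeps
 * draining the queue (atomic_add_negative) until the count falls back
 * below zero, so events for one cm_id are always delivered serially.
 */
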
static void cm_format_mra(struct cm_mra_msg *mra_msg,
                          struct cm_id_private *cm_id_priv,
                          enum cm_msg_response msg_mraed, u8 service_timeout,
                          const void *private_data, u8 private_data_len)
{
        cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
        cm_mra_set_msg_mraed(mra_msg, msg_mraed);
        mra_msg->local_comm_id = cm_id_priv->id.local_id;
        mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_mra_set_service_timeout(mra_msg, service_timeout);

        if (private_data && private_data_len)
                memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
                          struct cm_id_private *cm_id_priv,
                          enum ib_cm_rej_reason reason,
                          void *ari,
                          u8 ari_length,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
        rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

        switch(cm_id_priv->id.state) {
        case IB_CM_REQ_RCVD:
                rej_msg->local_comm_id = 0;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
                break;
        case IB_CM_MRA_REQ_SENT:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
                break;
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
                break;
        default:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
                break;
        }

        rej_msg->reason = cpu_to_be16(reason);
        if (ari && ari_length) {
                cm_rej_set_reject_info_len(rej_msg, ari_length);
                memcpy(rej_msg->ari, ari, ari_length);
        }

        if (private_data && private_data_len)
                memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
                               struct cm_id_private *cm_id_priv)
{
        struct ib_mad_send_buf *msg = NULL;
        unsigned long flags;
        int ret;

        /* Quick state check to discard duplicate REQs. */
        if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
                return;

        ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
        if (ret)
                return;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_MRA_REQ_SENT:
                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
                break;
        case IB_CM_TIMEWAIT:
                cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
                              IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
                break;
        default:
                goto unlock;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                goto free;
        return;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:   cm_free_msg(msg);
}

static struct cm_id_private * cm_match_req(struct cm_work *work,
                                           struct cm_id_private *cm_id_priv)
{
        struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
        struct cm_timewait_info *timewait_info;
        struct cm_req_msg *req_msg;
        unsigned long flags;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

        /* Check for duplicate REQ and stale connections. */
        spin_lock_irqsave(&cm.lock, flags);
        timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
        if (!timewait_info)
                timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

        if (timewait_info) {
                cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
                                           timewait_info->work.remote_id);
                spin_unlock_irqrestore(&cm.lock, flags);
                if (cur_cm_id_priv) {
                        cm_dup_req_handler(work, cur_cm_id_priv);
                        cm_deref_id(cur_cm_id_priv);
                } else
                        cm_issue_rej(work->port, work->mad_recv_wc,
                                     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
                                     NULL, 0);
                goto error;
        }

        /* Find matching listen request. */
        listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
                                           req_msg->service_id);
        if (!listen_cm_id_priv) {
                spin_unlock_irqrestore(&cm.lock, flags);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
                             NULL, 0);
                goto error;
        }
        atomic_inc(&listen_cm_id_priv->refcount);
        atomic_inc(&cm_id_priv->refcount);
        cm_id_priv->id.state = IB_CM_REQ_RCVD;
        atomic_inc(&cm_id_priv->work_count);
        spin_unlock_irqrestore(&cm.lock, flags);
        return listen_cm_id_priv;

error:  cm_cleanup_timewait(cm_id_priv->timewait_info);
        return NULL;
}

static int cm_req_handler(struct cm_work *work)
{
        struct ib_cm_id *cm_id;
        struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
        struct cm_req_msg *req_msg;
        int ret;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

        cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
        if (IS_ERR(cm_id))
                return PTR_ERR(cm_id);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        cm_id_priv->id.remote_id = req_msg->local_comm_id;
        cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
                                &cm_id_priv->av);
        cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                ret = PTR_ERR(cm_id_priv->timewait_info);
                goto error1;
        }
        cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
        cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
        cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

        listen_cm_id_priv = cm_match_req(work, cm_id_priv);
        if (!listen_cm_id_priv) {
                ret = -EINVAL;
                goto error2;
        }

        cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
        cm_id_priv->id.context = listen_cm_id_priv->id.context;
        cm_id_priv->id.service_id = req_msg->service_id;
        cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

        cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
        ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
        if (ret)
                goto error3;
        if (req_msg->alt_local_lid) {
                ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
                if (ret)
                        goto error3;
        }
        cm_id_priv->tid = req_msg->hdr.tid;
        cm_id_priv->timeout_ms = cm_convert_to_ms(
                                        cm_req_get_local_resp_timeout(req_msg));
        cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
        cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
        cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
        cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
        cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
        cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
        cm_id_priv->local_ack_timeout =
                                cm_req_get_primary_local_ack_timeout(req_msg);
        cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
        cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
        cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

        cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
        cm_process_work(cm_id_priv, work);
        cm_deref_id(listen_cm_id_priv);
        return 0;

error3: atomic_dec(&cm_id_priv->refcount);
        cm_deref_id(listen_cm_id_priv);
        cm_cleanup_timewait(cm_id_priv->timewait_info);
error2: kfree(cm_id_priv->timewait_info);
        cm_id_priv->timewait_info = NULL;
error1: ib_destroy_cm_id(&cm_id_priv->id);
        return ret;
}

static void cm_format_rep(struct cm_rep_msg *rep_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_cm_rep_param *param)
{
        cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
        rep_msg->local_comm_id = cm_id_priv->id.local_id;
        rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
        cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
        rep_msg->resp_resources = param->responder_resources;
        rep_msg->initiator_depth = param->initiator_depth;
        cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
        cm_rep_set_failover(rep_msg, param->failover_accepted);
        cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
        cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
        cm_rep_set_srq(rep_msg, param->srq);
        rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;

        if (param->private_data && param->private_data_len)
                memcpy(rep_msg->private_data, param->private_data,
                       param->private_data_len);
}

int ib_send_cm_rep(struct ib_cm_id *cm_id,
                   struct ib_cm_rep_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        struct cm_rep_msg *rep_msg;
        unsigned long flags;
        int ret;

        if (param->private_data &&
            param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_REQ_RCVD &&
            cm_id->state != IB_CM_MRA_REQ_SENT) {
                ret = -EINVAL;
                goto out;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        rep_msg = (struct cm_rep_msg *) msg->mad;
        cm_format_rep(rep_msg, cm_id_priv, param);
        msg->timeout_ms = cm_id_priv->timeout_ms;
        msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

        cm_id->state = IB_CM_REP_SENT;
        cm_id_priv->msg = msg;
        cm_id_priv->initiator_depth = param->initiator_depth;
        cm_id_priv->responder_resources = param->responder_resources;
        cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
        cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
                          struct cm_id_private *cm_id_priv,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
        rtu_msg->local_comm_id = cm_id_priv->id.local_id;
        rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

        if (private_data && private_data_len)
                memcpy(rtu_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        void *data;
        int ret;

        if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
                return -EINVAL;

        data = cm_copy_private_data(private_data, private_data_len);
        if (IS_ERR(data))
                return PTR_ERR(data);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_REP_RCVD &&
            cm_id->state != IB_CM_MRA_REP_SENT) {
                ret = -EINVAL;
                goto error;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto error;

        cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
                      private_data, private_data_len);

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                kfree(data);
                return ret;
        }

        cm_id->state = IB_CM_ESTABLISHED;
        cm_set_private_data(cm_id_priv, data, private_data_len);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return 0;

error:  spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        kfree(data);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

static void cm_format_rep_event(struct cm_work *work)
{
        struct cm_rep_msg *rep_msg;
        struct ib_cm_rep_event_param *param;

        rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.rep_rcvd;
        param->remote_ca_guid = rep_msg->local_ca_guid;
        param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
        param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
        param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
        param->responder_resources = rep_msg->initiator_depth;
        param->initiator_depth = rep_msg->resp_resources;
        param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
        param->failover_accepted = cm_rep_get_failover(rep_msg);
        param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
        param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
        param->srq = cm_rep_get_srq(rep_msg);
        work->cm_event.private_data = &rep_msg->private_data;
}

static void cm_dup_rep_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rep_msg *rep_msg;
        struct ib_mad_send_buf *msg = NULL;
        unsigned long flags;
        int ret;

        rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
                                   rep_msg->local_comm_id);
        if (!cm_id_priv)
                return;

        ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
        if (ret)
                goto deref;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
                cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
        else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
        else
                goto unlock;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                goto free;
        goto deref;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:   cm_free_msg(msg);
deref:  cm_deref_id(cm_id_priv);
}

static int cm_rep_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rep_msg *rep_msg;
        unsigned long flags;
        int ret;

        rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
        if (!cm_id_priv) {
                cm_dup_rep_handler(work);
                return -EINVAL;
        }

        cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
        cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
        cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

        spin_lock_irqsave(&cm.lock, flags);
        /* Check for duplicate REP. */
        if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
                spin_unlock_irqrestore(&cm.lock, flags);
                ret = -EINVAL;
                goto error;
        }
        /* Check for a stale connection. */
        if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
                spin_unlock_irqrestore(&cm.lock, flags);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
                             NULL, 0);
                ret = -EINVAL;
                goto error;
        }
        spin_unlock_irqrestore(&cm.lock, flags);

        cm_format_rep_event(work);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
                break;
        default:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto error;
        }
        cm_id_priv->id.state = IB_CM_REP_RCVD;
        cm_id_priv->id.remote_id = rep_msg->local_comm_id;
        cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
        cm_id_priv->initiator_depth = rep_msg->resp_resources;
        cm_id_priv->responder_resources = rep_msg->initiator_depth;
        cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
        cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);

        /* todo: handle peer_to_peer */

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;

error:  cm_cleanup_timewait(cm_id_priv->timewait_info);
        cm_deref_id(cm_id_priv);
        return ret;
}

static int cm_establish_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        /* See comment in ib_cm_establish about lookup. */
        cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
        if (!cm_id_priv)
                return -EINVAL;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}

static int cm_rtu_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rtu_msg *rtu_msg;
        unsigned long flags;
        int ret;

        rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
                                   rtu_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL;

        work->cm_event.private_data = &rtu_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_REP_SENT &&
            cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }
        cm_id_priv->id.state = IB_CM_ESTABLISHED;

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}

static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
                           struct cm_id_private *cm_id_priv,
                           const void *private_data,
                           u8 private_data_len)
{
        cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
        dreq_msg->local_comm_id = cm_id_priv->id.local_id;
        dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

        if (private_data && private_data_len)
                memcpy(dreq_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_dreq(struct ib_cm_id *cm_id,
                    const void *private_data,
                    u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_ESTABLISHED) {
                ret = -EINVAL;
                goto out;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret) {
                cm_enter_timewait(cm_id_priv);
                goto out;
        }

        cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
                       private_data, private_data_len);
        msg->timeout_ms = cm_id_priv->timeout_ms;
        msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                cm_enter_timewait(cm_id_priv);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

        cm_id->state = IB_CM_DREQ_SENT;
        cm_id_priv->msg = msg;
out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);

static void cm_format_drep(struct cm_drep_msg *drep_msg,
                           struct cm_id_private *cm_id_priv,
                           const void *private_data,
                           u8 private_data_len)
{
        cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
        drep_msg->local_comm_id = cm_id_priv->id.local_id;
        drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

        if (private_data && private_data_len)
                memcpy(drep_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_drep(struct ib_cm_id *cm_id,
                    const void *private_data,
                    u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        void *data;
        int ret;

        if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
                return -EINVAL;

        data = cm_copy_private_data(private_data, private_data_len);
        if (IS_ERR(data))
                return PTR_ERR(data);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_DREQ_RCVD) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                kfree(data);
                return -EINVAL;
        }

        cm_set_private_data(cm_id_priv, data, private_data_len);
        cm_enter_timewait(cm_id_priv);

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
                       private_data, private_data_len);

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);

static int cm_dreq_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_dreq_msg *dreq_msg;
        struct ib_mad_send_buf *msg = NULL;
        unsigned long flags;
        int ret;

        dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
                                   dreq_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL;

        work->cm_event.private_data = &dreq_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
                goto unlock;

        switch (cm_id_priv->id.state) {
        case IB_CM_REP_SENT:
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                break;
        case IB_CM_ESTABLISHED:
        case IB_CM_MRA_REP_RCVD:
                break;
        case IB_CM_TIMEWAIT:
                if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
                        goto unlock;

                cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
                               cm_id_priv->private_data,
                               cm_id_priv->private_data_len);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);

                if (ib_post_send_mad(msg, NULL))
                        cm_free_msg(msg);
                goto deref;
        default:
                goto unlock;
        }
        cm_id_priv->id.state = IB_CM_DREQ_RCVD;
        cm_id_priv->tid = dreq_msg->hdr.tid;
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:  cm_deref_id(cm_id_priv);
        return -EINVAL;
}

static int cm_drep_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_drep_msg *drep_msg;
        unsigned long flags;
        int ret;

        drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
                                   drep_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL;

        work->cm_event.private_data = &drep_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
            cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }
        cm_enter_timewait(cm_id_priv);

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}

int ib_send_cm_rej(struct ib_cm_id *cm_id,
                   enum ib_cm_rej_reason reason,
                   void *ari,
                   u8 ari_length,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
            (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id->state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                ret = cm_alloc_msg(cm_id_priv, &msg);
                if (!ret)
                        cm_format_rej((struct cm_rej_msg *) msg->mad,
                                      cm_id_priv, reason, ari, ari_length,
                                      private_data, private_data_len);

                cm_reset_to_idle(cm_id_priv);
                break;
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ret = cm_alloc_msg(cm_id_priv, &msg);
                if (!ret)
                        cm_format_rej((struct cm_rej_msg *) msg->mad,
                                      cm_id_priv, reason, ari, ari_length,
                                      private_data, private_data_len);

                cm_enter_timewait(cm_id_priv);
                break;
        default:
                ret = -EINVAL;
                goto out;
        }

        if (ret)
                goto out;

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                cm_free_msg(msg);

out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);

static void cm_format_rej_event(struct cm_work *work)
{
        struct cm_rej_msg *rej_msg;
        struct ib_cm_rej_event_param *param;

        rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.rej_rcvd;
        param->ari = rej_msg->ari;
        param->ari_length = cm_rej_get_reject_info_len(rej_msg);
        param->reason = __be16_to_cpu(rej_msg->reason);
        work->cm_event.private_data = &rej_msg->private_data;
}


static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irqsave(&cm.lock, flags);
		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
						  remote_id);
		if (!timewait_info) {
			spin_unlock_irqrestore(&cm.lock, flags);
			return NULL;
		}
		cm_id_priv = idr_find(&cm.local_id_table,
				      (__force int) timewait_info->work.local_id);
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irqrestore(&cm.lock, flags);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return cm_id_priv;
}

static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	unsigned long flags;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_ESTABLISHED:
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return ret;
}

int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REQ_SENT;
		break;
	case IB_CM_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REP_SENT;
		break;
	case IB_CM_ESTABLISHED:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
		break;
	default:
		ret = -EINVAL;
		goto error1;
	}
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);
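
/*
 * Illustrative sketch (hypothetical handler fragment): a receiver that
 * needs more time before replying to a REQ can stretch the peer's timeout
 * with an MRA.  service_timeout is the 5-bit IBTA code, encoding
 * 4.096us * 2^service_timeout, so 21 is roughly 8 seconds.
 *
 *	if (event->event == IB_CM_REQ_RECEIVED && my_setup_in_progress())
 *		return ib_send_cm_mra(cm_id, 21, NULL, 0);
 */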

static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}

static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	unsigned long flags;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
	cm_lap_set_local_ack_timeout(lap_msg,
		min(31, alternate_path->packet_life_time + 1));

	if (private_data && private_data_len)
		memcpy(lap_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    cm_id->lap_state != IB_CM_LAP_IDLE) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);
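
/*
 * Illustrative sketch: once a connection is established, a consumer can
 * arm path migration by proposing an alternate path record (obtained, for
 * example, from an SA path query).  alt_path here is a hypothetical, fully
 * populated ib_sa_path_rec; the peer answers with an APR, possibly
 * preceded by an MRA.
 *
 *	struct ib_sa_path_rec alt_path;
 *	...
 *	ret = ib_send_cm_lap(cm_id, &alt_path, NULL, 0);
 *
 * The call only succeeds while the id is IB_CM_ESTABLISHED with lap_state
 * IB_CM_LAP_IDLE.
 */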

static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	memset(path, 0, sizeof *path);
	path->dgid = lap_msg->alt_local_gid;
	path->sgid = lap_msg->alt_remote_gid;
	path->dlid = lap_msg->alt_local_lid;
	path->slid = lap_msg->alt_remote_lid;
	path->flow_label = cm_lap_get_flow_label(lap_msg);
	path->hop_limit = lap_msg->alt_hop_limit;
	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
	path->reversible = 1;
	/* pkey is same as in REQ */
	path->sl = cm_lap_get_sl(lap_msg);
	path->mtu_selector = IB_SA_EQ;
	/* mtu is same as in REQ */
	path->rate_selector = IB_SA_EQ;
	path->rate = cm_lap_get_packet_rate(lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
}

static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
				   lap_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(param->alternate_path, lap_msg);
	work->cm_event.private_data = &lap_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_apr(struct cm_apr_msg *apr_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_apr_status status,
			  void *info,
			  u8 info_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
	apr_msg->local_comm_id = cm_id_priv->id.local_id;
	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
	apr_msg->ap_status = (u8) status;

	if (info && info_length) {
		apr_msg->info_length = info_length;
		memcpy(apr_msg->info, info, info_length);
	}

	if (private_data && private_data_len)
		memcpy(apr_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_IDLE;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);
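
/*
 * Illustrative sketch (hypothetical handler fragment): the passive side of
 * a LAP exchange accepts the proposed alternate path by answering the
 * IB_CM_LAP_RECEIVED event with an APR carrying IB_CM_APR_SUCCESS.
 *
 *	case IB_CM_LAP_RECEIVED:
 *		ret = ib_send_cm_apr(cm_id, IB_CM_APR_SUCCESS,
 *				     NULL, 0, NULL, 0);
 *		break;
 */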

static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	unsigned long flags;
	int ret;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
				   apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
	work->cm_event.private_data = &apr_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_id_priv->msg = NULL;

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	timewait_info = (struct cm_timewait_info *)work;
	cm_cleanup_timewait(timewait_info);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
	sidr_req_msg->pkey = cpu_to_be16(param->pkey);
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
		memcpy(sidr_req_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(msg, NULL);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);
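
/*
 * Illustrative sketch: SIDR resolves a service ID to a remote QPN and
 * Q_Key without building a connection.  The field values below are
 * hypothetical; the answer arrives at the cm_handler as an
 * IB_CM_SIDR_REP_RECEIVED event.
 *
 *	struct ib_cm_sidr_req_param param = {
 *		.path		= &path_rec,
 *		.service_id	= my_service_id,
 *		.timeout_ms	= 1000,
 *		.max_cm_retries	= 3,
 *	};
 *	ret = ib_send_cm_sidr_req(cm_id, &param);
 */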

static void cm_format_sidr_req_event(struct cm_work *work,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
	param->listen_id = listen_id;
	param->port = work->port->port_num;
	work->cm_event.private_data = &sidr_req_msg->private_data;
}

static int cm_sidr_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	unsigned long flags;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				&cm_id_priv->av);
	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);

	spin_lock_irqsave(&cm.lock, flags);
	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (cur_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out; /* Duplicate message. */
	}
	cur_cm_id_priv = cm_find_listen(cm_id->device,
					sidr_req_msg->service_id);
	if (!cur_cm_id_priv) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		/* todo: reply with no match */
		goto out; /* No match. */
	}
	atomic_inc(&cur_cm_id_priv->refcount);
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(cur_cm_id_priv);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}

static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
	sidr_rep_msg->status = param->status;
	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

	if (param->info && param->info_length)
		memcpy(sidr_rep_msg->info, param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		memcpy(sidr_rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);

static void cm_format_sidr_rep_event(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = sidr_rep_msg->status;
	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
	param->info = &sidr_rep_msg->info;
	param->info_len = sidr_rep_msg->info_length;
	work->cm_event.private_data = &sidr_rep_msg->private_data;
}

static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	unsigned long flags;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_free_msg(msg);
}

static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}

static void cm_work_handler(void *data)
{
	struct cm_work *work = data;
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}

int ib_cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state)
	{
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_WORK(&work->work, cm_work_handler, work);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;
	queue_work(cm.wq, &work->work);
out:
	return ret;
}
EXPORT_SYMBOL(ib_cm_establish);
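
/*
 * Illustrative sketch: a passive side that sees data arrive on its QP
 * before the RTU shows up may declare the connection live itself; the
 * IB_CM_USER_ESTABLISHED event is then delivered from the workqueue.
 * rtu_is_late is a hypothetical condition, e.g. a receive completion
 * observed while the id is still in IB_CM_REP_SENT.
 *
 *	if (rtu_is_late)
 *		ret = ib_cm_establish(cm_id);
 */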

static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_work *work;
	enum ib_cm_event_type event;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_WORK(&work->work, cm_work_handler, work);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = (struct cm_port *)mad_agent->context;
	queue_work(cm.wq, &work->work);
}

static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
					   IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
					 IB_QP_RNR_RETRY |
					 IB_QP_MAX_QP_RD_ATOMIC;
			qp_attr->timeout = cm_id_priv->local_ack_timeout;
			qp_attr->retry_cnt = cm_id_priv->retry_count;
			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
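
/*
 * Illustrative sketch (hypothetical helper): a consumer drives its QP
 * through INIT, RTR and RTS by asking the CM for the attributes it
 * negotiated and handing them to ib_modify_qp().  The returned mask
 * already includes IB_QP_STATE, so the caller only sets qp_state.
 *
 *	static int my_modify_qp(struct ib_cm_id *cm_id, struct ib_qp *qp,
 *				enum ib_qp_state state)
 *	{
 *		struct ib_qp_attr attr;
 *		int mask, ret;
 *
 *		memset(&attr, 0, sizeof attr);
 *		attr.qp_state = state;
 *		ret = ib_cm_init_qp_attr(cm_id, &attr, &mask);
 *		if (ret)
 *			return ret;
 *		return ib_modify_qp(qp, &attr, mask);
 *	}
 */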

static void cm_add_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
			 device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->device = device;
	cm_dev->ca_guid = device->node_guid;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		port->cm_dev = cm_dev;
		port->port_num = i;
		port->mad_agent = ib_register_mad_agent(device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port);
		if (IS_ERR(port->mad_agent))
			goto error1;

		ret = ib_modify_port(device, i, 0, &port_modify);
		if (ret)
			goto error2;
	}
	ib_set_client_data(device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error2:
	ib_unregister_mad_agent(port->mad_agent);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}

static void cm_remove_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	cm_dev = ib_get_client_data(device, &cm_client);
	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}

static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	idr_pre_get(&cm.local_id_table, GFP_KERNEL);

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq)
		return -ENOMEM;

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error;

	return 0;
error:
	destroy_workqueue(cm.wq);
	return ret;
}

static void __exit ib_cm_cleanup(void)
{
	flush_workqueue(cm.wq);
	destroy_workqueue(cm.wq);
	ib_unregister_client(&cm_client);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);