IB/sa: Add ib_init_ah_from_path()
drivers/infiniband/core/cm.c
/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	struct workqueue_struct *wq;
} cm;

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u8 port_num;
};

struct cm_device {
	struct list_head list;
	struct ib_device *device;
	__be64 ca_guid;
	struct cm_port port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 packet_life_time;
};

struct cm_work {
	struct work_struct work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;
	struct ib_cm_compare_data *compare_data;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 local_ack_timeout;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(void *data);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}
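
/*
 * Note: only request messages allocated through cm_alloc_msg() hold a
 * reference on the cm_id (stashed in context[0]); response messages from
 * cm_alloc_response_msg() leave context[0] NULL, so cm_free_msg() only
 * drops a reference when one was actually taken.
 */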

static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmalloc(private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	memcpy(data, private_data, private_data_len);
	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num,
			   u16 dlid, u8 sl, u16 src_path_bits)
{
	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = dlid;
	ah_attr->sl = sl;
	ah_attr->src_path_bits = src_path_bits;
	ah_attr->port_num = port_num;
}

static void cm_init_av_for_response(struct cm_port *port,
				    struct ib_wc *wc, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	cm_set_ah_attr(&av->ah_attr, port->port_num, wc->slid,
		       wc->sl, wc->dlid_path_bits);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
					&p, NULL)) {
			port = &cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	cm_set_ah_attr(&av->ah_attr, av->port->port_num,
		       be16_to_cpu(path->dlid), path->sl,
		       be16_to_cpu(path->slid) & 0x7F);
	av->packet_life_time = path->packet_life_time;
	return 0;
}

static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int ret;
	static int next_id;

	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, next_id++,
					(__force int *) &cm_id_priv->id.local_id);
		spin_unlock_irqrestore(&cm.lock, flags);
	} while ((ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL));
	return ret;
}
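
/*
 * idr_get_new_above() fails with -EAGAIN when the idr's preallocated
 * node pool is exhausted; idr_pre_get() then refills the pool outside
 * the spinlock (it may sleep with GFP_KERNEL) before the allocation is
 * retried under the lock.
 */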

static void cm_free_id(__be32 local_id)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	idr_remove(&cm.local_id_table, (__force int) local_id);
	spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irqrestore(&cm.lock, flags);

	return cm_id_priv;
}

static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
{
	int i;

	for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
		((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
					     ((unsigned long *) mask)[i];
}
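
/*
 * cm_mask_copy() assumes IB_CM_COMPARE_SIZE is a multiple of
 * sizeof(unsigned long) and that the buffers are suitably aligned;
 * any trailing bytes beyond the last full word would be skipped.
 */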

static int cm_compare_data(struct ib_cm_compare_data *src_data,
			   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];
	u8 dst[IB_CM_COMPARE_SIZE];

	if (!src_data || !dst_data)
		return 0;

	cm_mask_copy(src, src_data->data, dst_data->mask);
	cm_mask_copy(dst, dst_data->data, src_data->mask);
	return memcmp(src, dst, IB_CM_COMPARE_SIZE);
}

static int cm_compare_private_data(u8 *private_data,
				   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];

	if (!dst_data)
		return 0;

	cm_mask_copy(src, private_data, dst_data->mask);
	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
}

static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;
	int data_cmp;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		data_cmp = cm_compare_data(cm_id_priv->compare_data,
					   cur_cm_id_priv->compare_data);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
		    !data_cmp)
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (service_id < cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_left;
		else if (service_id > cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_right;
		else if (data_cmp < 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}
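
/*
 * Listens are kept in an rb-tree keyed by (device, service_id,
 * compare_data), in that order; an insert that matches an existing
 * entry (with the service_id masks applied) returns the current
 * listener instead of inserting a duplicate.
 */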

static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id,
					     u8 *private_data)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;
	int data_cmp;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		data_cmp = cm_compare_private_data(private_data,
						   cm_id_priv->compare_data);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device) && !data_cmp)
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (service_id < cm_id_priv->id.service_id)
			node = node->rb_left;
		else if (service_id > cm_id_priv->id.service_id)
			node = node->rb_right;
		else if (data_cmp < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (remote_id < cur_timewait_info->work.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_timewait_info->work.remote_id)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (remote_id < timewait_info->work.remote_id)
			node = node->rb_left;
		else if (remote_id > timewait_info->work.remote_id)
			node = node->rb_right;
		else if (remote_ca_guid < timewait_info->remote_ca_guid)
			node = node->rb_left;
		else if (remote_ca_guid > timewait_info->remote_ca_guid)
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (remote_qpn < cur_timewait_info->remote_qpn)
			link = &(*link)->rb_left;
		else if (remote_qpn > cur_timewait_info->remote_qpn)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (remote_id < cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);
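
/*
 * Typical consumer usage (a sketch; error handling omitted): a passive
 * side calls ib_create_cm_id() followed by ib_cm_listen(), while an
 * active side calls ib_create_cm_id() and then ib_send_cm_req().  Both
 * sides eventually tear the id down with ib_destroy_cm_id().
 */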

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}
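
/*
 * Worked example: an IBA time of 14 encodes 4.096us * 2^14 ~= 67ms;
 * the approximation above yields 1 << (14 - 8) = 64ms, since 4.096us
 * is close to 2^-8 ms (3.906us).
 */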

static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	unsigned long flags;

	if (!timewait_info->inserted_remote_id &&
	    !timewait_info->inserted_remote_qp)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_WORK(&timewait_info->work.work, cm_work_handler,
		  &timewait_info->work);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
			   msecs_to_jiffies(wait_time));
	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		spin_lock_irqsave(&cm.lock, flags);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		break;
	case IB_CM_REQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->av.port->cm_dev->ca_guid,
			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
			       NULL, 0);
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	}

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->compare_data);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
		 struct ib_cm_compare_data *compare_data)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

	service_mask = service_mask ? service_mask :
		       __constant_cpu_to_be64(~0ULL);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	if (compare_data) {
		cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
						   GFP_KERNEL);
		if (!cm_id_priv->compare_data)
			return -ENOMEM;
		cm_mask_copy(cm_id_priv->compare_data->data,
			     compare_data->data, compare_data->mask);
		memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
		       IB_CM_COMPARE_SIZE);
	}

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		kfree(cm_id_priv->compare_data);
		cm_id_priv->compare_data = NULL;
		ret = -EBUSY;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
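
/*
 * A zero service_mask is treated as an exact-match listen (mask ~0),
 * and IB_CM_ASSIGN_SERVICE_ID asks the CM to hand out the next free
 * service id from its local counter.
 */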

static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
			  (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}
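
/*
 * TID layout: the upper 32 bits come from the MAD agent's hi_tid; the
 * low word carries the local communication id with the message sequence
 * (CM_MSG_SEQUENCE_REQ, CM_MSG_SEQUENCE_DREQ, ...) OR'ed into bits 30-31.
 */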

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method	   = IB_MGMT_METHOD_SEND;
	hdr->attr_id	   = attr_id;
	hdr->tid	   = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	req_msg->primary_local_lid = param->primary_path->slid;
	req_msg->primary_remote_lid = param->primary_path->dlid;
	req_msg->primary_local_gid = param->primary_path->sgid;
	req_msg->primary_remote_gid = param->primary_path->dgid;
	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
	req_msg->primary_traffic_class = param->primary_path->traffic_class;
	req_msg->primary_hop_limit = param->primary_path->hop_limit;
	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
	cm_req_set_primary_local_ack_timeout(req_msg,
		min(31, param->primary_path->packet_life_time + 1));

	if (param->alternate_path) {
		req_msg->alt_local_lid = param->alternate_path->slid;
		req_msg->alt_remote_lid = param->alternate_path->dlid;
		req_msg->alt_local_gid = param->alternate_path->sgid;
		req_msg->alt_remote_gid = param->alternate_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  param->alternate_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
		cm_req_set_alt_local_ack_timeout(req_msg,
			min(31, param->alternate_path->packet_life_time + 1));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}

int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}
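
/*
 * The "active" peer in a peer-to-peer connection is the side with the
 * numerically larger CA GUID, with the QPN as tie-breaker.
 */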

static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
	}
}

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
					cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
					cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	unsigned long flags;
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
}

static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;
	unsigned long flags;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for duplicate REQ and stale connections. */
	spin_lock_irqsave(&cm.lock, flags);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (!timewait_info)
		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irqrestore(&cm.lock, flags);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		} else
			cm_issue_rej(work->port, work->mad_recv_wc,
				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
				     NULL, 0);
		goto error;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id,
					   req_msg->private_data);
	if (!listen_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto error;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irqrestore(&cm.lock, flags);
	return listen_cm_id_priv;

error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
	return NULL;
}
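
/*
 * cm_match_req() filters duplicate REQs and stale connections via the
 * remote id/qpn timewait trees before looking up a matching listen; on
 * success it returns the listener with a reference held.
 */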
1328
1329static int cm_req_handler(struct cm_work *work)
1330{
1331 struct ib_cm_id *cm_id;
1332 struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
1333 struct cm_req_msg *req_msg;
1334 int ret;
1335
1336 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1337
07d357d0 1338 cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
a977049d
HR
1339 if (IS_ERR(cm_id))
1340 return PTR_ERR(cm_id);
1341
1342 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1343 cm_id_priv->id.remote_id = req_msg->local_comm_id;
1344 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
1345 &cm_id_priv->av);
1346 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
1347 id.local_id);
1348 if (IS_ERR(cm_id_priv->timewait_info)) {
1349 ret = PTR_ERR(cm_id_priv->timewait_info);
1350 goto error1;
1351 }
1352 cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
1353 cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
1354 cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);
1355
1356 listen_cm_id_priv = cm_match_req(work, cm_id_priv);
1357 if (!listen_cm_id_priv) {
1358 ret = -EINVAL;
1359 goto error2;
1360 }
1361
1362 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
1363 cm_id_priv->id.context = listen_cm_id_priv->id.context;
1364 cm_id_priv->id.service_id = req_msg->service_id;
97f52eb4 1365 cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
a977049d
HR
1366
1367 cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
1368 ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
1369 if (ret)
1370 goto error3;
1371 if (req_msg->alt_local_lid) {
1372 ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
1373 if (ret)
1374 goto error3;
1375 }
1376 cm_id_priv->tid = req_msg->hdr.tid;
1377 cm_id_priv->timeout_ms = cm_convert_to_ms(
1378 cm_req_get_local_resp_timeout(req_msg));
1379 cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
1380 cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
1381 cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
1382 cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
1383 cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
1384 cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
1385 cm_id_priv->local_ack_timeout =
1386 cm_req_get_primary_local_ack_timeout(req_msg);
1387 cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
1388 cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
ae7971a7 1389 cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
a977049d
HR
1390
1391 cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
1392 cm_process_work(cm_id_priv, work);
1393 cm_deref_id(listen_cm_id_priv);
1394 return 0;
1395
1396error3: atomic_dec(&cm_id_priv->refcount);
1397 cm_deref_id(listen_cm_id_priv);
1398 cm_cleanup_timewait(cm_id_priv->timewait_info);
1399error2: kfree(cm_id_priv->timewait_info);
1b205c2d 1400 cm_id_priv->timewait_info = NULL;
a977049d
HR
1401error1: ib_destroy_cm_id(&cm_id_priv->id);
1402 return ret;
1403}
1404
1405static void cm_format_rep(struct cm_rep_msg *rep_msg,
1406 struct cm_id_private *cm_id_priv,
1407 struct ib_cm_rep_param *param)
1408{
1409 cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
1410 rep_msg->local_comm_id = cm_id_priv->id.local_id;
1411 rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1412 cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
1413 cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
1414 rep_msg->resp_resources = param->responder_resources;
1415 rep_msg->initiator_depth = param->initiator_depth;
1416 cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
1417 cm_rep_set_failover(rep_msg, param->failover_accepted);
1418 cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
1419 cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
1420 cm_rep_set_srq(rep_msg, param->srq);
1421 rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
1422
1423 if (param->private_data && param->private_data_len)
1424 memcpy(rep_msg->private_data, param->private_data,
1425 param->private_data_len);
1426}
1427
1428int ib_send_cm_rep(struct ib_cm_id *cm_id,
1429 struct ib_cm_rep_param *param)
1430{
1431 struct cm_id_private *cm_id_priv;
1432 struct ib_mad_send_buf *msg;
1433 struct cm_rep_msg *rep_msg;
a977049d
HR
1434 unsigned long flags;
1435 int ret;
1436
1437 if (param->private_data &&
1438 param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
1439 return -EINVAL;
1440
1441 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1442 spin_lock_irqsave(&cm_id_priv->lock, flags);
1443 if (cm_id->state != IB_CM_REQ_RCVD &&
1444 cm_id->state != IB_CM_MRA_REQ_SENT) {
1445 ret = -EINVAL;
1446 goto out;
1447 }
1448
1449 ret = cm_alloc_msg(cm_id_priv, &msg);
1450 if (ret)
1451 goto out;
1452
1453 rep_msg = (struct cm_rep_msg *) msg->mad;
1454 cm_format_rep(rep_msg, cm_id_priv, param);
34816ad9 1455 msg->timeout_ms = cm_id_priv->timeout_ms;
a977049d
HR
1456 msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
1457
34816ad9 1458 ret = ib_post_send_mad(msg, NULL);
a977049d
HR
1459 if (ret) {
1460 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1461 cm_free_msg(msg);
1462 return ret;
1463 }
1464
1465 cm_id->state = IB_CM_REP_SENT;
1466 cm_id_priv->msg = msg;
1467 cm_id_priv->initiator_depth = param->initiator_depth;
1468 cm_id_priv->responder_resources = param->responder_resources;
1469 cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
1470 cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);
1471
1472out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1473 return ret;
1474}
1475EXPORT_SYMBOL(ib_send_cm_rep);
1476
1477static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
1478 struct cm_id_private *cm_id_priv,
1479 const void *private_data,
1480 u8 private_data_len)
1481{
1482 cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
1483 rtu_msg->local_comm_id = cm_id_priv->id.local_id;
1484 rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
1485
1486 if (private_data && private_data_len)
1487 memcpy(rtu_msg->private_data, private_data, private_data_len);
1488}
1489
1490int ib_send_cm_rtu(struct ib_cm_id *cm_id,
1491 const void *private_data,
1492 u8 private_data_len)
1493{
1494 struct cm_id_private *cm_id_priv;
1495 struct ib_mad_send_buf *msg;
a977049d
HR
1496 unsigned long flags;
1497 void *data;
1498 int ret;
1499
1500 if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
1501 return -EINVAL;
1502
1503 data = cm_copy_private_data(private_data, private_data_len);
1504 if (IS_ERR(data))
1505 return PTR_ERR(data);
1506
1507 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1508 spin_lock_irqsave(&cm_id_priv->lock, flags);
1509 if (cm_id->state != IB_CM_REP_RCVD &&
1510 cm_id->state != IB_CM_MRA_REP_SENT) {
1511 ret = -EINVAL;
1512 goto error;
1513 }
1514
1515 ret = cm_alloc_msg(cm_id_priv, &msg);
1516 if (ret)
1517 goto error;
1518
1519 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1520 private_data, private_data_len);
1521
34816ad9 1522 ret = ib_post_send_mad(msg, NULL);
a977049d
HR
1523 if (ret) {
1524 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1525 cm_free_msg(msg);
1526 kfree(data);
1527 return ret;
1528 }
1529
1530 cm_id->state = IB_CM_ESTABLISHED;
1531 cm_set_private_data(cm_id_priv, data, private_data_len);
1532 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1533 return 0;
1534
1535error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1536 kfree(data);
1537 return ret;
1538}
1539EXPORT_SYMBOL(ib_send_cm_rtu);
1540
1541static void cm_format_rep_event(struct cm_work *work)
1542{
1543 struct cm_rep_msg *rep_msg;
1544 struct ib_cm_rep_event_param *param;
1545
1546 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1547 param = &work->cm_event.param.rep_rcvd;
1548 param->remote_ca_guid = rep_msg->local_ca_guid;
1549 param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
1550 param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
1551 param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
1552 param->responder_resources = rep_msg->initiator_depth;
1553 param->initiator_depth = rep_msg->resp_resources;
1554 param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
1555 param->failover_accepted = cm_rep_get_failover(rep_msg);
1556 param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
1557 param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1558 param->srq = cm_rep_get_srq(rep_msg);
1559 work->cm_event.private_data = &rep_msg->private_data;
1560}
1561
1562static void cm_dup_rep_handler(struct cm_work *work)
1563{
1564 struct cm_id_private *cm_id_priv;
1565 struct cm_rep_msg *rep_msg;
1566 struct ib_mad_send_buf *msg = NULL;
a977049d
HR
1567 unsigned long flags;
1568 int ret;
1569
1570 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
1571 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
1572 rep_msg->local_comm_id);
1573 if (!cm_id_priv)
1574 return;
1575
1576 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1577 if (ret)
1578 goto deref;
1579
1580 spin_lock_irqsave(&cm_id_priv->lock, flags);
1581 if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
1582 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1583 cm_id_priv->private_data,
1584 cm_id_priv->private_data_len);
1585 else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
1586 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1587 CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
1588 cm_id_priv->private_data,
1589 cm_id_priv->private_data_len);
1590 else
1591 goto unlock;
1592 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1593
34816ad9 1594 ret = ib_post_send_mad(msg, NULL);
a977049d
HR
1595 if (ret)
1596 goto free;
1597 goto deref;
1598
1599unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1600free: cm_free_msg(msg);
1601deref: cm_deref_id(cm_id_priv);
1602}
1603
1604static int cm_rep_handler(struct cm_work *work)
1605{
1606 struct cm_id_private *cm_id_priv;
1607 struct cm_rep_msg *rep_msg;
1608 unsigned long flags;
1609 int ret;
1610
1611 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1612 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
1613 if (!cm_id_priv) {
1614 cm_dup_rep_handler(work);
1615 return -EINVAL;
1616 }
1617
87fd1a11
SH
1618 cm_format_rep_event(work);
1619
1620 spin_lock_irqsave(&cm_id_priv->lock, flags);
1621 switch (cm_id_priv->id.state) {
1622 case IB_CM_REQ_SENT:
1623 case IB_CM_MRA_REQ_RCVD:
1624 break;
1625 default:
1626 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1627 ret = -EINVAL;
1628 goto error;
1629 }
1630
a977049d
HR
1631 cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
1632 cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
1633 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1634
87fd1a11 1635 spin_lock(&cm.lock);
a977049d
HR
1636 /* Check for duplicate REP. */
1637 if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
87fd1a11
SH
1638 spin_unlock(&cm.lock);
1639 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
a977049d
HR
1640 ret = -EINVAL;
1641 goto error;
1642 }
1643 /* Check for a stale connection. */
1644 if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
87fd1a11
SH
1645 rb_erase(&cm_id_priv->timewait_info->remote_id_node,
1646 &cm.remote_id_table);
1647 cm_id_priv->timewait_info->inserted_remote_id = 0;
1648 spin_unlock(&cm.lock);
1649 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
a977049d
HR
1650 cm_issue_rej(work->port, work->mad_recv_wc,
1651 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
1652 NULL, 0);
1653 ret = -EINVAL;
1654 goto error;
1655 }
87fd1a11 1656 spin_unlock(&cm.lock);
a977049d 1657
a977049d
HR
1658 cm_id_priv->id.state = IB_CM_REP_RCVD;
1659 cm_id_priv->id.remote_id = rep_msg->local_comm_id;
1660 cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1661 cm_id_priv->initiator_depth = rep_msg->resp_resources;
1662 cm_id_priv->responder_resources = rep_msg->initiator_depth;
1663 cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
1664 cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1665
1666 /* todo: handle peer_to_peer */
1667
1668 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1669 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1670 if (!ret)
1671 list_add_tail(&work->list, &cm_id_priv->work_list);
1672 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1673
1674 if (ret)
1675 cm_process_work(cm_id_priv, work);
1676 else
1677 cm_deref_id(cm_id_priv);
1678 return 0;
1679
1680error:
1681 cm_deref_id(cm_id_priv);
1682 return ret;
1683}
1684
1685static int cm_establish_handler(struct cm_work *work)
1686{
1687 struct cm_id_private *cm_id_priv;
1688 unsigned long flags;
1689 int ret;
1690
1691 /* See comment in ib_cm_establish about lookup. */
1692 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
1693 if (!cm_id_priv)
1694 return -EINVAL;
1695
1696 spin_lock_irqsave(&cm_id_priv->lock, flags);
1697 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
1698 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1699 goto out;
1700 }
1701
1702 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1703 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1704 if (!ret)
1705 list_add_tail(&work->list, &cm_id_priv->work_list);
1706 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1707
1708 if (ret)
1709 cm_process_work(cm_id_priv, work);
1710 else
1711 cm_deref_id(cm_id_priv);
1712 return 0;
1713out:
1714 cm_deref_id(cm_id_priv);
1715 return -EINVAL;
1716}
1717
1718static int cm_rtu_handler(struct cm_work *work)
1719{
1720 struct cm_id_private *cm_id_priv;
1721 struct cm_rtu_msg *rtu_msg;
1722 unsigned long flags;
1723 int ret;
1724
1725 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
1726 cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
1727 rtu_msg->local_comm_id);
1728 if (!cm_id_priv)
1729 return -EINVAL;
1730
1731 work->cm_event.private_data = &rtu_msg->private_data;
1732
1733 spin_lock_irqsave(&cm_id_priv->lock, flags);
1734 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
1735 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
1736 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1737 goto out;
1738 }
1739 cm_id_priv->id.state = IB_CM_ESTABLISHED;
1740
1741 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1742 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1743 if (!ret)
1744 list_add_tail(&work->list, &cm_id_priv->work_list);
1745 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1746
1747 if (ret)
1748 cm_process_work(cm_id_priv, work);
1749 else
1750 cm_deref_id(cm_id_priv);
1751 return 0;
1752out:
1753 cm_deref_id(cm_id_priv);
1754 return -EINVAL;
1755}
1756
1757static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
1758 struct cm_id_private *cm_id_priv,
1759 const void *private_data,
1760 u8 private_data_len)
1761{
1762 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
1763 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
1764 dreq_msg->local_comm_id = cm_id_priv->id.local_id;
1765 dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
1766 cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
1767
1768 if (private_data && private_data_len)
1769 memcpy(dreq_msg->private_data, private_data, private_data_len);
1770}
1771
1772int ib_send_cm_dreq(struct ib_cm_id *cm_id,
1773 const void *private_data,
1774 u8 private_data_len)
1775{
1776 struct cm_id_private *cm_id_priv;
1777 struct ib_mad_send_buf *msg;
1778 unsigned long flags;
1779 int ret;
1780
1781 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
1782 return -EINVAL;
1783
1784 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1785 spin_lock_irqsave(&cm_id_priv->lock, flags);
1786 if (cm_id->state != IB_CM_ESTABLISHED) {
1787 ret = -EINVAL;
1788 goto out;
1789 }
1790
1791 ret = cm_alloc_msg(cm_id_priv, &msg);
1792 if (ret) {
1793 cm_enter_timewait(cm_id_priv);
1794 goto out;
1795 }
1796
1797 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
1798 private_data, private_data_len);
1799 msg->timeout_ms = cm_id_priv->timeout_ms;
1800 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
1801
1802 ret = ib_post_send_mad(msg, NULL);
1803 if (ret) {
1804 cm_enter_timewait(cm_id_priv);
1805 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1806 cm_free_msg(msg);
1807 return ret;
1808 }
1809
1810 cm_id->state = IB_CM_DREQ_SENT;
1811 cm_id_priv->msg = msg;
1812out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1813 return ret;
1814}
1815EXPORT_SYMBOL(ib_send_cm_dreq);
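/*
 * A minimal consumer-side sketch of tearing down a connection, assuming
 * a cm_id in the IB_CM_ESTABLISHED state (any other state returns
 * -EINVAL, as coded above); example_disconnect() is a hypothetical
 * helper name.
 *
 *	static int example_disconnect(struct ib_cm_id *cm_id)
 *	{
 *		return ib_send_cm_dreq(cm_id, NULL, 0);
 *	}
 *
 * On success the id moves to IB_CM_DREQ_SENT, and the consumer's event
 * handler should expect IB_CM_DREP_RECEIVED or IB_CM_DREQ_ERROR.
 */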
1816
1817static void cm_format_drep(struct cm_drep_msg *drep_msg,
1818 struct cm_id_private *cm_id_priv,
1819 const void *private_data,
1820 u8 private_data_len)
1821{
1822 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
1823 drep_msg->local_comm_id = cm_id_priv->id.local_id;
1824 drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1825
1826 if (private_data && private_data_len)
1827 memcpy(drep_msg->private_data, private_data, private_data_len);
1828}
1829
1830int ib_send_cm_drep(struct ib_cm_id *cm_id,
1831 const void *private_data,
1832 u8 private_data_len)
1833{
1834 struct cm_id_private *cm_id_priv;
1835 struct ib_mad_send_buf *msg;
1836 unsigned long flags;
1837 void *data;
1838 int ret;
1839
1840 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
1841 return -EINVAL;
1842
1843 data = cm_copy_private_data(private_data, private_data_len);
1844 if (IS_ERR(data))
1845 return PTR_ERR(data);
1846
1847 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1848 spin_lock_irqsave(&cm_id_priv->lock, flags);
1849 if (cm_id->state != IB_CM_DREQ_RCVD) {
1850 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1851 kfree(data);
1852 return -EINVAL;
1853 }
1854
1855 cm_set_private_data(cm_id_priv, data, private_data_len);
1856 cm_enter_timewait(cm_id_priv);
1857
1858 ret = cm_alloc_msg(cm_id_priv, &msg);
1859 if (ret)
1860 goto out;
1861
1862 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1863 private_data, private_data_len);
1864
1865 ret = ib_post_send_mad(msg, NULL);
1866 if (ret) {
1867 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1868 cm_free_msg(msg);
1869 return ret;
1870 }
1871
1872out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1873 return ret;
1874}
1875EXPORT_SYMBOL(ib_send_cm_drep);
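/*
 * A minimal sketch of the passive side of disconnection, assuming the
 * consumer's callback receives IB_CM_DREQ_RECEIVED; example_cm_handler()
 * is a hypothetical consumer callback.  ib_send_cm_drep() is only valid
 * in IB_CM_DREQ_RCVD and moves the id into timewait.
 *
 *	static int example_cm_handler(struct ib_cm_id *cm_id,
 *				      struct ib_cm_event *event)
 *	{
 *		switch (event->event) {
 *		case IB_CM_DREQ_RECEIVED:
 *			ib_send_cm_drep(cm_id, NULL, 0);
 *			break;
 *		default:
 *			break;
 *		}
 *		return 0;
 *	}
 */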
1876
1877static int cm_dreq_handler(struct cm_work *work)
1878{
1879 struct cm_id_private *cm_id_priv;
1880 struct cm_dreq_msg *dreq_msg;
1881 struct ib_mad_send_buf *msg = NULL;
1882 unsigned long flags;
1883 int ret;
1884
1885 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
1886 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
1887 dreq_msg->local_comm_id);
1888 if (!cm_id_priv)
1889 return -EINVAL;
1890
1891 work->cm_event.private_data = &dreq_msg->private_data;
1892
1893 spin_lock_irqsave(&cm_id_priv->lock, flags);
1894 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
1895 goto unlock;
1896
1897 switch (cm_id_priv->id.state) {
1898 case IB_CM_REP_SENT:
1899 case IB_CM_DREQ_SENT:
1900 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1901 break;
1902 case IB_CM_ESTABLISHED:
1903 case IB_CM_MRA_REP_RCVD:
1904 break;
1905 case IB_CM_TIMEWAIT:
1906 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
1907 goto unlock;
1908
1909 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1910 cm_id_priv->private_data,
1911 cm_id_priv->private_data_len);
1912 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1913
1914 if (ib_post_send_mad(msg, NULL))
1915 cm_free_msg(msg);
1916 goto deref;
1917 default:
1918 goto unlock;
1919 }
1920 cm_id_priv->id.state = IB_CM_DREQ_RCVD;
1921 cm_id_priv->tid = dreq_msg->hdr.tid;
1922 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1923 if (!ret)
1924 list_add_tail(&work->list, &cm_id_priv->work_list);
1925 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1926
1927 if (ret)
1928 cm_process_work(cm_id_priv, work);
1929 else
1930 cm_deref_id(cm_id_priv);
1931 return 0;
1932
1933unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1934deref: cm_deref_id(cm_id_priv);
1935 return -EINVAL;
1936}
1937
1938static int cm_drep_handler(struct cm_work *work)
1939{
1940 struct cm_id_private *cm_id_priv;
1941 struct cm_drep_msg *drep_msg;
1942 unsigned long flags;
1943 int ret;
1944
1945 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
1946 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
1947 drep_msg->local_comm_id);
1948 if (!cm_id_priv)
1949 return -EINVAL;
1950
1951 work->cm_event.private_data = &drep_msg->private_data;
1952
1953 spin_lock_irqsave(&cm_id_priv->lock, flags);
1954 if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
1955 cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
1956 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1957 goto out;
1958 }
1959 cm_enter_timewait(cm_id_priv);
1960
1961 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1962 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1963 if (!ret)
1964 list_add_tail(&work->list, &cm_id_priv->work_list);
1965 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1966
1967 if (ret)
1968 cm_process_work(cm_id_priv, work);
1969 else
1970 cm_deref_id(cm_id_priv);
1971 return 0;
1972out:
1973 cm_deref_id(cm_id_priv);
1974 return -EINVAL;
1975}
1976
1977int ib_send_cm_rej(struct ib_cm_id *cm_id,
1978 enum ib_cm_rej_reason reason,
1979 void *ari,
1980 u8 ari_length,
1981 const void *private_data,
1982 u8 private_data_len)
1983{
1984 struct cm_id_private *cm_id_priv;
1985 struct ib_mad_send_buf *msg;
1986 unsigned long flags;
1987 int ret;
1988
1989 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
1990 (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
1991 return -EINVAL;
1992
1993 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1994
1995 spin_lock_irqsave(&cm_id_priv->lock, flags);
1996 switch (cm_id->state) {
1997 case IB_CM_REQ_SENT:
1998 case IB_CM_MRA_REQ_RCVD:
1999 case IB_CM_REQ_RCVD:
2000 case IB_CM_MRA_REQ_SENT:
2001 case IB_CM_REP_RCVD:
2002 case IB_CM_MRA_REP_SENT:
2003 ret = cm_alloc_msg(cm_id_priv, &msg);
2004 if (!ret)
2005 cm_format_rej((struct cm_rej_msg *) msg->mad,
2006 cm_id_priv, reason, ari, ari_length,
2007 private_data, private_data_len);
2008
2009 cm_reset_to_idle(cm_id_priv);
2010 break;
2011 case IB_CM_REP_SENT:
2012 case IB_CM_MRA_REP_RCVD:
2013 ret = cm_alloc_msg(cm_id_priv, &msg);
2014 if (!ret)
2015 cm_format_rej((struct cm_rej_msg *) msg->mad,
2016 cm_id_priv, reason, ari, ari_length,
2017 private_data, private_data_len);
2018
2019 cm_enter_timewait(cm_id_priv);
2020 break;
2021 default:
2022 ret = -EINVAL;
2023 goto out;
2024 }
2025
2026 if (ret)
2027 goto out;
2028
2029 ret = ib_post_send_mad(msg, NULL);
2030 if (ret)
2031 cm_free_msg(msg);
2032
2033out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2034 return ret;
2035}
2036EXPORT_SYMBOL(ib_send_cm_rej);
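/*
 * A minimal sketch of refusing an incoming request, assuming the
 * consumer decides inside its (hypothetical) REQ handler that it cannot
 * accept; the resources_available test is illustrative, and
 * IB_CM_REJ_CONSUMER_DEFINED is one of the reason codes from
 * <rdma/ib_cm.h>.
 *
 *	case IB_CM_REQ_RECEIVED:
 *		if (!resources_available)
 *			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
 *				       NULL, 0, NULL, 0);
 *
 * Per the switch above, rejecting a received REQ resets the id to idle,
 * while rejecting after a REP has been sent moves it into timewait.
 */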
2037
2038static void cm_format_rej_event(struct cm_work *work)
2039{
2040 struct cm_rej_msg *rej_msg;
2041 struct ib_cm_rej_event_param *param;
2042
2043 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2044 param = &work->cm_event.param.rej_rcvd;
2045 param->ari = rej_msg->ari;
2046 param->ari_length = cm_rej_get_reject_info_len(rej_msg);
2047 param->reason = __be16_to_cpu(rej_msg->reason);
2048 work->cm_event.private_data = &rej_msg->private_data;
2049}
2050
2051static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2052{
2053 struct cm_timewait_info *timewait_info;
2054 struct cm_id_private *cm_id_priv;
2055 unsigned long flags;
2056 __be32 remote_id;
2057
2058 remote_id = rej_msg->local_comm_id;
2059
2060 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
2061 spin_lock_irqsave(&cm.lock, flags);
2062 timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
2063 remote_id);
2064 if (!timewait_info) {
2065 spin_unlock_irqrestore(&cm.lock, flags);
2066 return NULL;
2067 }
2068 cm_id_priv = idr_find(&cm.local_id_table,
2069 (__force int) timewait_info->work.local_id);
2070 if (cm_id_priv) {
2071 if (cm_id_priv->id.remote_id == remote_id)
2072 atomic_inc(&cm_id_priv->refcount);
2073 else
2074 cm_id_priv = NULL;
2075 }
2076 spin_unlock_irqrestore(&cm.lock, flags);
2077 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2078 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2079 else
2080 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2081
2082 return cm_id_priv;
2083}
2084
2085static int cm_rej_handler(struct cm_work *work)
2086{
2087 struct cm_id_private *cm_id_priv;
2088 struct cm_rej_msg *rej_msg;
2089 unsigned long flags;
2090 int ret;
2091
2092 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2093 cm_id_priv = cm_acquire_rejected_id(rej_msg);
2094 if (!cm_id_priv)
2095 return -EINVAL;
2096
2097 cm_format_rej_event(work);
2098
2099 spin_lock_irqsave(&cm_id_priv->lock, flags);
2100 switch (cm_id_priv->id.state) {
2101 case IB_CM_REQ_SENT:
2102 case IB_CM_MRA_REQ_RCVD:
2103 case IB_CM_REP_SENT:
2104 case IB_CM_MRA_REP_RCVD:
2105 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2106 /* fall through */
2107 case IB_CM_REQ_RCVD:
2108 case IB_CM_MRA_REQ_SENT:
2109 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2110 cm_enter_timewait(cm_id_priv);
2111 else
2112 cm_reset_to_idle(cm_id_priv);
2113 break;
2114 case IB_CM_DREQ_SENT:
2115 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2116 /* fall through */
2117 case IB_CM_REP_RCVD:
2118 case IB_CM_MRA_REP_SENT:
2119 case IB_CM_ESTABLISHED:
2120 cm_enter_timewait(cm_id_priv);
2121 break;
2122 default:
2123 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2124 ret = -EINVAL;
2125 goto out;
2126 }
2127
2128 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2129 if (!ret)
2130 list_add_tail(&work->list, &cm_id_priv->work_list);
2131 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2132
2133 if (ret)
2134 cm_process_work(cm_id_priv, work);
2135 else
2136 cm_deref_id(cm_id_priv);
2137 return 0;
2138out:
2139 cm_deref_id(cm_id_priv);
2140 return -EINVAL;
2141}
2142
2143int ib_send_cm_mra(struct ib_cm_id *cm_id,
2144 u8 service_timeout,
2145 const void *private_data,
2146 u8 private_data_len)
2147{
2148 struct cm_id_private *cm_id_priv;
2149 struct ib_mad_send_buf *msg;
2150 void *data;
2151 unsigned long flags;
2152 int ret;
2153
2154 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2155 return -EINVAL;
2156
2157 data = cm_copy_private_data(private_data, private_data_len);
2158 if (IS_ERR(data))
2159 return PTR_ERR(data);
2160
2161 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2162
2163 spin_lock_irqsave(&cm_id_priv->lock, flags);
2164 switch(cm_id_priv->id.state) {
2165 case IB_CM_REQ_RCVD:
2166 ret = cm_alloc_msg(cm_id_priv, &msg);
2167 if (ret)
2168 goto error1;
2169
2170 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2171 CM_MSG_RESPONSE_REQ, service_timeout,
2172 private_data, private_data_len);
2173 ret = ib_post_send_mad(msg, NULL);
2174 if (ret)
2175 goto error2;
2176 cm_id->state = IB_CM_MRA_REQ_SENT;
2177 break;
2178 case IB_CM_REP_RCVD:
2179 ret = cm_alloc_msg(cm_id_priv, &msg);
2180 if (ret)
2181 goto error1;
2182
2183 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2184 CM_MSG_RESPONSE_REP, service_timeout,
2185 private_data, private_data_len);
2186 ret = ib_post_send_mad(msg, NULL);
2187 if (ret)
2188 goto error2;
2189 cm_id->state = IB_CM_MRA_REP_SENT;
2190 break;
2191 case IB_CM_ESTABLISHED:
2192 ret = cm_alloc_msg(cm_id_priv, &msg);
2193 if (ret)
2194 goto error1;
2195
2196 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2197 CM_MSG_RESPONSE_OTHER, service_timeout,
2198 private_data, private_data_len);
2199 ret = ib_post_send_mad(msg, NULL);
2200 if (ret)
2201 goto error2;
2202 cm_id->lap_state = IB_CM_MRA_LAP_SENT;
2203 break;
2204 default:
2205 ret = -EINVAL;
2206 goto error1;
2207 }
2208 cm_id_priv->service_timeout = service_timeout;
2209 cm_set_private_data(cm_id_priv, data, private_data_len);
2210 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2211 return 0;
2212
2213error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2214 kfree(data);
2215 return ret;
2216
2217error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2218 kfree(data);
2219 cm_free_msg(msg);
2220 return ret;
2221}
2222EXPORT_SYMBOL(ib_send_cm_mra);
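/*
 * A minimal sketch of deferring a reply, assuming the consumer needs
 * more time than the peer's CM timeout allows before it can answer a
 * REQ.  The service_timeout is a 5-bit IBA time value (4.096us * 2^n),
 * so e.g. 20 asks the sender to wait roughly four seconds before
 * retrying; the value and the slow-setup step are illustrative.
 *
 *	case IB_CM_REQ_RECEIVED:
 *		ib_send_cm_mra(cm_id, 20, NULL, 0);
 *		// kick off slow resource setup; send the REP later
 *		break;
 *
 * A successful send moves the id to IB_CM_MRA_REQ_SENT, as coded above.
 */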
2223
2224static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2225{
2226 switch (cm_mra_get_msg_mraed(mra_msg)) {
2227 case CM_MSG_RESPONSE_REQ:
2228 return cm_acquire_id(mra_msg->remote_comm_id, 0);
2229 case CM_MSG_RESPONSE_REP:
2230 case CM_MSG_RESPONSE_OTHER:
2231 return cm_acquire_id(mra_msg->remote_comm_id,
2232 mra_msg->local_comm_id);
2233 default:
2234 return NULL;
2235 }
2236}
2237
2238static int cm_mra_handler(struct cm_work *work)
2239{
2240 struct cm_id_private *cm_id_priv;
2241 struct cm_mra_msg *mra_msg;
2242 unsigned long flags;
2243 int timeout, ret;
2244
2245 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
2246 cm_id_priv = cm_acquire_mraed_id(mra_msg);
2247 if (!cm_id_priv)
2248 return -EINVAL;
2249
2250 work->cm_event.private_data = &mra_msg->private_data;
2251 work->cm_event.param.mra_rcvd.service_timeout =
2252 cm_mra_get_service_timeout(mra_msg);
2253 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
2254 cm_convert_to_ms(cm_id_priv->av.packet_life_time);
2255
2256 spin_lock_irqsave(&cm_id_priv->lock, flags);
2257 switch (cm_id_priv->id.state) {
2258 case IB_CM_REQ_SENT:
2259 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
2260 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2261 cm_id_priv->msg, timeout))
2262 goto out;
2263 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
2264 break;
2265 case IB_CM_REP_SENT:
2266 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
2267 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2268 cm_id_priv->msg, timeout))
2269 goto out;
2270 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
2271 break;
2272 case IB_CM_ESTABLISHED:
2273 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
2274 cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
2275 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2276 cm_id_priv->msg, timeout))
2277 goto out;
2278 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
2279 break;
2280 default:
2281 goto out;
2282 }
2283
2284 cm_id_priv->msg->context[1] = (void *) (unsigned long)
2285 cm_id_priv->id.state;
2286 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2287 if (!ret)
2288 list_add_tail(&work->list, &cm_id_priv->work_list);
2289 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2290
2291 if (ret)
2292 cm_process_work(cm_id_priv, work);
2293 else
2294 cm_deref_id(cm_id_priv);
2295 return 0;
2296out:
2297 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2298 cm_deref_id(cm_id_priv);
2299 return -EINVAL;
2300}
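/*
 * A note on the timeout computed in cm_mra_handler() above, with a
 * worked example (the figures are illustrative): cm_convert_to_ms()
 * approximates the IBA time encoding 4.096us * 2^t in milliseconds, so
 * the peer's MRA service timeout and the path's packet lifetime are
 * summed in ms and used, via ib_modify_mad(), to push out the retry
 * deadline of the outstanding REQ/REP/LAP.  For example, a service
 * timeout of 21 is about 4.096us * 2^21 ~= 8.6 seconds.
 */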
2301
2302static void cm_format_lap(struct cm_lap_msg *lap_msg,
2303 struct cm_id_private *cm_id_priv,
2304 struct ib_sa_path_rec *alternate_path,
2305 const void *private_data,
2306 u8 private_data_len)
2307{
2308 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
2309 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
2310 lap_msg->local_comm_id = cm_id_priv->id.local_id;
2311 lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
2312 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
2313 /* todo: need remote CM response timeout */
2314 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
2315 lap_msg->alt_local_lid = alternate_path->slid;
2316 lap_msg->alt_remote_lid = alternate_path->dlid;
2317 lap_msg->alt_local_gid = alternate_path->sgid;
2318 lap_msg->alt_remote_gid = alternate_path->dgid;
2319 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
2320 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
2321 lap_msg->alt_hop_limit = alternate_path->hop_limit;
2322 cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
2323 cm_lap_set_sl(lap_msg, alternate_path->sl);
2324 cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
2325 cm_lap_set_local_ack_timeout(lap_msg,
2326 min(31, alternate_path->packet_life_time + 1));
2327
2328 if (private_data && private_data_len)
2329 memcpy(lap_msg->private_data, private_data, private_data_len);
2330}
2331
2332int ib_send_cm_lap(struct ib_cm_id *cm_id,
2333 struct ib_sa_path_rec *alternate_path,
2334 const void *private_data,
2335 u8 private_data_len)
2336{
2337 struct cm_id_private *cm_id_priv;
2338 struct ib_mad_send_buf *msg;
2339 unsigned long flags;
2340 int ret;
2341
2342 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
2343 return -EINVAL;
2344
2345 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2346 spin_lock_irqsave(&cm_id_priv->lock, flags);
2347 if (cm_id->state != IB_CM_ESTABLISHED ||
2348 cm_id->lap_state != IB_CM_LAP_IDLE) {
2349 ret = -EINVAL;
2350 goto out;
2351 }
2352
2353 ret = cm_alloc_msg(cm_id_priv, &msg);
2354 if (ret)
2355 goto out;
2356
2357 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
2358 alternate_path, private_data, private_data_len);
2359 msg->timeout_ms = cm_id_priv->timeout_ms;
2360 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
2361
2362 ret = ib_post_send_mad(msg, NULL);
2363 if (ret) {
2364 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2365 cm_free_msg(msg);
2366 return ret;
2367 }
2368
2369 cm_id->lap_state = IB_CM_LAP_SENT;
2370 cm_id_priv->msg = msg;
2371
2372out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2373 return ret;
2374}
2375EXPORT_SYMBOL(ib_send_cm_lap);
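/*
 * A minimal sketch of requesting an alternate path, assuming the
 * consumer has already obtained an ib_sa_path_rec for the failover path
 * (e.g. from an SA path record query); example_load_alt_path() is a
 * hypothetical helper.  The id must be established with lap_state
 * IB_CM_LAP_IDLE, per the check above.
 *
 *	static int example_load_alt_path(struct ib_cm_id *cm_id,
 *					 struct ib_sa_path_rec *alt_path)
 *	{
 *		return ib_send_cm_lap(cm_id, alt_path, NULL, 0);
 *	}
 *
 * The peer answers with an APR, delivered as IB_CM_APR_RECEIVED.
 */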
2376
2377static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
2378 struct cm_lap_msg *lap_msg)
2379{
2380 memset(path, 0, sizeof *path);
2381 path->dgid = lap_msg->alt_local_gid;
2382 path->sgid = lap_msg->alt_remote_gid;
2383 path->dlid = lap_msg->alt_local_lid;
2384 path->slid = lap_msg->alt_remote_lid;
2385 path->flow_label = cm_lap_get_flow_label(lap_msg);
2386 path->hop_limit = lap_msg->alt_hop_limit;
2387 path->traffic_class = cm_lap_get_traffic_class(lap_msg);
2388 path->reversible = 1;
2389 /* pkey is same as in REQ */
2390 path->sl = cm_lap_get_sl(lap_msg);
2391 path->mtu_selector = IB_SA_EQ;
2392 /* mtu is same as in REQ */
2393 path->rate_selector = IB_SA_EQ;
2394 path->rate = cm_lap_get_packet_rate(lap_msg);
2395 path->packet_life_time_selector = IB_SA_EQ;
2396 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
2397 path->packet_life_time -= (path->packet_life_time > 0);
2398}
2399
2400static int cm_lap_handler(struct cm_work *work)
2401{
2402 struct cm_id_private *cm_id_priv;
2403 struct cm_lap_msg *lap_msg;
2404 struct ib_cm_lap_event_param *param;
2405 struct ib_mad_send_buf *msg = NULL;
2406 unsigned long flags;
2407 int ret;
2408
2409 /* todo: verify LAP request and send reject APR if invalid. */
2410 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
2411 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
2412 lap_msg->local_comm_id);
2413 if (!cm_id_priv)
2414 return -EINVAL;
2415
2416 param = &work->cm_event.param.lap_rcvd;
2417 param->alternate_path = &work->path[0];
2418 cm_format_path_from_lap(param->alternate_path, lap_msg);
2419 work->cm_event.private_data = &lap_msg->private_data;
2420
2421 spin_lock_irqsave(&cm_id_priv->lock, flags);
2422 if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
2423 goto unlock;
2424
2425 switch (cm_id_priv->id.lap_state) {
2426 case IB_CM_LAP_IDLE:
2427 break;
2428 case IB_CM_MRA_LAP_SENT:
2429 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2430 goto unlock;
2431
2432 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2433 CM_MSG_RESPONSE_OTHER,
2434 cm_id_priv->service_timeout,
2435 cm_id_priv->private_data,
2436 cm_id_priv->private_data_len);
2437 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2438
2439 if (ib_post_send_mad(msg, NULL))
2440 cm_free_msg(msg);
2441 goto deref;
2442 default:
2443 goto unlock;
2444 }
2445
2446 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
2447 cm_id_priv->tid = lap_msg->hdr.tid;
2448 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2449 if (!ret)
2450 list_add_tail(&work->list, &cm_id_priv->work_list);
2451 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2452
2453 if (ret)
2454 cm_process_work(cm_id_priv, work);
2455 else
2456 cm_deref_id(cm_id_priv);
2457 return 0;
2458
2459unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2460deref: cm_deref_id(cm_id_priv);
2461 return -EINVAL;
2462}
2463
2464static void cm_format_apr(struct cm_apr_msg *apr_msg,
2465 struct cm_id_private *cm_id_priv,
2466 enum ib_cm_apr_status status,
2467 void *info,
2468 u8 info_length,
2469 const void *private_data,
2470 u8 private_data_len)
2471{
2472 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
2473 apr_msg->local_comm_id = cm_id_priv->id.local_id;
2474 apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
2475 apr_msg->ap_status = (u8) status;
2476
2477 if (info && info_length) {
2478 apr_msg->info_length = info_length;
2479 memcpy(apr_msg->info, info, info_length);
2480 }
2481
2482 if (private_data && private_data_len)
2483 memcpy(apr_msg->private_data, private_data, private_data_len);
2484}
2485
2486int ib_send_cm_apr(struct ib_cm_id *cm_id,
2487 enum ib_cm_apr_status status,
2488 void *info,
2489 u8 info_length,
2490 const void *private_data,
2491 u8 private_data_len)
2492{
2493 struct cm_id_private *cm_id_priv;
2494 struct ib_mad_send_buf *msg;
2495 unsigned long flags;
2496 int ret;
2497
2498 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
2499 (info && info_length > IB_CM_APR_INFO_LENGTH))
2500 return -EINVAL;
2501
2502 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2503 spin_lock_irqsave(&cm_id_priv->lock, flags);
2504 if (cm_id->state != IB_CM_ESTABLISHED ||
2505 (cm_id->lap_state != IB_CM_LAP_RCVD &&
2506 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
2507 ret = -EINVAL;
2508 goto out;
2509 }
2510
2511 ret = cm_alloc_msg(cm_id_priv, &msg);
2512 if (ret)
2513 goto out;
2514
2515 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
2516 info, info_length, private_data, private_data_len);
2517 ret = ib_post_send_mad(msg, NULL);
2518 if (ret) {
2519 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2520 cm_free_msg(msg);
2521 return ret;
2522 }
2523
2524 cm_id->lap_state = IB_CM_LAP_IDLE;
2525out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2526 return ret;
2527}
2528EXPORT_SYMBOL(ib_send_cm_apr);
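/*
 * A minimal sketch of accepting an alternate path, assuming the passive
 * consumer handles IB_CM_LAP_RECEIVED in its callback;
 * IB_CM_APR_SUCCESS is the ap_status reporting a loaded path.
 *
 *	case IB_CM_LAP_RECEIVED:
 *		ib_send_cm_apr(cm_id, IB_CM_APR_SUCCESS, NULL, 0, NULL, 0);
 *		break;
 *
 * After a successful send the lap_state returns to IB_CM_LAP_IDLE, so a
 * later LAP can be processed.
 */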
2529
2530static int cm_apr_handler(struct cm_work *work)
2531{
2532 struct cm_id_private *cm_id_priv;
2533 struct cm_apr_msg *apr_msg;
2534 unsigned long flags;
2535 int ret;
2536
2537 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
2538 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
2539 apr_msg->local_comm_id);
2540 if (!cm_id_priv)
2541 return -EINVAL; /* Unmatched reply. */
2542
2543 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
2544 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
2545 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
2546 work->cm_event.private_data = &apr_msg->private_data;
2547
2548 spin_lock_irqsave(&cm_id_priv->lock, flags);
2549 if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
2550 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
2551 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
2552 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2553 goto out;
2554 }
2555 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
2556 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2557 cm_id_priv->msg = NULL;
2558
2559 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2560 if (!ret)
2561 list_add_tail(&work->list, &cm_id_priv->work_list);
2562 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2563
2564 if (ret)
2565 cm_process_work(cm_id_priv, work);
2566 else
2567 cm_deref_id(cm_id_priv);
2568 return 0;
2569out:
2570 cm_deref_id(cm_id_priv);
2571 return -EINVAL;
2572}
2573
2574static int cm_timewait_handler(struct cm_work *work)
2575{
2576 struct cm_timewait_info *timewait_info;
2577 struct cm_id_private *cm_id_priv;
2578 unsigned long flags;
2579 int ret;
2580
2581 timewait_info = (struct cm_timewait_info *)work;
2582 cm_cleanup_timewait(timewait_info);
2583
2584 cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2585 timewait_info->work.remote_id);
2586 if (!cm_id_priv)
2587 return -EINVAL;
2588
2589 spin_lock_irqsave(&cm_id_priv->lock, flags);
2590 if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
2591 cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
2592 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2593 goto out;
2594 }
2595 cm_id_priv->id.state = IB_CM_IDLE;
2596 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2597 if (!ret)
2598 list_add_tail(&work->list, &cm_id_priv->work_list);
2599 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2600
2601 if (ret)
2602 cm_process_work(cm_id_priv, work);
2603 else
2604 cm_deref_id(cm_id_priv);
2605 return 0;
2606out:
2607 cm_deref_id(cm_id_priv);
2608 return -EINVAL;
2609}
2610
2611static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
2612 struct cm_id_private *cm_id_priv,
2613 struct ib_cm_sidr_req_param *param)
2614{
2615 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
2616 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
2617 sidr_req_msg->request_id = cm_id_priv->id.local_id;
2618 sidr_req_msg->pkey = cpu_to_be16(param->path->pkey);
2619 sidr_req_msg->service_id = param->service_id;
2620
2621 if (param->private_data && param->private_data_len)
2622 memcpy(sidr_req_msg->private_data, param->private_data,
2623 param->private_data_len);
2624}
2625
2626int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
2627 struct ib_cm_sidr_req_param *param)
2628{
2629 struct cm_id_private *cm_id_priv;
2630 struct ib_mad_send_buf *msg;
2631 unsigned long flags;
2632 int ret;
2633
2634 if (!param->path || (param->private_data &&
2635 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
2636 return -EINVAL;
2637
2638 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2639 ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
2640 if (ret)
2641 goto out;
2642
2643 cm_id->service_id = param->service_id;
2644 cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
2645 cm_id_priv->timeout_ms = param->timeout_ms;
2646 cm_id_priv->max_cm_retries = param->max_cm_retries;
2647 ret = cm_alloc_msg(cm_id_priv, &msg);
2648 if (ret)
2649 goto out;
2650
2651 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
2652 param);
2653 msg->timeout_ms = cm_id_priv->timeout_ms;
2654 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
2655
2656 spin_lock_irqsave(&cm_id_priv->lock, flags);
2657 if (cm_id->state == IB_CM_IDLE)
2658 ret = ib_post_send_mad(msg, NULL);
2659 else
2660 ret = -EINVAL;
2661
2662 if (ret) {
2663 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2664 cm_free_msg(msg);
2665 goto out;
2666 }
2667 cm_id->state = IB_CM_SIDR_REQ_SENT;
2668 cm_id_priv->msg = msg;
2669 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2670out:
2671 return ret;
2672}
2673EXPORT_SYMBOL(ib_send_cm_sidr_req);
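/*
 * A minimal sketch of a service ID resolution request, assuming the
 * caller already holds a path record to the target port; path_rec,
 * service_id and the timeout/retry values shown are illustrative.
 *
 *	struct ib_cm_sidr_req_param param;
 *	int ret;
 *
 *	memset(&param, 0, sizeof param);
 *	param.path = path_rec;		// from an SA path query
 *	param.service_id = service_id;
 *	param.timeout_ms = 2000;
 *	param.max_cm_retries = 3;
 *	ret = ib_send_cm_sidr_req(cm_id, &param);
 *
 * The answer arrives as an IB_CM_SIDR_REP_RECEIVED event carrying the
 * remote QPN and Q_Key.
 */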
2674
2675static void cm_format_sidr_req_event(struct cm_work *work,
2676 struct ib_cm_id *listen_id)
2677{
2678 struct cm_sidr_req_msg *sidr_req_msg;
2679 struct ib_cm_sidr_req_event_param *param;
2680
2681 sidr_req_msg = (struct cm_sidr_req_msg *)
2682 work->mad_recv_wc->recv_buf.mad;
2683 param = &work->cm_event.param.sidr_req_rcvd;
2684 param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
2685 param->listen_id = listen_id;
2686 param->port = work->port->port_num;
2687 work->cm_event.private_data = &sidr_req_msg->private_data;
2688}
2689
2690static int cm_sidr_req_handler(struct cm_work *work)
2691{
2692 struct ib_cm_id *cm_id;
2693 struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
2694 struct cm_sidr_req_msg *sidr_req_msg;
2695 struct ib_wc *wc;
2696 unsigned long flags;
2697
2698 cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
2699 if (IS_ERR(cm_id))
2700 return PTR_ERR(cm_id);
2701 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2702
2703 /* Record SGID/SLID and request ID for lookup. */
2704 sidr_req_msg = (struct cm_sidr_req_msg *)
2705 work->mad_recv_wc->recv_buf.mad;
2706 wc = work->mad_recv_wc->wc;
2707 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
2708 cm_id_priv->av.dgid.global.interface_id = 0;
2709 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2710 &cm_id_priv->av);
2711 cm_id_priv->id.remote_id = sidr_req_msg->request_id;
2712 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
2713 cm_id_priv->tid = sidr_req_msg->hdr.tid;
2714 atomic_inc(&cm_id_priv->work_count);
2715
2716 spin_lock_irqsave(&cm.lock, flags);
2717 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
2718 if (cur_cm_id_priv) {
2719 spin_unlock_irqrestore(&cm.lock, flags);
2720 goto out; /* Duplicate message. */
2721 }
2722 cur_cm_id_priv = cm_find_listen(cm_id->device,
2723 sidr_req_msg->service_id,
2724 sidr_req_msg->private_data);
2725 if (!cur_cm_id_priv) {
2726 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2727 spin_unlock_irqrestore(&cm.lock, flags);
2728 /* todo: reply with no match */
2729 goto out; /* No match. */
2730 }
2731 atomic_inc(&cur_cm_id_priv->refcount);
2732 spin_unlock_irqrestore(&cm.lock, flags);
2733
2734 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
2735 cm_id_priv->id.context = cur_cm_id_priv->id.context;
2736 cm_id_priv->id.service_id = sidr_req_msg->service_id;
2737 cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
2738
2739 cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
2740 cm_process_work(cm_id_priv, work);
2741 cm_deref_id(cur_cm_id_priv);
2742 return 0;
2743out:
2744 ib_destroy_cm_id(&cm_id_priv->id);
2745 return -EINVAL;
2746}
2747
2748static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
2749 struct cm_id_private *cm_id_priv,
2750 struct ib_cm_sidr_rep_param *param)
2751{
2752 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
2753 cm_id_priv->tid);
2754 sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
2755 sidr_rep_msg->status = param->status;
2756 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
2757 sidr_rep_msg->service_id = cm_id_priv->id.service_id;
2758 sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
2759
2760 if (param->info && param->info_length)
2761 memcpy(sidr_rep_msg->info, param->info, param->info_length);
2762
2763 if (param->private_data && param->private_data_len)
2764 memcpy(sidr_rep_msg->private_data, param->private_data,
2765 param->private_data_len);
2766}
2767
2768int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
2769 struct ib_cm_sidr_rep_param *param)
2770{
2771 struct cm_id_private *cm_id_priv;
2772 struct ib_mad_send_buf *msg;
2773 unsigned long flags;
2774 int ret;
2775
2776 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
2777 (param->private_data &&
2778 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
2779 return -EINVAL;
2780
2781 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2782 spin_lock_irqsave(&cm_id_priv->lock, flags);
2783 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
2784 ret = -EINVAL;
2785 goto error;
2786 }
2787
2788 ret = cm_alloc_msg(cm_id_priv, &msg);
2789 if (ret)
2790 goto error;
2791
2792 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
2793 param);
2794 ret = ib_post_send_mad(msg, NULL);
2795 if (ret) {
2796 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2797 cm_free_msg(msg);
2798 return ret;
2799 }
2800 cm_id->state = IB_CM_IDLE;
2801 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2802
2803 spin_lock_irqsave(&cm.lock, flags);
2804 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2805 spin_unlock_irqrestore(&cm.lock, flags);
2806 return 0;
2807
2808error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2809 return ret;
2810}
2811EXPORT_SYMBOL(ib_send_cm_sidr_rep);
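/*
 * A minimal sketch of answering a SIDR request from the listen
 * handler, assuming the service's UD QP and Q_Key are at hand; the
 * ud_qp and service_qkey variables are illustrative.
 *
 *	case IB_CM_SIDR_REQ_RECEIVED:
 *		memset(&rep, 0, sizeof rep);
 *		rep.status = IB_SIDR_SUCCESS;
 *		rep.qp_num = ud_qp->qp_num;
 *		rep.qkey = service_qkey;
 *		ib_send_cm_sidr_rep(cm_id, &rep);
 *		break;
 *
 * A successful send returns the id to IB_CM_IDLE and drops it from the
 * remote SIDR table, as coded above.
 */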
2812
2813static void cm_format_sidr_rep_event(struct cm_work *work)
2814{
2815 struct cm_sidr_rep_msg *sidr_rep_msg;
2816 struct ib_cm_sidr_rep_event_param *param;
2817
2818 sidr_rep_msg = (struct cm_sidr_rep_msg *)
2819 work->mad_recv_wc->recv_buf.mad;
2820 param = &work->cm_event.param.sidr_rep_rcvd;
2821 param->status = sidr_rep_msg->status;
2822 param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
2823 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
2824 param->info = &sidr_rep_msg->info;
2825 param->info_len = sidr_rep_msg->info_length;
2826 work->cm_event.private_data = &sidr_rep_msg->private_data;
2827}
2828
2829static int cm_sidr_rep_handler(struct cm_work *work)
2830{
2831 struct cm_sidr_rep_msg *sidr_rep_msg;
2832 struct cm_id_private *cm_id_priv;
2833 unsigned long flags;
2834
2835 sidr_rep_msg = (struct cm_sidr_rep_msg *)
2836 work->mad_recv_wc->recv_buf.mad;
2837 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
2838 if (!cm_id_priv)
2839 return -EINVAL; /* Unmatched reply. */
2840
2841 spin_lock_irqsave(&cm_id_priv->lock, flags);
2842 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
2843 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2844 goto out;
2845 }
2846 cm_id_priv->id.state = IB_CM_IDLE;
2847 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2848 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2849
2850 cm_format_sidr_rep_event(work);
2851 cm_process_work(cm_id_priv, work);
2852 return 0;
2853out:
2854 cm_deref_id(cm_id_priv);
2855 return -EINVAL;
2856}
2857
2858static void cm_process_send_error(struct ib_mad_send_buf *msg,
2859 enum ib_wc_status wc_status)
2860{
2861 struct cm_id_private *cm_id_priv;
2862 struct ib_cm_event cm_event;
2863 enum ib_cm_state state;
2864 unsigned long flags;
2865 int ret;
2866
2867 memset(&cm_event, 0, sizeof cm_event);
2868 cm_id_priv = msg->context[0];
2869
2870 /* Discard old sends or ones without a response. */
2871 spin_lock_irqsave(&cm_id_priv->lock, flags);
2872 state = (enum ib_cm_state) (unsigned long) msg->context[1];
2873 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
2874 goto discard;
2875
2876 switch (state) {
2877 case IB_CM_REQ_SENT:
2878 case IB_CM_MRA_REQ_RCVD:
2879 cm_reset_to_idle(cm_id_priv);
2880 cm_event.event = IB_CM_REQ_ERROR;
2881 break;
2882 case IB_CM_REP_SENT:
2883 case IB_CM_MRA_REP_RCVD:
2884 cm_reset_to_idle(cm_id_priv);
2885 cm_event.event = IB_CM_REP_ERROR;
2886 break;
2887 case IB_CM_DREQ_SENT:
2888 cm_enter_timewait(cm_id_priv);
2889 cm_event.event = IB_CM_DREQ_ERROR;
2890 break;
2891 case IB_CM_SIDR_REQ_SENT:
2892 cm_id_priv->id.state = IB_CM_IDLE;
2893 cm_event.event = IB_CM_SIDR_REQ_ERROR;
2894 break;
2895 default:
2896 goto discard;
2897 }
2898 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2899 cm_event.param.send_status = wc_status;
2900
2901 /* No other events can occur on the cm_id at this point. */
2902 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
2903 cm_free_msg(msg);
2904 if (ret)
2905 ib_destroy_cm_id(&cm_id_priv->id);
2906 return;
2907discard:
2908 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2909 cm_free_msg(msg);
2910}
2911
2912static void cm_send_handler(struct ib_mad_agent *mad_agent,
2913 struct ib_mad_send_wc *mad_send_wc)
2914{
2915 struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
2916
2917 switch (mad_send_wc->status) {
2918 case IB_WC_SUCCESS:
2919 case IB_WC_WR_FLUSH_ERR:
2920 cm_free_msg(msg);
2921 break;
2922 default:
2923 if (msg->context[0] && msg->context[1])
2924 cm_process_send_error(msg, mad_send_wc->status);
2925 else
2926 cm_free_msg(msg);
2927 break;
2928 }
2929}
2930
2931static void cm_work_handler(void *data)
2932{
2933 struct cm_work *work = data;
2934 int ret;
2935
2936 switch (work->cm_event.event) {
2937 case IB_CM_REQ_RECEIVED:
2938 ret = cm_req_handler(work);
2939 break;
2940 case IB_CM_MRA_RECEIVED:
2941 ret = cm_mra_handler(work);
2942 break;
2943 case IB_CM_REJ_RECEIVED:
2944 ret = cm_rej_handler(work);
2945 break;
2946 case IB_CM_REP_RECEIVED:
2947 ret = cm_rep_handler(work);
2948 break;
2949 case IB_CM_RTU_RECEIVED:
2950 ret = cm_rtu_handler(work);
2951 break;
2952 case IB_CM_USER_ESTABLISHED:
2953 ret = cm_establish_handler(work);
2954 break;
2955 case IB_CM_DREQ_RECEIVED:
2956 ret = cm_dreq_handler(work);
2957 break;
2958 case IB_CM_DREP_RECEIVED:
2959 ret = cm_drep_handler(work);
2960 break;
2961 case IB_CM_SIDR_REQ_RECEIVED:
2962 ret = cm_sidr_req_handler(work);
2963 break;
2964 case IB_CM_SIDR_REP_RECEIVED:
2965 ret = cm_sidr_rep_handler(work);
2966 break;
2967 case IB_CM_LAP_RECEIVED:
2968 ret = cm_lap_handler(work);
2969 break;
2970 case IB_CM_APR_RECEIVED:
2971 ret = cm_apr_handler(work);
2972 break;
2973 case IB_CM_TIMEWAIT_EXIT:
2974 ret = cm_timewait_handler(work);
2975 break;
2976 default:
2977 ret = -EINVAL;
2978 break;
2979 }
2980 if (ret)
2981 cm_free_work(work);
2982}
2983
2984int ib_cm_establish(struct ib_cm_id *cm_id)
2985{
2986 struct cm_id_private *cm_id_priv;
2987 struct cm_work *work;
2988 unsigned long flags;
2989 int ret = 0;
2990
2991 work = kmalloc(sizeof *work, GFP_ATOMIC);
2992 if (!work)
2993 return -ENOMEM;
2994
2995 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2996 spin_lock_irqsave(&cm_id_priv->lock, flags);
2997 switch (cm_id->state)
2998 {
2999 case IB_CM_REP_SENT:
3000 case IB_CM_MRA_REP_RCVD:
3001 cm_id->state = IB_CM_ESTABLISHED;
3002 break;
3003 case IB_CM_ESTABLISHED:
3004 ret = -EISCONN;
3005 break;
3006 default:
3007 ret = -EINVAL;
3008 break;
3009 }
3010 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3011
3012 if (ret) {
3013 kfree(work);
3014 goto out;
3015 }
3016
3017 /*
3018 * The CM worker thread may try to destroy the cm_id before it
3019 * can execute this work item. To prevent potential deadlock,
3020 * we need to find the cm_id once we're in the context of the
3021 * worker thread, rather than holding a reference on it.
3022 */
3023 INIT_WORK(&work->work, cm_work_handler, work);
3024 work->local_id = cm_id->local_id;
3025 work->remote_id = cm_id->remote_id;
3026 work->mad_recv_wc = NULL;
3027 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3028 queue_work(cm.wq, &work->work);
3029out:
3030 return ret;
3031}
3032EXPORT_SYMBOL(ib_cm_establish);
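/*
 * A minimal sketch of the intended use, assuming the passive side sees
 * its first receive completion before the RTU arrives (data may legally
 * beat the RTU); example_first_recv() is a hypothetical helper.
 *
 *	static void example_first_recv(struct ib_cm_id *cm_id)
 *	{
 *		ib_cm_establish(cm_id);	// -EISCONN if the RTU already won
 *	}
 *
 * The transition is reported back to the consumer as an
 * IB_CM_USER_ESTABLISHED event from the workqueue.
 */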
3033
3034static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3035 struct ib_mad_recv_wc *mad_recv_wc)
3036{
3037 struct cm_work *work;
3038 enum ib_cm_event_type event;
3039 int paths = 0;
3040
3041 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
3042 case CM_REQ_ATTR_ID:
3043 paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
3044 alt_local_lid != 0);
3045 event = IB_CM_REQ_RECEIVED;
3046 break;
3047 case CM_MRA_ATTR_ID:
3048 event = IB_CM_MRA_RECEIVED;
3049 break;
3050 case CM_REJ_ATTR_ID:
3051 event = IB_CM_REJ_RECEIVED;
3052 break;
3053 case CM_REP_ATTR_ID:
3054 event = IB_CM_REP_RECEIVED;
3055 break;
3056 case CM_RTU_ATTR_ID:
3057 event = IB_CM_RTU_RECEIVED;
3058 break;
3059 case CM_DREQ_ATTR_ID:
3060 event = IB_CM_DREQ_RECEIVED;
3061 break;
3062 case CM_DREP_ATTR_ID:
3063 event = IB_CM_DREP_RECEIVED;
3064 break;
3065 case CM_SIDR_REQ_ATTR_ID:
3066 event = IB_CM_SIDR_REQ_RECEIVED;
3067 break;
3068 case CM_SIDR_REP_ATTR_ID:
3069 event = IB_CM_SIDR_REP_RECEIVED;
3070 break;
3071 case CM_LAP_ATTR_ID:
3072 paths = 1;
3073 event = IB_CM_LAP_RECEIVED;
3074 break;
3075 case CM_APR_ATTR_ID:
3076 event = IB_CM_APR_RECEIVED;
3077 break;
3078 default:
3079 ib_free_recv_mad(mad_recv_wc);
3080 return;
3081 }
3082
3083 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
3084 GFP_KERNEL);
3085 if (!work) {
3086 ib_free_recv_mad(mad_recv_wc);
3087 return;
3088 }
3089
3090 INIT_WORK(&work->work, cm_work_handler, work);
3091 work->cm_event.event = event;
3092 work->mad_recv_wc = mad_recv_wc;
3093 work->port = (struct cm_port *)mad_agent->context;
3094 queue_work(cm.wq, &work->work);
3095}
3096
3097static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3098 struct ib_qp_attr *qp_attr,
3099 int *qp_attr_mask)
3100{
3101 unsigned long flags;
3102 int ret;
3103
3104 spin_lock_irqsave(&cm_id_priv->lock, flags);
3105 switch (cm_id_priv->id.state) {
3106 case IB_CM_REQ_SENT:
3107 case IB_CM_MRA_REQ_RCVD:
3108 case IB_CM_REQ_RCVD:
3109 case IB_CM_MRA_REQ_SENT:
3110 case IB_CM_REP_RCVD:
3111 case IB_CM_MRA_REP_SENT:
3112 case IB_CM_REP_SENT:
3113 case IB_CM_MRA_REP_RCVD:
3114 case IB_CM_ESTABLISHED:
3115 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3116 IB_QP_PKEY_INDEX | IB_QP_PORT;
3117 qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
3118 IB_ACCESS_REMOTE_WRITE;
3119 if (cm_id_priv->responder_resources)
3120 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
3121 qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3122 qp_attr->port_num = cm_id_priv->av.port->port_num;
3123 ret = 0;
3124 break;
3125 default:
3126 ret = -EINVAL;
3127 break;
3128 }
3129 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3130 return ret;
3131}
3132
3133static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3134 struct ib_qp_attr *qp_attr,
3135 int *qp_attr_mask)
3136{
3137 unsigned long flags;
3138 int ret;
3139
3140 spin_lock_irqsave(&cm_id_priv->lock, flags);
3141 switch (cm_id_priv->id.state) {
3142 case IB_CM_REQ_RCVD:
3143 case IB_CM_MRA_REQ_SENT:
3144 case IB_CM_REP_RCVD:
3145 case IB_CM_MRA_REP_SENT:
3146 case IB_CM_REP_SENT:
3147 case IB_CM_MRA_REP_RCVD:
3148 case IB_CM_ESTABLISHED:
3149 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
3150 IB_QP_DEST_QPN | IB_QP_RQ_PSN;
3151 qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3152 qp_attr->path_mtu = cm_id_priv->path_mtu;
3153 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3154 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3155 if (cm_id_priv->qp_type == IB_QPT_RC) {
3156 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
3157 IB_QP_MIN_RNR_TIMER;
3158 qp_attr->max_dest_rd_atomic =
3159 cm_id_priv->responder_resources;
3160 qp_attr->min_rnr_timer = 0;
3161 }
3162 if (cm_id_priv->alt_av.ah_attr.dlid) {
3163 *qp_attr_mask |= IB_QP_ALT_PATH;
3164 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3165 }
3166 ret = 0;
3167 break;
3168 default:
3169 ret = -EINVAL;
3170 break;
3171 }
3172 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3173 return ret;
3174}
3175
3176static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3177 struct ib_qp_attr *qp_attr,
3178 int *qp_attr_mask)
3179{
3180 unsigned long flags;
3181 int ret;
3182
3183 spin_lock_irqsave(&cm_id_priv->lock, flags);
3184 switch (cm_id_priv->id.state) {
3185 case IB_CM_REP_RCVD:
3186 case IB_CM_MRA_REP_SENT:
3187 case IB_CM_REP_SENT:
3188 case IB_CM_MRA_REP_RCVD:
3189 case IB_CM_ESTABLISHED:
3190 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
3191 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
3192 if (cm_id_priv->qp_type == IB_QPT_RC) {
3193 *qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
3194 IB_QP_RNR_RETRY |
3195 IB_QP_MAX_QP_RD_ATOMIC;
3196 qp_attr->timeout = cm_id_priv->local_ack_timeout;
3197 qp_attr->retry_cnt = cm_id_priv->retry_count;
3198 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
3199 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
3200 }
3201 if (cm_id_priv->alt_av.ah_attr.dlid) {
3202 *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
3203 qp_attr->path_mig_state = IB_MIG_REARM;
3204 }
3205 ret = 0;
3206 break;
3207 default:
3208 ret = -EINVAL;
3209 break;
3210 }
3211 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3212 return ret;
3213}
3214
3215int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
3216 struct ib_qp_attr *qp_attr,
3217 int *qp_attr_mask)
3218{
3219 struct cm_id_private *cm_id_priv;
3220 int ret;
3221
3222 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3223 switch (qp_attr->qp_state) {
3224 case IB_QPS_INIT:
3225 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
3226 break;
3227 case IB_QPS_RTR:
3228 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
3229 break;
3230 case IB_QPS_RTS:
3231 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
3232 break;
3233 default:
3234 ret = -EINVAL;
3235 break;
3236 }
3237 return ret;
3238}
3239EXPORT_SYMBOL(ib_cm_init_qp_attr);
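/*
 * A usage sketch of the expected call sequence, assuming a connection
 * consumer transitioning its RC QP once the CM exchange allows it;
 * example_modify_qp() is hypothetical but mirrors how ULPs drive
 * INIT -> RTR -> RTS with the attributes filled in above.
 *
 *	static int example_modify_qp(struct ib_cm_id *cm_id,
 *				     struct ib_qp *qp,
 *				     enum ib_qp_state state)
 *	{
 *		struct ib_qp_attr qp_attr;
 *		int qp_attr_mask;
 *		int ret;
 *
 *		qp_attr.qp_state = state;
 *		ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *		if (ret)
 *			return ret;
 *		return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 *	}
 *
 * Called in order:
 *	example_modify_qp(cm_id, qp, IB_QPS_INIT);
 *	example_modify_qp(cm_id, qp, IB_QPS_RTR);
 *	example_modify_qp(cm_id, qp, IB_QPS_RTS);
 */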
3240
3241static void cm_add_one(struct ib_device *device)
3242{
3243 struct cm_device *cm_dev;
3244 struct cm_port *port;
3245 struct ib_mad_reg_req reg_req = {
3246 .mgmt_class = IB_MGMT_CLASS_CM,
3247 .mgmt_class_version = IB_CM_CLASS_VERSION
3248 };
3249 struct ib_port_modify port_modify = {
3250 .set_port_cap_mask = IB_PORT_CM_SUP
3251 };
3252 unsigned long flags;
3253 int ret;
3254 u8 i;
3255
3256 cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
3257 device->phys_port_cnt, GFP_KERNEL);
3258 if (!cm_dev)
3259 return;
3260
3261 cm_dev->device = device;
3262 cm_dev->ca_guid = device->node_guid;
3263
3264 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
3265 for (i = 1; i <= device->phys_port_cnt; i++) {
3266 port = &cm_dev->port[i-1];
3267 port->cm_dev = cm_dev;
3268 port->port_num = i;
3269 port->mad_agent = ib_register_mad_agent(device, i,
3270 IB_QPT_GSI,
3271 &reg_req,
3272 0,
3273 cm_send_handler,
3274 cm_recv_handler,
3275 port);
3276 if (IS_ERR(port->mad_agent))
3277 goto error1;
3278
3279 ret = ib_modify_port(device, i, 0, &port_modify);
3280 if (ret)
3281 goto error2;
3282 }
3283 ib_set_client_data(device, &cm_client, cm_dev);
3284
3285 write_lock_irqsave(&cm.device_lock, flags);
3286 list_add_tail(&cm_dev->list, &cm.device_list);
3287 write_unlock_irqrestore(&cm.device_lock, flags);
3288 return;
3289
3290error2:
3291 ib_unregister_mad_agent(port->mad_agent);
3292error1:
3293 port_modify.set_port_cap_mask = 0;
3294 port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
3295 while (--i) {
3296 port = &cm_dev->port[i-1];
3297 ib_modify_port(device, port->port_num, 0, &port_modify);
3298 ib_unregister_mad_agent(port->mad_agent);
3299 }
3300 kfree(cm_dev);
3301}
3302
3303static void cm_remove_one(struct ib_device *device)
3304{
3305 struct cm_device *cm_dev;
3306 struct cm_port *port;
3307 struct ib_port_modify port_modify = {
3308 .clr_port_cap_mask = IB_PORT_CM_SUP
3309 };
3310 unsigned long flags;
3311 int i;
3312
3313 cm_dev = ib_get_client_data(device, &cm_client);
3314 if (!cm_dev)
3315 return;
3316
3317 write_lock_irqsave(&cm.device_lock, flags);
3318 list_del(&cm_dev->list);
3319 write_unlock_irqrestore(&cm.device_lock, flags);
3320
3321 for (i = 1; i <= device->phys_port_cnt; i++) {
3322 port = &cm_dev->port[i-1];
3323 ib_modify_port(device, port->port_num, 0, &port_modify);
3324 ib_unregister_mad_agent(port->mad_agent);
3325 }
3326 kfree(cm_dev);
3327}
3328
3329static int __init ib_cm_init(void)
3330{
3331 int ret;
3332
3333 memset(&cm, 0, sizeof cm);
3334 INIT_LIST_HEAD(&cm.device_list);
3335 rwlock_init(&cm.device_lock);
3336 spin_lock_init(&cm.lock);
3337 cm.listen_service_table = RB_ROOT;
3338 cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
3339 cm.remote_id_table = RB_ROOT;
3340 cm.remote_qp_table = RB_ROOT;
3341 cm.remote_sidr_table = RB_ROOT;
3342 idr_init(&cm.local_id_table);
3343 idr_pre_get(&cm.local_id_table, GFP_KERNEL);
3344
3345 cm.wq = create_workqueue("ib_cm");
3346 if (!cm.wq)
3347 return -ENOMEM;
3348
3349 ret = ib_register_client(&cm_client);
3350 if (ret)
3351 goto error;
3352
3353 return 0;
3354error:
3355 destroy_workqueue(cm.wq);
3356 return ret;
3357}
3358
3359static void __exit ib_cm_cleanup(void)
3360{
3361 destroy_workqueue(cm.wq);
3362 ib_unregister_client(&cm_client);
3363 idr_destroy(&cm.local_id_table);
3364}
3365
3366module_init(ib_cm_init);
3367module_exit(ib_cm_cleanup);
3368