/*
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mad.c 1389 2004-12-27 22:56:47Z roland $
 */
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");
kmem_cache_t *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

static spinlock_t ib_mad_port_list_lock;
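
/*
 * Note: ib_mad_cache backs the MAD receive/response buffer allocations used
 * throughout this file, ib_mad_port_list (protected by ib_mad_port_list_lock)
 * tracks the per-port private data, and ib_mad_client_id is incremented on
 * each agent registration to hand out the high 32 bits of the transaction ID
 * (hi_tid).
 */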
/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					struct ib_mad *mad, int solicited);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
				    struct ib_mad_send_wc *mad_send_wc);
static void timeout_sends(void *data);
static void cancel_sends(void *data);
static void local_completions(void *data);
static int solicited_mad(struct ib_mad *mad);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
/*
 * Returns a ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}
/*
 * Wrapper function to return a ib_mad_port_private structure or NULL
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}
static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}
static int is_vendor_method_in_use(
			struct ib_mad_mgmt_vendor_class *vendor_class,
			struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method_in_use(&method, mad_reg_req))
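
/*
 * Registration bookkeeping, as used by the helpers above and the registration
 * code below: each port keeps a table indexed by management class version;
 * ordinary classes map straight to a method table whose slots point at the
 * owning agent, while "new" vendor range 2 classes go through a vendor table
 * indexed by class and then by OUI before reaching a method table.
 */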
/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	u8 mgmt_class, vclass;
	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
		goto error1;	/* XXX: until RMPP implemented */

	/* Validate MAD registration request if supplied */
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui))
		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
	/* No registration request supplied */

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
		ret = ERR_PTR(-ENODEV);

	/* Allocate structures */
	mad_agent_priv = kmalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);

		reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
			ret = ERR_PTR(-ENOMEM);
		/* Make a copy of the MAD registration request */
		memcpy(reg_req, mad_reg_req, sizeof *reg_req);

	/* Now, fill in the various structures */
	memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
				method = class->method_table[mgmt_class];
					if (method_in_use(&method,
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
					if (is_vendor_method_in_use(
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions,
		  mad_agent_priv);
	INIT_LIST_HEAD(&mad_agent_priv->canceled_list);
	INIT_WORK(&mad_agent_priv->canceled_work, cancel_sends, mad_agent_priv);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_waitqueue_head(&mad_agent_priv->wait);

	return &mad_agent_priv->agent;

	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(mad_agent_priv);
}
EXPORT_SYMBOL(ib_register_mad_agent);
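
/*
 * Usage sketch (not part of this file): a client typically registers once per
 * port and checks the result with IS_ERR(). Judging by the field assignments
 * above, the parameters elided in this listing are the port number, the RMPP
 * version, and an opaque client context, so a call is assumed to look roughly
 * like the following, where my_send_handler, my_recv_handler and my_context
 * are placeholders:
 *
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &reg_req,
 *				      0, my_send_handler, my_recv_handler,
 *				      my_context);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */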
static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}
static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])

	if (i == qp_info->snoop_table_size) {
		new_snoop_table = kmalloc(sizeof mad_snoop_priv *
					  qp_info->snoop_table_size + 1,
		if (!new_snoop_table) {
		if (qp_info->snoop_table) {
			memcpy(new_snoop_table, qp_info->snoop_table,
			       sizeof mad_snoop_priv *
			       qp_info->snoop_table_size);
			kfree(qp_info->snoop_table);
		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
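
/*
 * The snoop table is grown one slot at a time under snoop_lock: the existing
 * entries are copied into a freshly allocated array, the old array is freed,
 * and the new agent takes the first free index, which is later used as
 * snoop_index when unregistering.
 */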
struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   enum ib_qp_type qp_type,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
	qpn = get_spl_qp_index(qp_type);
		ret = ERR_PTR(-EINVAL);
	port_priv = ib_get_mad_port(device, port_num);
		ret = ERR_PTR(-ENODEV);
	/* Allocate structures */
	mad_snoop_priv = kmalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);

	/* Now, fill in the various structures */
	memset(mad_snoop_priv, 0, sizeof *mad_snoop_priv);
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_waitqueue_head(&mad_snoop_priv->wait);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

	kfree(mad_snoop_priv);
}
EXPORT_SYMBOL(ib_register_mad_snoop);
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);

	port_priv = mad_agent_priv->qp_info->port_priv;

	cancel_delayed_work(&mad_agent_priv->timed_work);
	flush_workqueue(port_priv->wq);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	/* XXX: Cleanup pending RMPP receives for this agent */

	atomic_dec(&mad_agent_priv->refcount);
	wait_event(mad_agent_priv->wait,
		   !atomic_read(&mad_agent_priv->refcount));

	if (mad_agent_priv->reg_req)
		kfree(mad_agent_priv->reg_req);
	kfree(mad_agent_priv);
}
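
/*
 * Teardown relies on the agent's reference count: the initial reference taken
 * at registration is dropped here, and wait_event() then blocks until every
 * in-flight send, receive or local completion that still holds a reference
 * drops it and wakes mad_agent_priv->wait.
 */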
static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	atomic_dec(&mad_snoop_priv->refcount);
	wait_event(mad_snoop_priv->wait,
		   !atomic_read(&mad_snoop_priv->refcount));

	kfree(mad_snoop_priv);
}
/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}
static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_send_wr *send_wr,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_wr, mad_send_wc);
		if (atomic_dec_and_test(&mad_snoop_priv->refcount))
			wake_up(&mad_snoop_priv->wait);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
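
/*
 * Note the locking pattern shared by snoop_send() and snoop_recv():
 * snoop_lock is dropped around each client callback, with a temporary
 * reference keeping the snoop agent alive, and re-acquired before moving on
 * to the next table slot.
 */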
static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
						   mad_recv_wc);
		if (atomic_dec_and_test(&mad_snoop_priv->refcount))
			wake_up(&mad_snoop_priv->wait);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
			 struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}
/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_smp *smp,
				  struct ib_send_wr *send_wr)
{
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;

	if (!smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
		printk(KERN_ERR PFX "Invalid directed route\n");

	/* Check to post send on QP or process locally */
	ret = smi_check_local_dr_smp(smp, device, port_num);
	if (!ret || !device->process_mad)

	local = kmalloc(sizeof *local, GFP_ATOMIC);
		printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");

	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
		printk(KERN_ERR PFX "No memory for local response MAD\n");

	build_smp_wc(send_wr->wr_id, smp->dr_slid, send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (struct ib_mad *)smp,
				  (struct ib_mad *)&mad_priv->mad);
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		/*
		 * See if response is solicited and
		 * there is a recv handler
		 */
		if (solicited_mad(&mad_priv->mad.mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		solicited = solicited_mad(&mad_priv->mad.mad);
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		mad_priv->mad.mad.mad_hdr.tid =
				((struct ib_mad *)smp)->mad_hdr.tid;
		recv_mad_agent = find_mad_agent(port_priv,
						&mad_priv->mad.mad,
						solicited);
		if (!port_priv || !recv_mad_agent) {
			kmem_cache_free(ib_mad_cache, mad_priv);
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kmem_cache_free(ib_mad_cache, mad_priv);

	local->send_wr = *send_wr;
	local->send_wr.sg_list = local->sg_list;
	memcpy(local->sg_list, send_wr->sg_list,
	       sizeof *send_wr->sg_list * send_wr->num_sge);
	local->send_wr.next = NULL;
	local->tid = send_wr->wr.ud.mad_hdr->tid;
	local->wr_id = send_wr->wr_id;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
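
/*
 * Locally consumed DR SMPs never reach the QP: process_mad() runs them
 * against the local port, and both the send completion and, for replies, the
 * synthesized receive completion are delivered later from the port work queue
 * by local_completions().
 */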
static int ib_send_mad(struct ib_mad_agent_private *mad_agent_priv,
		       struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_send_wr *bad_send_wr;

	/* Replace user's WR ID with our own to find WR upon completion */
	qp_info = mad_agent_priv->qp_info;
	mad_send_wr->wr_id = mad_send_wr->send_wr.wr_id;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count++ < qp_info->send_queue.max_active) {
		list_add_tail(&mad_send_wr->mad_list.list,
			      &qp_info->send_queue.list);
		spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
		ret = ib_post_send(mad_agent_priv->agent.qp,
				   &mad_send_wr->send_wr, &bad_send_wr);
		if (ret) {
			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
			dequeue_mad(&mad_send_wr->mad_list);
		}
	} else {
		list_add_tail(&mad_send_wr->mad_list.list,
			      &qp_info->overflow_list);
		spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	}
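
/*
 * Send-side flow control: at most send_queue.max_active work requests are
 * posted to the QP at once; anything beyond that parks on overflow_list and
 * is posted by ib_mad_send_done_handler() as completions drain the queue.
 */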
/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *   with the registered client
 */
int ib_post_send_mad(struct ib_mad_agent *mad_agent,
		     struct ib_send_wr *send_wr,
		     struct ib_send_wr **bad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;

	/* Validate supplied parameters */
	if (!mad_agent || !send_wr)
	if (!mad_agent->send_handler)

	mad_agent_priv = container_of(mad_agent,
				      struct ib_mad_agent_private,
				      agent);

	/* Walk list of send WRs and post each on send list */
	while (send_wr) {
		struct ib_send_wr		*next_send_wr;
		struct ib_mad_send_wr_private	*mad_send_wr;

		/* Validate more parameters */
		if (send_wr->num_sge > IB_MAD_SEND_REQ_MAX_SG)
		if (send_wr->wr.ud.timeout_ms && !mad_agent->recv_handler)
		if (!send_wr->wr.ud.mad_hdr) {
			printk(KERN_ERR PFX "MAD header must be supplied "
			       "in WR %p\n", send_wr);

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_wr = (struct ib_send_wr *)send_wr->next;

		smp = (struct ib_smp *)send_wr->wr.ud.mad_hdr;
		if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv, smp,
						     send_wr);
			if (ret < 0)		/* error */
			else if (ret == 1)	/* locally consumed */
		/* Allocate MAD send WR tracking structure */
		mad_send_wr = kmalloc(sizeof *mad_send_wr, GFP_ATOMIC);
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_send_wr_private\n");

		mad_send_wr->send_wr = *send_wr;
		mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
		memcpy(mad_send_wr->sg_list, send_wr->sg_list,
		       sizeof *send_wr->sg_list * send_wr->num_sge);
		mad_send_wr->send_wr.next = NULL;
		mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid;
		mad_send_wr->agent = mad_agent;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_wr->wr.
							ud.timeout_ms);
		mad_send_wr->retry = 0;
		/* One reference for each work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		ret = ib_send_mad(mad_agent_priv, mad_send_wr);
		if (ret) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
		}
		send_wr = next_send_wr;
	}

	*bad_send_wr = send_wr;
}
EXPORT_SYMBOL(ib_post_send_mad);
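
/*
 * On failure, *bad_send_wr is left pointing at the first work request that
 * was not posted, so callers can tell how far the chain got. Each posted
 * request pins the agent via its reference count and, when a timeout is set,
 * carries an extra reference that is only dropped once the matching response
 * arrives, the request times out, or it is canceled.
 */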
/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *entry;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;

	mad_priv_hdr = container_of(mad_recv_wc,
				    struct ib_mad_private_header,
				    recv_wc);
	priv = container_of(mad_priv_hdr, struct ib_mad_private, header);

	/*
	 * Walk receive buffer list associated with this WC
	 * No need to remove them from list of receive buffers
	 */
	list_for_each_entry(entry, &mad_recv_wc->recv_buf.list, list) {
		/* Free previous receive buffer */
		kmem_cache_free(ib_mad_cache, priv);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
	}

	/* Free last buffer */
	kmem_cache_free(ib_mad_cache, priv);
}
EXPORT_SYMBOL(ib_free_recv_mad);

void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc,
{
	printk(KERN_ERR PFX "ib_coalesce_recv_mad() not implemented yet\n");
}
EXPORT_SYMBOL(ib_coalesce_recv_mad);
struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
{
	printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
}
EXPORT_SYMBOL(ib_process_mad_wc);
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
		if ((*method)->agent[i]) {
			printk(KERN_ERR PFX "Method %d already in use\n", i);

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kmalloc(sizeof **method, GFP_ATOMIC);
		printk(KERN_ERR PFX "No memory for "
		       "ib_mad_mgmt_method_table\n");

	/* Clear management method table */
	memset(*method, 0, sizeof **method);
}
/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
		/* Allocate management class table for "new" class version */
		*class = kmalloc(sizeof **class, GFP_ATOMIC);
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_class_table\n");

		/* Clear management class table */
		memset(*class, 0, sizeof(**class));
		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
		method = &(*class)->method_table[mgmt_class];
		/* Allocate method table for this management class */
		if ((ret = allocate_method_table(method)))

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))

	/* Finally, add in methods being registered */
	for (i = find_first_bit(mad_reg_req->method_mask,
				IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
		(*method)->agent[i] = agent_priv;

	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kmalloc(sizeof *vendor, GFP_ATOMIC);
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class_table\n");

		/* Clear management vendor class table */
		memset(vendor, 0, sizeof(*vendor));
		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kmalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class\n");

		memset(vendor_class, 0, sizeof(*vendor_class));
		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			/* Allocate method table for this OUI */
			if ((ret = allocate_method_table(method)))
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);

	printk(KERN_ERR PFX "All OUI slots in use\n");

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))

	/* Finally, add in methods being registered */
	for (i = find_first_bit(mad_reg_req->method_mask,
				IB_MGMT_MAX_METHODS);
	     i < IB_MGMT_MAX_METHODS;
	     i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
		(*method)->agent[i] = agent_priv;

	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */

	(*vendor_table)->vendor_class[vclass] = NULL;
	kfree(vendor_class);

	*vendor_table = NULL;
static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req) {

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	method = class->method_table[mgmt_class];
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;

	if (!is_vendor_class(mgmt_class))

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;
	vendor_class = vendor->vendor_class[mgmt_class];
	index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
	method = vendor_class->method_table[index];
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/*
		 * Now, check to see if there are
		 * any methods still in use
		 */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			vendor_class->method_table[index] = NULL;
			memset(vendor_class->oui[index], 0, 3);
			/* Any OUIs left ? */
			if (!check_vendor_class(vendor_class)) {
				/* If not, release vendor class table */
				kfree(vendor_class);
				vendor->vendor_class[mgmt_class] = NULL;
				/* Any other vendor classes left ? */
				if (!check_vendor_table(vendor)) {
						agent_priv->reg_req->
						mgmt_class_version].
static int response_mad(struct ib_mad *mad)
{
	/* Trap represses are responses although response bit is reset */
	return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		(mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
}

static int solicited_mad(struct ib_mad *mad)
{
	/* CM MADs are never solicited */
	if (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CM) {
		return 0;
	}

	/* XXX: Determine whether MAD is using RMPP */

	/* Not using RMPP */
	/* Is this MAD a response to a previous MAD ? */
	return response_mad(mad);
}
static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	/*
	 * Whether MAD was solicited determines type of routing to
	 * MAD agent.
	 */
	if (solicited) {
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 */
		hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list,
			if (entry->agent.hi_tid == hi_tid) {
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		struct ib_vendor_mad *vendor_mad;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
		if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
			class = port_priv->version[
					mad->mad_hdr.class_version].class;
			method = class->method_table[convert_mgmt_class(
						mad->mad_hdr.mgmt_class)];
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad->mad_hdr.class_version].vendor;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad->mad_hdr.mgmt_class)];
			/* Find matching OUI */
			vendor_mad = (struct ib_vendor_mad *)mad;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			method = vendor_class->method_table[index];
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
		}
	}

	if (mad_agent->agent.recv_handler)
		atomic_inc(&mad_agent->refcount);
	else {
		printk(KERN_NOTICE PFX "No receive handler for client "
		       &mad_agent->agent, port_priv->port_num);
	}

	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
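
/*
 * Receive routing summary: solicited MADs are matched to the sending agent
 * via the high 32 bits of the transaction ID (the hi_tid assigned at
 * registration), while unsolicited MADs are dispatched through the class
 * version / management class / method tables, with an extra OUI lookup for
 * vendor range 2 classes. A matched agent without a recv_handler is rejected.
 */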
static int validate_mad(struct ib_mad *mad, u32 qp_num)
{
	/* Make sure MAD base version is understood */
	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
		printk(KERN_ERR PFX "MAD received with unsupported base "
		       "version %d\n", mad->mad_hdr.base_version);

	/* Filter SMI packets sent to other than QP0 */
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {

	/* Filter GSI packets sent to QP0 */

/*
 * Return start of fully reassembled MAD, or NULL, if MAD isn't assembled yet
 */
static struct ib_mad_private *
reassemble_recv(struct ib_mad_agent_private *mad_agent_priv,
		struct ib_mad_private *recv)
{
	/* Until we have RMPP, all receives are reassembled!... */
	INIT_LIST_HEAD(&recv->header.recv_wc.recv_buf.list);
	return recv;
}
static struct ib_mad_send_wr_private *
find_send_req(struct ib_mad_agent_private *mad_agent_priv,
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
		if (mad_send_wr->tid == tid)

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
		if (mad_send_wr->tid == tid && mad_send_wr->timeout) {
			/* Verify request has not been canceled */
			return (mad_send_wr->status == IB_WC_SUCCESS) ?
static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_private *recv,
				 int solicited)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	/* Fully reassemble receive before processing */
	recv = reassemble_recv(mad_agent_priv, recv);
		if (atomic_dec_and_test(&mad_agent_priv->refcount))
			wake_up(&mad_agent_priv->wait);

	/* Complete corresponding request */
	if (solicited) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = find_send_req(mad_agent_priv,
					    recv->mad.mad.mad_hdr.tid);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			ib_free_recv_mad(&recv->header.recv_wc);
			if (atomic_dec_and_test(&mad_agent_priv->refcount))
				wake_up(&mad_agent_priv->wait);
		}
		/* Timeout = 0 means that we won't wait for a response */
		mad_send_wr->timeout = 0;
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		/* Defined behavior is to complete response before request */
		recv->header.recv_wc.wc->wr_id = mad_send_wr->wr_id;
		mad_agent_priv->agent.recv_handler(
						&mad_agent_priv->agent,
						&recv->header.recv_wc);
		atomic_dec(&mad_agent_priv->refcount);

		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.wr_id = mad_send_wr->wr_id;
		ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
	} else {
		mad_agent_priv->agent.recv_handler(
						&mad_agent_priv->agent,
						&recv->header.recv_wc);
		if (atomic_dec_and_test(&mad_agent_priv->refcount))
			wake_up(&mad_agent_priv->wait);
	}
}
static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_agent_private *mad_agent;

	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
		       "for response buffer\n");

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	dma_unmap_single(port_priv->device->dma_device,
			 pci_unmap_addr(&recv->header, mapping),
			 sizeof(struct ib_mad_private) -
			 sizeof(struct ib_mad_private_header),

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.recv_wc.wc = wc;
	recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
	recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
	if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))

	if (recv->mad.mad.mad_hdr.mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (!smi_handle_dr_smp_recv(&recv->mad.smp,
					    port_priv->device->node_type,
					    port_priv->port_num,
					    port_priv->device->phys_port_cnt))
		if (!smi_check_forward_dr_smp(&recv->mad.smp))
		if (!smi_handle_dr_smp_send(&recv->mad.smp,
					    port_priv->device->node_type,
					    port_priv->port_num))
		if (!smi_check_local_dr_smp(&recv->mad.smp,
					    port_priv->port_num))

	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
			printk(KERN_ERR PFX "No memory for response MAD\n");
			/*
			 * Is it better to assume that
			 * it wouldn't be processed ?
			 */

		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     &response->mad.mad);
		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
			if (ret & IB_MAD_RESULT_REPLY) {
				if (!agent_send(response, &recv->grh, wc,
						port_priv->port_num))

	/* Determine corresponding MAD agent for incoming receive MAD */
	solicited = solicited_mad(&recv->mad.mad);
	mad_agent = find_mad_agent(port_priv, &recv->mad.mad, solicited);
		ib_mad_complete_recv(mad_agent, recv, solicited);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */

	/* Post another receive request for this QP */
		ib_mad_post_receive_mads(qp_info, response);
		kmem_cache_free(ib_mad_cache, recv);
		ib_mad_post_receive_mads(qp_info, recv);
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			cancel_delayed_work(&mad_agent_priv->timed_work);
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
		}
	}
}
static void wait_for_response(struct ib_mad_agent_private *mad_agent_priv,
			      struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
		temp_mad_send_wr = list_entry(list_item,
					      struct ib_mad_send_wr_private,
					      agent_list);
		if (time_after(mad_send_wr->timeout,
			       temp_mad_send_wr->timeout))
			break;
	}
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				   &mad_agent_priv->timed_work, delay);
	}
}
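
/*
 * wait_list is kept ordered by absolute timeout (in jiffies): the backwards
 * walk above inserts the new entry behind the first one that expires earlier,
 * so the head of the list is always the next request to time out and drives
 * the delayed work item.
 */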
/*
 * Process a send work completion
 */
static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
				    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;

	mad_agent_priv = container_of(mad_send_wr->agent,
				      struct ib_mad_agent_private, agent);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_agent_priv, mad_send_wr);
		}
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
					   mad_send_wc);

	/* Release reference on agent taken when sending */
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		wake_up(&mad_agent_priv->wait);
static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					      struct ib_mad_send_wr_private,
					      mad_list);
		list_del(&mad_list->list);
		list_add_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	/* Restore client wr_id in WC and complete send */
	wc->wr_id = mad_send_wr->wr_id;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_wr,
			   (struct ib_mad_send_wc *)wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, (struct ib_mad_send_wc *)wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
				   &bad_send_wr);
		if (ret) {
			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
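
/*
 * When a send completion frees a slot, the oldest entry on overflow_list is
 * promoted to the send queue and posted here; if that post fails, the work
 * completion is rewritten as a local QP error so the promoted request is
 * completed with a failure as well.
 */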
static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}
static void mad_error_handler(struct ib_mad_port_private *port_priv,
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;

	/* Determine if failure was a send or receive */
	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	if (mad_list->mad_queue == &qp_info->recv_queue)
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
					   &bad_send_wr);
			if (ret)
				ib_mad_send_done_handler(port_priv, wc);
		} else
			ib_mad_send_done_handler(port_priv, wc);
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			if (ret)
				printk(KERN_ERR PFX "mad_error_handler - "
				       "ib_modify_qp to RTS : %d\n", ret);
			else
				mark_sends_for_retry(qp_info);

		ib_mad_send_done_handler(port_priv, wc);
	}
}
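
/*
 * Recovery policy: flushed sends that were marked for retry are reposted
 * once, while a genuine send error moves the QP from SQE back to RTS, marks
 * everything still on the send queue for retry, and completes the offending
 * request with its error status.
 */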
/*
 * IB MAD completion callback
 */
static void ib_mad_completion_handler(void *data)
{
	struct ib_mad_port_private *port_priv;

	port_priv = (struct ib_mad_port_private *)data;
	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_mad_send_done_handler(port_priv, &wc);
				break;
			case IB_WC_RECV:
				ib_mad_recv_done_handler(port_priv, &wc);
				break;
			}
		} else
			mad_error_handler(port_priv, &wc);
	}
}
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.wr_id = mad_send_wr->wr_id;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		list_del(&mad_send_wr->agent_list);
		atomic_dec(&mad_agent_priv->refcount);
	}
}
static struct ib_mad_send_wr_private *
find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv,
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
		if (mad_send_wr->wr_id == wr_id)

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
		if (mad_send_wr->wr_id == wr_id)
void cancel_sends(void *data)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	mad_agent_priv = data;

	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->canceled_list)) {
		mad_send_wr = list_entry(mad_agent_priv->canceled_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		list_del(&mad_send_wr->agent_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		mad_send_wc.wr_id = mad_send_wr->wr_id;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		if (atomic_dec_and_test(&mad_agent_priv->refcount))
			wake_up(&mad_agent_priv->wait);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
void ib_cancel_mad(struct ib_mad_agent *mad_agent,
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_by_wr_id(mad_agent_priv, wr_id);
	if (!mad_send_wr) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return;
	}

	if (mad_send_wr->status == IB_WC_SUCCESS)
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);

	if (mad_send_wr->refcount != 0) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return;
	}

	list_del(&mad_send_wr->agent_list);
	list_add_tail(&mad_send_wr->agent_list, &mad_agent_priv->canceled_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->canceled_work);
}
EXPORT_SYMBOL(ib_cancel_mad);
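
/*
 * Cancellation is two-stage: if the request still holds references (a send or
 * response is outstanding) it is only marked IB_WC_WR_FLUSH_ERR and the
 * normal completion path finishes it; otherwise it moves to canceled_list and
 * cancel_sends() reports the flush status to the client from the port work
 * queue.
 */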
static void local_completions(void *data)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	struct ib_mad_send_wc mad_send_wc;

	mad_agent_priv = (struct ib_mad_agent_private *)data;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		if (local->mad_priv) {
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
				kmem_cache_free(ib_mad_cache, local->mad_priv);
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(local->wr_id, IB_LID_PERMISSIVE,
				     recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;
			local->mad_priv->header.recv_wc.mad_len =
						sizeof(struct ib_mad);
			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.recv_buf.list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						&local->mad_priv->mad.mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}
local_send_completion:
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.wr_id = local->wr_id;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info, &local->send_wr,
				   &mad_send_wc,
				   IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_del(&local->completion_list);
		atomic_dec(&mad_agent_priv->refcount);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
static void timeout_sends(void *data)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = (struct ib_mad_agent_private *)data;

	mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		mad_send_wc.wr_id = mad_send_wr->wr_id;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		kfree(mad_send_wr);
		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
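
/*
 * CQ callback: defer all MAD completion processing to the per-port
 * workqueue so it runs in process context.
 */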
static void ib_mad_thread_completion_handler(struct ib_cq *cq)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;

	queue_work(port_priv->wq, &port_priv->work);
}
/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
	sg_list.lkey = (*qp_info->port_priv->mr).lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
			if (!mad_priv) {
				printk(KERN_ERR PFX "No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.addr = dma_map_single(qp_info->port_priv->
						device->dma_device,
					      &mad_priv->grh,
					      sizeof *mad_priv -
						sizeof mad_priv->header,
					      DMA_FROM_DEVICE);
		pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
		mad_priv->header.mad_list.mad_queue = recv_queue;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			dma_unmap_single(qp_info->port_priv->device->dma_device,
					 pci_unmap_addr(&mad_priv->header,
							mapping),
					 sizeof *mad_priv -
						sizeof mad_priv->header,
					 DMA_FROM_DEVICE);
			kmem_cache_free(ib_mad_cache, mad_priv);
			printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}
/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		/* Undo PCI mapping */
		dma_unmap_single(qp_info->port_priv->device->dma_device,
				 pci_unmap_addr(&recv->header, mapping),
				 sizeof(struct ib_mad_private) -
				 sizeof(struct ib_mad_private_header),
				 DMA_FROM_DEVICE);

		kmem_cache_free(ib_mad_cache, recv);
	}

	qp_info->recv_queue.count = 0;
}
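
/*
 * Start the port: move both core MAD QPs through INIT/RTR/RTS,
 * request CQ notification, and post the initial receive WRs.
 */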
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = 0;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "INIT: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTR: %d\n", i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't change QP%d state to "
			       "RTS: %d\n", i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		printk(KERN_ERR PFX "Failed to request completion "
		       "notification: %d\n", ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}
static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}
static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
	qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
		       get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
	qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
	return 0;

error:
	return ret;
}
static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	ib_destroy_qp(qp_info->qp);
	if (qp_info->snoop_table)
		kfree(qp_info->snoop_table);
}
/*
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];

	/* First, check if port already open at MAD layer */
	port_priv = ib_get_mad_port(device, port_num);
	if (port_priv) {
		printk(KERN_DEBUG PFX "%s port %d already open\n",
		       device->name, port_num);
		return 0;
	}

	/* Create new device info */
	port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}
	memset(port_priv, 0, sizeof *port_priv);
	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
	port_priv->cq = ib_create_cq(port_priv->device,
				     (ib_comp_handler)
					ib_mad_thread_completion_handler,
				     NULL, port_priv, cq_size);
	if (IS_ERR(port_priv->cq)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device);
	if (IS_ERR(port_priv->pd)) {
		printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(port_priv->mr)) {
		printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
		ret = PTR_ERR(port_priv->mr);
		goto error5;
	}

	ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
	if (ret)
		goto error6;
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = create_singlethread_workqueue(name);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		printk(KERN_ERR PFX "Couldn't start port\n");
		goto error9;
	}

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
	return 0;

error9:
	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dereg_mr(port_priv->mr);
error5:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);
	return ret;
}
/*
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		printk(KERN_ERR PFX "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	/* Stop processing completions. */
	flush_workqueue(port_priv->wq);
	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dereg_mr(port_priv->mr);
	ib_dealloc_pd(port_priv->pd);
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}
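
/*
 * Device add callback: open the MAD port(s) and their agents.  A switch
 * is managed through port 0 only; other node types use ports
 * 1..phys_port_cnt.
 */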
static void ib_mad_init_device(struct ib_device *device)
{
	int ret, num_ports, cur_port, i, ret2;

	if (device->node_type == IB_NODE_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		ret = ib_mad_port_open(device, cur_port);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't open %s port %d\n",
			       device->name, cur_port);
			goto error_device_open;
		}
		ret = ib_agent_port_open(device, cur_port);
		if (ret) {
			printk(KERN_ERR PFX "Couldn't open %s port %d "
			       "for agents\n",
			       device->name, cur_port);
			goto error_device_open;
		}
	}

	goto error_device_query;

error_device_open:
	while (i > 0) {
		cur_port--;
		ret2 = ib_agent_port_close(device, cur_port);
		if (ret2) {
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, cur_port);
		}
		ret2 = ib_mad_port_close(device, cur_port);
		if (ret2) {
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, cur_port);
		}
		i--;
	}

error_device_query:
	return;
}
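
/*
 * Device removal callback: close the agent and MAD port resources for
 * every port opened in ib_mad_init_device.
 */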
static void ib_mad_remove_device(struct ib_device *device)
{
	int ret = 0, i, num_ports, cur_port, ret2;

	if (device->node_type == IB_NODE_SWITCH) {
		num_ports = 1;
		cur_port = 0;
	} else {
		num_ports = device->phys_port_cnt;
		cur_port = 1;
	}
	for (i = 0; i < num_ports; i++, cur_port++) {
		ret2 = ib_agent_port_close(device, cur_port);
		if (ret2) {
			printk(KERN_ERR PFX "Couldn't close %s port %d "
			       "for agents\n",
			       device->name, cur_port);
			if (!ret)
				ret = ret2;
		}
		ret2 = ib_mad_port_close(device, cur_port);
		if (ret2) {
			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
			       device->name, cur_port);
			if (!ret)
				ret = ret2;
		}
	}
}
static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};
static int __init ib_mad_init_module(void)
{
	int ret;

	spin_lock_init(&ib_mad_port_list_lock);
	spin_lock_init(&ib_agent_port_list_lock);

	ib_mad_cache = kmem_cache_create("ib_mad",
					 sizeof(struct ib_mad_private),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL,
					 NULL);
	if (!ib_mad_cache) {
		printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
		ret = -ENOMEM;
		goto error1;
	}

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
		ret = -EINVAL;
		goto error2;
	}

	return 0;

error2:
	kmem_cache_destroy(ib_mad_cache);
error1:
	return ret;
}
static void __exit ib_mad_cleanup_module(void)
{
	ib_unregister_client(&mad_client);

	if (kmem_cache_destroy(ib_mad_cache)) {
		printk(KERN_DEBUG PFX "Failed to destroy ib_mad cache\n");
	}
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);