IB: remove in-kernel support for memory windows
drivers/infiniband/core/uverbs_cmd.c
1 /*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
5 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36 #include <linux/file.h>
37 #include <linux/fs.h>
38 #include <linux/slab.h>
39 #include <linux/sched.h>
40
41 #include <asm/uaccess.h>
42
43 #include "uverbs.h"
44 #include "core_priv.h"
45
46 struct uverbs_lock_class {
47 struct lock_class_key key;
48 char name[16];
49 };
50
51 static struct uverbs_lock_class pd_lock_class = { .name = "PD-uobj" };
52 static struct uverbs_lock_class mr_lock_class = { .name = "MR-uobj" };
53 static struct uverbs_lock_class mw_lock_class = { .name = "MW-uobj" };
54 static struct uverbs_lock_class cq_lock_class = { .name = "CQ-uobj" };
55 static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
56 static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
57 static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
58 static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
59 static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
60
61 /*
62 * The ib_uobject locking scheme is as follows:
63 *
64 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
65 * needs to be held during all idr write operations. When an object is
66 * looked up, a reference must be taken on the object's kref before
67 * dropping this lock. For read operations, rcu_read_lock() is used
68 * instead, and similarly the kref reference is grabbed before the
69 * rcu_read_unlock().
70 *
71 * - Each object also has an rwsem. This rwsem must be held for
72 * reading while an operation that uses the object is performed.
73 * For example, while registering an MR, the associated PD's
74 * uobject.mutex must be held for reading. The rwsem must be held
75 * for writing while initializing or destroying an object.
76 *
77 * - In addition, each object has a "live" flag. If this flag is not
78 * set, then lookups of the object will fail even if it is found in
79 * the idr. This handles a reader that blocks and does not acquire
80 * the rwsem until after the object is destroyed. The destroy
81 * operation will set the live flag to 0 and then drop the rwsem;
82 * this will allow the reader to acquire the rwsem, see that the
83 * live flag is 0, and then drop the rwsem and its reference to
84 * the object. The underlying storage will not be freed until the last
85 * reference to the object is dropped.
86 */
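
/*
 * A minimal sketch of the lookup pattern this scheme implies, using the
 * helpers defined below.  Illustrative only: "some_idr", "handle" and
 * "ctx" are hypothetical stand-ins, not symbols defined in this file.
 *
 *	struct ib_uobject *uobj;
 *
 *	uobj = idr_read_uobj(&some_idr, handle, ctx, 0);
 *	if (!uobj)
 *		return -EINVAL;		(not found, wrong context, or dead)
 *	use uobj->object with uobj->mutex held for reading, then
 *	put_uobj_read(uobj);		(up_read() followed by kref_put())
 */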
87
88 static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
89 struct ib_ucontext *context, struct uverbs_lock_class *c)
90 {
91 uobj->user_handle = user_handle;
92 uobj->context = context;
93 kref_init(&uobj->ref);
94 init_rwsem(&uobj->mutex);
95 lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
96 uobj->live = 0;
97 }
98
99 static void release_uobj(struct kref *kref)
100 {
101 kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu);
102 }
103
104 static void put_uobj(struct ib_uobject *uobj)
105 {
106 kref_put(&uobj->ref, release_uobj);
107 }
108
109 static void put_uobj_read(struct ib_uobject *uobj)
110 {
111 up_read(&uobj->mutex);
112 put_uobj(uobj);
113 }
114
115 static void put_uobj_write(struct ib_uobject *uobj)
116 {
117 up_write(&uobj->mutex);
118 put_uobj(uobj);
119 }
120
121 static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
122 {
123 int ret;
124
125 idr_preload(GFP_KERNEL);
126 spin_lock(&ib_uverbs_idr_lock);
127
128 ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
129 if (ret >= 0)
130 uobj->id = ret;
131
132 spin_unlock(&ib_uverbs_idr_lock);
133 idr_preload_end();
134
135 return ret < 0 ? ret : 0;
136 }
137
138 void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
139 {
140 spin_lock(&ib_uverbs_idr_lock);
141 idr_remove(idr, uobj->id);
142 spin_unlock(&ib_uverbs_idr_lock);
143 }
144
145 static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
146 struct ib_ucontext *context)
147 {
148 struct ib_uobject *uobj;
149
150 rcu_read_lock();
151 uobj = idr_find(idr, id);
152 if (uobj) {
153 if (uobj->context == context)
154 kref_get(&uobj->ref);
155 else
156 uobj = NULL;
157 }
158 rcu_read_unlock();
159
160 return uobj;
161 }
162
163 static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
164 struct ib_ucontext *context, int nested)
165 {
166 struct ib_uobject *uobj;
167
168 uobj = __idr_get_uobj(idr, id, context);
169 if (!uobj)
170 return NULL;
171
172 if (nested)
173 down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
174 else
175 down_read(&uobj->mutex);
176 if (!uobj->live) {
177 put_uobj_read(uobj);
178 return NULL;
179 }
180
181 return uobj;
182 }
183
184 static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
185 struct ib_ucontext *context)
186 {
187 struct ib_uobject *uobj;
188
189 uobj = __idr_get_uobj(idr, id, context);
190 if (!uobj)
191 return NULL;
192
193 down_write(&uobj->mutex);
194 if (!uobj->live) {
195 put_uobj_write(uobj);
196 return NULL;
197 }
198
199 return uobj;
200 }
201
202 static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
203 int nested)
204 {
205 struct ib_uobject *uobj;
206
207 uobj = idr_read_uobj(idr, id, context, nested);
208 return uobj ? uobj->object : NULL;
209 }
210
211 static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
212 {
213 return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
214 }
215
216 static void put_pd_read(struct ib_pd *pd)
217 {
218 put_uobj_read(pd->uobject);
219 }
220
221 static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
222 {
223 return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
224 }
225
226 static void put_cq_read(struct ib_cq *cq)
227 {
228 put_uobj_read(cq->uobject);
229 }
230
231 static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
232 {
233 return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
234 }
235
236 static void put_ah_read(struct ib_ah *ah)
237 {
238 put_uobj_read(ah->uobject);
239 }
240
241 static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
242 {
243 return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
244 }
245
246 static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
247 {
248 struct ib_uobject *uobj;
249
250 uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
251 return uobj ? uobj->object : NULL;
252 }
253
254 static void put_qp_read(struct ib_qp *qp)
255 {
256 put_uobj_read(qp->uobject);
257 }
258
259 static void put_qp_write(struct ib_qp *qp)
260 {
261 put_uobj_write(qp->uobject);
262 }
263
264 static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
265 {
266 return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
267 }
268
269 static void put_srq_read(struct ib_srq *srq)
270 {
271 put_uobj_read(srq->uobject);
272 }
273
274 static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
275 struct ib_uobject **uobj)
276 {
277 *uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
278 return *uobj ? (*uobj)->object : NULL;
279 }
280
281 static void put_xrcd_read(struct ib_uobject *uobj)
282 {
283 put_uobj_read(uobj);
284 }
285
286 ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
287 struct ib_device *ib_dev,
288 const char __user *buf,
289 int in_len, int out_len)
290 {
291 struct ib_uverbs_get_context cmd;
292 struct ib_uverbs_get_context_resp resp;
293 struct ib_udata udata;
294 struct ib_ucontext *ucontext;
295 struct file *filp;
296 int ret;
297
298 if (out_len < sizeof resp)
299 return -ENOSPC;
300
301 if (copy_from_user(&cmd, buf, sizeof cmd))
302 return -EFAULT;
303
304 mutex_lock(&file->mutex);
305
306 if (file->ucontext) {
307 ret = -EINVAL;
308 goto err;
309 }
310
311 INIT_UDATA(&udata, buf + sizeof cmd,
312 (unsigned long) cmd.response + sizeof resp,
313 in_len - sizeof cmd, out_len - sizeof resp);
314
315 ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
316 if (IS_ERR(ucontext)) {
317 ret = PTR_ERR(ucontext);
318 goto err;
319 }
320
321 ucontext->device = ib_dev;
322 INIT_LIST_HEAD(&ucontext->pd_list);
323 INIT_LIST_HEAD(&ucontext->mr_list);
324 INIT_LIST_HEAD(&ucontext->mw_list);
325 INIT_LIST_HEAD(&ucontext->cq_list);
326 INIT_LIST_HEAD(&ucontext->qp_list);
327 INIT_LIST_HEAD(&ucontext->srq_list);
328 INIT_LIST_HEAD(&ucontext->ah_list);
329 INIT_LIST_HEAD(&ucontext->xrcd_list);
330 INIT_LIST_HEAD(&ucontext->rule_list);
331 rcu_read_lock();
332 ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
333 rcu_read_unlock();
334 ucontext->closing = 0;
335
336 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
337 ucontext->umem_tree = RB_ROOT;
338 init_rwsem(&ucontext->umem_rwsem);
339 ucontext->odp_mrs_count = 0;
340 INIT_LIST_HEAD(&ucontext->no_private_counters);
341
342 if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
343 ucontext->invalidate_range = NULL;
344
345 #endif
346
347 resp.num_comp_vectors = file->device->num_comp_vectors;
348
349 ret = get_unused_fd_flags(O_CLOEXEC);
350 if (ret < 0)
351 goto err_free;
352 resp.async_fd = ret;
353
354 filp = ib_uverbs_alloc_event_file(file, ib_dev, 1);
355 if (IS_ERR(filp)) {
356 ret = PTR_ERR(filp);
357 goto err_fd;
358 }
359
360 if (copy_to_user((void __user *) (unsigned long) cmd.response,
361 &resp, sizeof resp)) {
362 ret = -EFAULT;
363 goto err_file;
364 }
365
366 file->ucontext = ucontext;
367
368 fd_install(resp.async_fd, filp);
369
370 mutex_unlock(&file->mutex);
371
372 return in_len;
373
374 err_file:
375 ib_uverbs_free_async_event_file(file);
376 fput(filp);
377
378 err_fd:
379 put_unused_fd(resp.async_fd);
380
381 err_free:
382 put_pid(ucontext->tgid);
383 ib_dev->dealloc_ucontext(ucontext);
384
385 err:
386 mutex_unlock(&file->mutex);
387 return ret;
388 }
389
390 static void copy_query_dev_fields(struct ib_uverbs_file *file,
391 struct ib_device *ib_dev,
392 struct ib_uverbs_query_device_resp *resp,
393 struct ib_device_attr *attr)
394 {
395 resp->fw_ver = attr->fw_ver;
396 resp->node_guid = ib_dev->node_guid;
397 resp->sys_image_guid = attr->sys_image_guid;
398 resp->max_mr_size = attr->max_mr_size;
399 resp->page_size_cap = attr->page_size_cap;
400 resp->vendor_id = attr->vendor_id;
401 resp->vendor_part_id = attr->vendor_part_id;
402 resp->hw_ver = attr->hw_ver;
403 resp->max_qp = attr->max_qp;
404 resp->max_qp_wr = attr->max_qp_wr;
405 resp->device_cap_flags = attr->device_cap_flags;
406 resp->max_sge = attr->max_sge;
407 resp->max_sge_rd = attr->max_sge_rd;
408 resp->max_cq = attr->max_cq;
409 resp->max_cqe = attr->max_cqe;
410 resp->max_mr = attr->max_mr;
411 resp->max_pd = attr->max_pd;
412 resp->max_qp_rd_atom = attr->max_qp_rd_atom;
413 resp->max_ee_rd_atom = attr->max_ee_rd_atom;
414 resp->max_res_rd_atom = attr->max_res_rd_atom;
415 resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
416 resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
417 resp->atomic_cap = attr->atomic_cap;
418 resp->max_ee = attr->max_ee;
419 resp->max_rdd = attr->max_rdd;
420 resp->max_mw = attr->max_mw;
421 resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
422 resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
423 resp->max_mcast_grp = attr->max_mcast_grp;
424 resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
425 resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
426 resp->max_ah = attr->max_ah;
427 resp->max_fmr = attr->max_fmr;
428 resp->max_map_per_fmr = attr->max_map_per_fmr;
429 resp->max_srq = attr->max_srq;
430 resp->max_srq_wr = attr->max_srq_wr;
431 resp->max_srq_sge = attr->max_srq_sge;
432 resp->max_pkeys = attr->max_pkeys;
433 resp->local_ca_ack_delay = attr->local_ca_ack_delay;
434 resp->phys_port_cnt = ib_dev->phys_port_cnt;
435 }
436
437 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
438 struct ib_device *ib_dev,
439 const char __user *buf,
440 int in_len, int out_len)
441 {
442 struct ib_uverbs_query_device cmd;
443 struct ib_uverbs_query_device_resp resp;
444
445 if (out_len < sizeof resp)
446 return -ENOSPC;
447
448 if (copy_from_user(&cmd, buf, sizeof cmd))
449 return -EFAULT;
450
451 memset(&resp, 0, sizeof resp);
452 copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);
453
454 if (copy_to_user((void __user *) (unsigned long) cmd.response,
455 &resp, sizeof resp))
456 return -EFAULT;
457
458 return in_len;
459 }
460
461 ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
462 struct ib_device *ib_dev,
463 const char __user *buf,
464 int in_len, int out_len)
465 {
466 struct ib_uverbs_query_port cmd;
467 struct ib_uverbs_query_port_resp resp;
468 struct ib_port_attr attr;
469 int ret;
470
471 if (out_len < sizeof resp)
472 return -ENOSPC;
473
474 if (copy_from_user(&cmd, buf, sizeof cmd))
475 return -EFAULT;
476
477 ret = ib_query_port(ib_dev, cmd.port_num, &attr);
478 if (ret)
479 return ret;
480
481 memset(&resp, 0, sizeof resp);
482
483 resp.state = attr.state;
484 resp.max_mtu = attr.max_mtu;
485 resp.active_mtu = attr.active_mtu;
486 resp.gid_tbl_len = attr.gid_tbl_len;
487 resp.port_cap_flags = attr.port_cap_flags;
488 resp.max_msg_sz = attr.max_msg_sz;
489 resp.bad_pkey_cntr = attr.bad_pkey_cntr;
490 resp.qkey_viol_cntr = attr.qkey_viol_cntr;
491 resp.pkey_tbl_len = attr.pkey_tbl_len;
492 resp.lid = attr.lid;
493 resp.sm_lid = attr.sm_lid;
494 resp.lmc = attr.lmc;
495 resp.max_vl_num = attr.max_vl_num;
496 resp.sm_sl = attr.sm_sl;
497 resp.subnet_timeout = attr.subnet_timeout;
498 resp.init_type_reply = attr.init_type_reply;
499 resp.active_width = attr.active_width;
500 resp.active_speed = attr.active_speed;
501 resp.phys_state = attr.phys_state;
502 resp.link_layer = rdma_port_get_link_layer(ib_dev,
503 cmd.port_num);
504
505 if (copy_to_user((void __user *) (unsigned long) cmd.response,
506 &resp, sizeof resp))
507 return -EFAULT;
508
509 return in_len;
510 }
511
512 ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
513 struct ib_device *ib_dev,
514 const char __user *buf,
515 int in_len, int out_len)
516 {
517 struct ib_uverbs_alloc_pd cmd;
518 struct ib_uverbs_alloc_pd_resp resp;
519 struct ib_udata udata;
520 struct ib_uobject *uobj;
521 struct ib_pd *pd;
522 int ret;
523
524 if (out_len < sizeof resp)
525 return -ENOSPC;
526
527 if (copy_from_user(&cmd, buf, sizeof cmd))
528 return -EFAULT;
529
530 INIT_UDATA(&udata, buf + sizeof cmd,
531 (unsigned long) cmd.response + sizeof resp,
532 in_len - sizeof cmd, out_len - sizeof resp);
533
534 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
535 if (!uobj)
536 return -ENOMEM;
537
538 init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
539 down_write(&uobj->mutex);
540
541 pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
542 if (IS_ERR(pd)) {
543 ret = PTR_ERR(pd);
544 goto err;
545 }
546
547 pd->device = ib_dev;
548 pd->uobject = uobj;
549 pd->local_mr = NULL;
550 atomic_set(&pd->usecnt, 0);
551
552 uobj->object = pd;
553 ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
554 if (ret)
555 goto err_idr;
556
557 memset(&resp, 0, sizeof resp);
558 resp.pd_handle = uobj->id;
559
560 if (copy_to_user((void __user *) (unsigned long) cmd.response,
561 &resp, sizeof resp)) {
562 ret = -EFAULT;
563 goto err_copy;
564 }
565
566 mutex_lock(&file->mutex);
567 list_add_tail(&uobj->list, &file->ucontext->pd_list);
568 mutex_unlock(&file->mutex);
569
570 uobj->live = 1;
571
572 up_write(&uobj->mutex);
573
574 return in_len;
575
576 err_copy:
577 idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
578
579 err_idr:
580 ib_dealloc_pd(pd);
581
582 err:
583 put_uobj_write(uobj);
584 return ret;
585 }
586
587 ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
588 struct ib_device *ib_dev,
589 const char __user *buf,
590 int in_len, int out_len)
591 {
592 struct ib_uverbs_dealloc_pd cmd;
593 struct ib_uobject *uobj;
594 struct ib_pd *pd;
595 int ret;
596
597 if (copy_from_user(&cmd, buf, sizeof cmd))
598 return -EFAULT;
599
600 uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
601 if (!uobj)
602 return -EINVAL;
603 pd = uobj->object;
604
605 if (atomic_read(&pd->usecnt)) {
606 ret = -EBUSY;
607 goto err_put;
608 }
609
610 ret = pd->device->dealloc_pd(uobj->object);
611 WARN_ONCE(ret, "InfiniBand HW driver failed dealloc_pd");
612 if (ret)
613 goto err_put;
614
615 uobj->live = 0;
616 put_uobj_write(uobj);
617
618 idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
619
620 mutex_lock(&file->mutex);
621 list_del(&uobj->list);
622 mutex_unlock(&file->mutex);
623
624 put_uobj(uobj);
625
626 return in_len;
627
628 err_put:
629 put_uobj_write(uobj);
630 return ret;
631 }
632
633 struct xrcd_table_entry {
634 struct rb_node node;
635 struct ib_xrcd *xrcd;
636 struct inode *inode;
637 };
638
639 static int xrcd_table_insert(struct ib_uverbs_device *dev,
640 struct inode *inode,
641 struct ib_xrcd *xrcd)
642 {
643 struct xrcd_table_entry *entry, *scan;
644 struct rb_node **p = &dev->xrcd_tree.rb_node;
645 struct rb_node *parent = NULL;
646
647 entry = kmalloc(sizeof *entry, GFP_KERNEL);
648 if (!entry)
649 return -ENOMEM;
650
651 entry->xrcd = xrcd;
652 entry->inode = inode;
653
654 while (*p) {
655 parent = *p;
656 scan = rb_entry(parent, struct xrcd_table_entry, node);
657
658 if (inode < scan->inode) {
659 p = &(*p)->rb_left;
660 } else if (inode > scan->inode) {
661 p = &(*p)->rb_right;
662 } else {
663 kfree(entry);
664 return -EEXIST;
665 }
666 }
667
668 rb_link_node(&entry->node, parent, p);
669 rb_insert_color(&entry->node, &dev->xrcd_tree);
670 igrab(inode);
671 return 0;
672 }
673
674 static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
675 struct inode *inode)
676 {
677 struct xrcd_table_entry *entry;
678 struct rb_node *p = dev->xrcd_tree.rb_node;
679
680 while (p) {
681 entry = rb_entry(p, struct xrcd_table_entry, node);
682
683 if (inode < entry->inode)
684 p = p->rb_left;
685 else if (inode > entry->inode)
686 p = p->rb_right;
687 else
688 return entry;
689 }
690
691 return NULL;
692 }
693
694 static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
695 {
696 struct xrcd_table_entry *entry;
697
698 entry = xrcd_table_search(dev, inode);
699 if (!entry)
700 return NULL;
701
702 return entry->xrcd;
703 }
704
705 static void xrcd_table_delete(struct ib_uverbs_device *dev,
706 struct inode *inode)
707 {
708 struct xrcd_table_entry *entry;
709
710 entry = xrcd_table_search(dev, inode);
711 if (entry) {
712 iput(inode);
713 rb_erase(&entry->node, &dev->xrcd_tree);
714 kfree(entry);
715 }
716 }
717
718 ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
719 struct ib_device *ib_dev,
720 const char __user *buf, int in_len,
721 int out_len)
722 {
723 struct ib_uverbs_open_xrcd cmd;
724 struct ib_uverbs_open_xrcd_resp resp;
725 struct ib_udata udata;
726 struct ib_uxrcd_object *obj;
727 struct ib_xrcd *xrcd = NULL;
728 struct fd f = {NULL, 0};
729 struct inode *inode = NULL;
730 int ret = 0;
731 int new_xrcd = 0;
732
733 if (out_len < sizeof resp)
734 return -ENOSPC;
735
736 if (copy_from_user(&cmd, buf, sizeof cmd))
737 return -EFAULT;
738
739 INIT_UDATA(&udata, buf + sizeof cmd,
740 (unsigned long) cmd.response + sizeof resp,
741 in_len - sizeof cmd, out_len - sizeof resp);
742
743 mutex_lock(&file->device->xrcd_tree_mutex);
744
745 if (cmd.fd != -1) {
746 /* search for file descriptor */
747 f = fdget(cmd.fd);
748 if (!f.file) {
749 ret = -EBADF;
750 goto err_tree_mutex_unlock;
751 }
752
753 inode = file_inode(f.file);
754 xrcd = find_xrcd(file->device, inode);
755 if (!xrcd && !(cmd.oflags & O_CREAT)) {
756 /* no XRCD for this inode; the O_CREAT flag is required */
757 ret = -EAGAIN;
758 goto err_tree_mutex_unlock;
759 }
760
761 if (xrcd && cmd.oflags & O_EXCL) {
762 ret = -EINVAL;
763 goto err_tree_mutex_unlock;
764 }
765 }
766
767 obj = kmalloc(sizeof *obj, GFP_KERNEL);
768 if (!obj) {
769 ret = -ENOMEM;
770 goto err_tree_mutex_unlock;
771 }
772
773 init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);
774
775 down_write(&obj->uobject.mutex);
776
777 if (!xrcd) {
778 xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
779 if (IS_ERR(xrcd)) {
780 ret = PTR_ERR(xrcd);
781 goto err;
782 }
783
784 xrcd->inode = inode;
785 xrcd->device = ib_dev;
786 atomic_set(&xrcd->usecnt, 0);
787 mutex_init(&xrcd->tgt_qp_mutex);
788 INIT_LIST_HEAD(&xrcd->tgt_qp_list);
789 new_xrcd = 1;
790 }
791
792 atomic_set(&obj->refcnt, 0);
793 obj->uobject.object = xrcd;
794 ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
795 if (ret)
796 goto err_idr;
797
798 memset(&resp, 0, sizeof resp);
799 resp.xrcd_handle = obj->uobject.id;
800
801 if (inode) {
802 if (new_xrcd) {
803 /* create new inode/xrcd table entry */
804 ret = xrcd_table_insert(file->device, inode, xrcd);
805 if (ret)
806 goto err_insert_xrcd;
807 }
808 atomic_inc(&xrcd->usecnt);
809 }
810
811 if (copy_to_user((void __user *) (unsigned long) cmd.response,
812 &resp, sizeof resp)) {
813 ret = -EFAULT;
814 goto err_copy;
815 }
816
817 if (f.file)
818 fdput(f);
819
820 mutex_lock(&file->mutex);
821 list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
822 mutex_unlock(&file->mutex);
823
824 obj->uobject.live = 1;
825 up_write(&obj->uobject.mutex);
826
827 mutex_unlock(&file->device->xrcd_tree_mutex);
828 return in_len;
829
830 err_copy:
831 if (inode) {
832 if (new_xrcd)
833 xrcd_table_delete(file->device, inode);
834 atomic_dec(&xrcd->usecnt);
835 }
836
837 err_insert_xrcd:
838 idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
839
840 err_idr:
841 ib_dealloc_xrcd(xrcd);
842
843 err:
844 put_uobj_write(&obj->uobject);
845
846 err_tree_mutex_unlock:
847 if (f.file)
848 fdput(f);
849
850 mutex_unlock(&file->device->xrcd_tree_mutex);
851
852 return ret;
853 }
854
855 ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
856 struct ib_device *ib_dev,
857 const char __user *buf, int in_len,
858 int out_len)
859 {
860 struct ib_uverbs_close_xrcd cmd;
861 struct ib_uobject *uobj;
862 struct ib_xrcd *xrcd = NULL;
863 struct inode *inode = NULL;
864 struct ib_uxrcd_object *obj;
865 int live;
866 int ret = 0;
867
868 if (copy_from_user(&cmd, buf, sizeof cmd))
869 return -EFAULT;
870
871 mutex_lock(&file->device->xrcd_tree_mutex);
872 uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
873 if (!uobj) {
874 ret = -EINVAL;
875 goto out;
876 }
877
878 xrcd = uobj->object;
879 inode = xrcd->inode;
880 obj = container_of(uobj, struct ib_uxrcd_object, uobject);
881 if (atomic_read(&obj->refcnt)) {
882 put_uobj_write(uobj);
883 ret = -EBUSY;
884 goto out;
885 }
886
887 if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
888 ret = ib_dealloc_xrcd(uobj->object);
889 if (!ret)
890 uobj->live = 0;
891 }
892
893 live = uobj->live;
894 if (inode && ret)
895 atomic_inc(&xrcd->usecnt);
896
897 put_uobj_write(uobj);
898
899 if (ret)
900 goto out;
901
902 if (inode && !live)
903 xrcd_table_delete(file->device, inode);
904
905 idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
906 mutex_lock(&file->mutex);
907 list_del(&uobj->list);
908 mutex_unlock(&file->mutex);
909
910 put_uobj(uobj);
911 ret = in_len;
912
913 out:
914 mutex_unlock(&file->device->xrcd_tree_mutex);
915 return ret;
916 }
917
918 void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
919 struct ib_xrcd *xrcd)
920 {
921 struct inode *inode;
922
923 inode = xrcd->inode;
924 if (inode && !atomic_dec_and_test(&xrcd->usecnt))
925 return;
926
927 ib_dealloc_xrcd(xrcd);
928
929 if (inode)
930 xrcd_table_delete(dev, inode);
931 }
932
933 ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
934 struct ib_device *ib_dev,
935 const char __user *buf, int in_len,
936 int out_len)
937 {
938 struct ib_uverbs_reg_mr cmd;
939 struct ib_uverbs_reg_mr_resp resp;
940 struct ib_udata udata;
941 struct ib_uobject *uobj;
942 struct ib_pd *pd;
943 struct ib_mr *mr;
944 int ret;
945
946 if (out_len < sizeof resp)
947 return -ENOSPC;
948
949 if (copy_from_user(&cmd, buf, sizeof cmd))
950 return -EFAULT;
951
952 INIT_UDATA(&udata, buf + sizeof cmd,
953 (unsigned long) cmd.response + sizeof resp,
954 in_len - sizeof cmd, out_len - sizeof resp);
955
956 if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
957 return -EINVAL;
958
959 ret = ib_check_mr_access(cmd.access_flags);
960 if (ret)
961 return ret;
962
963 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
964 if (!uobj)
965 return -ENOMEM;
966
967 init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
968 down_write(&uobj->mutex);
969
970 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
971 if (!pd) {
972 ret = -EINVAL;
973 goto err_free;
974 }
975
976 if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
977 if (!(pd->device->attrs.device_cap_flags &
978 IB_DEVICE_ON_DEMAND_PAGING)) {
979 pr_debug("ODP support not available\n");
980 ret = -EINVAL;
981 goto err_put;
982 }
983 }
984
985 mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
986 cmd.access_flags, &udata);
987 if (IS_ERR(mr)) {
988 ret = PTR_ERR(mr);
989 goto err_put;
990 }
991
992 mr->device = pd->device;
993 mr->pd = pd;
994 mr->uobject = uobj;
995 atomic_inc(&pd->usecnt);
996 atomic_set(&mr->usecnt, 0);
997
998 uobj->object = mr;
999 ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
1000 if (ret)
1001 goto err_unreg;
1002
1003 memset(&resp, 0, sizeof resp);
1004 resp.lkey = mr->lkey;
1005 resp.rkey = mr->rkey;
1006 resp.mr_handle = uobj->id;
1007
1008 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1009 &resp, sizeof resp)) {
1010 ret = -EFAULT;
1011 goto err_copy;
1012 }
1013
1014 put_pd_read(pd);
1015
1016 mutex_lock(&file->mutex);
1017 list_add_tail(&uobj->list, &file->ucontext->mr_list);
1018 mutex_unlock(&file->mutex);
1019
1020 uobj->live = 1;
1021
1022 up_write(&uobj->mutex);
1023
1024 return in_len;
1025
1026 err_copy:
1027 idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
1028
1029 err_unreg:
1030 ib_dereg_mr(mr);
1031
1032 err_put:
1033 put_pd_read(pd);
1034
1035 err_free:
1036 put_uobj_write(uobj);
1037 return ret;
1038 }
1039
1040 ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
1041 struct ib_device *ib_dev,
1042 const char __user *buf, int in_len,
1043 int out_len)
1044 {
1045 struct ib_uverbs_rereg_mr cmd;
1046 struct ib_uverbs_rereg_mr_resp resp;
1047 struct ib_udata udata;
1048 struct ib_pd *pd = NULL;
1049 struct ib_mr *mr;
1050 struct ib_pd *old_pd;
1051 int ret;
1052 struct ib_uobject *uobj;
1053
1054 if (out_len < sizeof(resp))
1055 return -ENOSPC;
1056
1057 if (copy_from_user(&cmd, buf, sizeof(cmd)))
1058 return -EFAULT;
1059
1060 INIT_UDATA(&udata, buf + sizeof(cmd),
1061 (unsigned long) cmd.response + sizeof(resp),
1062 in_len - sizeof(cmd), out_len - sizeof(resp));
1063
1064 if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
1065 return -EINVAL;
1066
1067 if ((cmd.flags & IB_MR_REREG_TRANS) &&
1068 (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
1069 (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
1070 return -EINVAL;
1071
1072 uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
1073 file->ucontext);
1074
1075 if (!uobj)
1076 return -EINVAL;
1077
1078 mr = uobj->object;
1079
1080 if (cmd.flags & IB_MR_REREG_ACCESS) {
1081 ret = ib_check_mr_access(cmd.access_flags);
1082 if (ret)
1083 goto put_uobjs;
1084 }
1085
1086 if (cmd.flags & IB_MR_REREG_PD) {
1087 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
1088 if (!pd) {
1089 ret = -EINVAL;
1090 goto put_uobjs;
1091 }
1092 }
1093
1094 if (atomic_read(&mr->usecnt)) {
1095 ret = -EBUSY;
1096 goto put_uobj_pd;
1097 }
1098
1099 old_pd = mr->pd;
1100 ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
1101 cmd.length, cmd.hca_va,
1102 cmd.access_flags, pd, &udata);
1103 if (!ret) {
1104 if (cmd.flags & IB_MR_REREG_PD) {
1105 atomic_inc(&pd->usecnt);
1106 mr->pd = pd;
1107 atomic_dec(&old_pd->usecnt);
1108 }
1109 } else {
1110 goto put_uobj_pd;
1111 }
1112
1113 memset(&resp, 0, sizeof(resp));
1114 resp.lkey = mr->lkey;
1115 resp.rkey = mr->rkey;
1116
1117 if (copy_to_user((void __user *)(unsigned long)cmd.response,
1118 &resp, sizeof(resp)))
1119 ret = -EFAULT;
1120 else
1121 ret = in_len;
1122
1123 put_uobj_pd:
1124 if (cmd.flags & IB_MR_REREG_PD)
1125 put_pd_read(pd);
1126
1127 put_uobjs:
1128
1129 put_uobj_write(mr->uobject);
1130
1131 return ret;
1132 }
1133
1134 ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
1135 struct ib_device *ib_dev,
1136 const char __user *buf, int in_len,
1137 int out_len)
1138 {
1139 struct ib_uverbs_dereg_mr cmd;
1140 struct ib_mr *mr;
1141 struct ib_uobject *uobj;
1142 int ret = -EINVAL;
1143
1144 if (copy_from_user(&cmd, buf, sizeof cmd))
1145 return -EFAULT;
1146
1147 uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
1148 if (!uobj)
1149 return -EINVAL;
1150
1151 mr = uobj->object;
1152
1153 ret = ib_dereg_mr(mr);
1154 if (!ret)
1155 uobj->live = 0;
1156
1157 put_uobj_write(uobj);
1158
1159 if (ret)
1160 return ret;
1161
1162 idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
1163
1164 mutex_lock(&file->mutex);
1165 list_del(&uobj->list);
1166 mutex_unlock(&file->mutex);
1167
1168 put_uobj(uobj);
1169
1170 return in_len;
1171 }
1172
1173 ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
1174 struct ib_device *ib_dev,
1175 const char __user *buf, int in_len,
1176 int out_len)
1177 {
1178 struct ib_uverbs_alloc_mw cmd;
1179 struct ib_uverbs_alloc_mw_resp resp;
1180 struct ib_uobject *uobj;
1181 struct ib_pd *pd;
1182 struct ib_mw *mw;
1183 int ret;
1184
1185 if (out_len < sizeof(resp))
1186 return -ENOSPC;
1187
1188 if (copy_from_user(&cmd, buf, sizeof(cmd)))
1189 return -EFAULT;
1190
1191 uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
1192 if (!uobj)
1193 return -ENOMEM;
1194
1195 init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
1196 down_write(&uobj->mutex);
1197
1198 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
1199 if (!pd) {
1200 ret = -EINVAL;
1201 goto err_free;
1202 }
1203
1204 mw = pd->device->alloc_mw(pd, cmd.mw_type);
1205 if (IS_ERR(mw)) {
1206 ret = PTR_ERR(mw);
1207 goto err_put;
1208 }
1209
1210 mw->device = pd->device;
1211 mw->pd = pd;
1212 mw->uobject = uobj;
1213 atomic_inc(&pd->usecnt);
1214
1215 uobj->object = mw;
1216 ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
1217 if (ret)
1218 goto err_unalloc;
1219
1220 memset(&resp, 0, sizeof(resp));
1221 resp.rkey = mw->rkey;
1222 resp.mw_handle = uobj->id;
1223
1224 if (copy_to_user((void __user *)(unsigned long)cmd.response,
1225 &resp, sizeof(resp))) {
1226 ret = -EFAULT;
1227 goto err_copy;
1228 }
1229
1230 put_pd_read(pd);
1231
1232 mutex_lock(&file->mutex);
1233 list_add_tail(&uobj->list, &file->ucontext->mw_list);
1234 mutex_unlock(&file->mutex);
1235
1236 uobj->live = 1;
1237
1238 up_write(&uobj->mutex);
1239
1240 return in_len;
1241
1242 err_copy:
1243 idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
1244
1245 err_unalloc:
1246 uverbs_dealloc_mw(mw);
1247
1248 err_put:
1249 put_pd_read(pd);
1250
1251 err_free:
1252 put_uobj_write(uobj);
1253 return ret;
1254 }
1255
1256 ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
1257 struct ib_device *ib_dev,
1258 const char __user *buf, int in_len,
1259 int out_len)
1260 {
1261 struct ib_uverbs_dealloc_mw cmd;
1262 struct ib_mw *mw;
1263 struct ib_uobject *uobj;
1264 int ret = -EINVAL;
1265
1266 if (copy_from_user(&cmd, buf, sizeof(cmd)))
1267 return -EFAULT;
1268
1269 uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
1270 if (!uobj)
1271 return -EINVAL;
1272
1273 mw = uobj->object;
1274
1275 ret = uverbs_dealloc_mw(mw);
1276 if (!ret)
1277 uobj->live = 0;
1278
1279 put_uobj_write(uobj);
1280
1281 if (ret)
1282 return ret;
1283
1284 idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
1285
1286 mutex_lock(&file->mutex);
1287 list_del(&uobj->list);
1288 mutex_unlock(&file->mutex);
1289
1290 put_uobj(uobj);
1291
1292 return in_len;
1293 }
1294
1295 ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
1296 struct ib_device *ib_dev,
1297 const char __user *buf, int in_len,
1298 int out_len)
1299 {
1300 struct ib_uverbs_create_comp_channel cmd;
1301 struct ib_uverbs_create_comp_channel_resp resp;
1302 struct file *filp;
1303 int ret;
1304
1305 if (out_len < sizeof resp)
1306 return -ENOSPC;
1307
1308 if (copy_from_user(&cmd, buf, sizeof cmd))
1309 return -EFAULT;
1310
1311 ret = get_unused_fd_flags(O_CLOEXEC);
1312 if (ret < 0)
1313 return ret;
1314 resp.fd = ret;
1315
1316 filp = ib_uverbs_alloc_event_file(file, ib_dev, 0);
1317 if (IS_ERR(filp)) {
1318 put_unused_fd(resp.fd);
1319 return PTR_ERR(filp);
1320 }
1321
1322 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1323 &resp, sizeof resp)) {
1324 put_unused_fd(resp.fd);
1325 fput(filp);
1326 return -EFAULT;
1327 }
1328
1329 fd_install(resp.fd, filp);
1330 return in_len;
1331 }
1332
1333 static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
1334 struct ib_device *ib_dev,
1335 struct ib_udata *ucore,
1336 struct ib_udata *uhw,
1337 struct ib_uverbs_ex_create_cq *cmd,
1338 size_t cmd_sz,
1339 int (*cb)(struct ib_uverbs_file *file,
1340 struct ib_ucq_object *obj,
1341 struct ib_uverbs_ex_create_cq_resp *resp,
1342 struct ib_udata *udata,
1343 void *context),
1344 void *context)
1345 {
1346 struct ib_ucq_object *obj;
1347 struct ib_uverbs_event_file *ev_file = NULL;
1348 struct ib_cq *cq;
1349 int ret;
1350 struct ib_uverbs_ex_create_cq_resp resp;
1351 struct ib_cq_init_attr attr = {};
1352
1353 if (cmd->comp_vector >= file->device->num_comp_vectors)
1354 return ERR_PTR(-EINVAL);
1355
1356 obj = kmalloc(sizeof *obj, GFP_KERNEL);
1357 if (!obj)
1358 return ERR_PTR(-ENOMEM);
1359
1360 init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class);
1361 down_write(&obj->uobject.mutex);
1362
1363 if (cmd->comp_channel >= 0) {
1364 ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel);
1365 if (!ev_file) {
1366 ret = -EINVAL;
1367 goto err;
1368 }
1369 }
1370
1371 obj->uverbs_file = file;
1372 obj->comp_events_reported = 0;
1373 obj->async_events_reported = 0;
1374 INIT_LIST_HEAD(&obj->comp_list);
1375 INIT_LIST_HEAD(&obj->async_list);
1376
1377 attr.cqe = cmd->cqe;
1378 attr.comp_vector = cmd->comp_vector;
1379
1380 if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
1381 attr.flags = cmd->flags;
1382
1383 cq = ib_dev->create_cq(ib_dev, &attr,
1384 file->ucontext, uhw);
1385 if (IS_ERR(cq)) {
1386 ret = PTR_ERR(cq);
1387 goto err_file;
1388 }
1389
1390 cq->device = ib_dev;
1391 cq->uobject = &obj->uobject;
1392 cq->comp_handler = ib_uverbs_comp_handler;
1393 cq->event_handler = ib_uverbs_cq_event_handler;
1394 cq->cq_context = ev_file;
1395 atomic_set(&cq->usecnt, 0);
1396
1397 obj->uobject.object = cq;
1398 ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
1399 if (ret)
1400 goto err_free;
1401
1402 memset(&resp, 0, sizeof resp);
1403 resp.base.cq_handle = obj->uobject.id;
1404 resp.base.cqe = cq->cqe;
1405
1406 resp.response_length = offsetof(typeof(resp), response_length) +
1407 sizeof(resp.response_length);
1408
1409 ret = cb(file, obj, &resp, ucore, context);
1410 if (ret)
1411 goto err_cb;
1412
1413 mutex_lock(&file->mutex);
1414 list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
1415 mutex_unlock(&file->mutex);
1416
1417 obj->uobject.live = 1;
1418
1419 up_write(&obj->uobject.mutex);
1420
1421 return obj;
1422
1423 err_cb:
1424 idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);
1425
1426 err_free:
1427 ib_destroy_cq(cq);
1428
1429 err_file:
1430 if (ev_file)
1431 ib_uverbs_release_ucq(file, ev_file, obj);
1432
1433 err:
1434 put_uobj_write(&obj->uobject);
1435
1436 return ERR_PTR(ret);
1437 }
1438
1439 static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
1440 struct ib_ucq_object *obj,
1441 struct ib_uverbs_ex_create_cq_resp *resp,
1442 struct ib_udata *ucore, void *context)
1443 {
1444 if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
1445 return -EFAULT;
1446
1447 return 0;
1448 }
1449
1450 ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
1451 struct ib_device *ib_dev,
1452 const char __user *buf, int in_len,
1453 int out_len)
1454 {
1455 struct ib_uverbs_create_cq cmd;
1456 struct ib_uverbs_ex_create_cq cmd_ex;
1457 struct ib_uverbs_create_cq_resp resp;
1458 struct ib_udata ucore;
1459 struct ib_udata uhw;
1460 struct ib_ucq_object *obj;
1461
1462 if (out_len < sizeof(resp))
1463 return -ENOSPC;
1464
1465 if (copy_from_user(&cmd, buf, sizeof(cmd)))
1466 return -EFAULT;
1467
1468 INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));
1469
1470 INIT_UDATA(&uhw, buf + sizeof(cmd),
1471 (unsigned long)cmd.response + sizeof(resp),
1472 in_len - sizeof(cmd), out_len - sizeof(resp));
1473
1474 memset(&cmd_ex, 0, sizeof(cmd_ex));
1475 cmd_ex.user_handle = cmd.user_handle;
1476 cmd_ex.cqe = cmd.cqe;
1477 cmd_ex.comp_vector = cmd.comp_vector;
1478 cmd_ex.comp_channel = cmd.comp_channel;
1479
1480 obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
1481 offsetof(typeof(cmd_ex), comp_channel) +
1482 sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
1483 NULL);
1484
1485 if (IS_ERR(obj))
1486 return PTR_ERR(obj);
1487
1488 return in_len;
1489 }
1490
1491 static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
1492 struct ib_ucq_object *obj,
1493 struct ib_uverbs_ex_create_cq_resp *resp,
1494 struct ib_udata *ucore, void *context)
1495 {
1496 if (ib_copy_to_udata(ucore, resp, resp->response_length))
1497 return -EFAULT;
1498
1499 return 0;
1500 }
1501
1502 int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
1503 struct ib_device *ib_dev,
1504 struct ib_udata *ucore,
1505 struct ib_udata *uhw)
1506 {
1507 struct ib_uverbs_ex_create_cq_resp resp;
1508 struct ib_uverbs_ex_create_cq cmd;
1509 struct ib_ucq_object *obj;
1510 int err;
1511
1512 if (ucore->inlen < sizeof(cmd))
1513 return -EINVAL;
1514
1515 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
1516 if (err)
1517 return err;
1518
1519 if (cmd.comp_mask)
1520 return -EINVAL;
1521
1522 if (cmd.reserved)
1523 return -EINVAL;
1524
1525 if (ucore->outlen < (offsetof(typeof(resp), response_length) +
1526 sizeof(resp.response_length)))
1527 return -ENOSPC;
1528
1529 obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
1530 min(ucore->inlen, sizeof(cmd)),
1531 ib_uverbs_ex_create_cq_cb, NULL);
1532
1533 if (IS_ERR(obj))
1534 return PTR_ERR(obj);
1535
1536 return 0;
1537 }
1538
1539 ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
1540 struct ib_device *ib_dev,
1541 const char __user *buf, int in_len,
1542 int out_len)
1543 {
1544 struct ib_uverbs_resize_cq cmd;
1545 struct ib_uverbs_resize_cq_resp resp;
1546 struct ib_udata udata;
1547 struct ib_cq *cq;
1548 int ret = -EINVAL;
1549
1550 if (copy_from_user(&cmd, buf, sizeof cmd))
1551 return -EFAULT;
1552
1553 INIT_UDATA(&udata, buf + sizeof cmd,
1554 (unsigned long) cmd.response + sizeof resp,
1555 in_len - sizeof cmd, out_len - sizeof resp);
1556
1557 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1558 if (!cq)
1559 return -EINVAL;
1560
1561 ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
1562 if (ret)
1563 goto out;
1564
1565 resp.cqe = cq->cqe;
1566
1567 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1568 &resp, sizeof resp.cqe))
1569 ret = -EFAULT;
1570
1571 out:
1572 put_cq_read(cq);
1573
1574 return ret ? ret : in_len;
1575 }
1576
1577 static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
1578 {
1579 struct ib_uverbs_wc tmp;
1580
1581 tmp.wr_id = wc->wr_id;
1582 tmp.status = wc->status;
1583 tmp.opcode = wc->opcode;
1584 tmp.vendor_err = wc->vendor_err;
1585 tmp.byte_len = wc->byte_len;
1586 tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
1587 tmp.qp_num = wc->qp->qp_num;
1588 tmp.src_qp = wc->src_qp;
1589 tmp.wc_flags = wc->wc_flags;
1590 tmp.pkey_index = wc->pkey_index;
1591 tmp.slid = wc->slid;
1592 tmp.sl = wc->sl;
1593 tmp.dlid_path_bits = wc->dlid_path_bits;
1594 tmp.port_num = wc->port_num;
1595 tmp.reserved = 0;
1596
1597 if (copy_to_user(dest, &tmp, sizeof tmp))
1598 return -EFAULT;
1599
1600 return 0;
1601 }
1602
1603 ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
1604 struct ib_device *ib_dev,
1605 const char __user *buf, int in_len,
1606 int out_len)
1607 {
1608 struct ib_uverbs_poll_cq cmd;
1609 struct ib_uverbs_poll_cq_resp resp;
1610 u8 __user *header_ptr;
1611 u8 __user *data_ptr;
1612 struct ib_cq *cq;
1613 struct ib_wc wc;
1614 int ret;
1615
1616 if (copy_from_user(&cmd, buf, sizeof cmd))
1617 return -EFAULT;
1618
1619 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1620 if (!cq)
1621 return -EINVAL;
1622
1623 /* we copy a struct ib_uverbs_poll_cq_resp to user space */
1624 header_ptr = (void __user *)(unsigned long) cmd.response;
1625 data_ptr = header_ptr + sizeof resp;
1626
1627 memset(&resp, 0, sizeof resp);
1628 while (resp.count < cmd.ne) {
1629 ret = ib_poll_cq(cq, 1, &wc);
1630 if (ret < 0)
1631 goto out_put;
1632 if (!ret)
1633 break;
1634
1635 ret = copy_wc_to_user(data_ptr, &wc);
1636 if (ret)
1637 goto out_put;
1638
1639 data_ptr += sizeof(struct ib_uverbs_wc);
1640 ++resp.count;
1641 }
1642
1643 if (copy_to_user(header_ptr, &resp, sizeof resp)) {
1644 ret = -EFAULT;
1645 goto out_put;
1646 }
1647
1648 ret = in_len;
1649
1650 out_put:
1651 put_cq_read(cq);
1652 return ret;
1653 }
1654
1655 ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
1656 struct ib_device *ib_dev,
1657 const char __user *buf, int in_len,
1658 int out_len)
1659 {
1660 struct ib_uverbs_req_notify_cq cmd;
1661 struct ib_cq *cq;
1662
1663 if (copy_from_user(&cmd, buf, sizeof cmd))
1664 return -EFAULT;
1665
1666 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1667 if (!cq)
1668 return -EINVAL;
1669
1670 ib_req_notify_cq(cq, cmd.solicited_only ?
1671 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
1672
1673 put_cq_read(cq);
1674
1675 return in_len;
1676 }
1677
1678 ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
1679 struct ib_device *ib_dev,
1680 const char __user *buf, int in_len,
1681 int out_len)
1682 {
1683 struct ib_uverbs_destroy_cq cmd;
1684 struct ib_uverbs_destroy_cq_resp resp;
1685 struct ib_uobject *uobj;
1686 struct ib_cq *cq;
1687 struct ib_ucq_object *obj;
1688 struct ib_uverbs_event_file *ev_file;
1689 int ret = -EINVAL;
1690
1691 if (copy_from_user(&cmd, buf, sizeof cmd))
1692 return -EFAULT;
1693
1694 uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
1695 if (!uobj)
1696 return -EINVAL;
1697 cq = uobj->object;
1698 ev_file = cq->cq_context;
1699 obj = container_of(cq->uobject, struct ib_ucq_object, uobject);
1700
1701 ret = ib_destroy_cq(cq);
1702 if (!ret)
1703 uobj->live = 0;
1704
1705 put_uobj_write(uobj);
1706
1707 if (ret)
1708 return ret;
1709
1710 idr_remove_uobj(&ib_uverbs_cq_idr, uobj);
1711
1712 mutex_lock(&file->mutex);
1713 list_del(&uobj->list);
1714 mutex_unlock(&file->mutex);
1715
1716 ib_uverbs_release_ucq(file, ev_file, obj);
1717
1718 memset(&resp, 0, sizeof resp);
1719 resp.comp_events_reported = obj->comp_events_reported;
1720 resp.async_events_reported = obj->async_events_reported;
1721
1722 put_uobj(uobj);
1723
1724 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1725 &resp, sizeof resp))
1726 return -EFAULT;
1727
1728 return in_len;
1729 }
1730
1731 static int create_qp(struct ib_uverbs_file *file,
1732 struct ib_udata *ucore,
1733 struct ib_udata *uhw,
1734 struct ib_uverbs_ex_create_qp *cmd,
1735 size_t cmd_sz,
1736 int (*cb)(struct ib_uverbs_file *file,
1737 struct ib_uverbs_ex_create_qp_resp *resp,
1738 struct ib_udata *udata),
1739 void *context)
1740 {
1741 struct ib_uqp_object *obj;
1742 struct ib_device *device;
1743 struct ib_pd *pd = NULL;
1744 struct ib_xrcd *xrcd = NULL;
1745 struct ib_uobject *uninitialized_var(xrcd_uobj);
1746 struct ib_cq *scq = NULL, *rcq = NULL;
1747 struct ib_srq *srq = NULL;
1748 struct ib_qp *qp;
1749 char *buf;
1750 struct ib_qp_init_attr attr;
1751 struct ib_uverbs_ex_create_qp_resp resp;
1752 int ret;
1753
1754 if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
1755 return -EPERM;
1756
1757 obj = kzalloc(sizeof *obj, GFP_KERNEL);
1758 if (!obj)
1759 return -ENOMEM;
1760
1761 init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext,
1762 &qp_lock_class);
1763 down_write(&obj->uevent.uobject.mutex);
1764
1765 if (cmd->qp_type == IB_QPT_XRC_TGT) {
1766 xrcd = idr_read_xrcd(cmd->pd_handle, file->ucontext,
1767 &xrcd_uobj);
1768 if (!xrcd) {
1769 ret = -EINVAL;
1770 goto err_put;
1771 }
1772 device = xrcd->device;
1773 } else {
1774 if (cmd->qp_type == IB_QPT_XRC_INI) {
1775 cmd->max_recv_wr = 0;
1776 cmd->max_recv_sge = 0;
1777 } else {
1778 if (cmd->is_srq) {
1779 srq = idr_read_srq(cmd->srq_handle,
1780 file->ucontext);
1781 if (!srq || srq->srq_type != IB_SRQT_BASIC) {
1782 ret = -EINVAL;
1783 goto err_put;
1784 }
1785 }
1786
1787 if (cmd->recv_cq_handle != cmd->send_cq_handle) {
1788 rcq = idr_read_cq(cmd->recv_cq_handle,
1789 file->ucontext, 0);
1790 if (!rcq) {
1791 ret = -EINVAL;
1792 goto err_put;
1793 }
1794 }
1795 }
1796
1797 scq = idr_read_cq(cmd->send_cq_handle, file->ucontext, !!rcq);
1798 rcq = rcq ?: scq;
1799 pd = idr_read_pd(cmd->pd_handle, file->ucontext);
1800 if (!pd || !scq) {
1801 ret = -EINVAL;
1802 goto err_put;
1803 }
1804
1805 device = pd->device;
1806 }
1807
1808 attr.event_handler = ib_uverbs_qp_event_handler;
1809 attr.qp_context = file;
1810 attr.send_cq = scq;
1811 attr.recv_cq = rcq;
1812 attr.srq = srq;
1813 attr.xrcd = xrcd;
1814 attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
1815 IB_SIGNAL_REQ_WR;
1816 attr.qp_type = cmd->qp_type;
1817 attr.create_flags = 0;
1818
1819 attr.cap.max_send_wr = cmd->max_send_wr;
1820 attr.cap.max_recv_wr = cmd->max_recv_wr;
1821 attr.cap.max_send_sge = cmd->max_send_sge;
1822 attr.cap.max_recv_sge = cmd->max_recv_sge;
1823 attr.cap.max_inline_data = cmd->max_inline_data;
1824
1825 obj->uevent.events_reported = 0;
1826 INIT_LIST_HEAD(&obj->uevent.event_list);
1827 INIT_LIST_HEAD(&obj->mcast_list);
1828
1829 if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
1830 sizeof(cmd->create_flags))
1831 attr.create_flags = cmd->create_flags;
1832
1833 if (attr.create_flags & ~IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
1834 ret = -EINVAL;
1835 goto err_put;
1836 }
1837
1838 buf = (void *)cmd + sizeof(*cmd);
1839 if (cmd_sz > sizeof(*cmd))
1840 if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
1841 cmd_sz - sizeof(*cmd) - 1))) {
1842 ret = -EINVAL;
1843 goto err_put;
1844 }
1845
1846 if (cmd->qp_type == IB_QPT_XRC_TGT)
1847 qp = ib_create_qp(pd, &attr);
1848 else
1849 qp = device->create_qp(pd, &attr, uhw);
1850
1851 if (IS_ERR(qp)) {
1852 ret = PTR_ERR(qp);
1853 goto err_put;
1854 }
1855
1856 if (cmd->qp_type != IB_QPT_XRC_TGT) {
1857 qp->real_qp = qp;
1858 qp->device = device;
1859 qp->pd = pd;
1860 qp->send_cq = attr.send_cq;
1861 qp->recv_cq = attr.recv_cq;
1862 qp->srq = attr.srq;
1863 qp->event_handler = attr.event_handler;
1864 qp->qp_context = attr.qp_context;
1865 qp->qp_type = attr.qp_type;
1866 atomic_set(&qp->usecnt, 0);
1867 atomic_inc(&pd->usecnt);
1868 atomic_inc(&attr.send_cq->usecnt);
1869 if (attr.recv_cq)
1870 atomic_inc(&attr.recv_cq->usecnt);
1871 if (attr.srq)
1872 atomic_inc(&attr.srq->usecnt);
1873 }
1874 qp->uobject = &obj->uevent.uobject;
1875
1876 obj->uevent.uobject.object = qp;
1877 ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1878 if (ret)
1879 goto err_destroy;
1880
1881 memset(&resp, 0, sizeof resp);
1882 resp.base.qpn = qp->qp_num;
1883 resp.base.qp_handle = obj->uevent.uobject.id;
1884 resp.base.max_recv_sge = attr.cap.max_recv_sge;
1885 resp.base.max_send_sge = attr.cap.max_send_sge;
1886 resp.base.max_recv_wr = attr.cap.max_recv_wr;
1887 resp.base.max_send_wr = attr.cap.max_send_wr;
1888 resp.base.max_inline_data = attr.cap.max_inline_data;
1889
1890 resp.response_length = offsetof(typeof(resp), response_length) +
1891 sizeof(resp.response_length);
1892
1893 ret = cb(file, &resp, ucore);
1894 if (ret)
1895 goto err_cb;
1896
1897 if (xrcd) {
1898 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
1899 uobject);
1900 atomic_inc(&obj->uxrcd->refcnt);
1901 put_xrcd_read(xrcd_uobj);
1902 }
1903
1904 if (pd)
1905 put_pd_read(pd);
1906 if (scq)
1907 put_cq_read(scq);
1908 if (rcq && rcq != scq)
1909 put_cq_read(rcq);
1910 if (srq)
1911 put_srq_read(srq);
1912
1913 mutex_lock(&file->mutex);
1914 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
1915 mutex_unlock(&file->mutex);
1916
1917 obj->uevent.uobject.live = 1;
1918
1919 up_write(&obj->uevent.uobject.mutex);
1920
1921 return 0;
1922 err_cb:
1923 idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1924
1925 err_destroy:
1926 ib_destroy_qp(qp);
1927
1928 err_put:
1929 if (xrcd)
1930 put_xrcd_read(xrcd_uobj);
1931 if (pd)
1932 put_pd_read(pd);
1933 if (scq)
1934 put_cq_read(scq);
1935 if (rcq && rcq != scq)
1936 put_cq_read(rcq);
1937 if (srq)
1938 put_srq_read(srq);
1939
1940 put_uobj_write(&obj->uevent.uobject);
1941 return ret;
1942 }
1943
1944 static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
1945 struct ib_uverbs_ex_create_qp_resp *resp,
1946 struct ib_udata *ucore)
1947 {
1948 if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
1949 return -EFAULT;
1950
1951 return 0;
1952 }
1953
1954 ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1955 struct ib_device *ib_dev,
1956 const char __user *buf, int in_len,
1957 int out_len)
1958 {
1959 struct ib_uverbs_create_qp cmd;
1960 struct ib_uverbs_ex_create_qp cmd_ex;
1961 struct ib_udata ucore;
1962 struct ib_udata uhw;
1963 ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
1964 int err;
1965
1966 if (out_len < resp_size)
1967 return -ENOSPC;
1968
1969 if (copy_from_user(&cmd, buf, sizeof(cmd)))
1970 return -EFAULT;
1971
1972 INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
1973 resp_size);
1974 INIT_UDATA(&uhw, buf + sizeof(cmd),
1975 (unsigned long)cmd.response + resp_size,
1976 in_len - sizeof(cmd), out_len - resp_size);
1977
1978 memset(&cmd_ex, 0, sizeof(cmd_ex));
1979 cmd_ex.user_handle = cmd.user_handle;
1980 cmd_ex.pd_handle = cmd.pd_handle;
1981 cmd_ex.send_cq_handle = cmd.send_cq_handle;
1982 cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
1983 cmd_ex.srq_handle = cmd.srq_handle;
1984 cmd_ex.max_send_wr = cmd.max_send_wr;
1985 cmd_ex.max_recv_wr = cmd.max_recv_wr;
1986 cmd_ex.max_send_sge = cmd.max_send_sge;
1987 cmd_ex.max_recv_sge = cmd.max_recv_sge;
1988 cmd_ex.max_inline_data = cmd.max_inline_data;
1989 cmd_ex.sq_sig_all = cmd.sq_sig_all;
1990 cmd_ex.qp_type = cmd.qp_type;
1991 cmd_ex.is_srq = cmd.is_srq;
1992
1993 err = create_qp(file, &ucore, &uhw, &cmd_ex,
1994 offsetof(typeof(cmd_ex), is_srq) +
1995 sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
1996 NULL);
1997
1998 if (err)
1999 return err;
2000
2001 return in_len;
2002 }
2003
2004 static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
2005 struct ib_uverbs_ex_create_qp_resp *resp,
2006 struct ib_udata *ucore)
2007 {
2008 if (ib_copy_to_udata(ucore, resp, resp->response_length))
2009 return -EFAULT;
2010
2011 return 0;
2012 }
2013
2014 int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
2015 struct ib_device *ib_dev,
2016 struct ib_udata *ucore,
2017 struct ib_udata *uhw)
2018 {
2019 struct ib_uverbs_ex_create_qp_resp resp;
2020 struct ib_uverbs_ex_create_qp cmd = {0};
2021 int err;
2022
2023 if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
2024 sizeof(cmd.comp_mask)))
2025 return -EINVAL;
2026
2027 err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
2028 if (err)
2029 return err;
2030
2031 if (cmd.comp_mask)
2032 return -EINVAL;
2033
2034 if (cmd.reserved)
2035 return -EINVAL;
2036
2037 if (ucore->outlen < (offsetof(typeof(resp), response_length) +
2038 sizeof(resp.response_length)))
2039 return -ENOSPC;
2040
2041 err = create_qp(file, ucore, uhw, &cmd,
2042 min(ucore->inlen, sizeof(cmd)),
2043 ib_uverbs_ex_create_qp_cb, NULL);
2044
2045 if (err)
2046 return err;
2047
2048 return 0;
2049 }
2050
2051 ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
2052 struct ib_device *ib_dev,
2053 const char __user *buf, int in_len, int out_len)
2054 {
2055 struct ib_uverbs_open_qp cmd;
2056 struct ib_uverbs_create_qp_resp resp;
2057 struct ib_udata udata;
2058 struct ib_uqp_object *obj;
2059 struct ib_xrcd *xrcd;
2060 struct ib_uobject *uninitialized_var(xrcd_uobj);
2061 struct ib_qp *qp;
2062 struct ib_qp_open_attr attr;
2063 int ret;
2064
2065 if (out_len < sizeof resp)
2066 return -ENOSPC;
2067
2068 if (copy_from_user(&cmd, buf, sizeof cmd))
2069 return -EFAULT;
2070
2071 INIT_UDATA(&udata, buf + sizeof cmd,
2072 (unsigned long) cmd.response + sizeof resp,
2073 in_len - sizeof cmd, out_len - sizeof resp);
2074
2075 obj = kmalloc(sizeof *obj, GFP_KERNEL);
2076 if (!obj)
2077 return -ENOMEM;
2078
2079 init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
2080 down_write(&obj->uevent.uobject.mutex);
2081
2082 xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
2083 if (!xrcd) {
2084 ret = -EINVAL;
2085 goto err_put;
2086 }
2087
2088 attr.event_handler = ib_uverbs_qp_event_handler;
2089 attr.qp_context = file;
2090 attr.qp_num = cmd.qpn;
2091 attr.qp_type = cmd.qp_type;
2092
2093 obj->uevent.events_reported = 0;
2094 INIT_LIST_HEAD(&obj->uevent.event_list);
2095 INIT_LIST_HEAD(&obj->mcast_list);
2096
2097 qp = ib_open_qp(xrcd, &attr);
2098 if (IS_ERR(qp)) {
2099 ret = PTR_ERR(qp);
2100 goto err_put;
2101 }
2102
2103 qp->uobject = &obj->uevent.uobject;
2104
2105 obj->uevent.uobject.object = qp;
2106 ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
2107 if (ret)
2108 goto err_destroy;
2109
2110 memset(&resp, 0, sizeof resp);
2111 resp.qpn = qp->qp_num;
2112 resp.qp_handle = obj->uevent.uobject.id;
2113
2114 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2115 &resp, sizeof resp)) {
2116 ret = -EFAULT;
2117 goto err_remove;
2118 }
2119
2120 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
2121 atomic_inc(&obj->uxrcd->refcnt);
2122 put_xrcd_read(xrcd_uobj);
2123
2124 mutex_lock(&file->mutex);
2125 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
2126 mutex_unlock(&file->mutex);
2127
2128 obj->uevent.uobject.live = 1;
2129
2130 up_write(&obj->uevent.uobject.mutex);
2131
2132 return in_len;
2133
2134 err_remove:
2135 idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
2136
2137 err_destroy:
2138 ib_destroy_qp(qp);
2139
2140 err_put:
2141 put_xrcd_read(xrcd_uobj);
2142 put_uobj_write(&obj->uevent.uobject);
2143 return ret;
2144 }
2145
2146 ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
2147 struct ib_device *ib_dev,
2148 const char __user *buf, int in_len,
2149 int out_len)
2150 {
2151 struct ib_uverbs_query_qp cmd;
2152 struct ib_uverbs_query_qp_resp resp;
2153 struct ib_qp *qp;
2154 struct ib_qp_attr *attr;
2155 struct ib_qp_init_attr *init_attr;
2156 int ret;
2157
2158 if (copy_from_user(&cmd, buf, sizeof cmd))
2159 return -EFAULT;
2160
2161 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2162 init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
2163 if (!attr || !init_attr) {
2164 ret = -ENOMEM;
2165 goto out;
2166 }
2167
2168 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2169 if (!qp) {
2170 ret = -EINVAL;
2171 goto out;
2172 }
2173
2174 ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
2175
2176 put_qp_read(qp);
2177
2178 if (ret)
2179 goto out;
2180
2181 memset(&resp, 0, sizeof resp);
2182
2183 resp.qp_state = attr->qp_state;
2184 resp.cur_qp_state = attr->cur_qp_state;
2185 resp.path_mtu = attr->path_mtu;
2186 resp.path_mig_state = attr->path_mig_state;
2187 resp.qkey = attr->qkey;
2188 resp.rq_psn = attr->rq_psn;
2189 resp.sq_psn = attr->sq_psn;
2190 resp.dest_qp_num = attr->dest_qp_num;
2191 resp.qp_access_flags = attr->qp_access_flags;
2192 resp.pkey_index = attr->pkey_index;
2193 resp.alt_pkey_index = attr->alt_pkey_index;
2194 resp.sq_draining = attr->sq_draining;
2195 resp.max_rd_atomic = attr->max_rd_atomic;
2196 resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
2197 resp.min_rnr_timer = attr->min_rnr_timer;
2198 resp.port_num = attr->port_num;
2199 resp.timeout = attr->timeout;
2200 resp.retry_cnt = attr->retry_cnt;
2201 resp.rnr_retry = attr->rnr_retry;
2202 resp.alt_port_num = attr->alt_port_num;
2203 resp.alt_timeout = attr->alt_timeout;
2204
2205 memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
2206 resp.dest.flow_label = attr->ah_attr.grh.flow_label;
2207 resp.dest.sgid_index = attr->ah_attr.grh.sgid_index;
2208 resp.dest.hop_limit = attr->ah_attr.grh.hop_limit;
2209 resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
2210 resp.dest.dlid = attr->ah_attr.dlid;
2211 resp.dest.sl = attr->ah_attr.sl;
2212 resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
2213 resp.dest.static_rate = attr->ah_attr.static_rate;
2214 resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
2215 resp.dest.port_num = attr->ah_attr.port_num;
2216
2217 memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
2218 resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
2219 resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
2220 resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
2221 resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
2222 resp.alt_dest.dlid = attr->alt_ah_attr.dlid;
2223 resp.alt_dest.sl = attr->alt_ah_attr.sl;
2224 resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
2225 resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate;
2226 resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
2227 resp.alt_dest.port_num = attr->alt_ah_attr.port_num;
2228
2229 resp.max_send_wr = init_attr->cap.max_send_wr;
2230 resp.max_recv_wr = init_attr->cap.max_recv_wr;
2231 resp.max_send_sge = init_attr->cap.max_send_sge;
2232 resp.max_recv_sge = init_attr->cap.max_recv_sge;
2233 resp.max_inline_data = init_attr->cap.max_inline_data;
2234 resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
2235
2236 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2237 &resp, sizeof resp))
2238 ret = -EFAULT;
2239
2240 out:
2241 kfree(attr);
2242 kfree(init_attr);
2243
2244 return ret ? ret : in_len;
2245 }
2246
2247 /*
 * Remove attribute-mask bits that the QP type ignores: an XRC
 * initiator QP has no receive side, so the responder attributes are
 * dropped, and an XRC target QP has no send side, so the requester
 * attributes are dropped.
 */
2248 static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
2249 {
2250 switch (qp_type) {
2251 case IB_QPT_XRC_INI:
2252 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
2253 case IB_QPT_XRC_TGT:
2254 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
2255 IB_QP_RNR_RETRY);
2256 default:
2257 return mask;
2258 }
2259 }
2260
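/*
 * If the handle refers to the real QP, call the driver's modify_qp
 * entry directly so that udata is passed through; a handle that
 * merely shares the underlying QP (from ib_uverbs_open_qp) is
 * modified via ib_modify_qp() without udata.
 */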
2261 ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
2262 struct ib_device *ib_dev,
2263 const char __user *buf, int in_len,
2264 int out_len)
2265 {
2266 struct ib_uverbs_modify_qp cmd;
2267 struct ib_udata udata;
2268 struct ib_qp *qp;
2269 struct ib_qp_attr *attr;
2270 int ret;
2271
2272 if (copy_from_user(&cmd, buf, sizeof cmd))
2273 return -EFAULT;
2274
2275 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
2276 out_len);
2277
2278 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2279 if (!attr)
2280 return -ENOMEM;
2281
2282 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2283 if (!qp) {
2284 ret = -EINVAL;
2285 goto out;
2286 }
2287
2288 attr->qp_state = cmd.qp_state;
2289 attr->cur_qp_state = cmd.cur_qp_state;
2290 attr->path_mtu = cmd.path_mtu;
2291 attr->path_mig_state = cmd.path_mig_state;
2292 attr->qkey = cmd.qkey;
2293 attr->rq_psn = cmd.rq_psn;
2294 attr->sq_psn = cmd.sq_psn;
2295 attr->dest_qp_num = cmd.dest_qp_num;
2296 attr->qp_access_flags = cmd.qp_access_flags;
2297 attr->pkey_index = cmd.pkey_index;
2298 attr->alt_pkey_index = cmd.alt_pkey_index;
2299 attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
2300 attr->max_rd_atomic = cmd.max_rd_atomic;
2301 attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
2302 attr->min_rnr_timer = cmd.min_rnr_timer;
2303 attr->port_num = cmd.port_num;
2304 attr->timeout = cmd.timeout;
2305 attr->retry_cnt = cmd.retry_cnt;
2306 attr->rnr_retry = cmd.rnr_retry;
2307 attr->alt_port_num = cmd.alt_port_num;
2308 attr->alt_timeout = cmd.alt_timeout;
2309
2310 memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
2311 attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
2312 attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
2313 attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
2314 attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
2315 attr->ah_attr.dlid = cmd.dest.dlid;
2316 attr->ah_attr.sl = cmd.dest.sl;
2317 attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
2318 attr->ah_attr.static_rate = cmd.dest.static_rate;
2319 attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
2320 attr->ah_attr.port_num = cmd.dest.port_num;
2321
2322 memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
2323 attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
2324 attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
2325 attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
2326 attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
2327 attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
2328 attr->alt_ah_attr.sl = cmd.alt_dest.sl;
2329 attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
2330 attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
2331 attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
2332 attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
2333
2334 if (qp->real_qp == qp) {
2335 ret = ib_resolve_eth_dmac(qp, attr, &cmd.attr_mask);
2336 if (ret)
2337 goto release_qp;
2338 ret = qp->device->modify_qp(qp, attr,
2339 modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
2340 } else {
2341 ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
2342 }
2343
2344 if (ret)
2345 goto release_qp;
2346
2347 ret = in_len;
2348
2349 release_qp:
2350 put_qp_read(qp);
2351
2352 out:
2353 kfree(attr);
2354
2355 return ret;
2356 }
2357
2358 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
2359 struct ib_device *ib_dev,
2360 const char __user *buf, int in_len,
2361 int out_len)
2362 {
2363 struct ib_uverbs_destroy_qp cmd;
2364 struct ib_uverbs_destroy_qp_resp resp;
2365 struct ib_uobject *uobj;
2366 struct ib_qp *qp;
2367 struct ib_uqp_object *obj;
2368 int ret = -EINVAL;
2369
2370 if (copy_from_user(&cmd, buf, sizeof cmd))
2371 return -EFAULT;
2372
2373 memset(&resp, 0, sizeof resp);
2374
2375 uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
2376 if (!uobj)
2377 return -EINVAL;
2378 qp = uobj->object;
2379 obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
2380
2381 if (!list_empty(&obj->mcast_list)) {
2382 put_uobj_write(uobj);
2383 return -EBUSY;
2384 }
2385
2386 ret = ib_destroy_qp(qp);
2387 if (!ret)
2388 uobj->live = 0;
2389
2390 put_uobj_write(uobj);
2391
2392 if (ret)
2393 return ret;
2394
2395 if (obj->uxrcd)
2396 atomic_dec(&obj->uxrcd->refcnt);
2397
2398 idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
2399
2400 mutex_lock(&file->mutex);
2401 list_del(&uobj->list);
2402 mutex_unlock(&file->mutex);
2403
2404 ib_uverbs_release_uevent(file, &obj->uevent);
2405
2406 resp.events_reported = obj->uevent.events_reported;
2407
2408 put_uobj(uobj);
2409
2410 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2411 &resp, sizeof resp))
2412 return -EFAULT;
2413
2414 return in_len;
2415 }
2416
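/*
 * Allocate a kernel work request and its scatter/gather array in a
 * single allocation: the ib_sge list begins at the first
 * ib_sge-aligned offset past the WR structure, which is where
 * post_send points sg_list below.
 */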
2417 static void *alloc_wr(size_t wr_size, __u32 num_sge)
2418 {
2419 return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
2420 num_sge * sizeof (struct ib_sge), GFP_KERNEL);
2421 }
2422
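/*
 * Layout of the POST_SEND request buffer, as parsed below:
 *
 *	struct ib_uverbs_post_send	command header
 *	cmd.wr_count WQEs		cmd.wqe_size bytes each
 *	cmd.sge_count			struct ib_uverbs_sge entries
 *
 * Each user WQE is converted to the matching kernel ib_send_wr
 * variant (UD, RDMA, atomic or plain send) before being posted.
 */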
2423 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2424 struct ib_device *ib_dev,
2425 const char __user *buf, int in_len,
2426 int out_len)
2427 {
2428 struct ib_uverbs_post_send cmd;
2429 struct ib_uverbs_post_send_resp resp;
2430 struct ib_uverbs_send_wr *user_wr;
2431 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
2432 struct ib_qp *qp;
2433 int i, sg_ind;
2434 int is_ud;
2435 ssize_t ret = -EINVAL;
2436 size_t next_size;
2437
2438 if (copy_from_user(&cmd, buf, sizeof cmd))
2439 return -EFAULT;
2440
2441 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
2442 cmd.sge_count * sizeof (struct ib_uverbs_sge))
2443 return -EINVAL;
2444
2445 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
2446 return -EINVAL;
2447
2448 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
2449 if (!user_wr)
2450 return -ENOMEM;
2451
2452 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2453 if (!qp)
2454 goto out;
2455
2456 is_ud = qp->qp_type == IB_QPT_UD;
2457 sg_ind = 0;
2458 last = NULL;
2459 for (i = 0; i < cmd.wr_count; ++i) {
2460 if (copy_from_user(user_wr,
2461 buf + sizeof cmd + i * cmd.wqe_size,
2462 cmd.wqe_size)) {
2463 ret = -EFAULT;
2464 goto out_put;
2465 }
2466
2467 if (user_wr->num_sge + sg_ind > cmd.sge_count) {
2468 ret = -EINVAL;
2469 goto out_put;
2470 }
2471
2472 if (is_ud) {
2473 struct ib_ud_wr *ud;
2474
2475 if (user_wr->opcode != IB_WR_SEND &&
2476 user_wr->opcode != IB_WR_SEND_WITH_IMM) {
2477 ret = -EINVAL;
2478 goto out_put;
2479 }
2480
2481 next_size = sizeof(*ud);
2482 ud = alloc_wr(next_size, user_wr->num_sge);
2483 if (!ud) {
2484 ret = -ENOMEM;
2485 goto out_put;
2486 }
2487
2488 ud->ah = idr_read_ah(user_wr->wr.ud.ah, file->ucontext);
2489 if (!ud->ah) {
2490 kfree(ud);
2491 ret = -EINVAL;
2492 goto out_put;
2493 }
2494 ud->remote_qpn = user_wr->wr.ud.remote_qpn;
2495 ud->remote_qkey = user_wr->wr.ud.remote_qkey;
2496
2497 next = &ud->wr;
2498 } else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2499 user_wr->opcode == IB_WR_RDMA_WRITE ||
2500 user_wr->opcode == IB_WR_RDMA_READ) {
2501 struct ib_rdma_wr *rdma;
2502
2503 next_size = sizeof(*rdma);
2504 rdma = alloc_wr(next_size, user_wr->num_sge);
2505 if (!rdma) {
2506 ret = -ENOMEM;
2507 goto out_put;
2508 }
2509
2510 rdma->remote_addr = user_wr->wr.rdma.remote_addr;
2511 rdma->rkey = user_wr->wr.rdma.rkey;
2512
2513 next = &rdma->wr;
2514 } else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2515 user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2516 struct ib_atomic_wr *atomic;
2517
2518 next_size = sizeof(*atomic);
2519 atomic = alloc_wr(next_size, user_wr->num_sge);
2520 if (!atomic) {
2521 ret = -ENOMEM;
2522 goto out_put;
2523 }
2524
2525 atomic->remote_addr = user_wr->wr.atomic.remote_addr;
2526 atomic->compare_add = user_wr->wr.atomic.compare_add;
2527 atomic->swap = user_wr->wr.atomic.swap;
2528 atomic->rkey = user_wr->wr.atomic.rkey;
2529
2530 next = &atomic->wr;
2531 } else if (user_wr->opcode == IB_WR_SEND ||
2532 user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2533 user_wr->opcode == IB_WR_SEND_WITH_INV) {
2534 next_size = sizeof(*next);
2535 next = alloc_wr(next_size, user_wr->num_sge);
2536 if (!next) {
2537 ret = -ENOMEM;
2538 goto out_put;
2539 }
2540 } else {
2541 ret = -EINVAL;
2542 goto out_put;
2543 }
2544
2545 if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2546 user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
2547 next->ex.imm_data =
2548 (__be32 __force) user_wr->ex.imm_data;
2549 } else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
2550 next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
2551 }
2552
2553 if (!last)
2554 wr = next;
2555 else
2556 last->next = next;
2557 last = next;
2558
2559 next->next = NULL;
2560 next->wr_id = user_wr->wr_id;
2561 next->num_sge = user_wr->num_sge;
2562 next->opcode = user_wr->opcode;
2563 next->send_flags = user_wr->send_flags;
2564
2565 if (next->num_sge) {
2566 next->sg_list = (void *) next +
2567 ALIGN(next_size, sizeof(struct ib_sge));
2568 if (copy_from_user(next->sg_list,
2569 buf + sizeof cmd +
2570 cmd.wr_count * cmd.wqe_size +
2571 sg_ind * sizeof (struct ib_sge),
2572 next->num_sge * sizeof (struct ib_sge))) {
2573 ret = -EFAULT;
2574 goto out_put;
2575 }
2576 sg_ind += next->num_sge;
2577 } else
2578 next->sg_list = NULL;
2579 }
2580
2581 resp.bad_wr = 0;
2582 ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
2583 if (ret)
2584 for (next = wr; next; next = next->next) {
2585 ++resp.bad_wr;
2586 if (next == bad_wr)
2587 break;
2588 }
2589
2590 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2591 &resp, sizeof resp))
2592 ret = -EFAULT;
2593
2594 out_put:
2595 put_qp_read(qp);
2596
2597 while (wr) {
2598 if (is_ud && ud_wr(wr)->ah)
2599 put_ah_read(ud_wr(wr)->ah);
2600 next = wr->next;
2601 kfree(wr);
2602 wr = next;
2603 }
2604
2605 out:
2606 kfree(user_wr);
2607
2608 return ret ? ret : in_len;
2609 }
2610
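/*
 * Translate an array of user receive WQEs into a NULL-terminated
 * chain of kernel ib_recv_wr structures.  Shared by
 * ib_uverbs_post_recv() and ib_uverbs_post_srq_recv().  On failure
 * any partially built chain is freed and an ERR_PTR is returned.
 */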
2611 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
2612 int in_len,
2613 u32 wr_count,
2614 u32 sge_count,
2615 u32 wqe_size)
2616 {
2617 struct ib_uverbs_recv_wr *user_wr;
2618 struct ib_recv_wr *wr = NULL, *last, *next;
2619 int sg_ind;
2620 int i;
2621 int ret;
2622
2623 if (in_len < wqe_size * wr_count +
2624 sge_count * sizeof (struct ib_uverbs_sge))
2625 return ERR_PTR(-EINVAL);
2626
2627 if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
2628 return ERR_PTR(-EINVAL);
2629
2630 user_wr = kmalloc(wqe_size, GFP_KERNEL);
2631 if (!user_wr)
2632 return ERR_PTR(-ENOMEM);
2633
2634 sg_ind = 0;
2635 last = NULL;
2636 for (i = 0; i < wr_count; ++i) {
2637 if (copy_from_user(user_wr, buf + i * wqe_size,
2638 wqe_size)) {
2639 ret = -EFAULT;
2640 goto err;
2641 }
2642
2643 if (user_wr->num_sge + sg_ind > sge_count) {
2644 ret = -EINVAL;
2645 goto err;
2646 }
2647
2648 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2649 user_wr->num_sge * sizeof (struct ib_sge),
2650 GFP_KERNEL);
2651 if (!next) {
2652 ret = -ENOMEM;
2653 goto err;
2654 }
2655
2656 if (!last)
2657 wr = next;
2658 else
2659 last->next = next;
2660 last = next;
2661
2662 next->next = NULL;
2663 next->wr_id = user_wr->wr_id;
2664 next->num_sge = user_wr->num_sge;
2665
2666 if (next->num_sge) {
2667 next->sg_list = (void *) next +
2668 ALIGN(sizeof *next, sizeof (struct ib_sge));
2669 if (copy_from_user(next->sg_list,
2670 buf + wr_count * wqe_size +
2671 sg_ind * sizeof (struct ib_sge),
2672 next->num_sge * sizeof (struct ib_sge))) {
2673 ret = -EFAULT;
2674 goto err;
2675 }
2676 sg_ind += next->num_sge;
2677 } else
2678 next->sg_list = NULL;
2679 }
2680
2681 kfree(user_wr);
2682 return wr;
2683
2684 err:
2685 kfree(user_wr);
2686
2687 while (wr) {
2688 next = wr->next;
2689 kfree(wr);
2690 wr = next;
2691 }
2692
2693 return ERR_PTR(ret);
2694 }
2695
2696 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
2697 struct ib_device *ib_dev,
2698 const char __user *buf, int in_len,
2699 int out_len)
2700 {
2701 struct ib_uverbs_post_recv cmd;
2702 struct ib_uverbs_post_recv_resp resp;
2703 struct ib_recv_wr *wr, *next, *bad_wr;
2704 struct ib_qp *qp;
2705 ssize_t ret = -EINVAL;
2706
2707 if (copy_from_user(&cmd, buf, sizeof cmd))
2708 return -EFAULT;
2709
2710 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2711 in_len - sizeof cmd, cmd.wr_count,
2712 cmd.sge_count, cmd.wqe_size);
2713 if (IS_ERR(wr))
2714 return PTR_ERR(wr);
2715
2716 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2717 if (!qp)
2718 goto out;
2719
2720 resp.bad_wr = 0;
2721 ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
2722
2723 put_qp_read(qp);
2724
2725 if (ret)
2726 for (next = wr; next; next = next->next) {
2727 ++resp.bad_wr;
2728 if (next == bad_wr)
2729 break;
2730 }
2731
2732 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2733 &resp, sizeof resp))
2734 ret = -EFAULT;
2735
2736 out:
2737 while (wr) {
2738 next = wr->next;
2739 kfree(wr);
2740 wr = next;
2741 }
2742
2743 return ret ? ret : in_len;
2744 }
2745
2746 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
2747 struct ib_device *ib_dev,
2748 const char __user *buf, int in_len,
2749 int out_len)
2750 {
2751 struct ib_uverbs_post_srq_recv cmd;
2752 struct ib_uverbs_post_srq_recv_resp resp;
2753 struct ib_recv_wr *wr, *next, *bad_wr;
2754 struct ib_srq *srq;
2755 ssize_t ret = -EINVAL;
2756
2757 if (copy_from_user(&cmd, buf, sizeof cmd))
2758 return -EFAULT;
2759
2760 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2761 in_len - sizeof cmd, cmd.wr_count,
2762 cmd.sge_count, cmd.wqe_size);
2763 if (IS_ERR(wr))
2764 return PTR_ERR(wr);
2765
2766 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2767 if (!srq)
2768 goto out;
2769
2770 resp.bad_wr = 0;
2771 ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
2772
2773 put_srq_read(srq);
2774
2775 if (ret)
2776 for (next = wr; next; next = next->next) {
2777 ++resp.bad_wr;
2778 if (next == bad_wr)
2779 break;
2780 }
2781
2782 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2783 &resp, sizeof resp))
2784 ret = -EFAULT;
2785
2786 out:
2787 while (wr) {
2788 next = wr->next;
2789 kfree(wr);
2790 wr = next;
2791 }
2792
2793 return ret ? ret : in_len;
2794 }
2795
2796 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
2797 struct ib_device *ib_dev,
2798 const char __user *buf, int in_len,
2799 int out_len)
2800 {
2801 struct ib_uverbs_create_ah cmd;
2802 struct ib_uverbs_create_ah_resp resp;
2803 struct ib_uobject *uobj;
2804 struct ib_pd *pd;
2805 struct ib_ah *ah;
2806 struct ib_ah_attr attr;
2807 int ret;
2808
2809 if (out_len < sizeof resp)
2810 return -ENOSPC;
2811
2812 if (copy_from_user(&cmd, buf, sizeof cmd))
2813 return -EFAULT;
2814
2815 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
2816 if (!uobj)
2817 return -ENOMEM;
2818
2819 init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
2820 down_write(&uobj->mutex);
2821
2822 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
2823 if (!pd) {
2824 ret = -EINVAL;
2825 goto err;
2826 }
2827
2828 attr.dlid = cmd.attr.dlid;
2829 attr.sl = cmd.attr.sl;
2830 attr.src_path_bits = cmd.attr.src_path_bits;
2831 attr.static_rate = cmd.attr.static_rate;
2832 attr.ah_flags = cmd.attr.is_global ? IB_AH_GRH : 0;
2833 attr.port_num = cmd.attr.port_num;
2834 attr.grh.flow_label = cmd.attr.grh.flow_label;
2835 attr.grh.sgid_index = cmd.attr.grh.sgid_index;
2836 attr.grh.hop_limit = cmd.attr.grh.hop_limit;
2837 attr.grh.traffic_class = cmd.attr.grh.traffic_class;
2838 memset(&attr.dmac, 0, sizeof(attr.dmac));
2839 memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
2840
2841 ah = ib_create_ah(pd, &attr);
2842 if (IS_ERR(ah)) {
2843 ret = PTR_ERR(ah);
2844 goto err_put;
2845 }
2846
2847 ah->uobject = uobj;
2848 uobj->object = ah;
2849
2850 ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
2851 if (ret)
2852 goto err_destroy;
2853
2854 resp.ah_handle = uobj->id;
2855
2856 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2857 &resp, sizeof resp)) {
2858 ret = -EFAULT;
2859 goto err_copy;
2860 }
2861
2862 put_pd_read(pd);
2863
2864 mutex_lock(&file->mutex);
2865 list_add_tail(&uobj->list, &file->ucontext->ah_list);
2866 mutex_unlock(&file->mutex);
2867
2868 uobj->live = 1;
2869
2870 up_write(&uobj->mutex);
2871
2872 return in_len;
2873
2874 err_copy:
2875 idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
2876
2877 err_destroy:
2878 ib_destroy_ah(ah);
2879
2880 err_put:
2881 put_pd_read(pd);
2882
2883 err:
2884 put_uobj_write(uobj);
2885 return ret;
2886 }
2887
2888 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
2889 struct ib_device *ib_dev,
2890 const char __user *buf, int in_len, int out_len)
2891 {
2892 struct ib_uverbs_destroy_ah cmd;
2893 struct ib_ah *ah;
2894 struct ib_uobject *uobj;
2895 int ret;
2896
2897 if (copy_from_user(&cmd, buf, sizeof cmd))
2898 return -EFAULT;
2899
2900 uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
2901 if (!uobj)
2902 return -EINVAL;
2903 ah = uobj->object;
2904
2905 ret = ib_destroy_ah(ah);
2906 if (!ret)
2907 uobj->live = 0;
2908
2909 put_uobj_write(uobj);
2910
2911 if (ret)
2912 return ret;
2913
2914 idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
2915
2916 mutex_lock(&file->mutex);
2917 list_del(&uobj->list);
2918 mutex_unlock(&file->mutex);
2919
2920 put_uobj(uobj);
2921
2922 return in_len;
2923 }
2924
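/*
 * Multicast attachments are tracked on the QP's mcast_list so that
 * detach can find them and ib_uverbs_destroy_qp() can refuse (with
 * -EBUSY) to destroy a QP that still has attachments.  Re-attaching
 * an already attached (gid, mlid) pair succeeds without calling
 * ib_attach_mcast() again.
 */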
2925 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
2926 struct ib_device *ib_dev,
2927 const char __user *buf, int in_len,
2928 int out_len)
2929 {
2930 struct ib_uverbs_attach_mcast cmd;
2931 struct ib_qp *qp;
2932 struct ib_uqp_object *obj;
2933 struct ib_uverbs_mcast_entry *mcast;
2934 int ret;
2935
2936 if (copy_from_user(&cmd, buf, sizeof cmd))
2937 return -EFAULT;
2938
2939 qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2940 if (!qp)
2941 return -EINVAL;
2942
2943 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2944
2945 list_for_each_entry(mcast, &obj->mcast_list, list)
2946 if (cmd.mlid == mcast->lid &&
2947 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2948 ret = 0;
2949 goto out_put;
2950 }
2951
2952 mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
2953 if (!mcast) {
2954 ret = -ENOMEM;
2955 goto out_put;
2956 }
2957
2958 mcast->lid = cmd.mlid;
2959 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
2960
2961 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
2962 if (!ret)
2963 list_add_tail(&mcast->list, &obj->mcast_list);
2964 else
2965 kfree(mcast);
2966
2967 out_put:
2968 put_qp_write(qp);
2969
2970 return ret ? ret : in_len;
2971 }
2972
2973 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
2974 struct ib_device *ib_dev,
2975 const char __user *buf, int in_len,
2976 int out_len)
2977 {
2978 struct ib_uverbs_detach_mcast cmd;
2979 struct ib_uqp_object *obj;
2980 struct ib_qp *qp;
2981 struct ib_uverbs_mcast_entry *mcast;
2982 int ret = -EINVAL;
2983
2984 if (copy_from_user(&cmd, buf, sizeof cmd))
2985 return -EFAULT;
2986
2987 qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2988 if (!qp)
2989 return -EINVAL;
2990
2991 ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
2992 if (ret)
2993 goto out_put;
2994
2995 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2996
2997 list_for_each_entry(mcast, &obj->mcast_list, list)
2998 if (cmd.mlid == mcast->lid &&
2999 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
3000 list_del(&mcast->list);
3001 kfree(mcast);
3002 break;
3003 }
3004
3005 out_put:
3006 put_qp_write(qp);
3007
3008 return ret ? ret : in_len;
3009 }
3010
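/*
 * Copy one user flow specification into the kernel union, checking
 * that the user-supplied size matches the kernel's size for that
 * spec type and that reserved bits are zero.
 */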
3011 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
3012 union ib_flow_spec *ib_spec)
3013 {
3014 if (kern_spec->reserved)
3015 return -EINVAL;
3016
3017 ib_spec->type = kern_spec->type;
3018
3019 switch (ib_spec->type) {
3020 case IB_FLOW_SPEC_ETH:
3021 ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
3022 if (ib_spec->eth.size != kern_spec->eth.size)
3023 return -EINVAL;
3024 memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
3025 sizeof(struct ib_flow_eth_filter));
3026 memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
3027 sizeof(struct ib_flow_eth_filter));
3028 break;
3029 case IB_FLOW_SPEC_IPV4:
3030 ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
3031 if (ib_spec->ipv4.size != kern_spec->ipv4.size)
3032 return -EINVAL;
3033 memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
3034 sizeof(struct ib_flow_ipv4_filter));
3035 memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
3036 sizeof(struct ib_flow_ipv4_filter));
3037 break;
3038 case IB_FLOW_SPEC_TCP:
3039 case IB_FLOW_SPEC_UDP:
3040 ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
3041 if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
3042 return -EINVAL;
3043 memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
3044 sizeof(struct ib_flow_tcp_udp_filter));
3045 memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
3046 sizeof(struct ib_flow_tcp_udp_filter));
3047 break;
3048 default:
3049 return -EINVAL;
3050 }
3051 return 0;
3052 }
3053
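/*
 * Extended command: create a flow steering rule on a QP.  The fixed
 * header is followed by cmd.flow_attr.num_of_specs variable-size
 * flow specs, which are validated and converted one at a time; any
 * mismatch between the advertised size and the specs actually
 * consumed fails the command.
 */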
3054 int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
3055 struct ib_device *ib_dev,
3056 struct ib_udata *ucore,
3057 struct ib_udata *uhw)
3058 {
3059 struct ib_uverbs_create_flow cmd;
3060 struct ib_uverbs_create_flow_resp resp;
3061 struct ib_uobject *uobj;
3062 struct ib_flow *flow_id;
3063 struct ib_uverbs_flow_attr *kern_flow_attr;
3064 struct ib_flow_attr *flow_attr;
3065 struct ib_qp *qp;
3066 int err = 0;
3067 void *kern_spec;
3068 void *ib_spec;
3069 int i;
3070
3071 if (ucore->inlen < sizeof(cmd))
3072 return -EINVAL;
3073
3074 if (ucore->outlen < sizeof(resp))
3075 return -ENOSPC;
3076
3077 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3078 if (err)
3079 return err;
3080
3081 ucore->inbuf += sizeof(cmd);
3082 ucore->inlen -= sizeof(cmd);
3083
3084 if (cmd.comp_mask)
3085 return -EINVAL;
3086
3087 if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
3088 !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
3089 return -EPERM;
3090
3091 if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
3092 return -EINVAL;
3093
3094 if (cmd.flow_attr.size > ucore->inlen ||
3095 cmd.flow_attr.size >
3096 (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
3097 return -EINVAL;
3098
3099 if (cmd.flow_attr.reserved[0] ||
3100 cmd.flow_attr.reserved[1])
3101 return -EINVAL;
3102
3103 if (cmd.flow_attr.num_of_specs) {
3104 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
3105 GFP_KERNEL);
3106 if (!kern_flow_attr)
3107 return -ENOMEM;
3108
3109 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
3110 err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
3111 cmd.flow_attr.size);
3112 if (err)
3113 goto err_free_attr;
3114 } else {
3115 kern_flow_attr = &cmd.flow_attr;
3116 }
3117
3118 uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
3119 if (!uobj) {
3120 err = -ENOMEM;
3121 goto err_free_attr;
3122 }
3123 init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
3124 down_write(&uobj->mutex);
3125
3126 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
3127 if (!qp) {
3128 err = -EINVAL;
3129 goto err_uobj;
3130 }
3131
3132 flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
3133 if (!flow_attr) {
3134 err = -ENOMEM;
3135 goto err_put;
3136 }
3137
3138 flow_attr->type = kern_flow_attr->type;
3139 flow_attr->priority = kern_flow_attr->priority;
3140 flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
3141 flow_attr->port = kern_flow_attr->port;
3142 flow_attr->flags = kern_flow_attr->flags;
3143 flow_attr->size = sizeof(*flow_attr);
3144
3145 kern_spec = kern_flow_attr + 1;
3146 ib_spec = flow_attr + 1;
3147 for (i = 0; i < flow_attr->num_of_specs &&
3148 cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
3149 cmd.flow_attr.size >=
3150 ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
3151 err = kern_spec_to_ib_spec(kern_spec, ib_spec);
3152 if (err)
3153 goto err_free;
3154 flow_attr->size +=
3155 ((union ib_flow_spec *) ib_spec)->size;
3156 cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
3157 kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
3158 ib_spec += ((union ib_flow_spec *) ib_spec)->size;
3159 }
3160 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
3161 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
3162 i, cmd.flow_attr.size);
3163 err = -EINVAL;
3164 goto err_free;
3165 }
3166 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
3167 if (IS_ERR(flow_id)) {
3168 err = PTR_ERR(flow_id);
3169 goto err_free;
3170 }
3171 flow_id->qp = qp;
3172 flow_id->uobject = uobj;
3173 uobj->object = flow_id;
3174
3175 err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
3176 if (err)
3177 goto destroy_flow;
3178
3179 memset(&resp, 0, sizeof(resp));
3180 resp.flow_handle = uobj->id;
3181
3182 err = ib_copy_to_udata(ucore,
3183 &resp, sizeof(resp));
3184 if (err)
3185 goto err_copy;
3186
3187 put_qp_read(qp);
3188 mutex_lock(&file->mutex);
3189 list_add_tail(&uobj->list, &file->ucontext->rule_list);
3190 mutex_unlock(&file->mutex);
3191
3192 uobj->live = 1;
3193
3194 up_write(&uobj->mutex);
3195 kfree(flow_attr);
3196 if (cmd.flow_attr.num_of_specs)
3197 kfree(kern_flow_attr);
3198 return 0;
3199 err_copy:
3200 idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
3201 destroy_flow:
3202 ib_destroy_flow(flow_id);
3203 err_free:
3204 kfree(flow_attr);
3205 err_put:
3206 put_qp_read(qp);
3207 err_uobj:
3208 put_uobj_write(uobj);
3209 err_free_attr:
3210 if (cmd.flow_attr.num_of_specs)
3211 kfree(kern_flow_attr);
3212 return err;
3213 }
3214
3215 int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
3216 struct ib_device *ib_dev,
3217 struct ib_udata *ucore,
3218 struct ib_udata *uhw)
3219 {
3220 struct ib_uverbs_destroy_flow cmd;
3221 struct ib_flow *flow_id;
3222 struct ib_uobject *uobj;
3223 int ret;
3224
3225 if (ucore->inlen < sizeof(cmd))
3226 return -EINVAL;
3227
3228 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3229 if (ret)
3230 return ret;
3231
3232 if (cmd.comp_mask)
3233 return -EINVAL;
3234
3235 uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
3236 file->ucontext);
3237 if (!uobj)
3238 return -EINVAL;
3239 flow_id = uobj->object;
3240
3241 ret = ib_destroy_flow(flow_id);
3242 if (!ret)
3243 uobj->live = 0;
3244
3245 put_uobj_write(uobj);
3246
3247 idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
3248
3249 mutex_lock(&file->mutex);
3250 list_del(&uobj->list);
3251 mutex_unlock(&file->mutex);
3252
3253 put_uobj(uobj);
3254
3255 return ret;
3256 }
3257
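/*
 * Common worker for the create_srq and create_xsrq commands.  An
 * IB_SRQT_XRC SRQ additionally holds references on its XRCD and its
 * completion queue for the lifetime of the SRQ.
 */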
3258 static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
3259 struct ib_device *ib_dev,
3260 struct ib_uverbs_create_xsrq *cmd,
3261 struct ib_udata *udata)
3262 {
3263 struct ib_uverbs_create_srq_resp resp;
3264 struct ib_usrq_object *obj;
3265 struct ib_pd *pd;
3266 struct ib_srq *srq;
3267 struct ib_uobject *uninitialized_var(xrcd_uobj);
3268 struct ib_srq_init_attr attr;
3269 int ret;
3270
3271 obj = kmalloc(sizeof *obj, GFP_KERNEL);
3272 if (!obj)
3273 return -ENOMEM;
3274
3275 init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
3276 down_write(&obj->uevent.uobject.mutex);
3277
3278 if (cmd->srq_type == IB_SRQT_XRC) {
3279 attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
3280 if (!attr.ext.xrc.xrcd) {
3281 ret = -EINVAL;
3282 goto err;
3283 }
3284
3285 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
3286 atomic_inc(&obj->uxrcd->refcnt);
3287
3288 attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
3289 if (!attr.ext.xrc.cq) {
3290 ret = -EINVAL;
3291 goto err_put_xrcd;
3292 }
3293 }
3294
3295 pd = idr_read_pd(cmd->pd_handle, file->ucontext);
3296 if (!pd) {
3297 ret = -EINVAL;
3298 goto err_put_cq;
3299 }
3300
3301 attr.event_handler = ib_uverbs_srq_event_handler;
3302 attr.srq_context = file;
3303 attr.srq_type = cmd->srq_type;
3304 attr.attr.max_wr = cmd->max_wr;
3305 attr.attr.max_sge = cmd->max_sge;
3306 attr.attr.srq_limit = cmd->srq_limit;
3307
3308 obj->uevent.events_reported = 0;
3309 INIT_LIST_HEAD(&obj->uevent.event_list);
3310
3311 srq = pd->device->create_srq(pd, &attr, udata);
3312 if (IS_ERR(srq)) {
3313 ret = PTR_ERR(srq);
3314 goto err_put;
3315 }
3316
3317 srq->device = pd->device;
3318 srq->pd = pd;
3319 srq->srq_type = cmd->srq_type;
3320 srq->uobject = &obj->uevent.uobject;
3321 srq->event_handler = attr.event_handler;
3322 srq->srq_context = attr.srq_context;
3323
3324 if (cmd->srq_type == IB_SRQT_XRC) {
3325 srq->ext.xrc.cq = attr.ext.xrc.cq;
3326 srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
3327 atomic_inc(&attr.ext.xrc.cq->usecnt);
3328 atomic_inc(&attr.ext.xrc.xrcd->usecnt);
3329 }
3330
3331 atomic_inc(&pd->usecnt);
3332 atomic_set(&srq->usecnt, 0);
3333
3334 obj->uevent.uobject.object = srq;
3335 ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
3336 if (ret)
3337 goto err_destroy;
3338
3339 memset(&resp, 0, sizeof resp);
3340 resp.srq_handle = obj->uevent.uobject.id;
3341 resp.max_wr = attr.attr.max_wr;
3342 resp.max_sge = attr.attr.max_sge;
3343 if (cmd->srq_type == IB_SRQT_XRC)
3344 resp.srqn = srq->ext.xrc.srq_num;
3345
3346 if (copy_to_user((void __user *) (unsigned long) cmd->response,
3347 &resp, sizeof resp)) {
3348 ret = -EFAULT;
3349 goto err_copy;
3350 }
3351
3352 if (cmd->srq_type == IB_SRQT_XRC) {
3353 put_uobj_read(xrcd_uobj);
3354 put_cq_read(attr.ext.xrc.cq);
3355 }
3356 put_pd_read(pd);
3357
3358 mutex_lock(&file->mutex);
3359 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
3360 mutex_unlock(&file->mutex);
3361
3362 obj->uevent.uobject.live = 1;
3363
3364 up_write(&obj->uevent.uobject.mutex);
3365
3366 return 0;
3367
3368 err_copy:
3369 idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
3370
3371 err_destroy:
3372 ib_destroy_srq(srq);
3373
3374 err_put:
3375 put_pd_read(pd);
3376
3377 err_put_cq:
3378 if (cmd->srq_type == IB_SRQT_XRC)
3379 put_cq_read(attr.ext.xrc.cq);
3380
3381 err_put_xrcd:
3382 if (cmd->srq_type == IB_SRQT_XRC) {
3383 atomic_dec(&obj->uxrcd->refcnt);
3384 put_uobj_read(xrcd_uobj);
3385 }
3386
3387 err:
3388 put_uobj_write(&obj->uevent.uobject);
3389 return ret;
3390 }
3391
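/*
 * The legacy create_srq command is implemented by translating it
 * into an IB_SRQT_BASIC create_xsrq request and sharing the worker
 * above.
 */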
3392 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
3393 struct ib_device *ib_dev,
3394 const char __user *buf, int in_len,
3395 int out_len)
3396 {
3397 struct ib_uverbs_create_srq cmd;
3398 struct ib_uverbs_create_xsrq xcmd;
3399 struct ib_uverbs_create_srq_resp resp;
3400 struct ib_udata udata;
3401 int ret;
3402
3403 if (out_len < sizeof resp)
3404 return -ENOSPC;
3405
3406 if (copy_from_user(&cmd, buf, sizeof cmd))
3407 return -EFAULT;
3408
3409 xcmd.response = cmd.response;
3410 xcmd.user_handle = cmd.user_handle;
3411 xcmd.srq_type = IB_SRQT_BASIC;
3412 xcmd.pd_handle = cmd.pd_handle;
3413 xcmd.max_wr = cmd.max_wr;
3414 xcmd.max_sge = cmd.max_sge;
3415 xcmd.srq_limit = cmd.srq_limit;
3416
3417 INIT_UDATA(&udata, buf + sizeof cmd,
3418 (unsigned long) cmd.response + sizeof resp,
3419 in_len - sizeof cmd, out_len - sizeof resp);
3420
3421 ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
3422 if (ret)
3423 return ret;
3424
3425 return in_len;
3426 }
3427
3428 ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
3429 struct ib_device *ib_dev,
3430 const char __user *buf, int in_len, int out_len)
3431 {
3432 struct ib_uverbs_create_xsrq cmd;
3433 struct ib_uverbs_create_srq_resp resp;
3434 struct ib_udata udata;
3435 int ret;
3436
3437 if (out_len < sizeof resp)
3438 return -ENOSPC;
3439
3440 if (copy_from_user(&cmd, buf, sizeof cmd))
3441 return -EFAULT;
3442
3443 INIT_UDATA(&udata, buf + sizeof cmd,
3444 (unsigned long) cmd.response + sizeof resp,
3445 in_len - sizeof cmd, out_len - sizeof resp);
3446
3447 ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
3448 if (ret)
3449 return ret;
3450
3451 return in_len;
3452 }
3453
3454 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
3455 struct ib_device *ib_dev,
3456 const char __user *buf, int in_len,
3457 int out_len)
3458 {
3459 struct ib_uverbs_modify_srq cmd;
3460 struct ib_udata udata;
3461 struct ib_srq *srq;
3462 struct ib_srq_attr attr;
3463 int ret;
3464
3465 if (copy_from_user(&cmd, buf, sizeof cmd))
3466 return -EFAULT;
3467
3468 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
3469 out_len);
3470
3471 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3472 if (!srq)
3473 return -EINVAL;
3474
3475 attr.max_wr = cmd.max_wr;
3476 attr.srq_limit = cmd.srq_limit;
3477
3478 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
3479
3480 put_srq_read(srq);
3481
3482 return ret ? ret : in_len;
3483 }
3484
3485 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
3486 struct ib_device *ib_dev,
3487 const char __user *buf,
3488 int in_len, int out_len)
3489 {
3490 struct ib_uverbs_query_srq cmd;
3491 struct ib_uverbs_query_srq_resp resp;
3492 struct ib_srq_attr attr;
3493 struct ib_srq *srq;
3494 int ret;
3495
3496 if (out_len < sizeof resp)
3497 return -ENOSPC;
3498
3499 if (copy_from_user(&cmd, buf, sizeof cmd))
3500 return -EFAULT;
3501
3502 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3503 if (!srq)
3504 return -EINVAL;
3505
3506 ret = ib_query_srq(srq, &attr);
3507
3508 put_srq_read(srq);
3509
3510 if (ret)
3511 return ret;
3512
3513 memset(&resp, 0, sizeof resp);
3514
3515 resp.max_wr = attr.max_wr;
3516 resp.max_sge = attr.max_sge;
3517 resp.srq_limit = attr.srq_limit;
3518
3519 if (copy_to_user((void __user *) (unsigned long) cmd.response,
3520 &resp, sizeof resp))
3521 return -EFAULT;
3522
3523 return in_len;
3524 }
3525
3526 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
3527 struct ib_device *ib_dev,
3528 const char __user *buf, int in_len,
3529 int out_len)
3530 {
3531 struct ib_uverbs_destroy_srq cmd;
3532 struct ib_uverbs_destroy_srq_resp resp;
3533 struct ib_uobject *uobj;
3534 struct ib_srq *srq;
3535 struct ib_uevent_object *obj;
3536 int ret = -EINVAL;
3537 struct ib_usrq_object *us;
3538 enum ib_srq_type srq_type;
3539
3540 if (copy_from_user(&cmd, buf, sizeof cmd))
3541 return -EFAULT;
3542
3543 uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
3544 if (!uobj)
3545 return -EINVAL;
3546 srq = uobj->object;
3547 obj = container_of(uobj, struct ib_uevent_object, uobject);
3548 srq_type = srq->srq_type;
3549
3550 ret = ib_destroy_srq(srq);
3551 if (!ret)
3552 uobj->live = 0;
3553
3554 put_uobj_write(uobj);
3555
3556 if (ret)
3557 return ret;
3558
3559 if (srq_type == IB_SRQT_XRC) {
3560 us = container_of(obj, struct ib_usrq_object, uevent);
3561 atomic_dec(&us->uxrcd->refcnt);
3562 }
3563
3564 idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
3565
3566 mutex_lock(&file->mutex);
3567 list_del(&uobj->list);
3568 mutex_unlock(&file->mutex);
3569
3570 ib_uverbs_release_uevent(file, obj);
3571
3572 memset(&resp, 0, sizeof resp);
3573 resp.events_reported = obj->events_reported;
3574
3575 put_uobj(uobj);
3576
3577 if (copy_to_user((void __user *) (unsigned long) cmd.response,
3578 &resp, sizeof resp))
3579 ret = -EFAULT;
3580
3581 return ret ? ret : in_len;
3582 }
3583
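/*
 * Extended query_device with an extensible response: each optional
 * field (ODP caps, timestamp mask, HCA core clock) is appended, and
 * resp.response_length grown, only if the user's output buffer can
 * hold it, so older userspace receives a shorter but still valid
 * reply.
 */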
3584 int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
3585 struct ib_device *ib_dev,
3586 struct ib_udata *ucore,
3587 struct ib_udata *uhw)
3588 {
3589 struct ib_uverbs_ex_query_device_resp resp;
3590 struct ib_uverbs_ex_query_device cmd;
3591 struct ib_device_attr attr;
3592 int err;
3593
3594 if (ucore->inlen < sizeof(cmd))
3595 return -EINVAL;
3596
3597 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3598 if (err)
3599 return err;
3600
3601 if (cmd.comp_mask)
3602 return -EINVAL;
3603
3604 if (cmd.reserved)
3605 return -EINVAL;
3606
3607 resp.response_length = offsetof(typeof(resp), odp_caps);
3608
3609 if (ucore->outlen < resp.response_length)
3610 return -ENOSPC;
3611
3612 memset(&attr, 0, sizeof(attr));
3613
3614 err = ib_dev->query_device(ib_dev, &attr, uhw);
3615 if (err)
3616 return err;
3617
3618 copy_query_dev_fields(file, ib_dev, &resp.base, &attr);
3619 resp.comp_mask = 0;
3620
3621 if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
3622 goto end;
3623
3624 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
3625 resp.odp_caps.general_caps = attr.odp_caps.general_caps;
3626 resp.odp_caps.per_transport_caps.rc_odp_caps =
3627 attr.odp_caps.per_transport_caps.rc_odp_caps;
3628 resp.odp_caps.per_transport_caps.uc_odp_caps =
3629 attr.odp_caps.per_transport_caps.uc_odp_caps;
3630 resp.odp_caps.per_transport_caps.ud_odp_caps =
3631 attr.odp_caps.per_transport_caps.ud_odp_caps;
3632 resp.odp_caps.reserved = 0;
3633 #else
3634 memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
3635 #endif
3636 resp.response_length += sizeof(resp.odp_caps);
3637
3638 if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
3639 goto end;
3640
3641 resp.timestamp_mask = attr.timestamp_mask;
3642 resp.response_length += sizeof(resp.timestamp_mask);
3643
3644 if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
3645 goto end;
3646
3647 resp.hca_core_clock = attr.hca_core_clock;
3648 resp.response_length += sizeof(resp.hca_core_clock);
3649
3650 end:
3651 err = ib_copy_to_udata(ucore, &resp, resp.response_length);
3652 if (err)
3653 return err;
3654
3655 return 0;
3656 }