drivers/infiniband/sw/rdmavt/qp.c
1 /*
2 * Copyright(c) 2016 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47
48 #include <linux/hash.h>
49 #include <linux/bitops.h>
50 #include <linux/lockdep.h>
51 #include <linux/vmalloc.h>
52 #include <linux/slab.h>
53 #include <rdma/ib_verbs.h>
54 #include "qp.h"
55 #include "vt.h"
56 #include "trace.h"
57
58 /*
59 * Note that it is OK to post send work requests in the SQE and ERR
60 * states; rvt_do_send() will process them and generate error
61 * completions as per IB 1.2 C10-96.
62 */
63 const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
64 [IB_QPS_RESET] = 0,
65 [IB_QPS_INIT] = RVT_POST_RECV_OK,
66 [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
67 [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
68 RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
69 RVT_PROCESS_NEXT_SEND_OK,
70 [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
71 RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
72 [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
73 RVT_POST_SEND_OK | RVT_FLUSH_SEND,
74 [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
75 RVT_POST_SEND_OK | RVT_FLUSH_SEND,
76 };
77 EXPORT_SYMBOL(ib_rvt_state_ops);
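/*
 * Illustrative sketch (not part of this file's API): callers gate their
 * work on these bits. A send-side progress routine, for example, could
 * bail out early with:
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK))
 *		return;
 */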
78
79 static void get_map_page(struct rvt_qpn_table *qpt,
80 struct rvt_qpn_map *map,
81 gfp_t gfp)
82 {
83 unsigned long page = get_zeroed_page(gfp);
84
85 /*
86 * Free the page if someone raced with us installing it.
87 */
88
89 spin_lock(&qpt->lock);
90 if (map->page)
91 free_page(page);
92 else
93 map->page = (void *)page;
94 spin_unlock(&qpt->lock);
95 }
96
97 /**
98 * init_qpn_table - initialize the QP number table for a device
99 * @qpt: the QPN table
100 */
101 static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
102 {
103 u32 offset, i;
104 struct rvt_qpn_map *map;
105 int ret = 0;
106
107 if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
108 return -EINVAL;
109
110 spin_lock_init(&qpt->lock);
111
112 qpt->last = rdi->dparms.qpn_start;
113 qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;
114
115 /*
116	 * Drivers may want some QPs beyond what verbs needs; let them use
117	 * our qpn table rather than keeping a second one. Mark the bitmaps
118	 * for those here. The reserved range must be *after* the range which
119	 * verbs will pick from.
120 */
121
122 /* Figure out number of bit maps needed before reserved range */
123 qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;
124
125 /* This should always be zero */
126 offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;
127
128 /* Starting with the first reserved bit map */
129 map = &qpt->map[qpt->nmaps];
130
131 rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
132 rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
133 for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
134 if (!map->page) {
135 get_map_page(qpt, map, GFP_KERNEL);
136 if (!map->page) {
137 ret = -ENOMEM;
138 break;
139 }
140 }
141 set_bit(offset, map->page);
142 offset++;
143 if (offset == RVT_BITS_PER_PAGE) {
144 /* next page */
145 qpt->nmaps++;
146 map++;
147 offset = 0;
148 }
149 }
150 return ret;
151 }
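/*
 * Worked example of the arithmetic above, with made-up numbers: assuming
 * RVT_BITS_PER_PAGE is 32768 (4 KiB pages) and a driver reserves QPNs
 * 0x10000 through 0x1ffff, then nmaps = 0x10000 / 32768 = 2 and
 * offset = 0x10000 & 32767 = 0, so the loop sets 65536 bits spanning
 * two map pages starting at qpt->map[2].
 */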
152
153 /**
154 * free_qpn_table - free the QP number table for a device
155 * @qpt: the QPN table
156 */
157 static void free_qpn_table(struct rvt_qpn_table *qpt)
158 {
159 int i;
160
161 for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
162 free_page((unsigned long)qpt->map[i].page);
163 }
164
165 /**
166 * rvt_driver_qp_init - Init driver qp resources
167	 * @rdi: rvt dev structure
168 *
169 * Return: 0 on success
170 */
171 int rvt_driver_qp_init(struct rvt_dev_info *rdi)
172 {
173 int i;
174 int ret = -ENOMEM;
175
176 if (!rdi->dparms.qp_table_size)
177 return -EINVAL;
178
179 /*
180 * If driver is not doing any QP allocation then make sure it is
181 * providing the necessary QP functions.
182 */
183 if (!rdi->driver_f.free_all_qps ||
184 !rdi->driver_f.qp_priv_alloc ||
185 !rdi->driver_f.qp_priv_free ||
186 !rdi->driver_f.notify_qp_reset)
187 return -EINVAL;
188
189 /* allocate parent object */
190 rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
191 rdi->dparms.node);
192 if (!rdi->qp_dev)
193 return -ENOMEM;
194
195 /* allocate hash table */
196 rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
197 rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
198 rdi->qp_dev->qp_table =
199 kmalloc_node(rdi->qp_dev->qp_table_size *
200 sizeof(*rdi->qp_dev->qp_table),
201 GFP_KERNEL, rdi->dparms.node);
202 if (!rdi->qp_dev->qp_table)
203 goto no_qp_table;
204
205 for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
206 RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);
207
208 spin_lock_init(&rdi->qp_dev->qpt_lock);
209
210 /* initialize qpn map */
211 if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
212 goto fail_table;
213
214 spin_lock_init(&rdi->n_qps_lock);
215
216 return 0;
217
218 fail_table:
219 kfree(rdi->qp_dev->qp_table);
220 free_qpn_table(&rdi->qp_dev->qpn_table);
221
222 no_qp_table:
223 kfree(rdi->qp_dev);
224
225 return ret;
226 }
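/*
 * Minimal sketch of the setup a driver performs before calling
 * rvt_driver_qp_init(); values and the my_*() callbacks are made up:
 *
 *	rdi->dparms.qp_table_size = 256;
 *	rdi->dparms.qpn_start = 0;
 *	rdi->dparms.qpn_inc = 1;
 *	rdi->dparms.qos_shift = 0;
 *	rdi->driver_f.free_all_qps = my_free_all_qps;
 *	rdi->driver_f.qp_priv_alloc = my_qp_priv_alloc;
 *	rdi->driver_f.qp_priv_free = my_qp_priv_free;
 *	rdi->driver_f.notify_qp_reset = my_notify_qp_reset;
 *	err = rvt_driver_qp_init(rdi);
 */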
227
228 /**
229	 * rvt_free_all_qps - check for QPs still in use
230	 * @rdi: rvt device info structure
231	 *
232	 * There should not be any QPs still in use.
233	 * The QP table is emptied; a count of QPs still in use is returned.
234 */
235 static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
236 {
237 unsigned long flags;
238 struct rvt_qp *qp;
239 unsigned n, qp_inuse = 0;
240 spinlock_t *ql; /* work around too long line below */
241
242 if (rdi->driver_f.free_all_qps)
243 qp_inuse = rdi->driver_f.free_all_qps(rdi);
244
245 qp_inuse += rvt_mcast_tree_empty(rdi);
246
247 if (!rdi->qp_dev)
248 return qp_inuse;
249
250 ql = &rdi->qp_dev->qpt_lock;
251 spin_lock_irqsave(ql, flags);
252 for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
253 qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
254 lockdep_is_held(ql));
255 RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
256
257 for (; qp; qp = rcu_dereference_protected(qp->next,
258 lockdep_is_held(ql)))
259 qp_inuse++;
260 }
261 spin_unlock_irqrestore(ql, flags);
262 synchronize_rcu();
263 return qp_inuse;
264 }
265
266 /**
267 * rvt_qp_exit - clean up qps on device exit
268 * @rdi: rvt dev structure
269 *
270 * Check for qp leaks and free resources.
271 */
272 void rvt_qp_exit(struct rvt_dev_info *rdi)
273 {
274 u32 qps_inuse = rvt_free_all_qps(rdi);
275
276 if (qps_inuse)
277 rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
278 qps_inuse);
279 if (!rdi->qp_dev)
280 return;
281
282 kfree(rdi->qp_dev->qp_table);
283 free_qpn_table(&rdi->qp_dev->qpn_table);
284 kfree(rdi->qp_dev);
285 }
286
287 static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
288 struct rvt_qpn_map *map, unsigned off)
289 {
290 return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
291 }
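/*
 * Worked example (hypothetical page size): with RVT_BITS_PER_PAGE of
 * 32768, map == &qpt->map[3] and off == 5 give QPN 3 * 32768 + 5 ==
 * 98309. free_qpn() below inverts this split with a divide and a mask.
 */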
292
293 /**
294 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
295 * IB_QPT_SMI/IB_QPT_GSI
296	 * @rdi: rvt device info structure
297	 * @qpt: queue pair number table pointer
298	 * @port_num: IB port number, 1 based, comes from core
299 *
300 * Return: The queue pair number
301 */
302 static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
303 enum ib_qp_type type, u8 port_num, gfp_t gfp)
304 {
305 u32 i, offset, max_scan, qpn;
306 struct rvt_qpn_map *map;
307 u32 ret;
308
309 if (rdi->driver_f.alloc_qpn)
310 return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp);
311
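	/*
	 * Illustration of the flag math in the block below: each port owns
	 * two bits in qpt->flags, so port 1 claims 1<<0 (SMI, QPN 0) and
	 * 1<<1 (GSI, QPN 1), port 2 claims 1<<2 and 1<<3, and so on.
	 */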
312 if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
313 unsigned n;
314
315 ret = type == IB_QPT_GSI;
316 n = 1 << (ret + 2 * (port_num - 1));
317 spin_lock(&qpt->lock);
318 if (qpt->flags & n)
319 ret = -EINVAL;
320 else
321 qpt->flags |= n;
322 spin_unlock(&qpt->lock);
323 goto bail;
324 }
325
326 qpn = qpt->last + qpt->incr;
327 if (qpn >= RVT_QPN_MAX)
328 qpn = qpt->incr | ((qpt->last & 1) ^ 1);
329 /* offset carries bit 0 */
330 offset = qpn & RVT_BITS_PER_PAGE_MASK;
331 map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
332 max_scan = qpt->nmaps - !offset;
333 for (i = 0;;) {
334 if (unlikely(!map->page)) {
335 get_map_page(qpt, map, gfp);
336 if (unlikely(!map->page))
337 break;
338 }
339 do {
340 if (!test_and_set_bit(offset, map->page)) {
341 qpt->last = qpn;
342 ret = qpn;
343 goto bail;
344 }
345 offset += qpt->incr;
346 /*
347 * This qpn might be bogus if offset >= BITS_PER_PAGE.
348 * That is OK. It gets re-assigned below
349 */
350 qpn = mk_qpn(qpt, map, offset);
351 } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
352 /*
353 * In order to keep the number of pages allocated to a
354	 * minimum, we scan all the existing pages before increasing
355 * the size of the bitmap table.
356 */
357 if (++i > max_scan) {
358 if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
359 break;
360 map = &qpt->map[qpt->nmaps++];
361 /* start at incr with current bit 0 */
362 offset = qpt->incr | (offset & 1);
363 } else if (map < &qpt->map[qpt->nmaps]) {
364 ++map;
365 /* start at incr with current bit 0 */
366 offset = qpt->incr | (offset & 1);
367 } else {
368 map = &qpt->map[0];
369 /* wrap to first map page, invert bit 0 */
370 offset = qpt->incr | ((offset & 1) ^ 1);
371 }
372 /* there can be no bits at shift and below */
373 WARN_ON(offset & (rdi->dparms.qos_shift - 1));
374 qpn = mk_qpn(qpt, map, offset);
375 }
376
377 ret = -ENOMEM;
378
379 bail:
380 return ret;
381 }
382
383 static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
384 {
385 struct rvt_qpn_map *map;
386
387 map = qpt->map + qpn / RVT_BITS_PER_PAGE;
388 if (map->page)
389 clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
390 }
391
392 /**
393	 * rvt_clear_mr_refs - Drop held mr refs
394	 * @qp: rvt qp data structure
395	 * @clr_sends: If the send side should be cleared or not
396 */
397 static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
398 {
399 unsigned n;
400
401 if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
402 rvt_put_ss(&qp->s_rdma_read_sge);
403
404 rvt_put_ss(&qp->r_sge);
405
406 if (clr_sends) {
407 while (qp->s_last != qp->s_head) {
408 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
409 unsigned i;
410
411 for (i = 0; i < wqe->wr.num_sge; i++) {
412 struct rvt_sge *sge = &wqe->sg_list[i];
413
414 rvt_put_mr(sge->mr);
415 }
416 if (qp->ibqp.qp_type == IB_QPT_UD ||
417 qp->ibqp.qp_type == IB_QPT_SMI ||
418 qp->ibqp.qp_type == IB_QPT_GSI)
419 atomic_dec(&ibah_to_rvtah(
420 wqe->ud_wr.ah)->refcount);
421 if (++qp->s_last >= qp->s_size)
422 qp->s_last = 0;
423 smp_wmb(); /* see qp_set_savail */
424 }
425 if (qp->s_rdma_mr) {
426 rvt_put_mr(qp->s_rdma_mr);
427 qp->s_rdma_mr = NULL;
428 }
429 }
430
431 if (qp->ibqp.qp_type != IB_QPT_RC)
432 return;
433
434 for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
435 struct rvt_ack_entry *e = &qp->s_ack_queue[n];
436
437 if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
438 e->rdma_sge.mr) {
439 rvt_put_mr(e->rdma_sge.mr);
440 e->rdma_sge.mr = NULL;
441 }
442 }
443 }
444
445 /**
446	 * rvt_remove_qp - remove qp from table
447 * @rdi: rvt dev struct
448 * @qp: qp to remove
449 *
450 * Remove the QP from the table so it can't be found asynchronously by
451 * the receive routine.
452 */
453 static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
454 {
455 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
456 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
457 unsigned long flags;
458 int removed = 1;
459
460 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
461
462 if (rcu_dereference_protected(rvp->qp[0],
463 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
464 RCU_INIT_POINTER(rvp->qp[0], NULL);
465 } else if (rcu_dereference_protected(rvp->qp[1],
466 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
467 RCU_INIT_POINTER(rvp->qp[1], NULL);
468 } else {
469 struct rvt_qp *q;
470 struct rvt_qp __rcu **qpp;
471
472 removed = 0;
473 qpp = &rdi->qp_dev->qp_table[n];
474 for (; (q = rcu_dereference_protected(*qpp,
475 lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
476 qpp = &q->next) {
477 if (q == qp) {
478 RCU_INIT_POINTER(*qpp,
479 rcu_dereference_protected(qp->next,
480 lockdep_is_held(&rdi->qp_dev->qpt_lock)));
481 removed = 1;
482 trace_rvt_qpremove(qp, n);
483 break;
484 }
485 }
486 }
487
488 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
489 if (removed) {
490 synchronize_rcu();
491 if (atomic_dec_and_test(&qp->refcount))
492 wake_up(&qp->wait);
493 }
494 }
495
496 /**
497	 * rvt_reset_qp - initialize the QP state to the reset state
498	 * @qp: the QP to reset
499	 * @type: the QP type
500	 * The r_lock, s_hlock, and s_lock are required to be held by the caller
501 */
502 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
503 enum ib_qp_type type)
504 {
505 if (qp->state != IB_QPS_RESET) {
506 qp->state = IB_QPS_RESET;
507
508 /* Let drivers flush their waitlist */
509 rdi->driver_f.flush_qp_waiters(qp);
510 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
511 spin_unlock(&qp->s_lock);
512 spin_unlock(&qp->s_hlock);
513 spin_unlock_irq(&qp->r_lock);
514
515 /* Stop the send queue and the retry timer */
516 rdi->driver_f.stop_send_queue(qp);
517
518 /* Wait for things to stop */
519 rdi->driver_f.quiesce_qp(qp);
520
521	/* take qp out of the hash and wait for it to be unused */
522 rvt_remove_qp(rdi, qp);
523 wait_event(qp->wait, !atomic_read(&qp->refcount));
524
525 /* grab the lock b/c it was locked at call time */
526 spin_lock_irq(&qp->r_lock);
527 spin_lock(&qp->s_hlock);
528 spin_lock(&qp->s_lock);
529
530 rvt_clear_mr_refs(qp, 1);
531 }
532
533 /*
534 * Let the driver do any tear down it needs to for a qp
535 * that has been reset
536 */
537 rdi->driver_f.notify_qp_reset(qp);
538
539 qp->remote_qpn = 0;
540 qp->qkey = 0;
541 qp->qp_access_flags = 0;
542 qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
543 qp->s_hdrwords = 0;
544 qp->s_wqe = NULL;
545 qp->s_draining = 0;
546 qp->s_next_psn = 0;
547 qp->s_last_psn = 0;
548 qp->s_sending_psn = 0;
549 qp->s_sending_hpsn = 0;
550 qp->s_psn = 0;
551 qp->r_psn = 0;
552 qp->r_msn = 0;
553 if (type == IB_QPT_RC) {
554 qp->s_state = IB_OPCODE_RC_SEND_LAST;
555 qp->r_state = IB_OPCODE_RC_SEND_LAST;
556 } else {
557 qp->s_state = IB_OPCODE_UC_SEND_LAST;
558 qp->r_state = IB_OPCODE_UC_SEND_LAST;
559 }
560 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
561 qp->r_nak_state = 0;
562 qp->r_aflags = 0;
563 qp->r_flags = 0;
564 qp->s_head = 0;
565 qp->s_tail = 0;
566 qp->s_cur = 0;
567 qp->s_acked = 0;
568 qp->s_last = 0;
569 qp->s_ssn = 1;
570 qp->s_lsn = 0;
571 qp->s_mig_state = IB_MIG_MIGRATED;
572 memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
573 qp->r_head_ack_queue = 0;
574 qp->s_tail_ack_queue = 0;
575 qp->s_num_rd_atomic = 0;
576 if (qp->r_rq.wq) {
577 qp->r_rq.wq->head = 0;
578 qp->r_rq.wq->tail = 0;
579 }
580 qp->r_sge.num_sge = 0;
581 }
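/*
 * Illustrative caller sketch: rvt_reset_qp() expects its three locks
 * taken in this order (rvt_destroy_qp() below is a real instance):
 *
 *	spin_lock_irq(&qp->r_lock);
 *	spin_lock(&qp->s_hlock);
 *	spin_lock(&qp->s_lock);
 *	rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);
 *	spin_unlock(&qp->s_lock);
 *	spin_unlock(&qp->s_hlock);
 *	spin_unlock_irq(&qp->r_lock);
 */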
582
583 /**
584 * rvt_create_qp - create a queue pair for a device
585	 * @ibpd: the protection domain whose device we create the queue pair for
586 * @init_attr: the attributes of the queue pair
587 * @udata: user data for libibverbs.so
588 *
589 * Queue pair creation is mostly an rvt issue. However, drivers have their own
590 * unique idea of what queue pair numbers mean. For instance there is a reserved
591 * range for PSM.
592 *
593 * Return: the queue pair on success, otherwise returns an errno.
594 *
595 * Called by the ib_create_qp() core verbs function.
596 */
597 struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
598 struct ib_qp_init_attr *init_attr,
599 struct ib_udata *udata)
600 {
601 struct rvt_qp *qp;
602 int err;
603 struct rvt_swqe *swq = NULL;
604 size_t sz;
605 size_t sg_list_sz;
606 struct ib_qp *ret = ERR_PTR(-ENOMEM);
607 struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
608 void *priv = NULL;
609 gfp_t gfp;
610
611 if (!rdi)
612 return ERR_PTR(-EINVAL);
613
614 if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
615 init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
616 init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
617 return ERR_PTR(-EINVAL);
618
619 /* GFP_NOIO is applicable to RC QP's only */
620
621 if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
622 init_attr->qp_type != IB_QPT_RC)
623 return ERR_PTR(-EINVAL);
624
625 gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
626 GFP_NOIO : GFP_KERNEL;
627
628 /* Check receive queue parameters if no SRQ is specified. */
629 if (!init_attr->srq) {
630 if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
631 init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
632 return ERR_PTR(-EINVAL);
633
634 if (init_attr->cap.max_send_sge +
635 init_attr->cap.max_send_wr +
636 init_attr->cap.max_recv_sge +
637 init_attr->cap.max_recv_wr == 0)
638 return ERR_PTR(-EINVAL);
639 }
640
641 switch (init_attr->qp_type) {
642 case IB_QPT_SMI:
643 case IB_QPT_GSI:
644 if (init_attr->port_num == 0 ||
645 init_attr->port_num > ibpd->device->phys_port_cnt)
646 return ERR_PTR(-EINVAL);
647 case IB_QPT_UC:
648 case IB_QPT_RC:
649 case IB_QPT_UD:
650 sz = sizeof(struct rvt_sge) *
651 init_attr->cap.max_send_sge +
652 sizeof(struct rvt_swqe);
653 if (gfp == GFP_NOIO)
654 swq = __vmalloc(
655 (init_attr->cap.max_send_wr + 1) * sz,
656 gfp, PAGE_KERNEL);
657 else
658 swq = vmalloc_node(
659 (init_attr->cap.max_send_wr + 1) * sz,
660 rdi->dparms.node);
661 if (!swq)
662 return ERR_PTR(-ENOMEM);
663
664 sz = sizeof(*qp);
665 sg_list_sz = 0;
666 if (init_attr->srq) {
667 struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
668
669 if (srq->rq.max_sge > 1)
670 sg_list_sz = sizeof(*qp->r_sg_list) *
671 (srq->rq.max_sge - 1);
672 } else if (init_attr->cap.max_recv_sge > 1)
673 sg_list_sz = sizeof(*qp->r_sg_list) *
674 (init_attr->cap.max_recv_sge - 1);
675 qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
676 if (!qp)
677 goto bail_swq;
678
679 RCU_INIT_POINTER(qp->next, NULL);
680
681 /*
682	 * Driver needs to set up its private QP structure and do any
683 * initialization that is needed.
684 */
685 priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
686 if (!priv)
687 goto bail_qp;
688 qp->priv = priv;
689 qp->timeout_jiffies =
690 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
691 1000UL);
692 if (init_attr->srq) {
693 sz = 0;
694 } else {
695 qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
696 qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
697 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
698 sizeof(struct rvt_rwqe);
699 if (udata)
700 qp->r_rq.wq = vmalloc_user(
701 sizeof(struct rvt_rwq) +
702 qp->r_rq.size * sz);
703 else if (gfp == GFP_NOIO)
704 qp->r_rq.wq = __vmalloc(
705 sizeof(struct rvt_rwq) +
706 qp->r_rq.size * sz,
707 gfp, PAGE_KERNEL);
708 else
709 qp->r_rq.wq = vmalloc_node(
710 sizeof(struct rvt_rwq) +
711 qp->r_rq.size * sz,
712 rdi->dparms.node);
713 if (!qp->r_rq.wq)
714 goto bail_driver_priv;
715 }
716
717 /*
718 * ib_create_qp() will initialize qp->ibqp
719 * except for qp->ibqp.qp_num.
720 */
721 spin_lock_init(&qp->r_lock);
722 spin_lock_init(&qp->s_hlock);
723 spin_lock_init(&qp->s_lock);
724 spin_lock_init(&qp->r_rq.lock);
725 atomic_set(&qp->refcount, 0);
726 init_waitqueue_head(&qp->wait);
727 init_timer(&qp->s_timer);
728 qp->s_timer.data = (unsigned long)qp;
729 INIT_LIST_HEAD(&qp->rspwait);
730 qp->state = IB_QPS_RESET;
731 qp->s_wq = swq;
732 qp->s_size = init_attr->cap.max_send_wr + 1;
733 qp->s_avail = init_attr->cap.max_send_wr;
734 qp->s_max_sge = init_attr->cap.max_send_sge;
735 if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
736 qp->s_flags = RVT_S_SIGNAL_REQ_WR;
737
738 err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
739 init_attr->qp_type,
740 init_attr->port_num, gfp);
741 if (err < 0) {
742 ret = ERR_PTR(err);
743 goto bail_rq_wq;
744 }
745 qp->ibqp.qp_num = err;
746 qp->port_num = init_attr->port_num;
747 rvt_reset_qp(rdi, qp, init_attr->qp_type);
748 break;
749
750 default:
751 /* Don't support raw QPs */
752 return ERR_PTR(-EINVAL);
753 }
754
755 init_attr->cap.max_inline_data = 0;
756
757 /*
758 * Return the address of the RWQ as the offset to mmap.
759 * See rvt_mmap() for details.
760 */
761 if (udata && udata->outlen >= sizeof(__u64)) {
762 if (!qp->r_rq.wq) {
763 __u64 offset = 0;
764
765 err = ib_copy_to_udata(udata, &offset,
766 sizeof(offset));
767 if (err) {
768 ret = ERR_PTR(err);
769 goto bail_qpn;
770 }
771 } else {
772 u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
773
774 qp->ip = rvt_create_mmap_info(rdi, s,
775 ibpd->uobject->context,
776 qp->r_rq.wq);
777 if (!qp->ip) {
778 ret = ERR_PTR(-ENOMEM);
779 goto bail_qpn;
780 }
781
782 err = ib_copy_to_udata(udata, &qp->ip->offset,
783 sizeof(qp->ip->offset));
784 if (err) {
785 ret = ERR_PTR(err);
786 goto bail_ip;
787 }
788 }
789 qp->pid = current->pid;
790 }
791
792 spin_lock(&rdi->n_qps_lock);
793 if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
794 spin_unlock(&rdi->n_qps_lock);
795 ret = ERR_PTR(-ENOMEM);
796 goto bail_ip;
797 }
798
799 rdi->n_qps_allocated++;
800 /*
801 * Maintain a busy_jiffies variable that will be added to the timeout
802 * period in mod_retry_timer and add_retry_timer. This busy jiffies
803 * is scaled by the number of rc qps created for the device to reduce
804 * the number of timeouts occurring when there is a large number of
805 * qps. busy_jiffies is incremented every rc qp scaling interval.
806 * The scaling interval is selected based on extensive performance
807 * evaluation of targeted workloads.
808 */
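	/*
	 * Worked illustration with a made-up value (RC_QP_SCALING_INTERVAL
	 * is a tuned constant): if the interval were 16, then 50 RC QPs
	 * would add 50 / 16 == 3 jiffies of slack to each retry timeout.
	 */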
809 if (init_attr->qp_type == IB_QPT_RC) {
810 rdi->n_rc_qps++;
811 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
812 }
813 spin_unlock(&rdi->n_qps_lock);
814
815 if (qp->ip) {
816 spin_lock_irq(&rdi->pending_lock);
817 list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
818 spin_unlock_irq(&rdi->pending_lock);
819 }
820
821 ret = &qp->ibqp;
822
823 /*
824	 * We have our QP and it's good, now keep track of what types of opcodes
825 * can be processed on this QP. We do this by keeping track of what the
826 * 3 high order bits of the opcode are.
827 */
828 switch (init_attr->qp_type) {
829 case IB_QPT_SMI:
830 case IB_QPT_GSI:
831 case IB_QPT_UD:
832 qp->allowed_ops = IB_OPCODE_UD_SEND_ONLY & RVT_OPCODE_QP_MASK;
833 break;
834 case IB_QPT_RC:
835 qp->allowed_ops = IB_OPCODE_RC_SEND_ONLY & RVT_OPCODE_QP_MASK;
836 break;
837 case IB_QPT_UC:
838 qp->allowed_ops = IB_OPCODE_UC_SEND_ONLY & RVT_OPCODE_QP_MASK;
839 break;
840 default:
841 ret = ERR_PTR(-EINVAL);
842 goto bail_ip;
843 }
844
845 return ret;
846
847 bail_ip:
848 kref_put(&qp->ip->ref, rvt_release_mmap_info);
849
850 bail_qpn:
851 free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
852
853 bail_rq_wq:
854 vfree(qp->r_rq.wq);
855
856 bail_driver_priv:
857 rdi->driver_f.qp_priv_free(rdi, qp);
858
859 bail_qp:
860 kfree(qp);
861
862 bail_swq:
863 vfree(swq);
864
865 return ret;
866 }
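/*
 * Illustrative sketch of a caller reaching rvt_create_qp() through
 * ib_create_qp(); all values are made up and the CQs are assumed to
 * exist already:
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.qp_type = IB_QPT_RC,
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.send_cq = send_cq,
 *		.recv_cq = recv_cq,
 *		.cap = {
 *			.max_send_wr = 128,
 *			.max_recv_wr = 128,
 *			.max_send_sge = 4,
 *			.max_recv_sge = 4,
 *		},
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 */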
867
868 /**
869 * rvt_error_qp - put a QP into the error state
870 * @qp: the QP to put into the error state
871 * @err: the receive completion error to signal if a RWQE is active
872 *
873 * Flushes both send and receive work queues.
874 *
875 * Return: true if last WQE event should be generated.
876 * The QP r_lock and s_lock should be held and interrupts disabled.
877 * If we are already in error state, just return.
878 */
879 int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
880 {
881 struct ib_wc wc;
882 int ret = 0;
883 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
884
885 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
886 goto bail;
887
888 qp->state = IB_QPS_ERR;
889
890 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
891 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
892 del_timer(&qp->s_timer);
893 }
894
895 if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
896 qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
897
898 rdi->driver_f.notify_error_qp(qp);
899
900 /* Schedule the sending tasklet to drain the send work queue. */
901 if (ACCESS_ONCE(qp->s_last) != qp->s_head)
902 rdi->driver_f.schedule_send(qp);
903
904 rvt_clear_mr_refs(qp, 0);
905
906 memset(&wc, 0, sizeof(wc));
907 wc.qp = &qp->ibqp;
908 wc.opcode = IB_WC_RECV;
909
910 if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
911 wc.wr_id = qp->r_wr_id;
912 wc.status = err;
913 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
914 }
915 wc.status = IB_WC_WR_FLUSH_ERR;
916
917 if (qp->r_rq.wq) {
918 struct rvt_rwq *wq;
919 u32 head;
920 u32 tail;
921
922 spin_lock(&qp->r_rq.lock);
923
924 /* sanity check pointers before trusting them */
925 wq = qp->r_rq.wq;
926 head = wq->head;
927 if (head >= qp->r_rq.size)
928 head = 0;
929 tail = wq->tail;
930 if (tail >= qp->r_rq.size)
931 tail = 0;
932 while (tail != head) {
933 wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
934 if (++tail >= qp->r_rq.size)
935 tail = 0;
936 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
937 }
938 wq->tail = tail;
939
940 spin_unlock(&qp->r_rq.lock);
941 } else if (qp->ibqp.event_handler) {
942 ret = 1;
943 }
944
945 bail:
946 return ret;
947 }
948 EXPORT_SYMBOL(rvt_error_qp);
949
950 /*
951 * Put the QP into the hash table.
952 * The hash table holds a reference to the QP.
953 */
954 static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
955 {
956 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
957 unsigned long flags;
958
959 atomic_inc(&qp->refcount);
960 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
961
962 if (qp->ibqp.qp_num <= 1) {
963 rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
964 } else {
965 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
966
967 qp->next = rdi->qp_dev->qp_table[n];
968 rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
969 trace_rvt_qpinsert(qp, n);
970 }
971
972 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
973 }
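/*
 * Sketch of the matching RCU reader side (the actual QPN lookup lives
 * elsewhere in the stack; this is illustrative only):
 *
 *	u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);
 *
 *	rcu_read_lock();
 *	for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
 *	     qp = rcu_dereference(qp->next))
 *		if (qp->ibqp.qp_num == qpn)
 *			break;
 *	rcu_read_unlock();
 */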
974
975 /**
976	 * rvt_modify_qp - modify the attributes of a queue pair
977	 * @ibqp: the queue pair whose attributes we're modifying
978 * @attr: the new attributes
979 * @attr_mask: the mask of attributes to modify
980 * @udata: user data for libibverbs.so
981 *
982 * Return: 0 on success, otherwise returns an errno.
983 */
984 int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
985 int attr_mask, struct ib_udata *udata)
986 {
987 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
988 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
989 enum ib_qp_state cur_state, new_state;
990 struct ib_event ev;
991 int lastwqe = 0;
992 int mig = 0;
993 int pmtu = 0; /* for gcc warning only */
994 enum rdma_link_layer link;
995
996 link = rdma_port_get_link_layer(ibqp->device, qp->port_num);
997
998 spin_lock_irq(&qp->r_lock);
999 spin_lock(&qp->s_hlock);
1000 spin_lock(&qp->s_lock);
1001
1002 cur_state = attr_mask & IB_QP_CUR_STATE ?
1003 attr->cur_qp_state : qp->state;
1004 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1005
1006 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
1007 attr_mask, link))
1008 goto inval;
1009
1010 if (rdi->driver_f.check_modify_qp &&
1011 rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
1012 goto inval;
1013
1014 if (attr_mask & IB_QP_AV) {
1015 if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
1016 goto inval;
1017 if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
1018 goto inval;
1019 }
1020
1021 if (attr_mask & IB_QP_ALT_PATH) {
1022 if (attr->alt_ah_attr.dlid >=
1023 be16_to_cpu(IB_MULTICAST_LID_BASE))
1024 goto inval;
1025 if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
1026 goto inval;
1027 if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
1028 goto inval;
1029 }
1030
1031 if (attr_mask & IB_QP_PKEY_INDEX)
1032 if (attr->pkey_index >= rvt_get_npkeys(rdi))
1033 goto inval;
1034
1035 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1036 if (attr->min_rnr_timer > 31)
1037 goto inval;
1038
1039 if (attr_mask & IB_QP_PORT)
1040 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1041 qp->ibqp.qp_type == IB_QPT_GSI ||
1042 attr->port_num == 0 ||
1043 attr->port_num > ibqp->device->phys_port_cnt)
1044 goto inval;
1045
1046 if (attr_mask & IB_QP_DEST_QPN)
1047 if (attr->dest_qp_num > RVT_QPN_MASK)
1048 goto inval;
1049
1050 if (attr_mask & IB_QP_RETRY_CNT)
1051 if (attr->retry_cnt > 7)
1052 goto inval;
1053
1054 if (attr_mask & IB_QP_RNR_RETRY)
1055 if (attr->rnr_retry > 7)
1056 goto inval;
1057
1058 /*
1059	 * Don't allow invalid path_mtu values. It is OK to set it greater
1060	 * than the active mtu (or even the max_cap, if we have tuned
1061	 * that to a small mtu). We'll set qp->path_mtu
1062	 * to the lesser of the requested attribute mtu and the active mtu
1063	 * for packetizing messages.
1064	 * Note that the QP port has to be set in INIT and MTU in RTR.
1065 */
1066 if (attr_mask & IB_QP_PATH_MTU) {
1067 pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
1068 if (pmtu < 0)
1069 goto inval;
1070 }
1071
1072 if (attr_mask & IB_QP_PATH_MIG_STATE) {
1073 if (attr->path_mig_state == IB_MIG_REARM) {
1074 if (qp->s_mig_state == IB_MIG_ARMED)
1075 goto inval;
1076 if (new_state != IB_QPS_RTS)
1077 goto inval;
1078 } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
1079 if (qp->s_mig_state == IB_MIG_REARM)
1080 goto inval;
1081 if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
1082 goto inval;
1083 if (qp->s_mig_state == IB_MIG_ARMED)
1084 mig = 1;
1085 } else {
1086 goto inval;
1087 }
1088 }
1089
1090 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1091 if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
1092 goto inval;
1093
1094 switch (new_state) {
1095 case IB_QPS_RESET:
1096 if (qp->state != IB_QPS_RESET)
1097 rvt_reset_qp(rdi, qp, ibqp->qp_type);
1098 break;
1099
1100 case IB_QPS_RTR:
1101 /* Allow event to re-trigger if QP set to RTR more than once */
1102 qp->r_flags &= ~RVT_R_COMM_EST;
1103 qp->state = new_state;
1104 break;
1105
1106 case IB_QPS_SQD:
1107 qp->s_draining = qp->s_last != qp->s_cur;
1108 qp->state = new_state;
1109 break;
1110
1111 case IB_QPS_SQE:
1112 if (qp->ibqp.qp_type == IB_QPT_RC)
1113 goto inval;
1114 qp->state = new_state;
1115 break;
1116
1117 case IB_QPS_ERR:
1118 lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1119 break;
1120
1121 default:
1122 qp->state = new_state;
1123 break;
1124 }
1125
1126 if (attr_mask & IB_QP_PKEY_INDEX)
1127 qp->s_pkey_index = attr->pkey_index;
1128
1129 if (attr_mask & IB_QP_PORT)
1130 qp->port_num = attr->port_num;
1131
1132 if (attr_mask & IB_QP_DEST_QPN)
1133 qp->remote_qpn = attr->dest_qp_num;
1134
1135 if (attr_mask & IB_QP_SQ_PSN) {
1136 qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
1137 qp->s_psn = qp->s_next_psn;
1138 qp->s_sending_psn = qp->s_next_psn;
1139 qp->s_last_psn = qp->s_next_psn - 1;
1140 qp->s_sending_hpsn = qp->s_last_psn;
1141 }
1142
1143 if (attr_mask & IB_QP_RQ_PSN)
1144 qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;
1145
1146 if (attr_mask & IB_QP_ACCESS_FLAGS)
1147 qp->qp_access_flags = attr->qp_access_flags;
1148
1149 if (attr_mask & IB_QP_AV) {
1150 qp->remote_ah_attr = attr->ah_attr;
1151 qp->s_srate = attr->ah_attr.static_rate;
1152 qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
1153 }
1154
1155 if (attr_mask & IB_QP_ALT_PATH) {
1156 qp->alt_ah_attr = attr->alt_ah_attr;
1157 qp->s_alt_pkey_index = attr->alt_pkey_index;
1158 }
1159
1160 if (attr_mask & IB_QP_PATH_MIG_STATE) {
1161 qp->s_mig_state = attr->path_mig_state;
1162 if (mig) {
1163 qp->remote_ah_attr = qp->alt_ah_attr;
1164 qp->port_num = qp->alt_ah_attr.port_num;
1165 qp->s_pkey_index = qp->s_alt_pkey_index;
1166 }
1167 }
1168
1169 if (attr_mask & IB_QP_PATH_MTU) {
1170 qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
1171 qp->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
1172 qp->log_pmtu = ilog2(qp->pmtu);
1173 }
1174
1175 if (attr_mask & IB_QP_RETRY_CNT) {
1176 qp->s_retry_cnt = attr->retry_cnt;
1177 qp->s_retry = attr->retry_cnt;
1178 }
1179
1180 if (attr_mask & IB_QP_RNR_RETRY) {
1181 qp->s_rnr_retry_cnt = attr->rnr_retry;
1182 qp->s_rnr_retry = attr->rnr_retry;
1183 }
1184
1185 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1186 qp->r_min_rnr_timer = attr->min_rnr_timer;
1187
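	/*
	 * Worked example for the conversion below: the IBTA local ACK
	 * timeout is 4.096 usec * 2^timeout, so attr->timeout == 14 yields
	 * 4096 * 2^14 / 1000 == 67108 usec, roughly 67 msec worth of
	 * jiffies.
	 */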
1188 if (attr_mask & IB_QP_TIMEOUT) {
1189 qp->timeout = attr->timeout;
1190 qp->timeout_jiffies =
1191 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
1192 1000UL);
1193 }
1194
1195 if (attr_mask & IB_QP_QKEY)
1196 qp->qkey = attr->qkey;
1197
1198 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1199 qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
1200
1201 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
1202 qp->s_max_rd_atomic = attr->max_rd_atomic;
1203
1204 if (rdi->driver_f.modify_qp)
1205 rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);
1206
1207 spin_unlock(&qp->s_lock);
1208 spin_unlock(&qp->s_hlock);
1209 spin_unlock_irq(&qp->r_lock);
1210
1211 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1212 rvt_insert_qp(rdi, qp);
1213
1214 if (lastwqe) {
1215 ev.device = qp->ibqp.device;
1216 ev.element.qp = &qp->ibqp;
1217 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1218 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1219 }
1220 if (mig) {
1221 ev.device = qp->ibqp.device;
1222 ev.element.qp = &qp->ibqp;
1223 ev.event = IB_EVENT_PATH_MIG;
1224 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1225 }
1226 return 0;
1227
1228 inval:
1229 spin_unlock(&qp->s_lock);
1230 spin_unlock(&qp->s_hlock);
1231 spin_unlock_irq(&qp->r_lock);
1232 return -EINVAL;
1233 }
1234
1235 /** rvt_free_qpn - Free a qpn from the bit map
1236 * @qpt: QP table
1237 * @qpn: queue pair number to free
1238 */
1239 static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
1240 {
1241 struct rvt_qpn_map *map;
1242
1243 map = qpt->map + qpn / RVT_BITS_PER_PAGE;
1244 if (map->page)
1245 clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
1246 }
1247
1248 /**
1249 * rvt_destroy_qp - destroy a queue pair
1250 * @ibqp: the queue pair to destroy
1251 *
1252 * Note that this can be called while the QP is actively sending or
1253 * receiving!
1254 *
1255 * Return: 0 on success.
1256 */
1257 int rvt_destroy_qp(struct ib_qp *ibqp)
1258 {
1259 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1260 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1261
1262 spin_lock_irq(&qp->r_lock);
1263 spin_lock(&qp->s_hlock);
1264 spin_lock(&qp->s_lock);
1265 rvt_reset_qp(rdi, qp, ibqp->qp_type);
1266 spin_unlock(&qp->s_lock);
1267 spin_unlock(&qp->s_hlock);
1268 spin_unlock_irq(&qp->r_lock);
1269
1270 /* qpn is now available for use again */
1271 rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1272
1273 spin_lock(&rdi->n_qps_lock);
1274 rdi->n_qps_allocated--;
1275 if (qp->ibqp.qp_type == IB_QPT_RC) {
1276 rdi->n_rc_qps--;
1277 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
1278 }
1279 spin_unlock(&rdi->n_qps_lock);
1280
1281 if (qp->ip)
1282 kref_put(&qp->ip->ref, rvt_release_mmap_info);
1283 else
1284 vfree(qp->r_rq.wq);
1285 vfree(qp->s_wq);
1286 rdi->driver_f.qp_priv_free(rdi, qp);
1287 kfree(qp);
1288 return 0;
1289 }
1290
1291 /**
1292	 * rvt_query_qp - query an ibqp
1293 * @ibqp: IB qp to query
1294 * @attr: attr struct to fill in
1295 * @attr_mask: attr mask ignored
1296 * @init_attr: struct to fill in
1297 *
1298 * Return: always 0
1299 */
1300 int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1301 int attr_mask, struct ib_qp_init_attr *init_attr)
1302 {
1303 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1304 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1305
1306 attr->qp_state = qp->state;
1307 attr->cur_qp_state = attr->qp_state;
1308 attr->path_mtu = qp->path_mtu;
1309 attr->path_mig_state = qp->s_mig_state;
1310 attr->qkey = qp->qkey;
1311 attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
1312 attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
1313 attr->dest_qp_num = qp->remote_qpn;
1314 attr->qp_access_flags = qp->qp_access_flags;
1315 attr->cap.max_send_wr = qp->s_size - 1;
1316 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
1317 attr->cap.max_send_sge = qp->s_max_sge;
1318 attr->cap.max_recv_sge = qp->r_rq.max_sge;
1319 attr->cap.max_inline_data = 0;
1320 attr->ah_attr = qp->remote_ah_attr;
1321 attr->alt_ah_attr = qp->alt_ah_attr;
1322 attr->pkey_index = qp->s_pkey_index;
1323 attr->alt_pkey_index = qp->s_alt_pkey_index;
1324 attr->en_sqd_async_notify = 0;
1325 attr->sq_draining = qp->s_draining;
1326 attr->max_rd_atomic = qp->s_max_rd_atomic;
1327 attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
1328 attr->min_rnr_timer = qp->r_min_rnr_timer;
1329 attr->port_num = qp->port_num;
1330 attr->timeout = qp->timeout;
1331 attr->retry_cnt = qp->s_retry_cnt;
1332 attr->rnr_retry = qp->s_rnr_retry_cnt;
1333 attr->alt_port_num = qp->alt_ah_attr.port_num;
1334 attr->alt_timeout = qp->alt_timeout;
1335
1336 init_attr->event_handler = qp->ibqp.event_handler;
1337 init_attr->qp_context = qp->ibqp.qp_context;
1338 init_attr->send_cq = qp->ibqp.send_cq;
1339 init_attr->recv_cq = qp->ibqp.recv_cq;
1340 init_attr->srq = qp->ibqp.srq;
1341 init_attr->cap = attr->cap;
1342 if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
1343 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
1344 else
1345 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
1346 init_attr->qp_type = qp->ibqp.qp_type;
1347 init_attr->port_num = qp->port_num;
1348 return 0;
1349 }
1350
1351 /**
1352 * rvt_post_receive - post a receive on a QP
1353 * @ibqp: the QP to post the receive on
1354 * @wr: the WR to post
1355 * @bad_wr: the first bad WR is put here
1356 *
1357 * This may be called from interrupt context.
1358 *
1359 * Return: 0 on success otherwise errno
1360 */
1361 int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1362 struct ib_recv_wr **bad_wr)
1363 {
1364 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1365 struct rvt_rwq *wq = qp->r_rq.wq;
1366 unsigned long flags;
1367 int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
1368 !qp->ibqp.srq;
1369
1370 /* Check that state is OK to post receive. */
1371 if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
1372 *bad_wr = wr;
1373 return -EINVAL;
1374 }
1375
1376 for (; wr; wr = wr->next) {
1377 struct rvt_rwqe *wqe;
1378 u32 next;
1379 int i;
1380
1381 if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
1382 *bad_wr = wr;
1383 return -EINVAL;
1384 }
1385
1386 spin_lock_irqsave(&qp->r_rq.lock, flags);
1387 next = wq->head + 1;
1388 if (next >= qp->r_rq.size)
1389 next = 0;
1390 if (next == wq->tail) {
1391 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1392 *bad_wr = wr;
1393 return -ENOMEM;
1394 }
1395 if (unlikely(qp_err_flush)) {
1396 struct ib_wc wc;
1397
1398 memset(&wc, 0, sizeof(wc));
1399 wc.qp = &qp->ibqp;
1400 wc.opcode = IB_WC_RECV;
1401 wc.wr_id = wr->wr_id;
1402 wc.status = IB_WC_WR_FLUSH_ERR;
1403 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1404 } else {
1405 wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
1406 wqe->wr_id = wr->wr_id;
1407 wqe->num_sge = wr->num_sge;
1408 for (i = 0; i < wr->num_sge; i++)
1409 wqe->sg_list[i] = wr->sg_list[i];
1410 /*
1411 * Make sure queue entry is written
1412 * before the head index.
1413 */
1414 smp_wmb();
1415 wq->head = next;
1416 }
1417 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1418 }
1419 return 0;
1420 }
1421
1422 /**
1423 * qp_get_savail - return number of avail send entries
1424 *
1425	 * @qp: the qp
1426 *
1427 * This assumes the s_hlock is held but the s_last
1428 * qp variable is uncontrolled.
1429 */
1430 static inline u32 qp_get_savail(struct rvt_qp *qp)
1431 {
1432 u32 slast;
1433 u32 ret;
1434
1435 smp_read_barrier_depends(); /* see rc.c */
1436 slast = ACCESS_ONCE(qp->s_last);
1437 if (qp->s_head >= slast)
1438 ret = qp->s_size - (qp->s_head - slast);
1439 else
1440 ret = slast - qp->s_head;
1441 return ret - 1;
1442 }
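/*
 * Worked example: with s_size == 16, s_head == 3 and s_last == 10 the
 * ring has 10 - 3 - 1 == 6 usable entries; with s_head == 12 and
 * s_last == 10 it has 16 - (12 - 10) - 1 == 13. One slot is always
 * sacrificed to distinguish a full ring from an empty one.
 */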
1443
1444 /**
1445 * rvt_post_one_wr - post one RC, UC, or UD send work request
1446 * @qp: the QP to post on
1447 * @wr: the work request to send
1448 */
1449 static int rvt_post_one_wr(struct rvt_qp *qp,
1450 struct ib_send_wr *wr,
1451 int *call_send)
1452 {
1453 struct rvt_swqe *wqe;
1454 u32 next;
1455 int i;
1456 int j;
1457 int acc;
1458 struct rvt_lkey_table *rkt;
1459 struct rvt_pd *pd;
1460 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1461 u8 log_pmtu;
1462 int ret;
1463
1464 /* IB spec says that num_sge == 0 is OK. */
1465 if (unlikely(wr->num_sge > qp->s_max_sge))
1466 return -EINVAL;
1467
1468 /*
1469	 * Don't allow RDMA reads or atomic operations on UC, or
1470	 * undefined operations on any QP type.
1471 * Make sure buffer is large enough to hold the result for atomics.
1472 */
1473 if (qp->ibqp.qp_type == IB_QPT_UC) {
1474 if ((unsigned)wr->opcode >= IB_WR_RDMA_READ)
1475 return -EINVAL;
1476 } else if (qp->ibqp.qp_type != IB_QPT_RC) {
1477 /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
1478 if (wr->opcode != IB_WR_SEND &&
1479 wr->opcode != IB_WR_SEND_WITH_IMM)
1480 return -EINVAL;
1481 /* Check UD destination address PD */
1482 if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
1483 return -EINVAL;
1484 } else if ((unsigned)wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
1485 return -EINVAL;
1486 } else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
1487 (wr->num_sge == 0 ||
1488 wr->sg_list[0].length < sizeof(u64) ||
1489 wr->sg_list[0].addr & (sizeof(u64) - 1))) {
1490 return -EINVAL;
1491 } else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
1492 return -EINVAL;
1493 }
1494 /* check for avail */
1495 if (unlikely(!qp->s_avail)) {
1496 qp->s_avail = qp_get_savail(qp);
1497 if (WARN_ON(qp->s_avail > (qp->s_size - 1)))
1498 rvt_pr_err(rdi,
1499 "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
1500 qp->ibqp.qp_num, qp->s_size, qp->s_avail,
1501 qp->s_head, qp->s_tail, qp->s_cur,
1502 qp->s_acked, qp->s_last);
1503 if (!qp->s_avail)
1504 return -ENOMEM;
1505 }
1506 next = qp->s_head + 1;
1507 if (next >= qp->s_size)
1508 next = 0;
1509
1510 rkt = &rdi->lkey_table;
1511 pd = ibpd_to_rvtpd(qp->ibqp.pd);
1512 wqe = rvt_get_swqe_ptr(qp, qp->s_head);
1513
1514 if (qp->ibqp.qp_type != IB_QPT_UC &&
1515 qp->ibqp.qp_type != IB_QPT_RC)
1516 memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
1517 else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
1518 wr->opcode == IB_WR_RDMA_WRITE ||
1519 wr->opcode == IB_WR_RDMA_READ)
1520 memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
1521 else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1522 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
1523 memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
1524 else
1525 memcpy(&wqe->wr, wr, sizeof(wqe->wr));
1526
1527 wqe->length = 0;
1528 j = 0;
1529 if (wr->num_sge) {
1530 acc = wr->opcode >= IB_WR_RDMA_READ ?
1531 IB_ACCESS_LOCAL_WRITE : 0;
1532 for (i = 0; i < wr->num_sge; i++) {
1533 u32 length = wr->sg_list[i].length;
1534 int ok;
1535
1536 if (length == 0)
1537 continue;
1538 ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
1539 &wr->sg_list[i], acc);
1540 if (!ok) {
1541 ret = -EINVAL;
1542 goto bail_inval_free;
1543 }
1544 wqe->length += length;
1545 j++;
1546 }
1547 wqe->wr.num_sge = j;
1548 }
1549
1550 /* general part of wqe valid - allow for driver checks */
1551 if (rdi->driver_f.check_send_wqe) {
1552 ret = rdi->driver_f.check_send_wqe(qp, wqe);
1553 if (ret < 0)
1554 goto bail_inval_free;
1555 if (ret)
1556 *call_send = ret;
1557 }
1558
1559 log_pmtu = qp->log_pmtu;
1560 if (qp->ibqp.qp_type != IB_QPT_UC &&
1561 qp->ibqp.qp_type != IB_QPT_RC) {
1562 struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah);
1563
1564 log_pmtu = ah->log_pmtu;
1565 atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
1566 }
1567
1568 wqe->ssn = qp->s_ssn++;
1569 wqe->psn = qp->s_next_psn;
1570 wqe->lpsn = wqe->psn +
1571 (wqe->length ? ((wqe->length - 1) >> log_pmtu) : 0);
1572 qp->s_next_psn = wqe->lpsn + 1;
1573 trace_rvt_post_one_wr(qp, wqe);
1574 smp_wmb(); /* see request builders */
1575 qp->s_avail--;
1576 qp->s_head = next;
1577
1578 return 0;
1579
1580 bail_inval_free:
1581 /* release mr holds */
1582 while (j) {
1583 struct rvt_sge *sge = &wqe->sg_list[--j];
1584
1585 rvt_put_mr(sge->mr);
1586 }
1587 return ret;
1588 }
1589
1590 /**
1591 * rvt_post_send - post a send on a QP
1592 * @ibqp: the QP to post the send on
1593 * @wr: the list of work requests to post
1594 * @bad_wr: the first bad WR is put here
1595 *
1596 * This may be called from interrupt context.
1597 *
1598 * Return: 0 on success else errno
1599 */
1600 int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1601 struct ib_send_wr **bad_wr)
1602 {
1603 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1604 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1605 unsigned long flags = 0;
1606 int call_send;
1607 unsigned nreq = 0;
1608 int err = 0;
1609
1610 spin_lock_irqsave(&qp->s_hlock, flags);
1611
1612 /*
1613 * Ensure QP state is such that we can send. If not bail out early,
1614 * there is no need to do this every time we post a send.
1615 */
1616 if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
1617 spin_unlock_irqrestore(&qp->s_hlock, flags);
1618 return -EINVAL;
1619 }
1620
1621 /*
1622 * If the send queue is empty, and we only have a single WR then just go
1623 * ahead and kick the send engine into gear. Otherwise we will always
1624 * just schedule the send to happen later.
1625 */
1626 call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;
1627
1628 for (; wr; wr = wr->next) {
1629 err = rvt_post_one_wr(qp, wr, &call_send);
1630 if (unlikely(err)) {
1631 *bad_wr = wr;
1632 goto bail;
1633 }
1634 nreq++;
1635 }
1636 bail:
1637 spin_unlock_irqrestore(&qp->s_hlock, flags);
1638 if (nreq) {
1639 if (call_send)
1640 rdi->driver_f.do_send(qp);
1641 else
1642 rdi->driver_f.schedule_send_no_lock(qp);
1643 }
1644 return err;
1645 }
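/*
 * Illustrative sketch of a consumer posting through the core verbs
 * entry point that lands here (buffer, length, and lkey are made up):
 *
 *	struct ib_sge sge = {
 *		.addr = (u64)dma_addr,
 *		.length = len,
 *		.lkey = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.opcode = IB_WR_SEND,
 *		.num_sge = 1,
 *		.sg_list = &sge,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr, &bad_wr);
 */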
1646
1647 /**
1648 * rvt_post_srq_receive - post a receive on a shared receive queue
1649 * @ibsrq: the SRQ to post the receive on
1650 * @wr: the list of work requests to post
1651 * @bad_wr: A pointer to the first WR to cause a problem is put here
1652 *
1653 * This may be called from interrupt context.
1654 *
1655 * Return: 0 on success else errno
1656 */
1657 int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
1658 struct ib_recv_wr **bad_wr)
1659 {
1660 struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
1661 struct rvt_rwq *wq;
1662 unsigned long flags;
1663
1664 for (; wr; wr = wr->next) {
1665 struct rvt_rwqe *wqe;
1666 u32 next;
1667 int i;
1668
1669 if ((unsigned)wr->num_sge > srq->rq.max_sge) {
1670 *bad_wr = wr;
1671 return -EINVAL;
1672 }
1673
1674 spin_lock_irqsave(&srq->rq.lock, flags);
1675 wq = srq->rq.wq;
1676 next = wq->head + 1;
1677 if (next >= srq->rq.size)
1678 next = 0;
1679 if (next == wq->tail) {
1680 spin_unlock_irqrestore(&srq->rq.lock, flags);
1681 *bad_wr = wr;
1682 return -ENOMEM;
1683 }
1684
1685 wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
1686 wqe->wr_id = wr->wr_id;
1687 wqe->num_sge = wr->num_sge;
1688 for (i = 0; i < wr->num_sge; i++)
1689 wqe->sg_list[i] = wr->sg_list[i];
1690 /* Make sure queue entry is written before the head index. */
1691 smp_wmb();
1692 wq->head = next;
1693 spin_unlock_irqrestore(&srq->rq.lock, flags);
1694 }
1695 return 0;
1696 }