drivers/staging/rdma/hfi1/qp.c

/*
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "sdma.h"

unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
        struct sdma_engine *sde,
        struct iowait *wait,
        struct sdma_txreq *stx,
        unsigned seq);
static void iowait_wakeup(struct iowait *wait, int reason);

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
                              struct rvt_qpn_map *map, unsigned off)
{
        return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

/*
 * Convert the AETH credit code into the number of credits.
 */
static const u16 credit_table[31] = {
        0,                      /* 0 */
        1,                      /* 1 */
        2,                      /* 2 */
        3,                      /* 3 */
        4,                      /* 4 */
        6,                      /* 5 */
        8,                      /* 6 */
        12,                     /* 7 */
        16,                     /* 8 */
        24,                     /* 9 */
        32,                     /* A */
        48,                     /* B */
        64,                     /* C */
        96,                     /* D */
        128,                    /* E */
        192,                    /* F */
        256,                    /* 10 */
        384,                    /* 11 */
        512,                    /* 12 */
        768,                    /* 13 */
        1024,                   /* 14 */
        1536,                   /* 15 */
        2048,                   /* 16 */
        3072,                   /* 17 */
        4096,                   /* 18 */
        6144,                   /* 19 */
        8192,                   /* 1A */
        12288,                  /* 1B */
        16384,                  /* 1C */
        24576,                  /* 1D */
        32768                   /* 1E */
};

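/*
 * The credit code is a 5-bit quantity and the table is roughly
 * geometric: each pair of steps about doubles the credit count, so 31
 * codes cover 0..32768 RWQEs.  For example, code 0x9 encodes 24
 * credits and code 0xD encodes 96.
 */
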
static void flush_tx_list(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        while (!list_empty(&priv->s_iowait.tx_head)) {
                struct sdma_txreq *tx;

                tx = list_first_entry(
                        &priv->s_iowait.tx_head,
                        struct sdma_txreq,
                        list);
                list_del_init(&tx->list);
                hfi1_put_txreq(
                        container_of(tx, struct verbs_txreq, txreq));
        }
}

static void flush_iowait(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
        unsigned long flags;

        write_seqlock_irqsave(&dev->iowait_lock, flags);
        if (!list_empty(&priv->s_iowait.list)) {
                list_del_init(&priv->s_iowait.list);
                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);
        }
        write_sequnlock_irqrestore(&dev->iowait_lock, flags);
}

static inline int opa_mtu_enum_to_int(int mtu)
{
        switch (mtu) {
        case OPA_MTU_8192:  return 8192;
        case OPA_MTU_10240: return 10240;
        default:            return -1;
        }
}

/**
 * This function is what we would push to the core layer if we wanted to be a
 * "first class citizen".  Instead we hide this here and rely on Verbs ULPs
 * to blindly pass the MTU enum value from the PathRecord to us.
 *
 * The actual flag used to determine "8k MTU" will change and is currently
 * unknown.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
        int val = opa_mtu_enum_to_int((int)mtu);

        if (val > 0)
                return val;
        return ib_mtu_enum_to_int(mtu);
}

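/*
 * For example, a PathRecord carrying OPA_MTU_10240 decodes to 10240
 * bytes via opa_mtu_enum_to_int() above, while a standard IB_MTU_4096
 * falls through to ib_mtu_enum_to_int() and decodes to 4096.
 */
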
int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
                         int attr_mask, struct ib_udata *udata)
{
        struct ib_qp *ibqp = &qp->ibqp;
        struct hfi1_ibdev *dev = to_idev(ibqp->device);
        struct hfi1_devdata *dd = dd_from_dev(dev);
        u8 sc;

        if (attr_mask & IB_QP_AV) {
                sc = ah_to_sc(ibqp->device, &attr->ah_attr);
                if (sc == 0xf)
                        return -EINVAL;

                if (!qp_to_sdma_engine(qp, sc) &&
                    dd->flags & HFI1_HAS_SEND_DMA)
                        return -EINVAL;
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
                if (sc == 0xf)
                        return -EINVAL;

                if (!qp_to_sdma_engine(qp, sc) &&
                    dd->flags & HFI1_HAS_SEND_DMA)
                        return -EINVAL;
        }

        return 0;
}

void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
                    int attr_mask, struct ib_udata *udata)
{
        struct ib_qp *ibqp = &qp->ibqp;
        struct hfi1_qp_priv *priv = qp->priv;

        if (attr_mask & IB_QP_AV) {
                priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
                priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
        }

        if (attr_mask & IB_QP_PATH_MIG_STATE &&
            attr->path_mig_state == IB_MIG_MIGRATED &&
            qp->s_mig_state == IB_MIG_ARMED) {
                qp->s_flags |= RVT_S_AHG_CLEAR;
                priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
                priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
        }
}

int hfi1_check_send_wr(struct rvt_qp *qp, struct ib_send_wr *wr)
{
        struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        struct rvt_ah *ah = ibah_to_rvtah(ud_wr(wr)->ah);

        if (qp->ibqp.qp_type != IB_QPT_RC &&
            qp->ibqp.qp_type != IB_QPT_UC &&
            qp->ibqp.qp_type != IB_QPT_SMI &&
            ibp->sl_to_sc[ah->attr.sl] == 0xf) {
                return -EINVAL;
        }
        return 0;
}

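/*
 * As with the sc == 0xf tests in hfi1_check_modify_qp() above, a work
 * request whose SL maps to SC15 is rejected for UD/GSI QPs; SC15 is
 * reserved for management (VL15) traffic.
 */
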
/**
 * hfi1_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 hfi1_compute_aeth(struct rvt_qp *qp)
{
        u32 aeth = qp->r_msn & HFI1_MSN_MASK;

        if (qp->ibqp.srq) {
                /*
                 * Shared receive queues don't generate credits.
                 * Set the credit field to the invalid value.
                 */
                aeth |= HFI1_AETH_CREDIT_INVAL << HFI1_AETH_CREDIT_SHIFT;
        } else {
                u32 min, max, x;
                u32 credits;
                struct rvt_rwq *wq = qp->r_rq.wq;
                u32 head;
                u32 tail;

                /* sanity check pointers before trusting them */
                head = wq->head;
                if (head >= qp->r_rq.size)
                        head = 0;
                tail = wq->tail;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                /*
                 * Compute the number of credits available (RWQEs).
                 * There is a small chance that the pair of reads is
                 * not atomic, which is OK, since the fuzziness is
                 * resolved as further ACKs go out.
                 */
                credits = head - tail;
                if ((int)credits < 0)
                        credits += qp->r_rq.size;
                /*
                 * Binary search the credit table to find the code to
                 * use.
                 */
                min = 0;
                max = 31;
                for (;;) {
                        x = (min + max) / 2;
                        if (credit_table[x] == credits)
                                break;
                        if (credit_table[x] > credits)
                                max = x;
                        else if (min == x)
                                break;
                        else
                                min = x;
                }
                aeth |= x << HFI1_AETH_CREDIT_SHIFT;
        }
        return cpu_to_be32(aeth);
}

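/*
 * Worked example: with 100 free RWQEs the binary search above settles
 * on x = 0xD (credit_table[0xD] = 96), the largest entry that does not
 * exceed the actual count, so the receiver advertises slightly fewer
 * credits than it has rather than over-promising.
 */
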
/**
 * hfi1_get_credit - handle the AETH credit field of an incoming ACK
 * @qp: the qp the credit update applies to
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void hfi1_get_credit(struct rvt_qp *qp, u32 aeth)
{
        u32 credit = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;

        /*
         * If the credit is invalid, we can send
         * as many packets as we like.  Otherwise, we have to
         * honor the credit field.
         */
        if (credit == HFI1_AETH_CREDIT_INVAL) {
                if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
                        qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
                        if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
                                qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
                                hfi1_schedule_send(qp);
                        }
                }
        } else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
                /* Compute new LSN (i.e., MSN + credit) */
                credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK;
                if (cmp_msn(credit, qp->s_lsn) > 0) {
                        qp->s_lsn = credit;
                        if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
                                qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
                                hfi1_schedule_send(qp);
                        }
                }
        }
}

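/*
 * For example, an AETH whose credit field decodes to 0x9 grants
 * credit_table[0x9] = 24 RWQEs beyond the acknowledged MSN; the LSN
 * advances to MSN + 24 and a sender blocked on RVT_S_WAIT_SSN_CREDIT
 * is rescheduled.
 */
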
void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
        unsigned long flags;

        spin_lock_irqsave(&qp->s_lock, flags);
        if (qp->s_flags & flag) {
                qp->s_flags &= ~flag;
                trace_hfi1_qpwakeup(qp, flag);
                hfi1_schedule_send(qp);
        }
        spin_unlock_irqrestore(&qp->s_lock, flags);
        /* Notify hfi1_destroy_qp() if it is waiting. */
        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}

static int iowait_sleep(
        struct sdma_engine *sde,
        struct iowait *wait,
        struct sdma_txreq *stx,
        unsigned seq)
{
        struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
        struct rvt_qp *qp;
        struct hfi1_qp_priv *priv;
        unsigned long flags;
        int ret = 0;
        struct hfi1_ibdev *dev;

        qp = tx->qp;
        priv = qp->priv;

        spin_lock_irqsave(&qp->s_lock, flags);
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
                /*
                 * If we couldn't queue the DMA request, save the info
                 * and try again later rather than destroying the
                 * buffer and undoing the side effects of the copy.
                 */
                /* Make a common routine? */
                dev = &sde->dd->verbs_dev;
                list_add_tail(&stx->list, &wait->tx_head);
                write_seqlock(&dev->iowait_lock);
                if (sdma_progress(sde, seq, stx))
                        goto eagain;
                if (list_empty(&priv->s_iowait.list)) {
                        struct hfi1_ibport *ibp =
                                to_iport(qp->ibqp.device, qp->port_num);

                        ibp->rvp.n_dmawait++;
                        qp->s_flags |= RVT_S_WAIT_DMA_DESC;
                        list_add_tail(&priv->s_iowait.list, &sde->dmawait);
                        trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
                        atomic_inc(&qp->refcount);
                }
                write_sequnlock(&dev->iowait_lock);
                qp->s_flags &= ~RVT_S_BUSY;
                spin_unlock_irqrestore(&qp->s_lock, flags);
                ret = -EBUSY;
        } else {
                spin_unlock_irqrestore(&qp->s_lock, flags);
                hfi1_put_txreq(tx);
        }
        return ret;
eagain:
        write_sequnlock(&dev->iowait_lock);
        spin_unlock_irqrestore(&qp->s_lock, flags);
        list_del_init(&stx->list);
        return -EAGAIN;
}

static void iowait_wakeup(struct iowait *wait, int reason)
{
        struct rvt_qp *qp = iowait_to_qp(wait);

        WARN_ON(reason != SDMA_AVAIL_REASON);
        hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}

/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp or NULL for SMI type qp.
 */
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{
        struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
        struct sdma_engine *sde;

        if (!(dd->flags & HFI1_HAS_SEND_DMA))
                return NULL;
        switch (qp->ibqp.qp_type) {
        case IB_QPT_SMI:
                return NULL;
        default:
                break;
        }
        sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
        return sde;
}

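/*
 * Callers cache the returned engine: hfi1_modify_qp() and
 * hfi1_migrate_qp() in this file both refresh priv->s_sde via
 * qp_to_sdma_engine(qp, priv->s_sc) whenever the SC changes.
 */
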
struct qp_iter {
        struct hfi1_ibdev *dev;
        struct rvt_qp *qp;
        int specials;
        int n;
};

struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
{
        struct qp_iter *iter;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return NULL;

        iter->dev = dev;
        iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;
        if (qp_iter_next(iter)) {
                kfree(iter);
                return NULL;
        }

        return iter;
}

int qp_iter_next(struct qp_iter *iter)
{
        struct hfi1_ibdev *dev = iter->dev;
        int n = iter->n;
        int ret = 1;
        struct rvt_qp *pqp = iter->qp;
        struct rvt_qp *qp;

        /*
         * The approach is to consider the special qps
         * as additional table entries before the
         * real hash table.  Since the qp code sets
         * the qp->next hash link to NULL, this works just fine.
         *
         * iter->specials is 2 * # ports
         *
         * n = 0..iter->specials is the special qp indices
         *
         * n = iter->specials..dev->rdi.qp_dev->qp_table_size+iter->specials are
         * the potential hash bucket entries
         */
        for (; n < dev->rdi.qp_dev->qp_table_size + iter->specials; n++) {
                if (pqp) {
                        qp = rcu_dereference(pqp->next);
                } else {
                        if (n < iter->specials) {
                                struct hfi1_pportdata *ppd;
                                struct hfi1_ibport *ibp;
                                int pidx;

                                pidx = n % dev->rdi.ibdev.phys_port_cnt;
                                ppd = &dd_from_dev(dev)->pport[pidx];
                                ibp = &ppd->ibport_data;

                                if (!(n & 1))
                                        qp = rcu_dereference(ibp->rvp.qp[0]);
                                else
                                        qp = rcu_dereference(ibp->rvp.qp[1]);
                        } else {
                                qp = rcu_dereference(
                                        dev->rdi.qp_dev->qp_table[
                                                (n - iter->specials)]);
                        }
                }
                pqp = qp;
                if (qp) {
                        iter->qp = qp;
                        iter->n = n;
                        return 0;
                }
        }
        return ret;
}

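/*
 * Usage sketch for the iterator (hypothetical caller; the real
 * consumer is the hfi1 debugfs seq_file code).  Since the QP pointers
 * are rcu_dereference()'d, the walk must run under rcu_read_lock():
 *
 *      rcu_read_lock();
 *      iter = qp_iter_init(dev);       (positions at the first QP)
 *      while (iter) {
 *              qp_iter_print(s, iter);
 *              if (qp_iter_next(iter))
 *                      break;
 *      }
 *      rcu_read_unlock();
 *      kfree(iter);
 */
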
static const char * const qp_type_str[] = {
        "SMI", "GSI", "RC", "UC", "UD",
};

static int qp_idle(struct rvt_qp *qp)
{
        return
                qp->s_last == qp->s_acked &&
                qp->s_acked == qp->s_cur &&
                qp->s_cur == qp->s_tail &&
                qp->s_tail == qp->s_head;
}

void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
{
        struct rvt_swqe *wqe;
        struct rvt_qp *qp = iter->qp;
        struct hfi1_qp_priv *priv = qp->priv;
        struct sdma_engine *sde;

        sde = qp_to_sdma_engine(qp, priv->s_sc);
        wqe = rvt_get_swqe_ptr(qp, qp->s_last);
        seq_printf(s,
                   "N %d %s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x SL %u MTU %u %u %u %u SDE %p,%u\n",
                   iter->n,
                   qp_idle(qp) ? "I" : "B",
                   qp->ibqp.qp_num,
                   atomic_read(&qp->refcount),
                   qp_type_str[qp->ibqp.qp_type],
                   qp->state,
                   wqe ? wqe->wr.opcode : 0,
                   qp->s_hdrwords,
                   qp->s_flags,
                   atomic_read(&priv->s_iowait.sdma_busy),
                   !list_empty(&priv->s_iowait.list),
                   qp->timeout,
                   wqe ? wqe->ssn : 0,
                   qp->s_lsn,
                   qp->s_last_psn,
                   qp->s_psn, qp->s_next_psn,
                   qp->s_sending_psn, qp->s_sending_hpsn,
                   qp->s_last, qp->s_acked, qp->s_cur,
                   qp->s_tail, qp->s_head, qp->s_size,
                   qp->remote_qpn,
                   qp->remote_ah_attr.dlid,
                   qp->remote_ah_attr.sl,
                   qp->pmtu,
                   qp->s_retry,
                   qp->s_retry_cnt,
                   qp->s_rnr_retry_cnt,
                   sde,
                   sde ? sde->this_idx : 0);
}

void qp_comm_est(struct rvt_qp *qp)
{
        qp->r_flags |= RVT_R_COMM_EST;
        if (qp->ibqp.event_handler) {
                struct ib_event ev;

                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_COMM_EST;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
}

void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                    gfp_t gfp)
{
        struct hfi1_qp_priv *priv;

        priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node);
        if (!priv)
                return ERR_PTR(-ENOMEM);

        priv->owner = qp;

        priv->s_hdr = kzalloc_node(sizeof(*priv->s_hdr), gfp, rdi->dparms.node);
        if (!priv->s_hdr) {
                kfree(priv);
                return ERR_PTR(-ENOMEM);
        }
        setup_timer(&priv->s_rnr_timer, hfi1_rc_rnr_retry, (unsigned long)qp);
        qp->s_timer.function = hfi1_rc_timeout;
        return priv;
}

void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        kfree(priv->s_hdr);
        kfree(priv);
}

unsigned free_all_qps(struct rvt_dev_info *rdi)
{
        struct hfi1_ibdev *verbs_dev = container_of(rdi,
                                                    struct hfi1_ibdev,
                                                    rdi);
        struct hfi1_devdata *dd = container_of(verbs_dev,
                                               struct hfi1_devdata,
                                               verbs_dev);
        int n;
        unsigned qp_inuse = 0;

        for (n = 0; n < dd->num_pports; n++) {
                struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;

                rcu_read_lock();
                if (rcu_dereference(ibp->rvp.qp[0]))
                        qp_inuse++;
                if (rcu_dereference(ibp->rvp.qp[1]))
                        qp_inuse++;
                rcu_read_unlock();
        }

        return qp_inuse;
}

void flush_qp_waiters(struct rvt_qp *qp)
{
        flush_iowait(qp);
        hfi1_stop_rc_timers(qp);
}

void stop_send_queue(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        cancel_work_sync(&priv->s_iowait.iowork);
        hfi1_del_timers_sync(qp);
}

void quiesce_qp(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        iowait_sdma_drain(&priv->s_iowait);
        flush_tx_list(qp);
}

void notify_qp_reset(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;

        iowait_init(
                &priv->s_iowait,
                1,
                _hfi1_do_send,
                iowait_sleep,
                iowait_wakeup);
        priv->r_adefered = 0;
        clear_ahg(qp);
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void hfi1_migrate_qp(struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct ib_event ev;

        qp->s_mig_state = IB_MIG_MIGRATED;
        qp->remote_ah_attr = qp->alt_ah_attr;
        qp->port_num = qp->alt_ah_attr.port_num;
        qp->s_pkey_index = qp->s_alt_pkey_index;
        qp->s_flags |= RVT_S_AHG_CLEAR;
        priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
        priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);

        ev.device = qp->ibqp.device;
        ev.element.qp = &qp->ibqp;
        ev.event = IB_EVENT_PATH_MIG;
        qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

int mtu_to_path_mtu(u32 mtu)
{
        return mtu_to_enum(mtu, OPA_MTU_8192);
}

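/*
 * Note: mtu_to_enum() (declared in hfi.h) maps a byte count back to an
 * MTU enum; judging by the call above, OPA_MTU_8192 is the fallback
 * returned when the byte count has no exact enum match.
 */
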
u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
        u32 mtu;
        struct hfi1_ibdev *verbs_dev = container_of(rdi,
                                                    struct hfi1_ibdev,
                                                    rdi);
        struct hfi1_devdata *dd = container_of(verbs_dev,
                                               struct hfi1_devdata,
                                               verbs_dev);
        struct hfi1_ibport *ibp;
        u8 sc, vl;

        ibp = &dd->pport[qp->port_num - 1].ibport_data;
        sc = ibp->sl_to_sc[qp->remote_ah_attr.sl];
        vl = sc_to_vlt(dd, sc);

        mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
        if (vl < PER_VL_SEND_CONTEXTS)
                mtu = min_t(u32, mtu, dd->vld[vl].mtu);
        return mtu;
}

int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                       struct ib_qp_attr *attr)
{
        int mtu, pidx = qp->port_num - 1;
        struct hfi1_ibdev *verbs_dev = container_of(rdi,
                                                    struct hfi1_ibdev,
                                                    rdi);
        struct hfi1_devdata *dd = container_of(verbs_dev,
                                               struct hfi1_devdata,
                                               verbs_dev);

        mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
        if (mtu == -1)
                return -1; /* values less than 0 are error */

        if (mtu > dd->pport[pidx].ibmtu)
                return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
        else
                return attr->path_mtu;
}

void notify_error_qp(struct rvt_qp *qp)
{
        struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
        struct hfi1_qp_priv *priv = qp->priv;

        write_seqlock(&dev->iowait_lock);
        if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) {
                qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
                list_del_init(&priv->s_iowait.list);
                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);
        }
        write_sequnlock(&dev->iowait_lock);

        if (!(qp->s_flags & RVT_S_BUSY)) {
                qp->s_hdrwords = 0;
                if (qp->s_rdma_mr) {
                        rvt_put_mr(qp->s_rdma_mr);
                        qp->s_rdma_mr = NULL;
                }
                flush_tx_list(qp);
        }
}