/*
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "sdma.h"

unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned seq);
static void iowait_wakeup(struct iowait *wait, int reason);

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

/*
 * Convert the AETH credit code into the number of credits.
 */
static const u16 credit_table[31] = {
	0,			/* 0 */
	1,			/* 1 */
	2,			/* 2 */
	3,			/* 3 */
	4,			/* 4 */
	6,			/* 5 */
	8,			/* 6 */
	12,			/* 7 */
	16,			/* 8 */
	24,			/* 9 */
	32,			/* A */
	48,			/* B */
	64,			/* C */
	96,			/* D */
	128,			/* E */
	192,			/* F */
	256,			/* 10 */
	384,			/* 11 */
	512,			/* 12 */
	768,			/* 13 */
	1024,			/* 14 */
	1536,			/* 15 */
	2048,			/* 16 */
	3072,			/* 17 */
	4096,			/* 18 */
	6144,			/* 19 */
	8192,			/* 1A */
	12288,			/* 1B */
	16384,			/* 1C */
	24576,			/* 1D */
	32768			/* 1E */
};

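/*
 * flush_tx_list - free any verbs tx requests queued on the QP's iowait
 *
 * Each pending sdma_txreq on the iowait tx_head list is removed and its
 * containing verbs_txreq is returned to the free pool.
 */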
static void flush_tx_list(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	while (!list_empty(&priv->s_iowait.tx_head)) {
		struct sdma_txreq *tx;

		tx = list_first_entry(
			&priv->s_iowait.tx_head,
			struct sdma_txreq,
			list);
		list_del_init(&tx->list);
		hfi1_put_txreq(
			container_of(tx, struct verbs_txreq, txreq));
	}
}

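/*
 * flush_iowait - remove the QP from any sdma engine wait list
 *
 * If the QP is still linked on an iowait list, unlink it and drop the
 * reference that was taken when it was queued.
 */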
static void flush_iowait(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;

	write_seqlock_irqsave(&dev->iowait_lock, flags);
	if (!list_empty(&priv->s_iowait.list)) {
		list_del_init(&priv->s_iowait.list);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	write_sequnlock_irqrestore(&dev->iowait_lock, flags);
}

static inline int opa_mtu_enum_to_int(int mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default:            return -1;
	}
}

/**
 * This function is what we would push to the core layer if we wanted to be a
 * "first class citizen".  Instead we hide this here and rely on Verbs ULPs
 * to blindly pass the MTU enum value from the PathRecord to us.
 *
 * The actual flag used to determine "8k MTU" will change and is currently
 * unknown.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
	int val = opa_mtu_enum_to_int((int)mtu);

	if (val > 0)
		return val;
	return ib_mtu_enum_to_int(mtu);
}

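/*
 * hfi1_check_modify_qp - driver-specific validation for modify_qp
 *
 * Reject a new or alternate path whose SL maps to an invalid SC (0xf) or,
 * when send DMA is enabled, to an SC with no usable sdma engine.
 */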
int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_ibdev *dev = to_idev(ibqp->device);
	struct hfi1_devdata *dd = dd_from_dev(dev);
	u8 sc;

	if (attr_mask & IB_QP_AV) {
		sc = ah_to_sc(ibqp->device, &attr->ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;
	}

	return 0;
}

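/*
 * hfi1_modify_qp - driver-specific side effects of modify_qp
 *
 * Refresh the cached SC and sdma engine when the address vector changes,
 * and again when a path migration to the armed alternate path completes.
 */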
void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_qp_priv *priv = qp->priv;

	if (attr_mask & IB_QP_AV) {
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE &&
	    attr->path_mig_state == IB_MIG_MIGRATED &&
	    qp->s_mig_state == IB_MIG_ARMED) {
		qp->s_flags |= RVT_S_AHG_CLEAR;
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
	}
}

/**
 * hfi1_check_send_wqe - validate wqe
 * @qp: the qp
 * @wqe: the built wqe
 *
 * Validate the wqe.  This is called prior to inserting the
 * wqe into the ring, after the wqe has been set up.
 *
 * Returns 0 on success, -EINVAL on failure
 */
int hfi1_check_send_wqe(struct rvt_qp *qp,
			struct rvt_swqe *wqe)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ah *ah;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		break;
	case IB_QPT_SMI:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		break;
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		if (ibp->sl_to_sc[ah->attr.sl] == 0xf)
			return -EINVAL;
		/* fall through */
	default:
		break;
	}
	return 0;
}

/**
 * hfi1_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 hfi1_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & HFI1_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= HFI1_AETH_CREDIT_INVAL << HFI1_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct rvt_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * There is a small chance that the pair of reads are
		 * not atomic, which is OK, since the fuzziness is
		 * resolved as further ACKs go out.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << HFI1_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

/**
 * _hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress w/o regard to the s_flags.
 *
 * It is only used in the post send, which doesn't hold
 * the s_lock.
 */
void _hfi1_schedule_send(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
			priv->s_sde ?
			priv->s_sde->cpu :
			cpumask_first(cpumask_of_node(dd->node)));
}

/**
 * hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress and caller should hold
 * the s_lock.
 */
void hfi1_schedule_send(struct rvt_qp *qp)
{
	if (hfi1_send_ok(qp))
		_hfi1_schedule_send(qp);
}

/**
 * hfi1_get_credit - handle a credit update from a received AETH
 * @qp: the qp whose send credits to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void hfi1_get_credit(struct rvt_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == HFI1_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK;
		if (cmp_msn(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
	}
}

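/*
 * hfi1_qp_wakeup - clear the given wait flag and reschedule the QP
 *
 * Called when the condition the QP was waiting on has been satisfied.
 * Drops the reference taken when the QP was put to sleep.
 */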
void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & flag) {
		qp->s_flags &= ~flag;
		trace_hfi1_qpwakeup(qp, flag);
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	/* Notify hfi1_destroy_qp() if it is waiting. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

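/*
 * iowait_sleep - sdma descriptor shortage callback
 *
 * Called by the sdma engine when a send cannot be issued for lack of
 * descriptors.  The txreq is queued on the QP's iowait, the QP is added
 * to the engine's dmawait list and marked not busy.  Returns -EBUSY when
 * the QP has been queued, -EAGAIN if the engine made progress and the
 * caller should retry, and 0 if the QP is no longer in a state that can
 * process the request (the txreq is freed in that case).
 */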
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned seq)
{
	struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
	struct rvt_qp *qp;
	struct hfi1_qp_priv *priv;
	unsigned long flags;
	int ret = 0;
	struct hfi1_ibdev *dev;

	qp = tx->qp;
	priv = qp->priv;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		/* Make a common routine? */
		dev = &sde->dd->verbs_dev;
		list_add_tail(&stx->list, &wait->tx_head);
		write_seqlock(&dev->iowait_lock);
		if (sdma_progress(sde, seq, stx))
			goto eagain;
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibport *ibp =
				to_iport(qp->ibqp.device, qp->port_num);

			ibp->rvp.n_dmawait++;
			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
			list_add_tail(&priv->s_iowait.list, &sde->dmawait);
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
			atomic_inc(&qp->refcount);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EBUSY;
	} else {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_put_txreq(tx);
	}
	return ret;
eagain:
	write_sequnlock(&dev->iowait_lock);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	list_del_init(&stx->list);
	return -EAGAIN;
}

static void iowait_wakeup(struct iowait *wait, int reason)
{
	struct rvt_qp *qp = iowait_to_qp(wait);

	WARN_ON(reason != SDMA_AVAIL_REASON);
	hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}

/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp or NULL for SMI type qp.
 */
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct sdma_engine *sde;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return NULL;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		return NULL;
	default:
		break;
	}
	sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
	return sde;
}

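/*
 * qp_iter - state for walking every QP on the device, used by the
 * seq_file based QP dump.  The two special (QP0/QP1) QPs of each port
 * are visited first, followed by the entries of the rdmavt QP hash
 * table.
 */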
struct qp_iter {
	struct hfi1_ibdev *dev;
	struct rvt_qp *qp;
	int specials;
	int n;
};

struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
{
	struct qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;
	if (qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

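/*
 * qp_iter_next - advance the iterator to the next QP
 *
 * Returns 0 when a QP was found (iter->qp and iter->n are updated),
 * 1 when the iteration is complete.  The caller must hold the RCU
 * read lock.
 */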
int qp_iter_next(struct qp_iter *iter)
{
	struct hfi1_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	/*
	 * The approach is to consider the special qps
	 * as additional table entries before the
	 * real hash table.  Since the qp code sets
	 * the qp->next hash link to NULL, this works just fine.
	 *
	 * iter->specials is 2 * # ports
	 *
	 * n = 0..iter->specials is the special qp indices
	 *
	 * n = iter->specials..dev->rdi.qp_dev->qp_table_size+iter->specials are
	 * the potential hash bucket entries
	 */
	for (; n < dev->rdi.qp_dev->qp_table_size + iter->specials; n++) {
		if (pqp) {
			qp = rcu_dereference(pqp->next);
		} else {
			if (n < iter->specials) {
				struct hfi1_pportdata *ppd;
				struct hfi1_ibport *ibp;
				int pidx;

				pidx = n % dev->rdi.ibdev.phys_port_cnt;
				ppd = &dd_from_dev(dev)->pport[pidx];
				ibp = &ppd->ibport_data;

				if (!(n & 1))
					qp = rcu_dereference(ibp->rvp.qp[0]);
				else
					qp = rcu_dereference(ibp->rvp.qp[1]);
			} else {
				qp = rcu_dereference(
					dev->rdi.qp_dev->qp_table[
						(n - iter->specials)]);
			}
		}
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

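/*
 * qp_idle - true if the send queue is completely caught up, i.e. all
 * send work queue indices (last, acked, cur, tail, head) are equal.
 */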
static int qp_idle(struct rvt_qp *qp)
{
	return
		qp->s_last == qp->s_acked &&
		qp->s_acked == qp->s_cur &&
		qp->s_cur == qp->s_tail &&
		qp->s_tail == qp->s_head;
}

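/*
 * qp_iter_print - dump one line of QP state to the seq_file
 *
 * Emits the QP number, type, state, flags, PSN tracking, send queue
 * indices, path information and the selected sdma engine.
 */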
void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct hfi1_qp_priv *priv = qp->priv;
	struct sdma_engine *sde;

	sde = qp_to_sdma_engine(qp, priv->s_sc);
	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d %s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) QP%u LID %x SL %u MTU %u %u %u %u SDE %p,%u\n",
		   iter->n,
		   qp_idle(qp) ? "I" : "B",
		   qp->ibqp.qp_num,
		   atomic_read(&qp->refcount),
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe ? wqe->wr.opcode : 0,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_iowait.sdma_busy),
		   !list_empty(&priv->s_iowait.list),
		   qp->timeout,
		   wqe ? wqe->ssn : 0,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->s_avail,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid,
		   qp->remote_ah_attr.sl,
		   qp->pmtu,
		   qp->s_retry,
		   qp->s_retry_cnt,
		   qp->s_rnr_retry_cnt,
		   sde,
		   sde ? sde->this_idx : 0);
}

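/*
 * qp_comm_est - mark the QP "communication established" and deliver the
 * IB_EVENT_COMM_EST event to the ULP, if an event handler is registered.
 */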
void qp_comm_est(struct rvt_qp *qp)
{
	qp->r_flags |= RVT_R_COMM_EST;
	if (qp->ibqp.event_handler) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_COMM_EST;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

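/*
 * qp_priv_alloc - allocate the hfi1 private part of a QP
 *
 * Allocates the hfi1_qp_priv structure and its send header buffer on
 * the device's NUMA node and sets up the RNR and RC retry timers.
 * Returns the private structure or an ERR_PTR on allocation failure.
 */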
void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		    gfp_t gfp)
{
	struct hfi1_qp_priv *priv;

	priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->owner = qp;

	priv->s_hdr = kzalloc_node(sizeof(*priv->s_hdr), gfp, rdi->dparms.node);
	if (!priv->s_hdr) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	setup_timer(&priv->s_rnr_timer, hfi1_rc_rnr_retry, (unsigned long)qp);
	qp->s_timer.function = hfi1_rc_timeout;
	return priv;
}

void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	kfree(priv->s_hdr);
	kfree(priv);
}

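/*
 * free_all_qps - count the special QPs still in use
 *
 * Returns the number of per-port special QPs (QP0/QP1) that are still
 * allocated on this device.
 */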
unsigned free_all_qps(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	int n;
	unsigned qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}

	return qp_inuse;
}

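/*
 * The following functions are driver hooks registered with rdmavt:
 * flush_qp_waiters, stop_send_queue and quiesce_qp tear down any
 * hfi1-specific waits and in-flight sdma work before a QP is reset
 * or destroyed, and notify_qp_reset re-initializes the iowait state.
 */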
void flush_qp_waiters(struct rvt_qp *qp)
{
	flush_iowait(qp);
	hfi1_stop_rc_timers(qp);
}

void stop_send_queue(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_iowait.iowork);
	hfi1_del_timers_sync(qp);
}

void quiesce_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	iowait_sdma_drain(&priv->s_iowait);
	flush_tx_list(qp);
}

void notify_qp_reset(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	iowait_init(
		&priv->s_iowait,
		1,
		_hfi1_do_send,
		iowait_sleep,
		iowait_wakeup);
	priv->r_adefered = 0;
	clear_ahg(qp);
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void hfi1_migrate_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;
	qp->s_flags |= RVT_S_AHG_CLEAR;
	priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

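/*
 * MTU helpers: translate between byte MTUs and path MTU enum values,
 * taking the OPA extended MTUs (8K and 10K) into account, and clamp the
 * per-QP MTU to what the port and the chosen VL can actually carry.
 */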
int mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu, OPA_MTU_8192);
}

u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	u32 mtu;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	struct hfi1_ibport *ibp;
	u8 sc, vl;

	ibp = &dd->pport[qp->port_num - 1].ibport_data;
	sc = ibp->sl_to_sc[qp->remote_ah_attr.sl];
	vl = sc_to_vlt(dd, sc);

	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
	if (vl < PER_VL_SEND_CONTEXTS)
		mtu = min_t(u32, mtu, dd->vld[vl].mtu);
	return mtu;
}

int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		       struct ib_qp_attr *attr)
{
	int mtu, pidx = qp->port_num - 1;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
	if (mtu == -1)
		return -1; /* values less than 0 are error */

	if (mtu > dd->pport[pidx].ibmtu)
		return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
	else
		return attr->path_mtu;
}

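/*
 * notify_error_qp - driver hook called when a QP enters the error state
 *
 * Removes the QP from any iowait list and, if the QP is not busy sending,
 * drops the pending header, releases any RDMA MR reference and flushes
 * the queued tx requests.
 */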
void notify_error_qp(struct rvt_qp *qp)
{
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct hfi1_qp_priv *priv = qp->priv;

	write_seqlock(&dev->iowait_lock);
	if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->s_iowait.list);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	write_sequnlock(&dev->iowait_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		flush_tx_list(qp);
	}
}

/**
 * hfi1_error_port_qps - put a port's RC/UC qps into error state
 * @ibp: the ibport.
 * @sl: the service level.
 *
 * This function places all RC/UC qps with a given service level into error
 * state.  It is generally called to force upper layer apps to abandon stale
 * qps after an sl->sc mapping change.
 */
void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
{
	struct rvt_qp *qp = NULL;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_ibdev *dev = &ppd->dd->verbs_dev;
	int n;
	int lastwqe;
	struct ib_event ev;

	rcu_read_lock();

	/* Deal only with RC/UC qps that use the given SL. */
	for (n = 0; n < dev->rdi.qp_dev->qp_table_size; n++) {
		for (qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]); qp;
		     qp = rcu_dereference(qp->next)) {
			if (qp->port_num == ppd->port &&
			    (qp->ibqp.qp_type == IB_QPT_UC ||
			     qp->ibqp.qp_type == IB_QPT_RC) &&
			    qp->remote_ah_attr.sl == sl &&
			    (ib_rvt_state_ops[qp->state] &
			     RVT_POST_SEND_OK)) {
				spin_lock_irq(&qp->r_lock);
				spin_lock(&qp->s_hlock);
				spin_lock(&qp->s_lock);
				lastwqe = rvt_error_qp(qp,
						       IB_WC_WR_FLUSH_ERR);
				spin_unlock(&qp->s_lock);
				spin_unlock(&qp->s_hlock);
				spin_unlock_irq(&qp->r_lock);
				if (lastwqe) {
					ev.device = qp->ibqp.device;
					ev.element.qp = &qp->ibqp;
					ev.event =
						IB_EVENT_QP_LAST_WQE_REACHED;
					qp->ibqp.event_handler(&ev,
						qp->ibqp.qp_context);
				}
			}
		}
	}

	rcu_read_unlock();
}