Commit | Line | Data |
---|---|---|
b038ced7 SW |
1 | /* |
2 | * Copyright (c) 2006 Chelsio, Inc. All rights reserved. | |
b038ced7 SW |
3 | * |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
32 | #include <asm/delay.h> | |
33 | ||
34 | #include <linux/mutex.h> | |
35 | #include <linux/netdevice.h> | |
36 | #include <linux/sched.h> | |
37 | #include <linux/spinlock.h> | |
38 | #include <linux/pci.h> | |
c3bb1092 | 39 | #include <linux/dma-mapping.h> |
5a0e3ad6 | 40 | #include <linux/slab.h> |
881d966b | 41 | #include <net/net_namespace.h> |
b038ced7 SW |
42 | |
43 | #include "cxio_resource.h" | |
44 | #include "cxio_hal.h" | |
45 | #include "cxgb3_offload.h" | |
46 | #include "sge_defs.h" | |
47 | ||
48 | static LIST_HEAD(rdev_list); | |
49 | static cxio_hal_ev_callback_func_t cxio_ev_cb = NULL; | |
50 | ||
2b540355 | 51 | static struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name) |
b038ced7 SW |
52 | { |
53 | struct cxio_rdev *rdev; | |
54 | ||
55 | list_for_each_entry(rdev, &rdev_list, entry) | |
56 | if (!strcmp(rdev->dev_name, dev_name)) | |
57 | return rdev; | |
58 | return NULL; | |
59 | } | |
60 | ||
2b540355 | 61 | static struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev *tdev) |
b038ced7 SW |
62 | { |
63 | struct cxio_rdev *rdev; | |
64 | ||
65 | list_for_each_entry(rdev, &rdev_list, entry) | |
66 | if (rdev->t3cdev_p == tdev) | |
67 | return rdev; | |
68 | return NULL; | |
69 | } | |
70 | ||
71 | int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq, | |
72 | enum t3_cq_opcode op, u32 credit) | |
73 | { | |
74 | int ret; | |
75 | struct t3_cqe *cqe; | |
76 | u32 rptr; | |
77 | ||
78 | struct rdma_cq_op setup; | |
79 | setup.id = cq->cqid; | |
80 | setup.credits = (op == CQ_CREDIT_UPDATE) ? credit : 0; | |
81 | setup.op = op; | |
82 | ret = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_OP, &setup); | |
83 | ||
84 | if ((ret < 0) || (op == CQ_CREDIT_UPDATE)) | |
85 | return ret; | |
86 | ||
87 | /* | |
88 | * If the rearm returned an index other than our current index, | |
89 | * then there might be CQEs in flight (being DMA'd). We must wait | |
90 | * here for them to complete or the consumer can miss a notification. | |
91 | */ | |
92 | if (Q_PTR2IDX((cq->rptr), cq->size_log2) != ret) { | |
93 | int i = 0; | |
94 | ||
95 | rptr = cq->rptr; | |
96 | ||
97 | /* | |
98 | * Keep the generation correct by bumping rptr until it | |
99 | * matches the index returned by the rearm - 1. | |
100 | */ | |
101 | while (Q_PTR2IDX((rptr+1), cq->size_log2) != ret) | |
102 | rptr++; | |
103 | ||
104 | /* | |
105 | * Now rptr is the index for the (last) cqe that was | |
106 | * in-flight at the time the HW rearmed the CQ. We | |
107 | * spin until that CQE is valid. | |
108 | */ | |
109 | cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2); | |
110 | while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) { | |
111 | udelay(1); | |
112 | if (i++ > 1000000) { | |
b038ced7 SW |
113 | printk(KERN_ERR "%s: stalled rnic\n", |
114 | rdev_p->dev_name); | |
115 | return -EIO; | |
116 | } | |
117 | } | |
ed23a727 RD |
118 | |
119 | return 1; | |
b038ced7 | 120 | } |
ed23a727 | 121 | |
b038ced7 SW |
122 | return 0; |
123 | } | |
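The wait loop above relies on the driver's power-of-two ring convention: queue pointers increment monotonically without wrapping, Q_PTR2IDX masks a pointer down to a slot index, and a generation bit that flips on every wrap lets CQ_VLD_ENTRY tell a freshly DMA'd CQE from a stale one. A minimal user-space sketch of that convention follows (illustrative only; the real macros live in cxio_wr.h and the hardware's generation-bit polarity may be inverted relative to this):

```c
/* Illustrative sketch of the ring-pointer convention, not the driver's macros. */
#include <stdio.h>

static unsigned ptr2idx(unsigned ptr, unsigned size_log2)
{
	return ptr & ((1U << size_log2) - 1);	/* wrap into [0, 2^size_log2) */
}

static unsigned genbit(unsigned ptr, unsigned size_log2)
{
	return (ptr >> size_log2) & 1;		/* flips once per queue wrap */
}

int main(void)
{
	unsigned size_log2 = 3;			/* 8-entry queue (example) */

	for (unsigned rptr = 6; rptr < 10; rptr++)
		printf("rptr %u -> idx %u gen %u\n",
		       rptr, ptr2idx(rptr, size_log2), genbit(rptr, size_log2));
	return 0;
}
```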
124 | ||
2b540355 | 125 | static int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid) |
b038ced7 SW |
126 | { |
127 | struct rdma_cq_setup setup; | |
128 | setup.id = cqid; | |
129 | setup.base_addr = 0; /* NULL address */ | |
130 | setup.size = 0; /* disable the CQ */ | |
131 | setup.credits = 0; | |
132 | setup.credit_thres = 0; | |
133 | setup.ovfl_mode = 0; | |
134 | return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup)); | |
135 | } | |
136 | ||
2b540355 | 137 | static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid) |
b038ced7 SW |
138 | { |
139 | u64 sge_cmd; | |
140 | struct t3_modify_qp_wr *wqe; | |
141 | struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL); | |
142 | if (!skb) { | |
33718363 | 143 | PDBG("%s alloc_skb failed\n", __func__); |
b038ced7 SW |
144 | return -ENOMEM; |
145 | } | |
146 | wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe)); | |
147 | memset(wqe, 0, sizeof(*wqe)); | |
e7e55829 SW |
148 | build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, |
149 | T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 0, qpid, 7, | |
150 | T3_SOPEOP); | |
b038ced7 SW |
151 | wqe->flags = cpu_to_be32(MODQP_WRITE_EC); |
152 | sge_cmd = qpid << 8 | 3; | |
153 | wqe->sge_cmd = cpu_to_be64(sge_cmd); | |
154 | skb->priority = CPL_PRIORITY_CONTROL; | |
04b5d028 | 155 | return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb); |
b038ced7 SW |
156 | } |
157 | ||
5279d3ac | 158 | int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel) |
b038ced7 SW |
159 | { |
160 | struct rdma_cq_setup setup; | |
161 | int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe); | |
162 | ||
b955150e | 163 | size += 1; /* one extra page for storing cq-in-err state */ |
b038ced7 SW |
164 | cq->cqid = cxio_hal_get_cqid(rdev_p->rscp); |
165 | if (!cq->cqid) | |
166 | return -ENOMEM; | |
5279d3ac SW |
167 | if (kernel) { |
168 | cq->sw_queue = kzalloc(size, GFP_KERNEL); | |
169 | if (!cq->sw_queue) | |
170 | return -ENOMEM; | |
171 | } | |
172 | cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size, | |
b038ced7 SW |
173 | &(cq->dma_addr), GFP_KERNEL); |
174 | if (!cq->queue) { | |
175 | kfree(cq->sw_queue); | |
176 | return -ENOMEM; | |
177 | } | |
7960d6b9 | 178 | dma_unmap_addr_set(cq, mapping, cq->dma_addr); |
b038ced7 SW |
179 | memset(cq->queue, 0, size); |
180 | setup.id = cq->cqid; | |
181 | setup.base_addr = (u64) (cq->dma_addr); | |
182 | setup.size = 1UL << cq->size_log2; | |
183 | setup.credits = 65535; | |
184 | setup.credit_thres = 1; | |
8176d297 | 185 | if (rdev_p->t3cdev_p->type != T3A) |
b038ced7 SW |
186 | setup.ovfl_mode = 0; |
187 | else | |
188 | setup.ovfl_mode = 1; | |
189 | return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup)); | |
190 | } | |
191 | ||
c9431091 | 192 | #ifdef notyet |
b038ced7 SW |
193 | int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) |
194 | { | |
195 | struct rdma_cq_setup setup; | |
196 | setup.id = cq->cqid; | |
197 | setup.base_addr = (u64) (cq->dma_addr); | |
198 | setup.size = 1UL << cq->size_log2; | |
199 | setup.credits = setup.size; | |
200 | setup.credit_thres = setup.size; /* TBD: overflow recovery */ | |
201 | setup.ovfl_mode = 1; | |
202 | return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup)); | |
203 | } | |
c9431091 | 204 | #endif |
b038ced7 SW |
205 | |
206 | static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx) | |
207 | { | |
208 | struct cxio_qpid_list *entry; | |
209 | u32 qpid; | |
210 | int i; | |
211 | ||
212 | mutex_lock(&uctx->lock); | |
213 | if (!list_empty(&uctx->qpids)) { | |
214 | entry = list_entry(uctx->qpids.next, struct cxio_qpid_list, | |
215 | entry); | |
216 | list_del(&entry->entry); | |
217 | qpid = entry->qpid; | |
218 | kfree(entry); | |
219 | } else { | |
220 | qpid = cxio_hal_get_qpid(rdev_p->rscp); | |
221 | if (!qpid) | |
222 | goto out; | |
223 | for (i = qpid+1; i & rdev_p->qpmask; i++) { | |
224 | entry = kmalloc(sizeof *entry, GFP_KERNEL); | |
225 | if (!entry) | |
226 | break; | |
227 | entry->qpid = i; | |
228 | list_add_tail(&entry->entry, &uctx->qpids); | |
229 | } | |
230 | } | |
231 | out: | |
232 | mutex_unlock(&uctx->lock); | |
33718363 | 233 | PDBG("%s qpid 0x%x\n", __func__, qpid); |
b038ced7 SW |
234 | return qpid; |
235 | } | |
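get_qpid() above hands out hardware QP IDs in doorbell-page-sized blocks: one ID comes from the resource pool, and the remaining IDs of that block (those still inside qpmask) are cached on the per-ucontext free list so later allocations skip the pool. A standalone sketch of that caching loop, with assumed qpmask and qpid values:

```c
/* Sketch of the QP ID block caching; qpmask and qpid values are assumptions. */
#include <stdio.h>

int main(void)
{
	unsigned qpmask = 0x3;		/* assume 4 QP IDs per doorbell page */
	unsigned qpid = 16;		/* assume this ID came from the pool */

	printf("returned qpid %u, cached:", qpid);
	for (unsigned i = qpid + 1; i & qpmask; i++)
		printf(" %u", i);	/* 17 18 19 would go on uctx->qpids */
	printf("\n");
	return 0;
}
```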
236 | ||
237 | static void put_qpid(struct cxio_rdev *rdev_p, u32 qpid, | |
238 | struct cxio_ucontext *uctx) | |
239 | { | |
240 | struct cxio_qpid_list *entry; | |
241 | ||
242 | entry = kmalloc(sizeof *entry, GFP_KERNEL); | |
243 | if (!entry) | |
244 | return; | |
33718363 | 245 | PDBG("%s qpid 0x%x\n", __func__, qpid); |
b038ced7 SW |
246 | entry->qpid = qpid; |
247 | mutex_lock(&uctx->lock); | |
248 | list_add_tail(&entry->entry, &uctx->qpids); | |
249 | mutex_unlock(&uctx->lock); | |
250 | } | |
251 | ||
252 | void cxio_release_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx) | |
253 | { | |
254 | struct list_head *pos, *nxt; | |
255 | struct cxio_qpid_list *entry; | |
256 | ||
257 | mutex_lock(&uctx->lock); | |
258 | list_for_each_safe(pos, nxt, &uctx->qpids) { | |
259 | entry = list_entry(pos, struct cxio_qpid_list, entry); | |
260 | list_del_init(&entry->entry); | |
261 | if (!(entry->qpid & rdev_p->qpmask)) | |
262 | cxio_hal_put_qpid(rdev_p->rscp, entry->qpid); | |
263 | kfree(entry); | |
264 | } | |
265 | mutex_unlock(&uctx->lock); | |
266 | } | |
267 | ||
268 | void cxio_init_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx) | |
269 | { | |
270 | INIT_LIST_HEAD(&uctx->qpids); | |
271 | mutex_init(&uctx->lock); | |
272 | } | |
273 | ||
274 | int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain, | |
275 | struct t3_wq *wq, struct cxio_ucontext *uctx) | |
276 | { | |
277 | int depth = 1UL << wq->size_log2; | |
278 | int rqsize = 1UL << wq->rq_size_log2; | |
279 | ||
280 | wq->qpid = get_qpid(rdev_p, uctx); | |
281 | if (!wq->qpid) | |
282 | return -ENOMEM; | |
283 | ||
4ab928f6 | 284 | wq->rq = kzalloc(depth * sizeof(struct t3_swrq), GFP_KERNEL); |
b038ced7 SW |
285 | if (!wq->rq) |
286 | goto err1; | |
287 | ||
288 | wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize); | |
289 | if (!wq->rq_addr) | |
290 | goto err2; | |
291 | ||
292 | wq->sq = kzalloc(depth * sizeof(struct t3_swsq), GFP_KERNEL); | |
293 | if (!wq->sq) | |
294 | goto err3; | |
295 | ||
296 | wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), | |
297 | depth * sizeof(union t3_wr), | |
298 | &(wq->dma_addr), GFP_KERNEL); | |
299 | if (!wq->queue) | |
300 | goto err4; | |
301 | ||
302 | memset(wq->queue, 0, depth * sizeof(union t3_wr)); | |
7960d6b9 | 303 | dma_unmap_addr_set(wq, mapping, wq->dma_addr); |
b038ced7 SW |
304 | wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr; |
305 | if (!kernel_domain) | |
306 | wq->udb = (u64)rdev_p->rnic_info.udbell_physbase + | |
307 | (wq->qpid << rdev_p->qpshift); | |
4ab928f6 | 308 | wq->rdev = rdev_p; |
33718363 | 309 | PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __func__, |
b038ced7 SW |
310 | wq->qpid, wq->doorbell, (unsigned long long) wq->udb); |
311 | return 0; | |
312 | err4: | |
313 | kfree(wq->sq); | |
314 | err3: | |
315 | cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, rqsize); | |
316 | err2: | |
317 | kfree(wq->rq); | |
318 | err1: | |
319 | put_qpid(rdev_p, wq->qpid, uctx); | |
320 | return -ENOMEM; | |
321 | } | |
322 | ||
323 | int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) | |
324 | { | |
325 | int err; | |
326 | err = cxio_hal_clear_cq_ctx(rdev_p, cq->cqid); | |
327 | kfree(cq->sw_queue); | |
328 | dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), | |
329 | (1UL << (cq->size_log2)) | |
0de4cbb3 | 330 | * sizeof(struct t3_cqe) + 1, cq->queue, |
7960d6b9 | 331 | dma_unmap_addr(cq, mapping)); |
b038ced7 SW |
332 | cxio_hal_put_cqid(rdev_p->rscp, cq->cqid); |
333 | return err; | |
334 | } | |
335 | ||
336 | int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq, | |
337 | struct cxio_ucontext *uctx) | |
338 | { | |
339 | dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), | |
340 | (1UL << (wq->size_log2)) | |
341 | * sizeof(union t3_wr), wq->queue, | |
7960d6b9 | 342 | dma_unmap_addr(wq, mapping)); |
b038ced7 SW |
343 | kfree(wq->sq); |
344 | cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2)); | |
345 | kfree(wq->rq); | |
346 | put_qpid(rdev_p, wq->qpid, uctx); | |
347 | return 0; | |
348 | } | |
349 | ||
350 | static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq) | |
351 | { | |
352 | struct t3_cqe cqe; | |
353 | ||
33718363 | 354 | PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__, |
b038ced7 SW |
355 | wq, cq, cq->sw_rptr, cq->sw_wptr); |
356 | memset(&cqe, 0, sizeof(cqe)); | |
357 | cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) | | |
358 | V_CQE_OPCODE(T3_SEND) | | |
359 | V_CQE_TYPE(0) | | |
360 | V_CQE_SWCQE(1) | | |
361 | V_CQE_QPID(wq->qpid) | | |
362 | V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr, | |
363 | cq->size_log2))); | |
364 | *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe; | |
365 | cq->sw_wptr++; | |
366 | } | |
367 | ||
c8286944 | 368 | int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count) |
b038ced7 SW |
369 | { |
370 | u32 ptr; | |
c8286944 | 371 | int flushed = 0; |
b038ced7 | 372 | |
33718363 | 373 | PDBG("%s wq %p cq %p\n", __func__, wq, cq); |
b038ced7 SW |
374 | |
375 | /* flush RQ */ | |
33718363 | 376 | PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__, |
b038ced7 SW |
377 | wq->rq_rptr, wq->rq_wptr, count); |
378 | ptr = wq->rq_rptr + count; | |
c8286944 | 379 | while (ptr++ != wq->rq_wptr) { |
b038ced7 | 380 | insert_recv_cqe(wq, cq); |
c8286944 SW |
381 | flushed++; |
382 | } | |
383 | return flushed; | |
b038ced7 SW |
384 | } |
385 | ||
386 | static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq, | |
387 | struct t3_swsq *sqp) | |
388 | { | |
389 | struct t3_cqe cqe; | |
390 | ||
33718363 | 391 | PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__, |
b038ced7 SW |
392 | wq, cq, cq->sw_rptr, cq->sw_wptr); |
393 | memset(&cqe, 0, sizeof(cqe)); | |
394 | cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) | | |
395 | V_CQE_OPCODE(sqp->opcode) | | |
396 | V_CQE_TYPE(1) | | |
397 | V_CQE_SWCQE(1) | | |
398 | V_CQE_QPID(wq->qpid) | | |
399 | V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr, | |
400 | cq->size_log2))); | |
401 | cqe.u.scqe.wrid_hi = sqp->sq_wptr; | |
402 | ||
403 | *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe; | |
404 | cq->sw_wptr++; | |
405 | } | |
406 | ||
c8286944 | 407 | int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count) |
b038ced7 SW |
408 | { |
409 | __u32 ptr; | |
c8286944 | 410 | int flushed = 0; |
b038ced7 SW |
411 | struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2); |
412 | ||
413 | ptr = wq->sq_rptr + count; | |
a58e58fa | 414 | sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); |
b038ced7 | 415 | while (ptr != wq->sq_wptr) { |
ec6995dd | 416 | sqp->signaled = 0; |
b038ced7 | 417 | insert_sq_cqe(wq, cq, sqp); |
b038ced7 | 418 | ptr++; |
a58e58fa | 419 | sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); |
c8286944 | 420 | flushed++; |
b038ced7 | 421 | } |
c8286944 | 422 | return flushed; |
b038ced7 SW |
423 | } |
424 | ||
425 | /* | |
426 | * Move all CQEs from the HWCQ into the SWCQ. | |
427 | */ | |
428 | void cxio_flush_hw_cq(struct t3_cq *cq) | |
429 | { | |
430 | struct t3_cqe *cqe, *swcqe; | |
431 | ||
33718363 | 432 | PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid); |
b038ced7 SW |
433 | cqe = cxio_next_hw_cqe(cq); |
434 | while (cqe) { | |
435 | PDBG("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n", | |
33718363 | 436 | __func__, cq->rptr, cq->sw_wptr); |
b038ced7 SW |
437 | swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2); |
438 | *swcqe = *cqe; | |
439 | swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1)); | |
440 | cq->sw_wptr++; | |
441 | cq->rptr++; | |
442 | cqe = cxio_next_hw_cqe(cq); | |
443 | } | |
444 | } | |
445 | ||
2b540355 | 446 | static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq) |
b038ced7 SW |
447 | { |
448 | if (CQE_OPCODE(*cqe) == T3_TERMINATE) | |
449 | return 0; | |
450 | ||
451 | if ((CQE_OPCODE(*cqe) == T3_RDMA_WRITE) && RQ_TYPE(*cqe)) | |
452 | return 0; | |
453 | ||
454 | if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe)) | |
455 | return 0; | |
456 | ||
42fb61f0 | 457 | if (CQE_SEND_OPCODE(*cqe) && RQ_TYPE(*cqe) && |
b038ced7 SW |
458 | Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) |
459 | return 0; | |
460 | ||
461 | return 1; | |
462 | } | |
463 | ||
464 | void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count) | |
465 | { | |
466 | struct t3_cqe *cqe; | |
467 | u32 ptr; | |
468 | ||
469 | *count = 0; | |
470 | ptr = cq->sw_rptr; | |
471 | while (!Q_EMPTY(ptr, cq->sw_wptr)) { | |
472 | cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2)); | |
f8b0dfd1 SW |
473 | if ((SQ_TYPE(*cqe) || |
474 | ((CQE_OPCODE(*cqe) == T3_READ_RESP) && wq->oldest_read)) && | |
b038ced7 SW |
475 | (CQE_QPID(*cqe) == wq->qpid)) |
476 | (*count)++; | |
477 | ptr++; | |
478 | } | |
33718363 | 479 | PDBG("%s cq %p count %d\n", __func__, cq, *count); |
b038ced7 SW |
480 | } |
481 | ||
482 | void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count) | |
483 | { | |
484 | struct t3_cqe *cqe; | |
485 | u32 ptr; | |
486 | ||
487 | *count = 0; | |
33718363 | 488 | PDBG("%s count zero %d\n", __func__, *count); |
b038ced7 SW |
489 | ptr = cq->sw_rptr; |
490 | while (!Q_EMPTY(ptr, cq->sw_wptr)) { | |
491 | cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2)); | |
492 | if (RQ_TYPE(*cqe) && (CQE_OPCODE(*cqe) != T3_READ_RESP) && | |
493 | (CQE_QPID(*cqe) == wq->qpid) && cqe_completes_wr(cqe, wq)) | |
494 | (*count)++; | |
495 | ptr++; | |
496 | } | |
33718363 | 497 | PDBG("%s cq %p count %d\n", __func__, cq, *count); |
b038ced7 SW |
498 | } |
499 | ||
500 | static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p) | |
501 | { | |
502 | struct rdma_cq_setup setup; | |
503 | setup.id = 0; | |
504 | setup.base_addr = 0; /* NULL address */ | |
505 | setup.size = 1; /* enable the CQ */ | |
506 | setup.credits = 0; | |
507 | ||
508 | /* force SGE to redirect to RspQ and interrupt */ | |
509 | setup.credit_thres = 0; | |
510 | setup.ovfl_mode = 1; | |
511 | return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup)); | |
512 | } | |
513 | ||
514 | static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p) | |
515 | { | |
516 | int err; | |
517 | u64 sge_cmd, ctx0, ctx1; | |
518 | u64 base_addr; | |
519 | struct t3_modify_qp_wr *wqe; | |
ed6ee517 | 520 | struct sk_buff *skb; |
b038ced7 | 521 | |
ed6ee517 | 522 | skb = alloc_skb(sizeof(*wqe), GFP_KERNEL); |
b038ced7 | 523 | if (!skb) { |
33718363 | 524 | PDBG("%s alloc_skb failed\n", __func__); |
b038ced7 SW |
525 | return -ENOMEM; |
526 | } | |
527 | err = cxio_hal_init_ctrl_cq(rdev_p); | |
528 | if (err) { | |
33718363 | 529 | PDBG("%s err %d initializing ctrl_cq\n", __func__, err); |
ed6ee517 | 530 | goto err; |
b038ced7 SW |
531 | } |
532 | rdev_p->ctrl_qp.workq = dma_alloc_coherent( | |
533 | &(rdev_p->rnic_info.pdev->dev), | |
534 | (1 << T3_CTRL_QP_SIZE_LOG2) * | |
535 | sizeof(union t3_wr), | |
536 | &(rdev_p->ctrl_qp.dma_addr), | |
537 | GFP_KERNEL); | |
538 | if (!rdev_p->ctrl_qp.workq) { | |
33718363 | 539 | PDBG("%s dma_alloc_coherent failed\n", __func__); |
ed6ee517 SW |
540 | err = -ENOMEM; |
541 | goto err; | |
b038ced7 | 542 | } |
7960d6b9 | 543 | dma_unmap_addr_set(&rdev_p->ctrl_qp, mapping, |
b038ced7 SW |
544 | rdev_p->ctrl_qp.dma_addr); |
545 | rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr; | |
546 | memset(rdev_p->ctrl_qp.workq, 0, | |
547 | (1 << T3_CTRL_QP_SIZE_LOG2) * sizeof(union t3_wr)); | |
548 | ||
549 | mutex_init(&rdev_p->ctrl_qp.lock); | |
550 | init_waitqueue_head(&rdev_p->ctrl_qp.waitq); | |
551 | ||
552 | /* update HW Ctrl QP context */ | |
553 | base_addr = rdev_p->ctrl_qp.dma_addr; | |
554 | base_addr >>= 12; | |
555 | ctx0 = (V_EC_SIZE((1 << T3_CTRL_QP_SIZE_LOG2)) | | |
556 | V_EC_BASE_LO((u32) base_addr & 0xffff)); | |
557 | ctx0 <<= 32; | |
558 | ctx0 |= V_EC_CREDITS(FW_WR_NUM); | |
559 | base_addr >>= 16; | |
560 | ctx1 = (u32) base_addr; | |
561 | base_addr >>= 32; | |
562 | ctx1 |= ((u64) (V_EC_BASE_HI((u32) base_addr & 0xf) | V_EC_RESPQ(0) | | |
563 | V_EC_TYPE(0) | V_EC_GEN(1) | | |
564 | V_EC_UP_TOKEN(T3_CTL_QP_TID) | F_EC_VALID)) << 32; | |
565 | wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe)); | |
566 | memset(wqe, 0, sizeof(*wqe)); | |
6eda48d1 | 567 | build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0, |
e7e55829 | 568 | T3_CTL_QP_TID, 7, T3_SOPEOP); |
b038ced7 SW |
569 | wqe->flags = cpu_to_be32(MODQP_WRITE_EC); |
570 | sge_cmd = (3ULL << 56) | FW_RI_SGEEC_START << 8 | 3; | |
571 | wqe->sge_cmd = cpu_to_be64(sge_cmd); | |
572 | wqe->ctx1 = cpu_to_be64(ctx1); | |
573 | wqe->ctx0 = cpu_to_be64(ctx0); | |
574 | PDBG("CtrlQP dma_addr 0x%llx workq %p size %d\n", | |
575 | (unsigned long long) rdev_p->ctrl_qp.dma_addr, | |
576 | rdev_p->ctrl_qp.workq, 1 << T3_CTRL_QP_SIZE_LOG2); | |
577 | skb->priority = CPL_PRIORITY_CONTROL; | |
04b5d028 | 578 | return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb); |
ed6ee517 SW |
579 | err: |
580 | kfree_skb(skb); | |
581 | return err; | |
b038ced7 SW |
582 | } |
583 | ||
584 | static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p) | |
585 | { | |
586 | dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), | |
587 | (1UL << T3_CTRL_QP_SIZE_LOG2) | |
588 | * sizeof(union t3_wr), rdev_p->ctrl_qp.workq, | |
7960d6b9 | 589 | dma_unmap_addr(&rdev_p->ctrl_qp, mapping)); |
b038ced7 SW |
590 | return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID); |
591 | } | |
592 | ||
593 | /* write len bytes of data into addr (32B aligned address) | |
594 | * If data is NULL, clear len bytes of memory to zero. | |
21ae2956 | 595 | * caller acquires the ctrl_qp lock before the call |
b038ced7 SW |
596 | */ |
597 | static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr, | |
273748cc | 598 | u32 len, void *data) |
b038ced7 SW |
599 | { |
600 | u32 i, nr_wqe, copy_len; | |
601 | u8 *copy_data; | |
94545e8c | 602 | u8 wr_len, utx_len; /* length in 8 byte flit */ |
b038ced7 SW |
603 | enum t3_wr_flags flag; |
604 | __be64 *wqe; | |
605 | u64 utx_cmd; | |
606 | addr &= 0x7FFFFFF; | |
607 | nr_wqe = len % 96 ? len / 96 + 1 : len / 96; /* 96B max per WQE */ | |
608 | PDBG("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n", | |
33718363 | 609 | __func__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len, |
b038ced7 SW |
610 | nr_wqe, data, addr); |
611 | utx_len = 3; /* in 32B unit */ | |
612 | for (i = 0; i < nr_wqe; i++) { | |
613 | if (Q_FULL(rdev_p->ctrl_qp.rptr, rdev_p->ctrl_qp.wptr, | |
614 | T3_CTRL_QP_SIZE_LOG2)) { | |
615 | PDBG("%s ctrl_qp full wptr 0x%0x rptr 0x%0x, " | |
33718363 | 616 | "wait for more space i %d\n", __func__, |
b038ced7 SW |
617 | rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, i); |
618 | if (wait_event_interruptible(rdev_p->ctrl_qp.waitq, | |
619 | !Q_FULL(rdev_p->ctrl_qp.rptr, | |
620 | rdev_p->ctrl_qp.wptr, | |
621 | T3_CTRL_QP_SIZE_LOG2))) { | |
622 | PDBG("%s ctrl_qp workq interrupted\n", | |
33718363 | 623 | __func__); |
b038ced7 SW |
624 | return -ERESTARTSYS; |
625 | } | |
626 | PDBG("%s ctrl_qp wakeup, continue posting work request " | |
33718363 | 627 | "i %d\n", __func__, i); |
b038ced7 SW |
628 | } |
629 | wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr % | |
630 | (1 << T3_CTRL_QP_SIZE_LOG2))); | |
631 | flag = 0; | |
632 | if (i == (nr_wqe - 1)) { | |
633 | /* last WQE */ | |
273748cc | 634 | flag = T3_COMPLETION_FLAG; |
b038ced7 SW |
635 | if (len % 32) |
636 | utx_len = len / 32 + 1; | |
637 | else | |
638 | utx_len = len / 32; | |
639 | } | |
640 | ||
641 | /* | |
642 | * Force a CQE to return the credit to the workq in case | |
643 | * we posted more than half the max QP size of WRs | |
644 | */ | |
645 | if ((i != 0) && | |
646 | (i % (((1 << T3_CTRL_QP_SIZE_LOG2)) >> 1) == 0)) { | |
647 | flag = T3_COMPLETION_FLAG; | |
33718363 | 648 | PDBG("%s force completion at i %d\n", __func__, i); |
b038ced7 SW |
649 | } |
650 | ||
651 | /* build the utx mem command */ | |
652 | wqe += (sizeof(struct t3_bypass_wr) >> 3); | |
653 | utx_cmd = (T3_UTX_MEM_WRITE << 28) | (addr + i * 3); | |
654 | utx_cmd <<= 32; | |
655 | utx_cmd |= (utx_len << 28) | ((utx_len << 2) + 1); | |
656 | *wqe = cpu_to_be64(utx_cmd); | |
657 | wqe++; | |
658 | copy_data = (u8 *) data + i * 96; | |
659 | copy_len = len > 96 ? 96 : len; | |
660 | ||
661 | /* clear memory content if data is NULL */ | |
662 | if (data) | |
663 | memcpy(wqe, copy_data, copy_len); | |
664 | else | |
665 | memset(wqe, 0, copy_len); | |
666 | if (copy_len % 32) | |
667 | memset(((u8 *) wqe) + copy_len, 0, | |
668 | 32 - (copy_len % 32)); | |
669 | wr_len = ((sizeof(struct t3_bypass_wr)) >> 3) + 1 + | |
670 | (utx_len << 2); | |
671 | wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr % | |
672 | (1 << T3_CTRL_QP_SIZE_LOG2))); | |
673 | ||
674 | /* wptr in the WRID[31:0] */ | |
675 | ((union t3_wrid *)(wqe+1))->id0.low = rdev_p->ctrl_qp.wptr; | |
676 | ||
677 | /* | |
678 | * This must be the last write with a memory barrier | |
679 | * for the genbit | |
680 | */ | |
681 | build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_BP, flag, | |
682 | Q_GENBIT(rdev_p->ctrl_qp.wptr, | |
683 | T3_CTRL_QP_SIZE_LOG2), T3_CTRL_QP_ID, | |
e7e55829 | 684 | wr_len, T3_SOPEOP); |
b038ced7 SW |
685 | if (flag == T3_COMPLETION_FLAG) |
686 | ring_doorbell(rdev_p->ctrl_qp.doorbell, T3_CTRL_QP_ID); | |
687 | len -= 96; | |
688 | rdev_p->ctrl_qp.wptr++; | |
689 | } | |
690 | return 0; | |
691 | } | |
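The loop above moves adapter memory through the control QP in 96-byte payloads, i.e. at most three 32-byte UTX units per work request, and shrinks the UTX length of the final request to cover the tail. A standalone worked example of that chunking arithmetic (the 200-byte length is just an assumed input):

```c
/* Worked example of the 96-byte/32-byte chunking used by the control QP path. */
#include <stdio.h>

int main(void)
{
	unsigned len = 200;				/* bytes to write (example) */
	unsigned nr_wqe = len % 96 ? len / 96 + 1 : len / 96;
	unsigned last = len - (nr_wqe - 1) * 96;	/* bytes left for the final WR */
	unsigned utx_len = last % 32 ? last / 32 + 1 : last / 32;

	/* 200 bytes -> 3 WRs carrying 96 + 96 + 8 bytes; the last WR uses 1 UTX unit */
	printf("nr_wqe=%u last=%u utx_len=%u\n", nr_wqe, last, utx_len);
	return 0;
}
```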
692 | ||
273748cc RD |
693 | /* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl_size and pbl_addr |
694 | * OUT: stag index | |
b038ced7 SW |
695 | * TBD: shared memory region support |
696 | */ | |
697 | static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry, | |
698 | u32 *stag, u8 stag_state, u32 pdid, | |
699 | enum tpt_mem_type type, enum tpt_mem_perm perm, | |
273748cc RD |
700 | u32 zbva, u64 to, u32 len, u8 page_size, |
701 | u32 pbl_size, u32 pbl_addr) | |
b038ced7 SW |
702 | { |
703 | int err; | |
704 | struct tpt_entry tpt; | |
705 | u32 stag_idx; | |
706 | u32 wptr; | |
b038ced7 | 707 | |
04b5d028 | 708 | if (cxio_fatal_error(rdev_p)) |
a73efd0a DLR |
709 | return -EIO; |
710 | ||
b038ced7 SW |
711 | stag_state = stag_state > 0; |
712 | stag_idx = (*stag) >> 8; | |
713 | ||
714 | if ((!reset_tpt_entry) && !(*stag != T3_STAG_UNSET)) { | |
715 | stag_idx = cxio_hal_get_stag(rdev_p->rscp); | |
716 | if (!stag_idx) | |
717 | return -ENOMEM; | |
718 | *stag = (stag_idx << 8) | ((*stag) & 0xFF); | |
719 | } | |
720 | PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n", | |
33718363 | 721 | __func__, stag_state, type, pdid, stag_idx); |
b038ced7 | 722 | |
b038ced7 SW |
723 | mutex_lock(&rdev_p->ctrl_qp.lock); |
724 | ||
b038ced7 SW |
725 | /* write TPT entry */ |
726 | if (reset_tpt_entry) | |
727 | memset(&tpt, 0, sizeof(tpt)); | |
728 | else { | |
729 | tpt.valid_stag_pdid = cpu_to_be32(F_TPT_VALID | | |
730 | V_TPT_STAG_KEY((*stag) & M_TPT_STAG_KEY) | | |
731 | V_TPT_STAG_STATE(stag_state) | | |
732 | V_TPT_STAG_TYPE(type) | V_TPT_PDID(pdid)); | |
733 | BUG_ON(page_size >= 28); | |
734 | tpt.flags_pagesize_qpid = cpu_to_be32(V_TPT_PERM(perm) | | |
1c355a6e SW |
735 | ((perm & TPT_MW_BIND) ? F_TPT_MW_BIND_ENABLE : 0) | |
736 | V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) | | |
737 | V_TPT_PAGE_SIZE(page_size)); | |
3c735d48 | 738 | tpt.rsvd_pbl_addr = cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3)); |
b038ced7 SW |
739 | tpt.len = cpu_to_be32(len); |
740 | tpt.va_hi = cpu_to_be32((u32) (to >> 32)); | |
741 | tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL)); | |
742 | tpt.rsvd_bind_cnt_or_pstag = 0; | |
3c735d48 | 743 | tpt.rsvd_pbl_size = cpu_to_be32(V_TPT_PBL_SIZE(pbl_size >> 2)); |
b038ced7 SW |
744 | } |
745 | err = cxio_hal_ctrl_qp_write_mem(rdev_p, | |
746 | stag_idx + | |
747 | (rdev_p->rnic_info.tpt_base >> 5), | |
273748cc | 748 | sizeof(tpt), &tpt); |
b038ced7 SW |
749 | |
750 | /* release the stag index to free pool */ | |
751 | if (reset_tpt_entry) | |
752 | cxio_hal_put_stag(rdev_p->rscp, stag_idx); | |
273748cc | 753 | |
b038ced7 SW |
754 | wptr = rdev_p->ctrl_qp.wptr; |
755 | mutex_unlock(&rdev_p->ctrl_qp.lock); | |
756 | if (!err) | |
757 | if (wait_event_interruptible(rdev_p->ctrl_qp.waitq, | |
758 | SEQ32_GE(rdev_p->ctrl_qp.rptr, | |
759 | wptr))) | |
760 | return -ERESTARTSYS; | |
761 | return err; | |
762 | } | |
763 | ||
273748cc RD |
764 | int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl, |
765 | u32 pbl_addr, u32 pbl_size) | |
766 | { | |
767 | u32 wptr; | |
768 | int err; | |
769 | ||
770 | PDBG("%s *pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n", | |
771 | __func__, pbl_addr, rdev_p->rnic_info.pbl_base, | |
772 | pbl_size); | |
773 | ||
774 | mutex_lock(&rdev_p->ctrl_qp.lock); | |
775 | err = cxio_hal_ctrl_qp_write_mem(rdev_p, pbl_addr >> 5, pbl_size << 3, | |
776 | pbl); | |
777 | wptr = rdev_p->ctrl_qp.wptr; | |
778 | mutex_unlock(&rdev_p->ctrl_qp.lock); | |
779 | if (err) | |
780 | return err; | |
781 | ||
782 | if (wait_event_interruptible(rdev_p->ctrl_qp.waitq, | |
783 | SEQ32_GE(rdev_p->ctrl_qp.rptr, | |
784 | wptr))) | |
785 | return -ERESTARTSYS; | |
786 | ||
787 | return 0; | |
788 | } | |
789 | ||
b038ced7 SW |
790 | int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid, |
791 | enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len, | |
273748cc | 792 | u8 page_size, u32 pbl_size, u32 pbl_addr) |
b038ced7 SW |
793 | { |
794 | *stag = T3_STAG_UNSET; | |
795 | return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm, | |
273748cc | 796 | zbva, to, len, page_size, pbl_size, pbl_addr); |
b038ced7 SW |
797 | } |
798 | ||
799 | int cxio_reregister_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid, | |
800 | enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len, | |
273748cc | 801 | u8 page_size, u32 pbl_size, u32 pbl_addr) |
b038ced7 SW |
802 | { |
803 | return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm, | |
273748cc | 804 | zbva, to, len, page_size, pbl_size, pbl_addr); |
b038ced7 SW |
805 | } |
806 | ||
807 | int cxio_dereg_mem(struct cxio_rdev *rdev_p, u32 stag, u32 pbl_size, | |
808 | u32 pbl_addr) | |
809 | { | |
273748cc RD |
810 | return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, |
811 | pbl_size, pbl_addr); | |
b038ced7 SW |
812 | } |
813 | ||
814 | int cxio_allocate_window(struct cxio_rdev *rdev_p, u32 * stag, u32 pdid) | |
815 | { | |
b038ced7 SW |
816 | *stag = T3_STAG_UNSET; |
817 | return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_MW, 0, 0, 0ULL, 0, 0, | |
273748cc | 818 | 0, 0); |
b038ced7 SW |
819 | } |
820 | ||
821 | int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag) | |
822 | { | |
273748cc RD |
823 | return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, |
824 | 0, 0); | |
b038ced7 SW |
825 | } |
826 | ||
e7e55829 SW |
827 | int cxio_allocate_stag(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr) |
828 | { | |
829 | *stag = T3_STAG_UNSET; | |
830 | return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_NON_SHARED_MR, | |
831 | 0, 0, 0ULL, 0, 0, pbl_size, pbl_addr); | |
832 | } | |
833 | ||
b038ced7 SW |
834 | int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr) |
835 | { | |
836 | struct t3_rdma_init_wr *wqe; | |
837 | struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC); | |
838 | if (!skb) | |
839 | return -ENOMEM; | |
33718363 | 840 | PDBG("%s rdev_p %p\n", __func__, rdev_p); |
b038ced7 SW |
841 | wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe)); |
842 | wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT)); | |
843 | wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) | | |
844 | V_FW_RIWR_LEN(sizeof(*wqe) >> 3)); | |
845 | wqe->wrid.id1 = 0; | |
846 | wqe->qpid = cpu_to_be32(attr->qpid); | |
847 | wqe->pdid = cpu_to_be32(attr->pdid); | |
848 | wqe->scqid = cpu_to_be32(attr->scqid); | |
849 | wqe->rcqid = cpu_to_be32(attr->rcqid); | |
850 | wqe->rq_addr = cpu_to_be32(attr->rq_addr - rdev_p->rnic_info.rqt_base); | |
851 | wqe->rq_size = cpu_to_be32(attr->rq_size); | |
852 | wqe->mpaattrs = attr->mpaattrs; | |
853 | wqe->qpcaps = attr->qpcaps; | |
854 | wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss); | |
f8b0dfd1 | 855 | wqe->rqe_count = cpu_to_be16(attr->rqe_count); |
b496fe82 SW |
856 | wqe->flags_rtr_type = cpu_to_be16(attr->flags | |
857 | V_RTR_TYPE(attr->rtr_type) | | |
858 | V_CHAN(attr->chan)); | |
b038ced7 SW |
859 | wqe->ord = cpu_to_be32(attr->ord); |
860 | wqe->ird = cpu_to_be32(attr->ird); | |
861 | wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr); | |
862 | wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size); | |
de3d3530 | 863 | wqe->irs = cpu_to_be32(attr->irs); |
b038ced7 | 864 | skb->priority = 0; /* 0=>ToeQ; 1=>CtrlQ */ |
04b5d028 | 865 | return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb); |
b038ced7 SW |
866 | } |
867 | ||
868 | void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb) | |
869 | { | |
870 | cxio_ev_cb = ev_cb; | |
871 | } | |
872 | ||
873 | void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb) | |
874 | { | |
875 | cxio_ev_cb = NULL; | |
876 | } | |
877 | ||
878 | static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb) | |
879 | { | |
880 | static int cnt; | |
881 | struct cxio_rdev *rdev_p = NULL; | |
882 | struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data; | |
883 | PDBG("%d: %s cq_id 0x%x cq_ptr 0x%x genbit %0x overflow %0x an %0x" | |
884 | " se %0x notify %0x cqbranch %0x creditth %0x\n", | |
33718363 | 885 | cnt, __func__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg), |
b038ced7 SW |
886 | RSPQ_GENBIT(rsp_msg), RSPQ_OVERFLOW(rsp_msg), RSPQ_AN(rsp_msg), |
887 | RSPQ_SE(rsp_msg), RSPQ_NOTIFY(rsp_msg), RSPQ_CQBRANCH(rsp_msg), | |
888 | RSPQ_CREDIT_THRESH(rsp_msg)); | |
889 | PDBG("CQE: QPID 0x%0x genbit %0x type 0x%0x status 0x%0x opcode %d " | |
890 | "len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n", | |
891 | CQE_QPID(rsp_msg->cqe), CQE_GENBIT(rsp_msg->cqe), | |
892 | CQE_TYPE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe), | |
893 | CQE_OPCODE(rsp_msg->cqe), CQE_LEN(rsp_msg->cqe), | |
894 | CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe)); | |
895 | rdev_p = (struct cxio_rdev *)t3cdev_p->ulp; | |
896 | if (!rdev_p) { | |
33718363 | 897 | PDBG("%s called by t3cdev %p with null ulp\n", __func__, |
b038ced7 SW |
898 | t3cdev_p); |
899 | return 0; | |
900 | } | |
901 | if (CQE_QPID(rsp_msg->cqe) == T3_CTRL_QP_ID) { | |
902 | rdev_p->ctrl_qp.rptr = CQE_WRID_LOW(rsp_msg->cqe) + 1; | |
903 | wake_up_interruptible(&rdev_p->ctrl_qp.waitq); | |
904 | dev_kfree_skb_irq(skb); | |
905 | } else if (CQE_QPID(rsp_msg->cqe) == 0xfff8) | |
906 | dev_kfree_skb_irq(skb); | |
907 | else if (cxio_ev_cb) | |
908 | (*cxio_ev_cb) (rdev_p, skb); | |
909 | else | |
910 | dev_kfree_skb_irq(skb); | |
911 | cnt++; | |
912 | return 0; | |
913 | } | |
914 | ||
915 | /* Caller takes care of locking if needed */ | |
916 | int cxio_rdev_open(struct cxio_rdev *rdev_p) | |
917 | { | |
918 | struct net_device *netdev_p = NULL; | |
919 | int err = 0; | |
920 | if (strlen(rdev_p->dev_name)) { | |
921 | if (cxio_hal_find_rdev_by_name(rdev_p->dev_name)) { | |
922 | return -EBUSY; | |
923 | } | |
881d966b | 924 | netdev_p = dev_get_by_name(&init_net, rdev_p->dev_name); |
b038ced7 SW |
925 | if (!netdev_p) { |
926 | return -EINVAL; | |
927 | } | |
928 | dev_put(netdev_p); | |
929 | } else if (rdev_p->t3cdev_p) { | |
930 | if (cxio_hal_find_rdev_by_t3cdev(rdev_p->t3cdev_p)) { | |
931 | return -EBUSY; | |
932 | } | |
933 | netdev_p = rdev_p->t3cdev_p->lldev; | |
934 | strncpy(rdev_p->dev_name, rdev_p->t3cdev_p->name, | |
935 | T3_MAX_DEV_NAME_LEN); | |
936 | } else { | |
33718363 | 937 | PDBG("%s t3cdev_p or dev_name must be set\n", __func__); |
b038ced7 SW |
938 | return -EINVAL; |
939 | } | |
940 | ||
941 | list_add_tail(&rdev_p->entry, &rdev_list); | |
942 | ||
33718363 | 943 | PDBG("%s opening rnic dev %s\n", __func__, rdev_p->dev_name); |
b038ced7 SW |
944 | memset(&rdev_p->ctrl_qp, 0, sizeof(rdev_p->ctrl_qp)); |
945 | if (!rdev_p->t3cdev_p) | |
5fbf816f | 946 | rdev_p->t3cdev_p = dev2t3cdev(netdev_p); |
b038ced7 | 947 | rdev_p->t3cdev_p->ulp = (void *) rdev_p; |
d1fbe04e SW |
948 | |
949 | err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_EMBEDDED_INFO, | |
950 | &(rdev_p->fw_info)); | |
951 | if (err) { | |
952 | printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n", | |
953 | __func__, rdev_p->t3cdev_p, err); | |
954 | goto err1; | |
955 | } | |
956 | if (G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers) != CXIO_FW_MAJ) { | |
957 | printk(KERN_ERR MOD "fatal firmware version mismatch: " | |
958 | "need version %u but adapter has version %u\n", | |
959 | CXIO_FW_MAJ, | |
960 | G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers)); | |
961 | err = -EINVAL; | |
962 | goto err1; | |
963 | } | |
964 | ||
b038ced7 SW |
965 | err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_GET_PARAMS, |
966 | &(rdev_p->rnic_info)); | |
967 | if (err) { | |
968 | printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n", | |
33718363 | 969 | __func__, rdev_p->t3cdev_p, err); |
b038ced7 SW |
970 | goto err1; |
971 | } | |
972 | err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_PORTS, | |
973 | &(rdev_p->port_info)); | |
974 | if (err) { | |
975 | printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n", | |
33718363 | 976 | __func__, rdev_p->t3cdev_p, err); |
b038ced7 SW |
977 | goto err1; |
978 | } | |
979 | ||
980 | /* | |
981 | * qpshift is the number of bits to shift the qpid left in order | |
982 | * to get the correct address of the doorbell for that qp. | |
983 | */ | |
984 | cxio_init_ucontext(rdev_p, &rdev_p->uctx); | |
985 | rdev_p->qpshift = PAGE_SHIFT - | |
986 | ilog2(65536 >> | |
987 | ilog2(rdev_p->rnic_info.udbell_len >> | |
988 | PAGE_SHIFT)); | |
989 | rdev_p->qpnr = rdev_p->rnic_info.udbell_len >> PAGE_SHIFT; | |
990 | rdev_p->qpmask = (65536 >> ilog2(rdev_p->qpnr)) - 1; | |
991 | PDBG("%s rnic %s info: tpt_base 0x%0x tpt_top 0x%0x num stags %d " | |
992 | "pbl_base 0x%0x pbl_top 0x%0x rqt_base 0x%0x, rqt_top 0x%0x\n", | |
33718363 | 993 | __func__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base, |
b038ced7 SW |
994 | rdev_p->rnic_info.tpt_top, cxio_num_stags(rdev_p), |
995 | rdev_p->rnic_info.pbl_base, | |
996 | rdev_p->rnic_info.pbl_top, rdev_p->rnic_info.rqt_base, | |
997 | rdev_p->rnic_info.rqt_top); | |
998 | PDBG("udbell_len 0x%0x udbell_physbase 0x%lx kdb_addr %p qpshift %lu " | |
999 | "qpnr %d qpmask 0x%x\n", | |
1000 | rdev_p->rnic_info.udbell_len, | |
1001 | rdev_p->rnic_info.udbell_physbase, rdev_p->rnic_info.kdb_addr, | |
1002 | rdev_p->qpshift, rdev_p->qpnr, rdev_p->qpmask); | |
1003 | ||
1004 | err = cxio_hal_init_ctrl_qp(rdev_p); | |
1005 | if (err) { | |
1006 | printk(KERN_ERR "%s error %d initializing ctrl_qp.\n", | |
33718363 | 1007 | __func__, err); |
b038ced7 SW |
1008 | goto err1; |
1009 | } | |
1010 | err = cxio_hal_init_resource(rdev_p, cxio_num_stags(rdev_p), 0, | |
1011 | 0, T3_MAX_NUM_QP, T3_MAX_NUM_CQ, | |
1012 | T3_MAX_NUM_PD); | |
1013 | if (err) { | |
1014 | printk(KERN_ERR "%s error %d initializing hal resources.\n", | |
33718363 | 1015 | __func__, err); |
b038ced7 SW |
1016 | goto err2; |
1017 | } | |
1018 | err = cxio_hal_pblpool_create(rdev_p); | |
1019 | if (err) { | |
1020 | printk(KERN_ERR "%s error %d initializing pbl mem pool.\n", | |
33718363 | 1021 | __func__, err); |
b038ced7 SW |
1022 | goto err3; |
1023 | } | |
1024 | err = cxio_hal_rqtpool_create(rdev_p); | |
1025 | if (err) { | |
1026 | printk(KERN_ERR "%s error %d initializing rqt mem pool.\n", | |
33718363 | 1027 | __func__, err); |
b038ced7 SW |
1028 | goto err4; |
1029 | } | |
1030 | return 0; | |
1031 | err4: | |
1032 | cxio_hal_pblpool_destroy(rdev_p); | |
1033 | err3: | |
1034 | cxio_hal_destroy_resource(rdev_p->rscp); | |
1035 | err2: | |
1036 | cxio_hal_destroy_ctrl_qp(rdev_p); | |
1037 | err1: | |
ffc40c64 | 1038 | rdev_p->t3cdev_p->ulp = NULL; |
b038ced7 SW |
1039 | list_del(&rdev_p->entry); |
1040 | return err; | |
1041 | } | |
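The qpshift/qpnr/qpmask values computed above describe the user doorbell geometry: how many doorbell pages the BAR exposes, how far apart consecutive QP doorbells sit, and which QP IDs share a page. A standalone worked example with assumed values (4 KB pages, 1 MB user doorbell region):

```c
/* Worked example of the doorbell geometry math; PAGE_SHIFT and udbell_len are assumed. */
#include <stdio.h>

static unsigned ilog2u(unsigned v)	/* floor(log2(v)), v > 0 */
{
	unsigned r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned page_shift = 12;		/* assume 4 KB pages */
	unsigned udbell_len = 1 << 20;		/* assume a 1 MB doorbell region */
	unsigned qpnr = udbell_len >> page_shift;
	unsigned qpshift = page_shift - ilog2u(65536 >> ilog2u(qpnr));
	unsigned qpmask = (65536 >> ilog2u(qpnr)) - 1;

	/* prints: qpnr=256 qpshift=4 qpmask=0xff -> 256 QP doorbells per page, 16 bytes apart */
	printf("qpnr=%u qpshift=%u qpmask=0x%x\n", qpnr, qpshift, qpmask);
	return 0;
}
```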
1042 | ||
1043 | void cxio_rdev_close(struct cxio_rdev *rdev_p) | |
1044 | { | |
1045 | if (rdev_p) { | |
1046 | cxio_hal_pblpool_destroy(rdev_p); | |
1047 | cxio_hal_rqtpool_destroy(rdev_p); | |
1048 | list_del(&rdev_p->entry); | |
b038ced7 SW |
1049 | cxio_hal_destroy_ctrl_qp(rdev_p); |
1050 | cxio_hal_destroy_resource(rdev_p->rscp); | |
04b5d028 | 1051 | rdev_p->t3cdev_p->ulp = NULL; |
b038ced7 SW |
1052 | } |
1053 | } | |
1054 | ||
1055 | int __init cxio_hal_init(void) | |
1056 | { | |
1057 | if (cxio_hal_init_rhdl_resource(T3_MAX_NUM_RI)) | |
1058 | return -ENOMEM; | |
1059 | t3_register_cpl_handler(CPL_ASYNC_NOTIF, cxio_hal_ev_handler); | |
1060 | return 0; | |
1061 | } | |
1062 | ||
1063 | void __exit cxio_hal_exit(void) | |
1064 | { | |
1065 | struct cxio_rdev *rdev, *tmp; | |
1066 | ||
1067 | t3_register_cpl_handler(CPL_ASYNC_NOTIF, NULL); | |
1068 | list_for_each_entry_safe(rdev, tmp, &rdev_list, entry) | |
1069 | cxio_rdev_close(rdev); | |
1070 | cxio_hal_destroy_rhdl_resource(); | |
1071 | } | |
1072 | ||
2b540355 | 1073 | static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq) |
b038ced7 SW |
1074 | { |
1075 | struct t3_swsq *sqp; | |
1076 | __u32 ptr = wq->sq_rptr; | |
1077 | int count = Q_COUNT(wq->sq_rptr, wq->sq_wptr); | |
1078 | ||
1079 | sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); | |
1080 | while (count--) | |
1081 | if (!sqp->signaled) { | |
1082 | ptr++; | |
1083 | sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); | |
1084 | } else if (sqp->complete) { | |
1085 | ||
1086 | /* | |
1087 | * Insert this completed cqe into the swcq. | |
1088 | */ | |
1089 | PDBG("%s moving cqe into swcq sq idx %ld cq idx %ld\n", | |
33718363 | 1090 | __func__, Q_PTR2IDX(ptr, wq->sq_size_log2), |
b038ced7 SW |
1091 | Q_PTR2IDX(cq->sw_wptr, cq->size_log2)); |
1092 | sqp->cqe.header |= htonl(V_CQE_SWCQE(1)); | |
1093 | *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) | |
1094 | = sqp->cqe; | |
1095 | cq->sw_wptr++; | |
1096 | sqp->signaled = 0; | |
1097 | break; | |
1098 | } else | |
1099 | break; | |
1100 | } | |
1101 | ||
2b540355 AB |
1102 | static void create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe, |
1103 | struct t3_cqe *read_cqe) | |
b038ced7 SW |
1104 | { |
1105 | read_cqe->u.scqe.wrid_hi = wq->oldest_read->sq_wptr; | |
1106 | read_cqe->len = wq->oldest_read->read_len; | |
1107 | read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(*hw_cqe)) | | |
1108 | V_CQE_SWCQE(SW_CQE(*hw_cqe)) | | |
1109 | V_CQE_OPCODE(T3_READ_REQ) | | |
1110 | V_CQE_TYPE(1)); | |
1111 | } | |
1112 | ||
1113 | /* | |
1114 | * Return a ptr to the next read wr in the SWSQ or NULL. | |
1115 | */ | |
2b540355 | 1116 | static void advance_oldest_read(struct t3_wq *wq) |
b038ced7 SW |
1117 | { |
1118 | ||
1119 | u32 rptr = wq->oldest_read - wq->sq + 1; | |
1120 | u32 wptr = Q_PTR2IDX(wq->sq_wptr, wq->sq_size_log2); | |
1121 | ||
1122 | while (Q_PTR2IDX(rptr, wq->sq_size_log2) != wptr) { | |
1123 | wq->oldest_read = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2); | |
1124 | ||
1125 | if (wq->oldest_read->opcode == T3_READ_REQ) | |
1126 | return; | |
1127 | rptr++; | |
1128 | } | |
1129 | wq->oldest_read = NULL; | |
1130 | } | |
1131 | ||
1132 | /* | |
1133 | * cxio_poll_cq | |
1134 | * | |
1135 | * Caller must: | |
1136 | * check the validity of the first CQE, | |
1137 | * supply the wq associated with the qpid. | |
1138 | * | |
1139 | * credit: cq credit to return to sge. | |
1140 | * cqe_flushed: 1 iff the CQE is flushed. | |
1141 | * cqe: copy of the polled CQE. | |
1142 | * | |
1143 | * return value: | |
1144 | * 0 CQE returned, | |
1145 | * -1 CQE skipped, try again. | |
1146 | */ | |
1147 | int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe, | |
1148 | u8 *cqe_flushed, u64 *cookie, u32 *credit) | |
1149 | { | |
1150 | int ret = 0; | |
1151 | struct t3_cqe *hw_cqe, read_cqe; | |
1152 | ||
1153 | *cqe_flushed = 0; | |
1154 | *credit = 0; | |
1155 | hw_cqe = cxio_next_cqe(cq); | |
1156 | ||
1157 | PDBG("%s CQE OOO %d qpid 0x%0x genbit %d type %d status 0x%0x" | |
1158 | " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n", | |
33718363 | 1159 | __func__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe), |
b038ced7 SW |
1160 | CQE_GENBIT(*hw_cqe), CQE_TYPE(*hw_cqe), CQE_STATUS(*hw_cqe), |
1161 | CQE_OPCODE(*hw_cqe), CQE_LEN(*hw_cqe), CQE_WRID_HI(*hw_cqe), | |
1162 | CQE_WRID_LOW(*hw_cqe)); | |
1163 | ||
1164 | /* | |
1165 | * skip CQEs not affiliated with a QP. | |
1166 | */ | |
1167 | if (wq == NULL) { | |
1168 | ret = -1; | |
1169 | goto skip_cqe; | |
1170 | } | |
1171 | ||
1172 | /* | |
1173 | * Gotta tweak READ completions: | |
1174 | * 1) the cqe doesn't contain the sq_wptr from the wr. | |
1175 | * 2) opcode not reflected from the wr. | |
1176 | * 3) read_len not reflected from the wr. | |
1177 | * 4) cq_type is RQ_TYPE not SQ_TYPE. | |
1178 | */ | |
1179 | if (RQ_TYPE(*hw_cqe) && (CQE_OPCODE(*hw_cqe) == T3_READ_RESP)) { | |
1180 | ||
f8b0dfd1 SW |
1181 | /* |
1182 | * If this is an unsolicited read response, then the read | |
1183 | * was generated by the kernel driver as part of peer-2-peer | |
1184 | * connection setup. So ignore the completion. | |
1185 | */ | |
1186 | if (!wq->oldest_read) { | |
1187 | if (CQE_STATUS(*hw_cqe)) | |
1188 | wq->error = 1; | |
1189 | ret = -1; | |
1190 | goto skip_cqe; | |
1191 | } | |
1192 | ||
b038ced7 SW |
1193 | /* |
1194 | * Don't write to the HWCQ, so create a new read req CQE | |
1195 | * in local memory. | |
1196 | */ | |
1197 | create_read_req_cqe(wq, hw_cqe, &read_cqe); | |
1198 | hw_cqe = &read_cqe; | |
1199 | advance_oldest_read(wq); | |
1200 | } | |
1201 | ||
1202 | /* | |
1203 | * T3A: Discard TERMINATE CQEs. | |
1204 | */ | |
1205 | if (CQE_OPCODE(*hw_cqe) == T3_TERMINATE) { | |
1206 | ret = -1; | |
1207 | wq->error = 1; | |
1208 | goto skip_cqe; | |
1209 | } | |
1210 | ||
1211 | if (CQE_STATUS(*hw_cqe) || wq->error) { | |
1212 | *cqe_flushed = wq->error; | |
1213 | wq->error = 1; | |
1214 | ||
1215 | /* | |
1216 | * T3A inserts errors into the CQE. We cannot return | |
1217 | * these as work completions. | |
1218 | */ | |
1219 | /* incoming write failures */ | |
1220 | if ((CQE_OPCODE(*hw_cqe) == T3_RDMA_WRITE) | |
1221 | && RQ_TYPE(*hw_cqe)) { | |
1222 | ret = -1; | |
1223 | goto skip_cqe; | |
1224 | } | |
1225 | /* incoming read request failures */ | |
1226 | if ((CQE_OPCODE(*hw_cqe) == T3_READ_RESP) && SQ_TYPE(*hw_cqe)) { | |
1227 | ret = -1; | |
1228 | goto skip_cqe; | |
1229 | } | |
1230 | ||
1231 | /* incoming SEND with no receive posted failures */ | |
42fb61f0 | 1232 | if (CQE_SEND_OPCODE(*hw_cqe) && RQ_TYPE(*hw_cqe) && |
b038ced7 SW |
1233 | Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) { |
1234 | ret = -1; | |
1235 | goto skip_cqe; | |
1236 | } | |
42fb61f0 | 1237 | BUG_ON((*cqe_flushed == 0) && !SW_CQE(*hw_cqe)); |
b038ced7 SW |
1238 | goto proc_cqe; |
1239 | } | |
1240 | ||
1241 | /* | |
1242 | * RECV completion. | |
1243 | */ | |
1244 | if (RQ_TYPE(*hw_cqe)) { | |
1245 | ||
1246 | /* | |
1247 | * HW only validates 4 bits of MSN. So we must validate that | |
1248 | * the MSN in the SEND is the next expected MSN. If it's not, | |
1249 | * then we complete this with TPT_ERR_MSN and mark the wq in | |
1250 | * error. | |
1251 | */ | |
42fb61f0 SW |
1252 | |
1253 | if (Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) { | |
1254 | wq->error = 1; | |
1255 | ret = -1; | |
1256 | goto skip_cqe; | |
1257 | } | |
1258 | ||
b038ced7 SW |
1259 | if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) { |
1260 | wq->error = 1; | |
1261 | hw_cqe->header |= htonl(V_CQE_STATUS(TPT_ERR_MSN)); | |
1262 | goto proc_cqe; | |
1263 | } | |
1264 | goto proc_cqe; | |
1265 | } | |
1266 | ||
1267 | /* | |
1268 | * If we get here its a send completion. | |
1269 | * | |
1270 | * Handle out of order completion. These get stuffed | |
1271 | * in the SW SQ. Then the SW SQ is walked to move any | |
1272 | * now in-order completions into the SW CQ. This handles | |
1273 | * 2 cases: | |
1274 | * 1) reaping unsignaled WRs when the first subsequent | |
1275 | * signaled WR is completed. | |
1276 | * 2) out of order read completions. | |
1277 | */ | |
1278 | if (!SW_CQE(*hw_cqe) && (CQE_WRID_SQ_WPTR(*hw_cqe) != wq->sq_rptr)) { | |
1279 | struct t3_swsq *sqp; | |
1280 | ||
1281 | PDBG("%s out of order completion going in swsq at idx %ld\n", | |
33718363 | 1282 | __func__, |
b038ced7 SW |
1283 | Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2)); |
1284 | sqp = wq->sq + | |
1285 | Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2); | |
1286 | sqp->cqe = *hw_cqe; | |
1287 | sqp->complete = 1; | |
1288 | ret = -1; | |
1289 | goto flush_wq; | |
1290 | } | |
1291 | ||
1292 | proc_cqe: | |
1293 | *cqe = *hw_cqe; | |
1294 | ||
1295 | /* | |
1296 | * Reap the associated WR(s) that are freed up with this | |
1297 | * completion. | |
1298 | */ | |
1299 | if (SQ_TYPE(*hw_cqe)) { | |
1300 | wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe); | |
33718363 | 1301 | PDBG("%s completing sq idx %ld\n", __func__, |
b038ced7 | 1302 | Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2)); |
4ab928f6 | 1303 | *cookie = wq->sq[Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2)].wr_id; |
b038ced7 SW |
1304 | wq->sq_rptr++; |
1305 | } else { | |
33718363 | 1306 | PDBG("%s completing rq idx %ld\n", __func__, |
b038ced7 | 1307 | Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)); |
4ab928f6 SW |
1308 | *cookie = wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].wr_id; |
1309 | if (wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].pbl_addr) | |
1310 | cxio_hal_pblpool_free(wq->rdev, | |
1311 | wq->rq[Q_PTR2IDX(wq->rq_rptr, | |
1312 | wq->rq_size_log2)].pbl_addr, T3_STAG0_PBL_SIZE); | |
42fb61f0 | 1313 | BUG_ON(Q_EMPTY(wq->rq_rptr, wq->rq_wptr)); |
b038ced7 SW |
1314 | wq->rq_rptr++; |
1315 | } | |
1316 | ||
1317 | flush_wq: | |
1318 | /* | |
1319 | * Flush any completed cqes that are now in-order. | |
1320 | */ | |
1321 | flush_completed_wrs(wq, cq); | |
1322 | ||
1323 | skip_cqe: | |
1324 | if (SW_CQE(*hw_cqe)) { | |
1325 | PDBG("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n", | |
33718363 | 1326 | __func__, cq, cq->cqid, cq->sw_rptr); |
b038ced7 SW |
1327 | ++cq->sw_rptr; |
1328 | } else { | |
1329 | PDBG("%s cq %p cqid 0x%x skip hw cqe rptr 0x%x\n", | |
33718363 | 1330 | __func__, cq, cq->cqid, cq->rptr); |
b038ced7 SW |
1331 | ++cq->rptr; |
1332 | ||
1333 | /* | |
1334 | * T3A: compute credits. | |
1335 | */ | |
1336 | if (((cq->rptr - cq->wptr) > (1 << (cq->size_log2 - 1))) | |
1337 | || ((cq->rptr - cq->wptr) >= 128)) { | |
1338 | *credit = cq->rptr - cq->wptr; | |
1339 | cq->wptr = cq->rptr; | |
1340 | } | |
1341 | } | |
1342 | return ret; | |
1343 | } |
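The T3A credit computation at the end of cxio_poll_cq batches credits instead of returning one per CQE: nothing is handed back until more than half the CQ (or 128 entries) has been consumed, at which point the whole count is reported and wptr catches up to rptr. The caller then returns that count to the SGE via cxio_hal_cq_op() with CQ_CREDIT_UPDATE. A standalone sketch of when the batching fires, with an assumed 64-entry CQ:

```c
/* Sketch of the credit batching condition; the CQ size is an assumed example. */
#include <stdio.h>

int main(void)
{
	unsigned size_log2 = 6;			/* assume a 64-entry CQ */
	unsigned rptr = 0, wptr = 0, credit;

	for (unsigned polled = 1; polled <= 40; polled++) {
		rptr++;				/* one CQE consumed by the poll */
		credit = 0;
		if ((rptr - wptr) > (1U << (size_log2 - 1)) ||
		    (rptr - wptr) >= 128) {
			credit = rptr - wptr;	/* batch to hand back to the SGE */
			wptr = rptr;
		}
		if (credit)
			printf("after %u CQEs: return %u credits\n", polled, credit);
	}
	return 0;
}
```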