drivers/infiniband/hw/cxgb3/iwch_provider.c (deliverable/linux.git @ 6743e9ddb088a1b03810ca1ecfbdcc4426759e53)
/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "cxio_hal.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
#include "iwch_user.h"
#include "common.h"

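/*
 * iWARP provides no address handle, multicast, or MAD services, so
 * the corresponding verbs below are simple -ENOSYS stubs.
 */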
static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}

static int iwch_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}

static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    const struct ib_wc *in_wc,
			    const struct ib_grh *in_grh,
			    const struct ib_mad_hdr *in_mad,
			    size_t in_mad_size,
			    struct ib_mad_hdr *out_mad,
			    size_t *out_mad_size,
			    u16 *out_mad_pkey_index)
{
	return -ENOSYS;
}

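/*
 * ucontext setup/teardown.  Each user context carries a list of
 * iwch_mm_entry records describing queue memory that userspace may
 * mmap(); teardown frees any entries never claimed by iwch_mmap()
 * before releasing the HAL-side context.
 */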
static int iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	PDBG("%s context %p\n", __func__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}

static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
					       struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	PDBG("%s ibdev %p\n", __func__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return &context->ibucontext;
}

static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	PDBG("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	kfree(chp);
	return 0;
}

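/*
 * CQ creation: the requested depth is padded (on T3A, to absorb CQEs
 * inserted for errors) and rounded up to a power of two before the
 * HAL call.  For user CQs, an mmap key is published so libcxgb3 can
 * map the CQ memory, and the response is trimmed to the v0 size for
 * downlevel libraries.
 */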
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
				    const struct ib_cq_init_attr *attr,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;
	static int warned;
	size_t resplen;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *	TERMINATE,
		 *	incoming RDMA WRITE failures,
		 *	incoming RDMA READ REQUEST failures.
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		if (udata->outlen < sizeof(uresp)) {
			if (!warned++)
				printk(KERN_WARNING MOD "Warning - "
				       "downlevel libcxgb3 (non-fatal).\n");
			mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
					     sizeof(struct t3_cqe));
			resplen = sizeof(struct iwch_create_cq_resp_v0);
		} else {
			mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
					     sizeof(struct t3_cqe));
			uresp.memsize = mm->len;
			uresp.reserved = 0;
			resplen = sizeof(uresp);
		}
		if (ib_copy_to_udata(udata, &uresp, resplen)) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}

static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
#ifdef notyet
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);

	/* We don't downsize... */
	if (cqe <= cq->cqe)
		return 0;

	/* create new t3_cq with new size */
	cqe = roundup_pow_of_two(cqe+1);
	newcq.size_log2 = ilog2(cqe);

	/* Don't allow resize to less than the current wce count */
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
		return -ENOMEM;
	}

	/* Quiesce all QPs using this CQ */
	ret = iwch_quiesce_qps(chp);
	if (ret) {
		return ret;
	}

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret) {
		return ret;
	}

	/* copy CQEs */
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
	       sizeof(struct t3_cqe));

	/* old iwch_qp gets new t3_cq but keeps old cqid */
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	/* resize new t3_cq to update the HW context */
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;

	/* destroy old t3_cq */
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret) {
		printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
		       __func__, ret);
	}

	/* add user hooks here */

	/* resume qps */
	ret = iwch_resume_qps(chp);
	return ret;
#else
	return -ENOSYS;
#endif
}

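/*
 * Re-arm the CQ for notification.  For user CQs the current read
 * pointer lives in user memory and is fetched with get_user() before
 * ringing the doorbell.  A positive return from the HAL means events
 * were missed; that is reported to the caller only if
 * IB_CQ_REPORT_MISSED_EVENTS was requested.
 */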
static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	unsigned long flag;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (get_user(rptr, chp->user_rptr_addr))
			return -EFAULT;
		spin_lock_irqsave(&chp->lock, flag);
		chp->cq.rptr = rptr;
	} else
		spin_lock_irqsave(&chp->lock, flag);
	PDBG("%s rptr 0x%x\n", __func__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (err < 0)
		printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}

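/*
 * mmap() rendezvous: the page offset encodes a key handed out at
 * create time, and the matching iwch_mm_entry supplies the physical
 * address.  Doorbell pages are mapped uncached and write-only;
 * WQ/CQ queues are ordinary contiguous DMA memory.
 */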
static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct cxio_rdev *rdev_p;
	int ret = 0;
	struct iwch_mm_entry *mm;
	struct iwch_ucontext *ucontext;
	u64 addr;

	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
	     key, len);

	if (vma->vm_start & (PAGE_SIZE-1)) {
		return -EINVAL;
	}

	rdev_p = &(to_iwch_dev(context->device)->rdev);
	ucontext = to_iwch_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
	    (addr < (rdev_p->rnic_info.udbell_physbase +
		     rdev_p->rnic_info.udbell_len))) {

		/*
		 * Map T3 DB register.
		 */
		if (vma->vm_flags & VM_READ) {
			return -EPERM;
		}

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_flags &= ~VM_MAYREAD;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}

static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	kfree(php);
	return 0;
}

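/*
 * PD allocation just reserves a PDID from the HAL resource pool; for
 * user PDs the PDID is copied back to the library.
 */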
static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	rhp = to_iwch_dev(ibdev);
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = kzalloc(sizeof(*php), GFP_KERNEL);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof(__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
	return &php->ibpd;
}

static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __func__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(ib_mr);
	kfree(mhp->pages);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	iwch_free_pbl(mhp);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}

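/*
 * DMA MR: T3 STags carry only 32 bits of length, so the full 4GB
 * space is described with 64MB (1 << 26) pages, and the MR is
 * refused outright on platforms whose physical addresses are wider
 * than 32 bits.
 */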
static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	const u64 total_size = 0xffffffff;
	const u64 mask = (total_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct iwch_pd *php = to_iwch_pd(pd);
	struct iwch_dev *rhp = php->rhp;
	struct iwch_mr *mhp;
	__be64 *page_list;
	int shift = 26, npages, ret, i;

	PDBG("%s ib_pd %p\n", __func__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	if (sizeof(phys_addr_t) > 4) {
		pr_warn_once(MOD "Cannot support dma_mrs on this platform.\n");
		return ERR_PTR(-ENOTSUPP);
	}

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	npages = (total_size + (1ULL << shift) - 1) >> shift;
	if (!npages) {
		ret = -EINVAL;
		goto err;
	}

	page_list = kmalloc_array(npages, sizeof(u64), GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < npages; i++)
		page_list[i] = cpu_to_be64((u64)i << shift);

	PDBG("%s mask 0x%llx shift %d len %lld pbl_size %d\n",
	     __func__, mask, shift, total_size, npages);

	ret = iwch_alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err_pbl;
	}

	ret = iwch_write_pbl(mhp, page_list, npages, 0);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	kfree(mhp);
	return ERR_PTR(ret);
}

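/*
 * User MR registration: pin the user range with ib_umem_get(), then
 * stream the DMA addresses into the PBL one page-sized batch at a
 * time before programming the TPT entry.
 */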
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, k, entry;
	int err = 0;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;
	struct scatterlist *sg;

	PDBG("%s ib_pd %p\n", __func__, pd);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = mhp->umem->nmap;

	err = iwch_alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
						 mhp->umem->page_size * k);
			if (i == PAGE_SIZE / sizeof(*pages)) {
				err = iwch_write_pbl(mhp, pages, i, n);
				if (err)
					goto pbl_done;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		err = iwch_write_pbl(mhp, pages, i, n);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) length;

	err = iwch_register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	if (udata && !t3a_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
				  rhp->rdev.rnic_info.pbl_base) >> 3;
		PDBG("%s user resp pbl_addr 0x%x\n", __func__,
		     uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			/*
			 * iwch_dereg_mr() already releases the umem and
			 * frees mhp, so don't fall through to the error
			 * path below, which would touch both again.
			 */
			iwch_dereg_mr(&mhp->ibmr);
			return ERR_PTR(-EFAULT);
		}
	}

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}

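/*
 * Only type 1 memory windows are supported.  The window is backed by
 * a HAL-allocated STag and tracked by its mmid (stag >> 8).
 */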
static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	if (type != IB_MW_TYPE_1)
		return ERR_PTR(-EINVAL);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	kfree(mhp);
	return 0;
}

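/*
 * Fast-register MR support: allocate an STag plus a PBL deep enough
 * for max_num_sg entries; iwch_map_mr_sg() later fills mhp->pages
 * through the generic ib_sg_to_pages() walker.
 */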
static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
				   enum ib_mr_type mr_type,
				   u32 max_num_sg)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > T3_MAX_FASTREG_DEPTH)
		return ERR_PTR(-EINVAL);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		/* Set an error code: ret was still 0 here, so the
		 * error exit would have returned ERR_PTR(0) == NULL. */
		ret = -ENOMEM;
		goto err;
	}

	mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mhp->pages) {
		ret = -ENOMEM;
		goto pl_err;
	}

	mhp->rhp = rhp;
	ret = iwch_alloc_pbl(mhp, max_num_sg);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = max_num_sg;
	ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_NON_SHARED_MR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		/* Same NULL-return hazard as above: ret is 0 when the
		 * idr insert fails, so set an error code explicitly. */
		ret = -ENOMEM;
		goto err3;
	}

	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
err2:
	iwch_free_pbl(mhp);
err1:
	kfree(mhp->pages);
pl_err:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

static int iwch_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct iwch_mr *mhp = to_iwch_mr(ibmr);

	if (unlikely(mhp->npages == mhp->attr.pbl_size))
		return -ENOMEM;

	mhp->pages[mhp->npages++] = addr;

	return 0;
}

static int iwch_map_mr_sg(struct ib_mr *ibmr,
			  struct scatterlist *sg,
			  int sg_nents)
{
	struct iwch_mr *mhp = to_iwch_mr(ibmr);

	mhp->npages = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, iwch_set_page);
}

static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
	     ib_qp, qhp->wq.qpid, qhp);
	kfree(qhp);
	return 0;
}

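/*
 * QP creation: the RQ depth must be a power of two strictly greater
 * than the requested entry count (minimum 16 on T3), and kernel QPs
 * get extra WQ headroom because fastreg WRs can consume two slots.
 * User QPs receive two mmap keys, one for the work queue and one for
 * the doorbell page.
 */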
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *attrs,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	PDBG("%s ib_pd %p\n", __func__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two.  However, all the code assumes
	 * they are.  EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);

	/*
	 * Kernel users need more wq space for fastreg WRs which can take
	 * 2 WR fragments.
	 */
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
		wqsize = roundup_pow_of_two(rqsize +
				roundup_pow_of_two(attrs->cap.max_send_wr * 2));
	PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
	     wqsize, sqsize, rqsize);
	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	attrs->cap.max_inline_data = T3_MAX_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time.  The CM sets them via a QP modify.
	 * Need to fix...  I think the CM should.
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
				ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	if (udata) {

		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof(*mm1), GFP_KERNEL);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL);
		if (!mm2) {
			kfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			kfree(mm1);
			kfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = virt_to_phys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof(union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	init_timer(&(qhp->timer));
	PDBG("%s sq_num_entries %d, rq_num_entries %d "
	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
	     __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
	     1 << qhp->wq.size_log2, qhp->wq.rq_addr);
	return &qhp->ibqp;
}

static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			     int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof(attrs));
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}

void iwch_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_iwch_qp(qp)->refcnt));
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
		wake_up(&(to_iwch_qp(qp)->wait));
}

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}

static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 *pkey)
{
	PDBG("%s ibdev %p\n", __func__, ibdev);
	*pkey = 0;
	return 0;
}

static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;

	PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
	     __func__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	BUG_ON(port == 0 || port > 2);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
	return 0;
}

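/*
 * Pack the firmware version into the 64-bit fw_ver device attribute.
 * The ethtool string is assumed to have the form "T<maj>.<min>.<mic>";
 * the leading character is skipped before the three numeric fields
 * are parsed.
 */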
static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
{
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
	char *cp, *next;
	unsigned int fw_maj, fw_min, fw_mic;

	lldev->ethtool_ops->get_drvinfo(lldev, &info);

	next = info.fw_version + 1;
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_maj);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_min);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_mic);

	return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
	       (fw_mic & 0xffff);
}

static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			     struct ib_udata *uhw)
{
	struct iwch_dev *dev;

	PDBG("%s ibdev %p\n", __func__, ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	dev = to_iwch_dev(ibdev);
	memset(props, 0, sizeof(*props));
	memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	props->hw_ver = dev->rdev.t3cdev_p->type;
	props->fw_ver = fw_vers_string_to_u64(dev);
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
	props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
	props->max_mr_size = dev->attr.max_mr_size;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;

	return 0;
}

static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	struct iwch_dev *dev;
	struct net_device *netdev;
	struct in_device *inetdev;

	PDBG("%s ibdev %p\n", __func__, ibdev);

	dev = to_iwch_dev(ibdev);
	netdev = dev->rdev.port_info.lldevs[port-1];

	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	if (netdev->mtu >= 4096)
		props->active_mtu = IB_MTU_4096;
	else if (netdev->mtu >= 2048)
		props->active_mtu = IB_MTU_2048;
	else if (netdev->mtu >= 1024)
		props->active_mtu = IB_MTU_1024;
	else if (netdev->mtu >= 512)
		props->active_mtu = IB_MTU_512;
	else
		props->active_mtu = IB_MTU_256;

	if (!netif_carrier_ok(netdev))
		props->state = IB_PORT_DOWN;
	else {
		inetdev = in_dev_get(netdev);
		if (inetdev) {
			if (inetdev->ifa_list)
				props->state = IB_PORT_ACTIVE;
			else
				props->state = IB_PORT_INIT;
			in_dev_put(inetdev);
		} else
			props->state = IB_PORT_INIT;
	}

	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = IB_SPEED_DDR;
	props->max_msg_sz = -1;

	return 0;
}

static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
}

static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.fw_version);
}

static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
		       iwch_dev->rdev.rnic_info.pdev->device);
}

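/*
 * Protocol statistics come from the lower-level driver: the
 * RDMA_GET_MIB control hook returns the TP MIB counters, which are
 * repacked into the iWARP member of union rdma_protocol_stats.
 */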
static int iwch_get_mib(struct ib_device *ibdev,
			union rdma_protocol_stats *stats)
{
	struct iwch_dev *dev;
	struct tp_mib_stats m;
	int ret;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	dev = to_iwch_dev(ibdev);
	ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
	if (ret)
		return -ENOSYS;

	memset(stats, 0, sizeof(*stats));
	stats->iw.ipInReceives = ((u64) m.ipInReceive_hi << 32) +
				 m.ipInReceive_lo;
	stats->iw.ipInHdrErrors = ((u64) m.ipInHdrErrors_hi << 32) +
				  m.ipInHdrErrors_lo;
	stats->iw.ipInAddrErrors = ((u64) m.ipInAddrErrors_hi << 32) +
				   m.ipInAddrErrors_lo;
	stats->iw.ipInUnknownProtos = ((u64) m.ipInUnknownProtos_hi << 32) +
				      m.ipInUnknownProtos_lo;
	stats->iw.ipInDiscards = ((u64) m.ipInDiscards_hi << 32) +
				 m.ipInDiscards_lo;
	stats->iw.ipInDelivers = ((u64) m.ipInDelivers_hi << 32) +
				 m.ipInDelivers_lo;
	stats->iw.ipOutRequests = ((u64) m.ipOutRequests_hi << 32) +
				  m.ipOutRequests_lo;
	stats->iw.ipOutDiscards = ((u64) m.ipOutDiscards_hi << 32) +
				  m.ipOutDiscards_lo;
	stats->iw.ipOutNoRoutes = ((u64) m.ipOutNoRoutes_hi << 32) +
				  m.ipOutNoRoutes_lo;
	stats->iw.ipReasmTimeout = (u64) m.ipReasmTimeout;
	stats->iw.ipReasmReqds = (u64) m.ipReasmReqds;
	stats->iw.ipReasmOKs = (u64) m.ipReasmOKs;
	stats->iw.ipReasmFails = (u64) m.ipReasmFails;
	stats->iw.tcpActiveOpens = (u64) m.tcpActiveOpens;
	stats->iw.tcpPassiveOpens = (u64) m.tcpPassiveOpens;
	stats->iw.tcpAttemptFails = (u64) m.tcpAttemptFails;
	stats->iw.tcpEstabResets = (u64) m.tcpEstabResets;
	stats->iw.tcpOutRsts = (u64) m.tcpOutRsts;
	stats->iw.tcpCurrEstab = (u64) m.tcpCurrEstab;
	stats->iw.tcpInSegs = ((u64) m.tcpInSegs_hi << 32) +
			      m.tcpInSegs_lo;
	stats->iw.tcpOutSegs = ((u64) m.tcpOutSegs_hi << 32) +
			       m.tcpOutSegs_lo;
	stats->iw.tcpRetransSegs = ((u64) m.tcpRetransSeg_hi << 32) +
				   m.tcpRetransSeg_lo;
	stats->iw.tcpInErrs = ((u64) m.tcpInErrs_hi << 32) +
			      m.tcpInErrs_lo;
	stats->iw.tcpRtoMin = (u64) m.tcpRtoMin;
	stats->iw.tcpRtoMax = (u64) m.tcpRtoMax;
	return 0;
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *iwch_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id,
};

static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = iwch_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	return 0;
}

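/*
 * Register with the IB core: fill in the verbs function table,
 * advertise STag 0 as the local DMA lkey, hook up the iWARP CM
 * operations, and create the sysfs attribute files.
 */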
int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	int i;

	PDBG("%s iwch_dev %p\n", __func__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	dev->ibdev.owner = THIS_MODULE;
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
				IB_DEVICE_MEM_WINDOW |
				IB_DEVICE_MEM_MGT_EXTENSIONS;

	/* cxgb3 supports STag 0. */
	dev->ibdev.local_dma_lkey = 0;

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;
	dev->ibdev.alloc_mr = iwch_alloc_mr;
	dev->ibdev.map_mr_sg = iwch_map_mr_sg;
	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;
	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;
	dev->ibdev.get_protocol_stats = iwch_get_mib;
	dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
	dev->ibdev.get_port_immutable = iwch_port_immutable;

	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		goto bail1;

	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
		ret = device_create_file(&dev->ibdev.dev,
					 iwch_class_attributes[i]);
		if (ret)
			goto bail2;
	}
	return 0;
bail2:
	ib_unregister_device(&dev->ibdev);
bail1:
	kfree(dev->ibdev.iwcm);
	return ret;
}

void iwch_unregister_device(struct iwch_dev *dev)
{
	int i;

	PDBG("%s iwch_dev %p\n", __func__, dev);
	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
		device_remove_file(&dev->ibdev.dev,
				   iwch_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);
}