/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "cxio_hal.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
#include "iwch_user.h"

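/*
 * T3 is an iWARP RNIC, so several RDMA verbs have no hardware backing:
 * there are no address handles or UD QPs, no multicast, and no MAD
 * processing.  The stubs below simply fail those verbs with -ENOSYS.
 */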
static int iwch_modify_port(struct ib_device *ibdev,
			    u8 port, int port_modify_mask,
			    struct ib_port_modify *props)
{
	return -ENOSYS;
}

static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}

static int iwch_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}

static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    struct ib_wc *in_wc,
			    struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	return -ENOSYS;
}

static int iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	PDBG("%s context %p\n", __FUNCTION__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}

static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
					       struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return &context->ibucontext;
}

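/*
 * CQ teardown note: chp->refcnt starts at 1 (taken in iwch_create_cq).
 * Other code (e.g. the async event path) is expected to take temporary
 * references while it touches the CQ, so destroy drops its reference
 * and then sleeps until the count hits zero before freeing anything.
 */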
static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	PDBG("%s ib_cq %p\n", __FUNCTION__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	kfree(chp);
	return 0;
}

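/*
 * For userspace CQs, the queue memory itself is handed to the process
 * via mmap(): create_cq picks a per-context "key" (a pseudo file
 * offset, bumped by PAGE_SIZE each time), returns it in the udata
 * response, and records key -> physical address in the ucontext's mmap
 * list.  The process then mmap()s the verbs device file at that offset
 * and iwch_mmap() resolves the key back to the queue pages.
 */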
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;

	PDBG("%s ib_dev %p entries %d\n", __FUNCTION__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *	TERMINATE,
		 *	incoming RDMA WRITE failures,
		 *	incoming RDMA READ REQUEST failures.
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
				     sizeof (struct t3_cqe));
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}

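/*
 * CQ resize is not wired up yet: the full implementation below is
 * compiled out under "#ifdef notyet", so the verb currently returns
 * -ENOSYS.
 */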
static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
#ifdef notyet
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	PDBG("%s ib_cq %p cqe %d\n", __FUNCTION__, cq, cqe);

	/* We don't downsize... */
	if (cqe <= cq->cqe)
		return 0;

	/* create new t3_cq with new size */
	cqe = roundup_pow_of_two(cqe+1);
	newcq.size_log2 = ilog2(cqe);

	/* Don't allow resize to less than the current CQE count */
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr))
		return -ENOMEM;

	/* Quiesce all QPs using this CQ */
	ret = iwch_quiesce_qps(chp);
	if (ret)
		return ret;

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret)
		return ret;

	/* copy CQEs */
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
					sizeof(struct t3_cqe));

	/* old iwch_qp gets new t3_cq but keeps old cqid */
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	/* resize new t3_cq to update the HW context */
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;

	/* destroy old t3_cq */
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret)
		printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
		       __FUNCTION__, ret);

	/* add user hooks here */

	/* resume qps */
	ret = iwch_resume_qps(chp);
	return ret;
#else
	return -ENOSYS;
#endif
}

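/*
 * Arming a userspace CQ requires the current read pointer, which lives
 * in user memory (chp->user_rptr_addr, set up at CQ create time).  It
 * is fetched with get_user() before taking the spinlock (get_user can
 * fault) so the hardware is armed relative to the CQE the application
 * has actually consumed up to.
 */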
static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	unsigned long flag;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if (notify == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (get_user(rptr, chp->user_rptr_addr))
			return -EFAULT;
		spin_lock_irqsave(&chp->lock, flag);
		chp->cq.rptr = rptr;
	} else
		spin_lock_irqsave(&chp->lock, flag);
	PDBG("%s rptr 0x%x\n", __FUNCTION__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (err)
		printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	return err;
}

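/*
 * mmap() handler for userspace queue and doorbell mappings.  The page
 * offset encodes the key previously handed out in a create_cq or
 * create_qp response.  Addresses falling inside the adapter's user
 * doorbell BAR are mapped uncached and write-only; anything else is
 * assumed to be contiguous WQ/CQ memory and is mapped as ordinary RAM.
 */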
static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct cxio_rdev *rdev_p;
	int ret = 0;
	struct iwch_mm_entry *mm;
	struct iwch_ucontext *ucontext;
	u64 addr;

	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __FUNCTION__, vma->vm_pgoff,
	     key, len);

	if (vma->vm_start & (PAGE_SIZE-1))
		return -EINVAL;

	rdev_p = &(to_iwch_dev(context->device)->rdev);
	ucontext = to_iwch_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
	    (addr < (rdev_p->rnic_info.udbell_physbase +
		     rdev_p->rnic_info.udbell_len))) {

		/*
		 * Map T3 DB register.
		 */
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_flags &= ~VM_MAYREAD;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}

static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	PDBG("%s ibpd %p pdid 0x%x\n", __FUNCTION__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	kfree(php);
	return 0;
}

static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = kzalloc(sizeof(*php), GFP_KERNEL);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	PDBG("%s pdid 0x%0x ptr 0x%p\n", __FUNCTION__, pdid, php);
	return &php->ibpd;
}

static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __FUNCTION__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	PDBG("%s mmid 0x%x ptr %p\n", __FUNCTION__, mmid, mhp);
	kfree(mhp);
	return 0;
}

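/*
 * Register a caller-supplied list of physical buffers.  The buffers
 * are flattened into a page list, the converted access flags are
 * shuffled into the adapter's TPT permission bit layout, and the
 * region is written to the adapter via iwch_register_mem().
 */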
static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
					    struct ib_phys_buf *buffer_list,
					    int num_phys_buf,
					    int acc,
					    u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	int ret;

	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
	php = to_iwch_pd(pd);
	rhp = php->rhp;

	acc = iwch_convert_access(acc);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift, &page_list);
	if (ret)
		goto err;

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	/* NOTE: TPT perms are backwards from BIND WR perms! */
	mhp->attr.perms = (acc & 0x1) << 3;
	mhp->attr.perms |= (acc & 0x2) << 1;
	mhp->attr.perms |= (acc & 0x4) >> 1;
	mhp->attr.perms |= (acc & 0x8) >> 3;

	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift, page_list);
	kfree(page_list);
	if (ret)
		goto err;
	return &mhp->ibmr;
err:
	kfree(mhp);
	return ERR_PTR(ret);
}

static int iwch_reregister_phys_mem(struct ib_mr *mr,
				    int mr_rereg_mask,
				    struct ib_pd *pd,
				    struct ib_phys_buf *buffer_list,
				    int num_phys_buf,
				    int acc, u64 *iova_start)
{
	struct iwch_mr mh, *mhp;
	struct iwch_pd *php;
	struct iwch_dev *rhp;
	int new_acc;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages = 0;
	int ret;

	PDBG("%s ib_mr %p ib_pd %p\n", __FUNCTION__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(mr);
	rhp = mhp->rhp;
	php = to_iwch_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	new_acc = mhp->attr.perms;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_iwch_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mh.attr.perms = iwch_convert_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = iwch_reregister_mem(rhp, php, &mh, shift, page_list, npages);
	kfree(page_list);
	if (ret)
		return ret;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = acc;
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}

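/*
 * Register a userspace memory region.  The umem chunk list is walked
 * to build a big-endian array of DMA addresses, one entry per hardware
 * page, which becomes the region's PBL.  On T3B, the adapter PBL
 * address is also passed back to the user library via udata.
 */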
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
				      int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;

	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
	shift = ffs(region->page_size) - 1;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	n = 0;
	list_for_each_entry(chunk, &region->chunk_list, list)
		n += chunk->nents;

	pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err;
	}

	acc = iwch_convert_access(acc);

	i = n = 0;

	list_for_each_entry(chunk, &region->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = cpu_to_be64(sg_dma_address(
					&chunk->page_list[j]) +
					region->page_size * k);
			}
		}

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = (acc & 0x1) << 3;
	mhp->attr.perms |= (acc & 0x2) << 1;
	mhp->attr.perms |= (acc & 0x4) >> 1;
	mhp->attr.perms |= (acc & 0x8) >> 3;
	mhp->attr.va_fbo = region->virt_base;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) region->length;
	mhp->attr.pbl_size = i;
	err = iwch_register_mem(rhp, php, mhp, shift, pages);
	kfree(pages);
	if (err)
		goto err;

	if (udata && t3b_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
				  rhp->rdev.rnic_info.pbl_base) >> 3;
		PDBG("%s user resp pbl_addr 0x%x\n", __FUNCTION__,
		     uresp.pbl_addr);

		/*
		 * iwch_dereg_mr() frees mhp, so return directly here
		 * rather than falling into the err path and freeing
		 * it a second time.
		 */
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			iwch_dereg_mr(&mhp->ibmr);
			return ERR_PTR(-EFAULT);
		}
	}

	return &mhp->ibmr;

err:
	kfree(mhp);
	return ERR_PTR(err);
}

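/*
 * Kernel DMA MR: implemented as a single physical registration
 * covering [0, 4GB), since the T3 TPT entry only carries a 32-bit
 * length.
 */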
static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ib_phys_buf bl;
	u64 kva;
	struct ib_mr *ibmr;

	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	bl.size = 0xffffffff;
	bl.addr = 0;
	kva = 0;
	ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
	return ibmr;
}

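/*
 * Memory window allocation.  As with MRs, the 32-bit STag is split:
 * the upper 24 bits (stag >> 8) form the "mmid" used to look the
 * object up again, with the low byte acting as the key portion.
 */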
static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __FUNCTION__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __FUNCTION__, mw, mmid, mhp);
	kfree(mhp);
	return 0;
}

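/*
 * QP destroy: a QP still in RTS is first forced to ERROR so the
 * hardware stops processing work requests, then we wait for the
 * connection endpoint and all outstanding references to go away
 * before releasing the queue memory.
 */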
static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	if (qhp->attr.state == IWCH_QP_STATE_RTS) {
		attrs.next_state = IWCH_QP_STATE_ERROR;
		iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	}
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __FUNCTION__,
	     ib_qp, qhp->wq.qpid, qhp);
	kfree(qhp);
	return 0;
}

static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *attrs,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two.  However, all the code assumes
	 * they are.  EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);
	PDBG("%s wqsize %d sqsize %d rqsize %d\n", __FUNCTION__,
	     wqsize, sqsize, rqsize);
	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}
	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time.  The CM sets them via a QP modify.
	 * Need to fix...  I think the CM should
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);
	insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid);

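	/*
	 * Userspace QPs get two mmap handles: one for the work queue
	 * memory itself and one for the QP's doorbell page, each handed
	 * back through the udata response as a separate mmap key.
	 */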
	if (udata) {
		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			kfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm1);
			kfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = virt_to_phys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	init_timer(&(qhp->timer));
	PDBG("%s sq_num_entries %d, rq_num_entries %d "
	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d\n",
	     __FUNCTION__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
	     1 << qhp->wq.size_log2);
	return &qhp->ibqp;
}

static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			     int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __FUNCTION__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}

void iwch_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __FUNCTION__, qp);
	atomic_inc(&(to_iwch_qp(qp)->refcnt));
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __FUNCTION__, qp);
	if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
		wake_up(&(to_iwch_qp(qp)->wait));
}

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __FUNCTION__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}

static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 *pkey)
{
	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
	*pkey = 0;
	return 0;
}

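/*
 * iWARP devices have no real GIDs; report the port's MAC address,
 * zero-padded to GID size, as is conventional for RNICs.
 */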
static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;

	PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
	     __FUNCTION__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	BUG_ON(port == 0 || port > 2);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
	return 0;
}

static int iwch_query_device(struct ib_device *ibdev,
			     struct ib_device_attr *props)
{
	struct iwch_dev *dev;

	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);

	dev = to_iwch_dev(ibdev);
	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	props->device_cap_flags = dev->device_cap_flags;
	props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
	props->max_mr_size = ~0ull;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;

	return 0;
}

static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
	props->max_mtu = IB_MTU_4096;
	props->lid = 0;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	props->state = IB_PORT_ACTIVE;
	props->phys_state = 0;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->qkey_viol_cntr = 0;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;

	return 0;
}

static ssize_t show_rev(struct class_device *cdev, char *buf)
{
	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
					    ibdev.class_dev);
	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
	return sprintf(buf, "%d\n", dev->rdev.t3cdev_p->type);
}

static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
{
	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
					    ibdev.class_dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;

	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.fw_version);
}

static ssize_t show_hca(struct class_device *cdev, char *buf)
{
	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
					    ibdev.class_dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;

	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct class_device *cdev, char *buf)
{
	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
					    ibdev.class_dev);
	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
	return sprintf(buf, "%x.%x\n", dev->rdev.rnic_info.pdev->vendor,
		       dev->rdev.rnic_info.pdev->device);
}

static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct class_device_attribute *iwch_class_attributes[] = {
	&class_device_attr_hw_rev,
	&class_device_attr_fw_ver,
	&class_device_attr_hca_type,
	&class_device_attr_board_id
};

int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	int i;

	PDBG("%s iwch_dev %p\n", __FUNCTION__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	dev->ibdev.owner = THIS_MODULE;
	dev->device_cap_flags =
	    (IB_DEVICE_ZERO_STAG |
	     IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW);

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
	dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
	dev->ibdev.class_dev.dev = &(dev->rdev.rnic_info.pdev->dev);
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.modify_port = iwch_modify_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
	dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.bind_mw = iwch_bind_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;

	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;

	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;

	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;

	ret = ib_register_device(&dev->ibdev);
	if (ret)
		goto bail1;

	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
		ret = class_device_create_file(&dev->ibdev.class_dev,
					       iwch_class_attributes[i]);
		if (ret)
			goto bail2;
	}
	return 0;
bail2:
	ib_unregister_device(&dev->ibdev);
bail1:
	kfree(dev->ibdev.iwcm);
	return ret;
}

void iwch_unregister_device(struct iwch_dev *dev)
{
	int i;

	PDBG("%s iwch_dev %p\n", __FUNCTION__, dev);
	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
		class_device_remove_file(&dev->ibdev.class_dev,
					 iwch_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
}