IB: remove support for phys MRs
drivers/staging/rdma/hfi1/mr.c
/*
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>

#include "hfi.h"

/* Fast memory region */
struct hfi1_fmr {
        struct ib_fmr ibfmr;
        struct hfi1_mregion mr;        /* must be last */
};

static inline struct hfi1_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
        return container_of(ibfmr, struct hfi1_fmr, ibfmr);
}
66
67 static int init_mregion(struct hfi1_mregion *mr, struct ib_pd *pd,
68 int count)
69 {
70 int m, i = 0;
71 int rval = 0;
72
73 m = (count + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
74 for (; i < m; i++) {
75 mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
76 if (!mr->map[i])
77 goto bail;
78 }
79 mr->mapsz = m;
80 init_completion(&mr->comp);
81 /* count returning the ptr to user */
82 atomic_set(&mr->refcount, 1);
83 mr->pd = pd;
84 mr->max_segs = count;
85 out:
86 return rval;
87 bail:
88 while (i)
89 kfree(mr->map[--i]);
90 rval = -ENOMEM;
91 goto out;
92 }

static void deinit_mregion(struct hfi1_mregion *mr)
{
        int i = mr->mapsz;

        mr->mapsz = 0;
        while (i)
                kfree(mr->map[--i]);
}
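
/*
 * Note on the two-level segment table (explanatory, added for
 * clarity): mr->map[] holds pointers to first-level chunks, each of
 * which stores HFI1_SEGSZ segments, so init_mregion() rounds up.
 * A worked example with an illustrative HFI1_SEGSZ of 8 (the real
 * value comes from the driver headers):
 *
 *      count = 20 segments
 *      m = (20 + 8 - 1) / 8 = 3 chunks
 *
 * i.e. chunks 0 and 1 are full (8 segments each) and chunk 2 holds
 * the remaining 4 segments.
 */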

/**
 * hfi1_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Returns the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see dma.c).
 */
struct ib_mr *hfi1_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct hfi1_mr *mr = NULL;
        struct ib_mr *ret;
        int rval;

        if (to_ipd(pd)->user) {
                ret = ERR_PTR(-EPERM);
                goto bail;
        }

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }

        rval = init_mregion(&mr->mr, pd, 0);
        if (rval) {
                ret = ERR_PTR(rval);
                goto bail;
        }

        rval = hfi1_alloc_lkey(&mr->mr, 1);
        if (rval) {
                ret = ERR_PTR(rval);
                goto bail_mregion;
        }

        mr->mr.access_flags = acc;
        ret = &mr->ibmr;
done:
        return ret;

bail_mregion:
        deinit_mregion(&mr->mr);
bail:
        kfree(mr);
        goto done;
}
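
/*
 * Hypothetical caller sketch (an illustration, not part of this file):
 * in kernels of this vintage a verbs consumer reaches the entry point
 * above through the core ib_get_dma_mr() wrapper rather than calling
 * hfi1_get_dma_mr() directly:
 *
 *      struct ib_mr *dma_mr;
 *
 *      dma_mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *      if (IS_ERR(dma_mr))
 *              return PTR_ERR(dma_mr);
 *      ... use dma_mr->lkey for local DMA ...
 */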

static struct hfi1_mr *alloc_mr(int count, struct ib_pd *pd)
{
        struct hfi1_mr *mr;
        int rval = -ENOMEM;
        int m;

        /* Allocate struct plus pointers to first level page tables. */
        m = (count + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
        mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
        if (!mr)
                goto bail;

        rval = init_mregion(&mr->mr, pd, count);
        if (rval)
                goto bail;

        rval = hfi1_alloc_lkey(&mr->mr, 0);
        if (rval)
                goto bail_mregion;
        mr->ibmr.lkey = mr->mr.lkey;
        mr->ibmr.rkey = mr->mr.lkey;
done:
        return mr;

bail_mregion:
        deinit_mregion(&mr->mr);
bail:
        kfree(mr);
        mr = ERR_PTR(rval);
        goto done;
}

/**
 * hfi1_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: virtual address to use as the iova for this region
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *hfi1_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                               u64 virt_addr, int mr_access_flags,
                               struct ib_udata *udata)
{
        struct hfi1_mr *mr;
        struct ib_umem *umem;
        struct scatterlist *sg;
        int n, m, entry;
        struct ib_mr *ret;

        if (length == 0) {
                ret = ERR_PTR(-EINVAL);
                goto bail;
        }

        umem = ib_umem_get(pd->uobject->context, start, length,
                           mr_access_flags, 0);
        if (IS_ERR(umem))
                return ERR_CAST(umem);

        n = umem->nmap;

        mr = alloc_mr(n, pd);
        if (IS_ERR(mr)) {
                ret = (struct ib_mr *)mr;
                ib_umem_release(umem);
                goto bail;
        }

        mr->mr.user_base = start;
        mr->mr.iova = virt_addr;
        mr->mr.length = length;
        mr->mr.offset = ib_umem_offset(umem);
        mr->mr.access_flags = mr_access_flags;
        mr->umem = umem;

        if (is_power_of_2(umem->page_size))
                mr->mr.page_shift = ilog2(umem->page_size);
        m = 0;
        n = 0;
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                void *vaddr;

                vaddr = page_address(sg_page(sg));
                if (!vaddr) {
                        /* release the lkey, mregion, and umem taken above */
                        hfi1_dereg_mr(&mr->ibmr);
                        ret = ERR_PTR(-EINVAL);
                        goto bail;
                }
                mr->mr.map[m]->segs[n].vaddr = vaddr;
                mr->mr.map[m]->segs[n].length = umem->page_size;
                n++;
                if (n == HFI1_SEGSZ) {
                        m++;
                        n = 0;
                }
        }
        ret = &mr->ibmr;

bail:
        return ret;
}
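
/*
 * Hypothetical userspace sketch (an illustration, not part of this
 * file): this verb is what ultimately backs libibverbs ibv_reg_mr()
 * for an hfi1 device:
 *
 *      struct ibv_mr *umr;
 *
 *      umr = ibv_reg_mr(pd, buf, len,
 *                       IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ);
 *      if (!umr)
 *              return -errno;
 *      ... post work requests using umr->lkey / umr->rkey ...
 *      ibv_dereg_mr(umr);
 */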

/**
 * hfi1_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success.
 *
 * Note that this is called to free MRs created by hfi1_get_dma_mr()
 * or hfi1_reg_user_mr().
 */
int hfi1_dereg_mr(struct ib_mr *ibmr)
{
        struct hfi1_mr *mr = to_imr(ibmr);
        int ret = 0;
        unsigned long timeout;

        hfi1_free_lkey(&mr->mr);

        hfi1_put_mr(&mr->mr); /* will set completion if last */
        timeout = wait_for_completion_timeout(&mr->mr.comp, 5 * HZ);
        if (!timeout) {
                dd_dev_err(
                        dd_from_ibdev(mr->mr.pd->device),
                        "hfi1_dereg_mr timeout mr %p pd %p refcount %u\n",
                        mr, mr->mr.pd, atomic_read(&mr->mr.refcount));
                hfi1_get_mr(&mr->mr);
                ret = -EBUSY;
                goto out;
        }
        deinit_mregion(&mr->mr);
        if (mr->umem)
                ib_umem_release(mr->umem);
        kfree(mr);
out:
        return ret;
}
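
/*
 * Design note (explanatory, added for clarity): the 5 * HZ wait above
 * gives outstanding work requests that still hold a reference time to
 * drop it; on timeout the reference is re-taken and -EBUSY returned,
 * so an in-use MR is never freed out from under the hardware.  A
 * hypothetical kernel caller sketch, via the core wrapper:
 *
 *      int err = ib_dereg_mr(mr);
 *
 *      if (err == -EBUSY)
 *              ... MR still referenced; report the leak and retry ...
 */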

/*
 * Allocate a memory region usable with the
 * IB_WR_REG_MR send work request.
 *
 * Return the memory region on success, otherwise return an errno.
 * FIXME: IB_WR_REG_MR is not supported
 */
struct ib_mr *hfi1_alloc_mr(struct ib_pd *pd,
                            enum ib_mr_type mr_type,
                            u32 max_num_sg)
{
        struct hfi1_mr *mr;

        if (mr_type != IB_MR_TYPE_MEM_REG)
                return ERR_PTR(-EINVAL);

        mr = alloc_mr(max_num_sg, pd);
        if (IS_ERR(mr))
                return (struct ib_mr *)mr;

        return &mr->ibmr;
}
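
/*
 * Hypothetical kernel caller sketch (an illustration, not part of this
 * file): a ULP would obtain such an MR through the core wrapper:
 *
 *      struct ib_mr *reg_mr;
 *
 *      reg_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 64);
 *      if (IS_ERR(reg_mr))
 *              return PTR_ERR(reg_mr);
 *
 * Per the FIXME above, until IB_WR_REG_MR is wired up the MR cannot
 * actually be registered through a send work request on this driver.
 */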

/**
 * hfi1_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *hfi1_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
                              struct ib_fmr_attr *fmr_attr)
{
        struct hfi1_fmr *fmr;
        int m;
        struct ib_fmr *ret;
        int rval = -ENOMEM;

        /* Allocate struct plus pointers to first level page tables. */
        m = (fmr_attr->max_pages + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
        fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
        if (!fmr)
                goto bail;

        rval = init_mregion(&fmr->mr, pd, fmr_attr->max_pages);
        if (rval)
                goto bail;

        /*
         * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
         * rkey.
         */
        rval = hfi1_alloc_lkey(&fmr->mr, 0);
        if (rval)
                goto bail_mregion;
        fmr->ibfmr.rkey = fmr->mr.lkey;
        fmr->ibfmr.lkey = fmr->mr.lkey;
        /*
         * Resources are allocated but no valid mapping (RKEY can't be
         * used).
         */
        fmr->mr.access_flags = mr_access_flags;
        fmr->mr.max_segs = fmr_attr->max_pages;
        fmr->mr.page_shift = fmr_attr->page_shift;

        ret = &fmr->ibfmr;
done:
        return ret;

bail_mregion:
        deinit_mregion(&fmr->mr);
bail:
        kfree(fmr);
        ret = ERR_PTR(rval);
        goto done;
}

/**
 * hfi1_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 */
int hfi1_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                      int list_len, u64 iova)
{
        struct hfi1_fmr *fmr = to_ifmr(ibfmr);
        struct hfi1_lkey_table *rkt;
        unsigned long flags;
        int m, n, i;
        u32 ps;
        int ret;

        i = atomic_read(&fmr->mr.refcount);
        if (i > 2)
                return -EBUSY;

        if (list_len > fmr->mr.max_segs) {
                ret = -EINVAL;
                goto bail;
        }
        rkt = &to_idev(ibfmr->device)->lk_table;
        spin_lock_irqsave(&rkt->lock, flags);
        fmr->mr.user_base = iova;
        fmr->mr.iova = iova;
        ps = 1 << fmr->mr.page_shift;
        fmr->mr.length = list_len * ps;
        m = 0;
        n = 0;
        for (i = 0; i < list_len; i++) {
                fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
                fmr->mr.map[m]->segs[n].length = ps;
                if (++n == HFI1_SEGSZ) {
                        m++;
                        n = 0;
                }
        }
        spin_unlock_irqrestore(&rkt->lock, flags);
        ret = 0;

bail:
        return ret;
}

/**
 * hfi1_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Returns 0 on success.
 */
int hfi1_unmap_fmr(struct list_head *fmr_list)
{
        struct hfi1_fmr *fmr;
        struct hfi1_lkey_table *rkt;
        unsigned long flags;

        list_for_each_entry(fmr, fmr_list, ibfmr.list) {
                rkt = &to_idev(fmr->ibfmr.device)->lk_table;
                spin_lock_irqsave(&rkt->lock, flags);
                fmr->mr.user_base = 0;
                fmr->mr.iova = 0;
                fmr->mr.length = 0;
                spin_unlock_irqrestore(&rkt->lock, flags);
        }
        return 0;
}

/**
 * hfi1_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success.
 */
int hfi1_dealloc_fmr(struct ib_fmr *ibfmr)
{
        struct hfi1_fmr *fmr = to_ifmr(ibfmr);
        int ret = 0;
        unsigned long timeout;

        hfi1_free_lkey(&fmr->mr);
        hfi1_put_mr(&fmr->mr); /* will set completion if last */
        timeout = wait_for_completion_timeout(&fmr->mr.comp, 5 * HZ);
        if (!timeout) {
                hfi1_get_mr(&fmr->mr);
                ret = -EBUSY;
                goto out;
        }
        deinit_mregion(&fmr->mr);
        kfree(fmr);
out:
        return ret;
}
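
/*
 * Hypothetical FMR lifecycle sketch (an illustration using the core
 * wrappers, not part of this file): allocate, map a page list, use
 * fmr->rkey, then unmap and deallocate:
 *
 *      struct ib_fmr_attr attr = {
 *              .max_pages  = 64,
 *              .max_maps   = 32,
 *              .page_shift = PAGE_SHIFT,
 *      };
 *      struct ib_fmr *fmr;
 *      LIST_HEAD(fmr_list);
 *
 *      fmr = ib_alloc_fmr(pd, IB_ACCESS_REMOTE_WRITE, &attr);
 *      if (IS_ERR(fmr))
 *              return PTR_ERR(fmr);
 *
 *      ret = ib_map_phys_fmr(fmr, page_list, npages, iova);
 *      ... use fmr->rkey until done, then recycle ...
 *      list_add(&fmr->list, &fmr_list);
 *      ib_unmap_fmr(&fmr_list);
 *      ib_dealloc_fmr(fmr);
 */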