 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>

#include "hfi.h"
/* Fast memory region */
struct hfi1_fmr {
	struct ib_fmr ibfmr;
	struct hfi1_mregion mr;	/* must be last */
};

static inline struct hfi1_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct hfi1_fmr, ibfmr);
}
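
/*
 * Layout note (editorial): struct hfi1_mregion ends in a variable-length
 * array of segment-table pointers, which is why it must be the last
 * member of the wrapper structs in this file.  to_ifmr() recovers the
 * wrapper from the embedded ib_fmr with the standard container_of()
 * idiom, e.g.:
 *
 *	struct hfi1_fmr *fmr = container_of(ibfmr, struct hfi1_fmr, ibfmr);
 *
 * No lookup table is needed; the conversion is pure pointer arithmetic.
 */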
static int init_mregion(struct hfi1_mregion *mr, struct ib_pd *pd,
			int count)
{
	int m, i = 0;
	int rval = 0;

	m = (count + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
	for (; i < m; i++) {
		mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
		if (!mr->map[i])
			goto bail;
	}
	mr->mapsz = m;
	init_completion(&mr->comp);
	/* count returning the ptr to user */
	atomic_set(&mr->refcount, 1);
	mr->pd = pd;
	mr->max_segs = count;
out:
	return rval;
bail:
	while (i)
		kfree(mr->map[--i]);
	rval = -ENOMEM;
	goto out;
}
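
/*
 * Sizing example (illustrative; the real HFI1_SEGSZ value comes from the
 * driver headers): each mr->map[i] block holds HFI1_SEGSZ segments, so
 * the block count m is the ceiling of count / HFI1_SEGSZ.  If HFI1_SEGSZ
 * were 64 and count were 200 pages:
 *
 *	m = (200 + 64 - 1) / 64 = 263 / 64 = 4 blocks
 *
 * covering 4 * 64 = 256 segment slots, of which 200 are used.
 */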
static void deinit_mregion(struct hfi1_mregion *mr)
{
	int i = mr->mapsz;

	mr->mapsz = 0;
	while (i)
		kfree(mr->map[--i]);
}
/**
 * hfi1_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Returns the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see dma.c).
 */
struct ib_mr *hfi1_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct hfi1_mr *mr = NULL;
	struct ib_mr *ret;
	int rval;

	if (to_ipd(pd)->user) {
		ret = ERR_PTR(-EPERM);
		goto bail;
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	rval = init_mregion(&mr->mr, pd, 0);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail;
	}

	rval = hfi1_alloc_lkey(&mr->mr, 1);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail_mregion;
	}

	mr->mr.access_flags = acc;
	ret = &mr->ibmr;
done:
	return ret;

bail_mregion:
	deinit_mregion(&mr->mr);
bail:
	kfree(mr);
	goto done;
}
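
/*
 * Usage sketch (editorial, not part of the driver): kernel verbs
 * consumers reach this entry point through the core API rather than by
 * calling the driver directly, e.g.:
 *
 *	struct ib_mr *mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *
 * The -EPERM check above rejects userspace PDs, since a DMA MR covers
 * all of kernel memory.
 */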
static struct hfi1_mr *alloc_mr(int count, struct ib_pd *pd)
{
	struct hfi1_mr *mr;
	int rval = -ENOMEM;
	int m;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
	mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
	if (!mr)
		goto bail;

	rval = init_mregion(&mr->mr, pd, count);
	if (rval)
		goto bail;

	rval = hfi1_alloc_lkey(&mr->mr, 0);
	if (rval)
		goto bail_mregion;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;
done:
	return mr;

bail_mregion:
	deinit_mregion(&mr->mr);
bail:
	kfree(mr);
	mr = ERR_PTR(rval);
	goto done;
}
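
/*
 * Note (editorial): the ibmr lkey and rkey are deliberately set to the
 * same value because the driver keeps a single key table for both local
 * and remote access; the local/remote distinction is enforced when a key
 * is validated on use, not when it is allocated.
 */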
/**
 * hfi1_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: virtual address to use as the iova of the region
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *hfi1_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt_addr, int mr_access_flags,
			       struct ib_udata *udata)
{
	struct hfi1_mr *mr;
	struct ib_umem *umem;
	struct scatterlist *sg;
	int n, m, entry;
	struct ib_mr *ret;

	if (length == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return (void *) umem;

	n = umem->nmap;

	mr = alloc_mr(n, pd);
	if (IS_ERR(mr)) {
		ret = (struct ib_mr *)mr;
		ib_umem_release(umem);
		goto bail;
	}

	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = ib_umem_offset(umem);
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;

	if (is_power_of_2(umem->page_size))
		mr->mr.page_shift = ilog2(umem->page_size);
	m = 0;
	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		void *vaddr;

		vaddr = page_address(sg_page(sg));
		if (!vaddr) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		mr->mr.map[m]->segs[n].vaddr = vaddr;
		mr->mr.map[m]->segs[n].length = umem->page_size;
		n++;
		if (n == HFI1_SEGSZ) {
			m++;
			n = 0;
		}
	}
	ret = &mr->ibmr;

bail:
	return ret;
}
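
/*
 * Indexing example (illustrative): the (m, n) pair above walks the
 * two-level segment table, so page i of the umem lands at
 *
 *	mr->mr.map[i / HFI1_SEGSZ]->segs[i % HFI1_SEGSZ]
 *
 * e.g. if HFI1_SEGSZ were 64, page 130 would live at map[2]->segs[2].
 */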
/**
 * hfi1_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success.
 *
 * Note that this is called to free MRs created by hfi1_get_dma_mr()
 * or hfi1_reg_user_mr().
 */
int hfi1_dereg_mr(struct ib_mr *ibmr)
{
	struct hfi1_mr *mr = to_imr(ibmr);
	int ret = 0;
	unsigned long timeout;

	hfi1_free_lkey(&mr->mr);

	hfi1_put_mr(&mr->mr); /* will set completion if last */
	timeout = wait_for_completion_timeout(&mr->mr.comp,
		5 * HZ);
	if (!timeout) {
		dd_dev_err(
			dd_from_ibdev(mr->mr.pd->device),
			"hfi1_dereg_mr timeout mr %p pd %p refcount %u\n",
			mr, mr->mr.pd, atomic_read(&mr->mr.refcount));
		hfi1_get_mr(&mr->mr);
		ret = -EBUSY;
		goto out;
	}
	deinit_mregion(&mr->mr);
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
out:
	return ret;
}
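
/*
 * Teardown protocol (editorial): init_mregion() starts the refcount at 1,
 * so the hfi1_put_mr() above drops the creation reference, and the
 * completion fires once every outstanding QP/WQE reference is also gone.
 * If the completion does not fire before the timeout, the reference is
 * re-taken and -EBUSY is returned so the MR is never freed while a user
 * still holds it.
 */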
/*
 * Allocate a memory region usable with the
 * IB_WR_REG_MR send work request.
 *
 * Return the memory region on success, otherwise return an errno.
 * FIXME: IB_WR_REG_MR is not supported
 */
struct ib_mr *hfi1_alloc_mr(struct ib_pd *pd,
			    enum ib_mr_type mr_type,
			    u32 max_num_sg)
{
	struct hfi1_mr *mr;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = alloc_mr(max_num_sg, pd);
	if (IS_ERR(mr))
		return (struct ib_mr *)mr;

	return &mr->ibmr;
}
/**
 * hfi1_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *hfi1_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			      struct ib_fmr_attr *fmr_attr)
{
	struct hfi1_fmr *fmr;
	int m;
	struct ib_fmr *ret;
	int rval = -ENOMEM;

	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
	fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]),
		      GFP_KERNEL);
	if (!fmr)
		goto bail;

	rval = init_mregion(&fmr->mr, pd, fmr_attr->max_pages);
	if (rval)
		goto bail;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	rval = hfi1_alloc_lkey(&fmr->mr, 0);
	if (rval)
		goto bail_mregion;
	fmr->ibfmr.rkey = fmr->mr.lkey;
	fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->mr.page_shift = fmr_attr->page_shift;

	ret = &fmr->ibfmr;
done:
	return ret;

bail_mregion:
	deinit_mregion(&fmr->mr);
bail:
	kfree(fmr);
	ret = ERR_PTR(rval);
	goto done;
}
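
/*
 * Lifecycle sketch (editorial): a kernel consumer of the FMR API pairs
 * these entry points through the core verbs layer as
 *
 *	fmr = ib_alloc_fmr(pd, access_flags, &attr);
 *	ib_map_phys_fmr(fmr, page_list, list_len, iova);
 *	...
 *	ib_unmap_fmr(&fmr_list);
 *	ib_dealloc_fmr(fmr);
 *
 * hfi1_alloc_fmr() only reserves resources; the rkey is not usable until
 * hfi1_map_phys_fmr() installs a mapping.
 */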
/**
 * hfi1_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 *
 * Returns 0 on success.
 */
int hfi1_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		      int list_len, u64 iova)
{
	struct hfi1_fmr *fmr = to_ifmr(ibfmr);
	struct hfi1_lkey_table *rkt;
	unsigned long flags;
	int m, n, i;
	u32 ps;
	int ret;

	i = atomic_read(&fmr->mr.refcount);
	if (i > 2)
		return -EBUSY;

	if (list_len > fmr->mr.max_segs) {
		ret = -EINVAL;
		goto bail;
	}
	rkt = &to_idev(ibfmr->device)->lk_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->mr.page_shift;
	fmr->mr.length = list_len * ps;
	m = 0;
	n = 0;
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == HFI1_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = 0;

bail:
	return ret;
}
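
/*
 * Length example (illustrative): ps is the page size implied by the
 * region's page_shift, so the mapped length is list_len pages:
 *
 *	page_shift = 12  ->  ps = 1 << 12 = 4096
 *	list_len   = 16  ->  fmr->mr.length = 16 * 4096 = 65536 bytes
 */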
/**
 * hfi1_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Returns 0 on success.
 */
int hfi1_unmap_fmr(struct list_head *fmr_list)
{
	struct hfi1_fmr *fmr;
	struct hfi1_lkey_table *rkt;
	unsigned long flags;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rkt = &to_idev(fmr->ibfmr.device)->lk_table;
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}
/**
 * hfi1_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success.
 */
int hfi1_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct hfi1_fmr *fmr = to_ifmr(ibfmr);
	int ret = 0;
	unsigned long timeout;

	hfi1_free_lkey(&fmr->mr);
	hfi1_put_mr(&fmr->mr); /* will set completion if last */
	timeout = wait_for_completion_timeout(&fmr->mr.comp,
		5 * HZ);
	if (!timeout) {
		hfi1_get_mr(&fmr->mr);
		ret = -EBUSY;
		goto out;
	}
	deinit_mregion(&fmr->mr);
	kfree(fmr);
out:
	return ret;
}