/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
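
/*
 * One struct amdgpu_mn is created per (device, mm_struct) pair; it owns the
 * interval tree of userptr BOs registered against that address space and is
 * looked up through adev->mn_hash, keyed by the mm_struct pointer.
 */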
struct amdgpu_mn {
	/* constant after initialisation */
	struct amdgpu_device	*adev;
	struct mm_struct	*mm;
	struct mmu_notifier	mn;

	/* only used on destruction */
	struct work_struct	work;

	/* protected by adev->mn_lock */
	struct hlist_node	node;

	/* objects protected by lock */
	struct mutex		lock;
	struct rb_root		objects;
};
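
/*
 * A node covers one interval in the address space; amdgpu_mn_register()
 * merges nodes with overlapping ranges, so a single node may carry several
 * BOs on its bos list.
 */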
struct amdgpu_mn_node {
	struct interval_tree_node	it;
	struct list_head		bos;
};

/**
 * amdgpu_mn_destroy - destroy the rmn
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item.
 */
static void amdgpu_mn_destroy(struct work_struct *work)
{
	struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
	struct amdgpu_device *adev = rmn->adev;
	struct amdgpu_mn_node *node, *next_node;
	struct amdgpu_bo *bo, *next_bo;
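
	/* adev->mn_lock is always taken before rmn->lock, here and in
	 * amdgpu_mn_unregister(), to keep the lock order consistent */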
	mutex_lock(&adev->mn_lock);
	mutex_lock(&rmn->lock);
	hash_del(&rmn->node);
	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
					     it.rb) {
		interval_tree_remove(&node->it, &rmn->objects);
		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
			bo->mn = NULL;
			list_del_init(&bo->mn_list);
		}
		kfree(node);
	}
	mutex_unlock(&rmn->lock);
	mutex_unlock(&adev->mn_lock);
	mmu_notifier_unregister(&rmn->mn, rmn->mm);
	kfree(rmn);
}

/**
 * amdgpu_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Schedule a work item to lazily destroy our notifier.
 */
static void amdgpu_mn_release(struct mmu_notifier *mn,
			      struct mm_struct *mm)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);

	INIT_WORK(&rmn->work, amdgpu_mn_destroy);
	schedule_work(&rmn->work);
}

/**
 * amdgpu_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * We block for all BOs between start and end to be idle and
 * unmap them by moving them into the system domain again.
 */
static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	mutex_lock(&rmn->lock);

	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node;
		struct amdgpu_bo *bo;
		long r;

		node = container_of(it, struct amdgpu_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

		list_for_each_entry(bo, &node->bos, mn_list) {

			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
							  end))
				continue;

			r = amdgpu_bo_reserve(bo, true);
			if (r) {
				DRM_ERROR("(%ld) failed to reserve user bo\n", r);
				continue;
			}
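
			/* block until the GPU is done using the pages */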
			r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
				true, false, MAX_SCHEDULE_TIMEOUT);
			if (r <= 0)
				DRM_ERROR("(%ld) failed to wait for user bo\n", r);
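
			/* move the BO to the CPU domain, unbinding its pages */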
			amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
			if (r)
				DRM_ERROR("(%ld) failed to validate user bo\n", r);

			amdgpu_bo_unreserve(bo);
		}
	}

	mutex_unlock(&rmn->lock);
}

static const struct mmu_notifier_ops amdgpu_mn_ops = {
	.release = amdgpu_mn_release,
	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
};

/**
 * amdgpu_mn_get - create notifier context
 *
 * @adev: amdgpu device pointer
 *
 * Creates a notifier context for current->mm.
 */
static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
{
	struct mm_struct *mm = current->mm;
	struct amdgpu_mn *rmn;
	int r;
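
	/* __mmu_notifier_register() below requires mmap_sem held for write */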
	down_write(&mm->mmap_sem);
	mutex_lock(&adev->mn_lock);

	hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
		if (rmn->mm == mm)
			goto release_locks;

	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
	if (!rmn) {
		rmn = ERR_PTR(-ENOMEM);
		goto release_locks;
	}

	rmn->adev = adev;
	rmn->mm = mm;
	rmn->mn.ops = &amdgpu_mn_ops;
	mutex_init(&rmn->lock);
	rmn->objects = RB_ROOT;

	r = __mmu_notifier_register(&rmn->mn, mm);
	if (r)
		goto free_rmn;

	hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);

release_locks:
	mutex_unlock(&adev->mn_lock);
	up_write(&mm->mmap_sem);

	return rmn;

free_rmn:
	mutex_unlock(&adev->mn_lock);
	up_write(&mm->mmap_sem);
	kfree(rmn);

	return ERR_PTR(r);
}

/**
 * amdgpu_mn_register - register a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 * @addr: userptr addr we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	unsigned long end = addr + amdgpu_bo_size(bo) - 1;
	struct amdgpu_device *adev = bo->adev;
	struct amdgpu_mn *rmn;
	struct amdgpu_mn_node *node = NULL;
	struct list_head bos;
	struct interval_tree_node *it;

	rmn = amdgpu_mn_get(adev);
	if (IS_ERR(rmn))
		return PTR_ERR(rmn);

	INIT_LIST_HEAD(&bos);

	mutex_lock(&rmn->lock);
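
	/* merge every node whose interval overlaps [addr, end] into one */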
	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
		kfree(node);
		node = container_of(it, struct amdgpu_mn_node, it);
		interval_tree_remove(&node->it, &rmn->objects);
		addr = min(it->start, addr);
		end = max(it->last, end);
		list_splice(&node->bos, &bos);
	}

	if (!node) {
		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
		if (!node) {
			mutex_unlock(&rmn->lock);
			return -ENOMEM;
		}
	}

	bo->mn = rmn;

	node->it.start = addr;
	node->it.last = end;
	INIT_LIST_HEAD(&node->bos);
	list_splice(&bos, &node->bos);
	list_add(&bo->mn_list, &node->bos);

	interval_tree_insert(&node->it, &rmn->objects);

	mutex_unlock(&rmn->lock);

	return 0;
}
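
/*
 * Usage sketch (hypothetical caller, for illustration only): a userptr
 * ioctl path would pair the two entry points roughly like this:
 *
 *	r = amdgpu_mn_register(bo, args->addr);
 *	if (r)
 *		goto error_free;
 *	...
 *	amdgpu_mn_unregister(bo);
 */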

/**
 * amdgpu_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = bo->adev;
	struct amdgpu_mn *rmn;
	struct list_head *head;

	mutex_lock(&adev->mn_lock);

	rmn = bo->mn;
	if (rmn == NULL) {
		mutex_unlock(&adev->mn_lock);
		return;
	}

	mutex_lock(&rmn->lock);

	/* save the next list entry for later */
	head = bo->mn_list.next;

	bo->mn = NULL;
	list_del(&bo->mn_list);

	if (list_empty(head)) {
		struct amdgpu_mn_node *node;

		node = container_of(head, struct amdgpu_mn_node, bos);
		interval_tree_remove(&node->it, &rmn->objects);
		kfree(node);
	}

	mutex_unlock(&rmn->lock);
	mutex_unlock(&adev->mn_lock);
}