/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"

struct amdgpu_mn {
	/* constant after initialisation */
	struct amdgpu_device	*adev;
	struct mm_struct	*mm;
	struct mmu_notifier	mn;

	/* only used on destruction */
	struct work_struct	work;

	/* protected by adev->mn_lock */
	struct hlist_node	node;

	/* objects protected by mm->mmap_sem */
	struct rb_root		objects;
};

struct amdgpu_mn_node {
	struct interval_tree_node	it;
	struct list_head		bos;
};
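
/*
 * Each amdgpu_mn_node ties one address range [it.start, it.last] in the
 * rmn->objects interval tree to the list of userptr BOs backed by that
 * range; amdgpu_mn_register() below merges overlapping registrations into
 * a single node.
 */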

/**
 * amdgpu_mn_destroy - destroy the rmn
 *
 * @work: previously scheduled work item
 *
 * Lazy destroys the notifier from a work item
 */
static void amdgpu_mn_destroy(struct work_struct *work)
{
	struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
	struct amdgpu_device *adev = rmn->adev;
	struct amdgpu_mn_node *node, *next_node;
	struct amdgpu_bo *bo, *next_bo;

	mutex_lock(&adev->mn_lock);
	down_write(&rmn->mm->mmap_sem);
	hash_del(&rmn->node);
	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
					     it.rb) {
		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
			bo->mn = NULL;
			list_del_init(&bo->mn_list);
		}
		kfree(node);
	}
	up_write(&rmn->mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);
	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
	kfree(rmn);
}
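
/*
 * Note that hash_del() above runs under adev->mn_lock, so a concurrent
 * amdgpu_mn_get() can no longer find and reuse a context that is being
 * torn down.
 */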

/**
 * amdgpu_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Schedule a work item to lazy destroy our notifier.
 */
static void amdgpu_mn_release(struct mmu_notifier *mn,
			      struct mm_struct *mm)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	INIT_WORK(&rmn->work, amdgpu_mn_destroy);
	schedule_work(&rmn->work);
}
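
/*
 * Deferring destruction to a work item matters here: this callback runs
 * while notifier locks are still held, so calling
 * mmu_notifier_unregister_no_release() directly from it could deadlock.
 */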

/**
 * amdgpu_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * We block for all BOs between start and end to be idle and
 * unmap them by moving them into the system domain again.
 */
static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		struct amdgpu_mn_node *node;
		struct amdgpu_bo *bo;
		long r;

		node = container_of(it, struct amdgpu_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

		list_for_each_entry(bo, &node->bos, mn_list) {

			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
							  end))
				continue;

			r = amdgpu_bo_reserve(bo, true);
			if (r) {
				DRM_ERROR("(%ld) failed to reserve user bo\n", r);
				continue;
			}

			r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
				true, false, MAX_SCHEDULE_TIMEOUT);
			if (r <= 0)
				DRM_ERROR("(%ld) failed to wait for user bo\n", r);

			amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
			if (r)
				DRM_ERROR("(%ld) failed to validate user bo\n", r);

			amdgpu_bo_unreserve(bo);
		}
	}
}
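
/*
 * The "end -= 1" above bridges two conventions: the mmu_notifier range
 * [start, end) excludes end, while the interval tree expects an inclusive
 * last address.
 */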

static const struct mmu_notifier_ops amdgpu_mn_ops = {
	.release = amdgpu_mn_release,
	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
};
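
/*
 * Only .release and .invalidate_range_start are implemented; the remaining
 * mmu_notifier_ops hooks are left NULL and the notifier core simply skips
 * them.
 */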

/**
 * amdgpu_mn_get - create notifier context
 *
 * @adev: amdgpu device pointer
 *
 * Creates a notifier context for current->mm.
 */
static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
{
	struct mm_struct *mm = current->mm;
	struct amdgpu_mn *rmn;
	int r;

	mutex_lock(&adev->mn_lock);
	down_write(&mm->mmap_sem);

	hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
		if (rmn->mm == mm)
			goto release_locks;

	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
	if (!rmn) {
		rmn = ERR_PTR(-ENOMEM);
		goto release_locks;
	}

	rmn->adev = adev;
	rmn->mm = mm;
	rmn->mn.ops = &amdgpu_mn_ops;
	rmn->objects = RB_ROOT;

	r = __mmu_notifier_register(&rmn->mn, mm);
	if (r)
		goto free_rmn;

	hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);

release_locks:
	up_write(&mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);

	return rmn;

free_rmn:
	up_write(&mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);
	kfree(rmn);

	return ERR_PTR(r);
}
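
/*
 * There is at most one amdgpu_mn context per (device, mm) pair: the hash
 * lookup above is keyed on the mm_struct pointer, so repeated calls from
 * the same process return the existing context instead of registering a
 * second notifier.
 */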

/**
 * amdgpu_mn_register - register a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 * @addr: userptr addr we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	unsigned long end = addr + amdgpu_bo_size(bo) - 1;
	struct amdgpu_device *adev = bo->adev;
	struct amdgpu_mn *rmn;
	struct amdgpu_mn_node *node = NULL;
	struct list_head bos;
	struct interval_tree_node *it;

	rmn = amdgpu_mn_get(adev);
	if (IS_ERR(rmn))
		return PTR_ERR(rmn);

	INIT_LIST_HEAD(&bos);

	down_write(&rmn->mm->mmap_sem);

	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
		kfree(node);
		node = container_of(it, struct amdgpu_mn_node, it);
		interval_tree_remove(&node->it, &rmn->objects);
		addr = min(it->start, addr);
		end = max(it->last, end);
		list_splice(&node->bos, &bos);
	}

	if (!node) {
		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
		if (!node) {
			up_write(&rmn->mm->mmap_sem);
			return -ENOMEM;
		}
	}

	bo->mn = rmn;

	node->it.start = addr;
	node->it.last = end;
	INIT_LIST_HEAD(&node->bos);
	list_splice(&bos, &node->bos);
	list_add(&bo->mn_list, &node->bos);

	interval_tree_insert(&node->it, &rmn->objects);

	up_write(&rmn->mm->mmap_sem);

	return 0;
}
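
/*
 * A minimal usage sketch, not part of the original file: how a userptr
 * creation path might hook a freshly allocated BO up to the notifier.
 * example_setup_userptr() and its userptr argument are illustrative
 * assumptions, not driver API.
 */
#if 0
static int example_setup_userptr(struct amdgpu_bo *bo, unsigned long userptr)
{
	int r;

	/* monitor the CPU address range backing this BO */
	r = amdgpu_mn_register(bo, userptr);
	if (r) {
		/* registration failed, drop our BO reference */
		amdgpu_bo_unref(&bo);
		return r;
	}
	return 0;
}
#endif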

/**
 * amdgpu_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = bo->adev;
	struct amdgpu_mn *rmn;
	struct list_head *head;

	mutex_lock(&adev->mn_lock);
	rmn = bo->mn;
	if (rmn == NULL) {
		mutex_unlock(&adev->mn_lock);
		return;
	}

	down_write(&rmn->mm->mmap_sem);

	/* save the next list entry for later */
	head = bo->mn_list.next;

	bo->mn = NULL;
	list_del(&bo->mn_list);

	if (list_empty(head)) {
		struct amdgpu_mn_node *node;
		node = container_of(head, struct amdgpu_mn_node, bos);
		interval_tree_remove(&node->it, &rmn->objects);
		kfree(node);
	}

	up_write(&rmn->mm->mmap_sem);
	mutex_unlock(&adev->mn_lock);
}