/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"

struct amdgpu_mn {
        /* constant after initialisation */
        struct amdgpu_device    *adev;
        struct mm_struct        *mm;
        struct mmu_notifier     mn;

        /* only used on destruction */
        struct work_struct      work;

        /* protected by adev->mn_lock */
        struct hlist_node       node;

        /* objects protected by lock */
        struct mutex            lock;
        struct rb_root          objects;
};

struct amdgpu_mn_node {
        struct interval_tree_node       it;
        struct list_head                bos;
};

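/*
 * One struct amdgpu_mn is kept per (device, mm_struct) pair in adev->mn_hash.
 * Its interval tree of amdgpu_mn_node entries covers the registered userptr
 * ranges; overlapping registrations are merged into a single node, and each
 * node carries the list of BOs backing that range (see amdgpu_mn_register()).
 */
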
/**
 * amdgpu_mn_destroy - destroy the rmn
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item.
 */
static void amdgpu_mn_destroy(struct work_struct *work)
{
        struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
        struct amdgpu_device *adev = rmn->adev;
        struct amdgpu_mn_node *node, *next_node;
        struct amdgpu_bo *bo, *next_bo;

        mutex_lock(&adev->mn_lock);
        mutex_lock(&rmn->lock);
        hash_del(&rmn->node);
        rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
                                             it.rb) {
                list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
                        bo->mn = NULL;
                        list_del_init(&bo->mn_list);
                }
                kfree(node);
        }
        mutex_unlock(&rmn->lock);
        mutex_unlock(&adev->mn_lock);
        mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
        kfree(rmn);
}

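/*
 * Locking order used throughout this file: adev->mn_lock is taken before
 * rmn->lock (and, in amdgpu_mn_get(), before mm->mmap_sem as well).
 */
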
/**
 * amdgpu_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Schedule a work item to lazily destroy our notifier.
 */
static void amdgpu_mn_release(struct mmu_notifier *mn,
                              struct mm_struct *mm)
{
        struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
        INIT_WORK(&rmn->work, amdgpu_mn_destroy);
        schedule_work(&rmn->work);
}

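/*
 * Note: teardown is deferred to a work item rather than done here directly,
 * presumably because unregistering the notifier from within its own
 * ->release() callback would not be safe.
 */
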
/**
 * amdgpu_mn_invalidate_node - unmap all BOs of a node
 *
 * @node: the node with the BOs to unmap
 * @start: start of the affected address range
 * @end: end of the affected address range
 *
 * We block for all BOs and unmap them by moving them
 * into the system domain again.
 */
static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
                                      unsigned long start,
                                      unsigned long end)
{
        struct amdgpu_bo *bo;
        long r;

        list_for_each_entry(bo, &node->bos, mn_list) {

                if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
                        continue;

                r = amdgpu_bo_reserve(bo, true);
                if (r) {
                        DRM_ERROR("(%ld) failed to reserve user bo\n", r);
                        continue;
                }

                r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
                        true, false, MAX_SCHEDULE_TIMEOUT);
                if (r <= 0)
                        DRM_ERROR("(%ld) failed to wait for user bo\n", r);

                amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
                if (r)
                        DRM_ERROR("(%ld) failed to validate user bo\n", r);

                amdgpu_bo_unreserve(bo);
        }
}

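/*
 * A note on the above (interpretation of the code, not a documented
 * guarantee): validating the BO into AMDGPU_GEM_DOMAIN_CPU makes TTM move it
 * out of GTT, so the GPU stops referencing the user pages being invalidated.
 */
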
/**
 * amdgpu_mn_invalidate_page - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @address: address of the invalidated page
 *
 * Invalidation of a single page. Blocks for all BOs mapping it
 * and unmaps them by moving them into the system domain again.
 */
static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
                                      struct mm_struct *mm,
                                      unsigned long address)
{
        struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
        struct interval_tree_node *it;

        mutex_lock(&rmn->lock);

        it = interval_tree_iter_first(&rmn->objects, address, address);
        if (it) {
                struct amdgpu_mn_node *node;

                node = container_of(it, struct amdgpu_mn_node, it);
                amdgpu_mn_invalidate_node(node, address, address);
        }

        mutex_unlock(&rmn->lock);
}

/**
 * amdgpu_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * We block for all BOs between start and end to be idle and
 * unmap them by moving them into the system domain again.
 */
static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long start,
                                             unsigned long end)
{
        struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
        struct interval_tree_node *it;

        /* notification is exclusive, but interval is inclusive */
        end -= 1;

        mutex_lock(&rmn->lock);

        it = interval_tree_iter_first(&rmn->objects, start, end);
        while (it) {
                struct amdgpu_mn_node *node;

                node = container_of(it, struct amdgpu_mn_node, it);
                it = interval_tree_iter_next(it, start, end);

                amdgpu_mn_invalidate_node(node, start, end);
        }

        mutex_unlock(&rmn->lock);
}

static const struct mmu_notifier_ops amdgpu_mn_ops = {
        .release = amdgpu_mn_release,
        .invalidate_page = amdgpu_mn_invalidate_page,
        .invalidate_range_start = amdgpu_mn_invalidate_range_start,
};

/**
 * amdgpu_mn_get - create notifier context
 *
 * @adev: amdgpu device pointer
 *
 * Creates a notifier context for current->mm.
 */
static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
{
        struct mm_struct *mm = current->mm;
        struct amdgpu_mn *rmn;
        int r;

        mutex_lock(&adev->mn_lock);
        /* __mmu_notifier_register() below expects mmap_sem held for write */
        if (down_write_killable(&mm->mmap_sem)) {
                mutex_unlock(&adev->mn_lock);
                return ERR_PTR(-EINTR);
        }

        hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
                if (rmn->mm == mm)
                        goto release_locks;

        rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
        if (!rmn) {
                rmn = ERR_PTR(-ENOMEM);
                goto release_locks;
        }

        rmn->adev = adev;
        rmn->mm = mm;
        rmn->mn.ops = &amdgpu_mn_ops;
        mutex_init(&rmn->lock);
        rmn->objects = RB_ROOT;

        r = __mmu_notifier_register(&rmn->mn, mm);
        if (r)
                goto free_rmn;

        hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);

release_locks:
        up_write(&mm->mmap_sem);
        mutex_unlock(&adev->mn_lock);

        return rmn;

free_rmn:
        up_write(&mm->mmap_sem);
        mutex_unlock(&adev->mn_lock);
        kfree(rmn);

        return ERR_PTR(r);
}

/**
 * amdgpu_mn_register - register a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 * @addr: userptr addr we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
        unsigned long end = addr + amdgpu_bo_size(bo) - 1;
        struct amdgpu_device *adev = bo->adev;
        struct amdgpu_mn *rmn;
        struct amdgpu_mn_node *node = NULL;
        struct list_head bos;
        struct interval_tree_node *it;

        rmn = amdgpu_mn_get(adev);
        if (IS_ERR(rmn))
                return PTR_ERR(rmn);

        INIT_LIST_HEAD(&bos);

        mutex_lock(&rmn->lock);

        /* merge all nodes overlapping [addr, end] into a single node */
        while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
                kfree(node);
                node = container_of(it, struct amdgpu_mn_node, it);
                interval_tree_remove(&node->it, &rmn->objects);
                addr = min(it->start, addr);
                end = max(it->last, end);
                list_splice(&node->bos, &bos);
        }

        if (!node) {
                node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
                if (!node) {
                        mutex_unlock(&rmn->lock);
                        return -ENOMEM;
                }
        }

        bo->mn = rmn;

        node->it.start = addr;
        node->it.last = end;
        INIT_LIST_HEAD(&node->bos);
        list_splice(&bos, &node->bos);
        list_add(&bo->mn_list, &node->bos);

        interval_tree_insert(&node->it, &rmn->objects);

        mutex_unlock(&rmn->lock);

        return 0;
}

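/*
 * Illustrative usage sketch (not part of this driver; names other than
 * amdgpu_mn_register()/amdgpu_mn_unregister() are placeholders):
 *
 *      r = amdgpu_mn_register(bo, userptr_addr);
 *      if (r)
 *              goto error_free;
 *      ...
 *      amdgpu_mn_unregister(bo);
 *      amdgpu_bo_unref(&bo);
 */
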
/**
 * amdgpu_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = bo->adev;
        struct amdgpu_mn *rmn;
        struct list_head *head;

        mutex_lock(&adev->mn_lock);

        rmn = bo->mn;
        if (rmn == NULL) {
                mutex_unlock(&adev->mn_lock);
                return;
        }

        mutex_lock(&rmn->lock);

        /* save the next list entry for later */
        head = bo->mn_list.next;

        bo->mn = NULL;
        list_del(&bo->mn_list);

        if (list_empty(head)) {
                struct amdgpu_mn_node *node;

                node = container_of(head, struct amdgpu_mn_node, bos);
                interval_tree_remove(&node->it, &rmn->objects);
                kfree(node);
        }

        mutex_unlock(&rmn->lock);
        mutex_unlock(&adev->mn_lock);
}