/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_i915_private *i915;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};
#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>
struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root objects;
	struct workqueue_struct *wq;
};
struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct drm_i915_gem_object *obj;
	struct interval_tree_node it;
	struct list_head link;
	struct work_struct work;
	bool attached;
};
static void wait_rendering(struct drm_i915_gem_object *obj)
{
	unsigned long active = __I915_BO_ACTIVE(obj);
	int idx;

	for_each_active(active, idx)
		i915_gem_active_wait_unlocked(&obj->last_read[idx],
					      false, NULL, NULL);
}
static void cancel_userptr(struct work_struct *work)
{
	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
	struct drm_i915_gem_object *obj = mo->obj;
	struct drm_device *dev = obj->base.dev;

	wait_rendering(obj);

	mutex_lock(&dev->struct_mutex);
	/* Cancel any active worker and force us to re-evaluate gup */
	obj->userptr.work = NULL;

	if (obj->pages != NULL) {
		/* We are inside a kthread context and can't be interrupted */
		WARN_ON(i915_gem_object_unbind(obj));
		WARN_ON(i915_gem_object_put_pages(obj));
	}

	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);
}
static void add_object(struct i915_mmu_object *mo)
{
	if (mo->attached)
		return;

	interval_tree_insert(&mo->it, &mo->mn->objects);
	mo->attached = true;
}

static void del_object(struct i915_mmu_object *mo)
{
	if (!mo->attached)
		return;

	interval_tree_remove(&mo->it, &mo->mn->objects);
	mo->attached = false;
}
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct i915_mmu_object *mo;
	struct interval_tree_node *it;
	LIST_HEAD(cancelled);

	if (RB_EMPTY_ROOT(&mn->objects))
		return;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end--;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, start, end);
	while (it) {
		/* The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		mo = container_of(it, struct i915_mmu_object, it);
		if (kref_get_unless_zero(&mo->obj->base.refcount))
			queue_work(mn->wq, &mo->work);

		list_add(&mo->link, &cancelled);
		it = interval_tree_iter_next(it, start, end);
	}
	list_for_each_entry(mo, &cancelled, link)
		del_object(mo);
	spin_unlock(&mn->lock);

	flush_workqueue(mn->wq);
}
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};
static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int ret;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT;
	mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
	if (mn->wq == NULL) {
		kfree(mn);
		return ERR_PTR(-ENOMEM);
	}

	/* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mn->mn, mm);
	if (ret) {
		destroy_workqueue(mn->wq);
		kfree(mn);
		return ERR_PTR(ret);
	}

	return mn;
}
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = obj->userptr.mmu_object;
	if (mo == NULL)
		return;

	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);
	kfree(mo);

	obj->userptr.mmu_object = NULL;
}
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn = mm->mn;

	if (mn)
		return mn;

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&mm->i915->mm_lock);
	if ((mn = mm->mn) == NULL) {
		mn = i915_mmu_notifier_create(mm->mm);
		if (!IS_ERR(mn))
			mm->mn = mn;
	}
	mutex_unlock(&mm->i915->mm_lock);
	up_write(&mm->mm->mmap_sem);

	return mn;
}
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (mo == NULL)
		return -ENOMEM;

	mo->mn = mn;
	mo->obj = obj;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
	INIT_WORK(&mo->work, cancel_userptr);

	obj->userptr.mmu_object = mo;
	return 0;
}
static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	destroy_workqueue(mn->wq);
	kfree(mn);
}
#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}
static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->i915 = to_i915(obj->base.dev);

		mm->mm = current->mm;
		atomic_inc(&current->mm->mm_count);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}
static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&mm->i915->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	schedule_work(&mm->work);
}
static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}
struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};
#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active()	swiotlb_nr_tbl()
#else
#define swiotlb_active()	0
#endif
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), GFP_KERNEL);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}
static int
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
			     struct page **pvec, int num_pages)
{
	int ret;

	ret = st_set_pages(&obj->pages, pvec, num_pages);
	if (ret)
		return ret;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret) {
		sg_free_table(obj->pages);
		kfree(obj->pages);
		obj->pages = NULL;
	}

	return ret;
}
static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
			      bool value)
{
	int ret = 0;

	/* During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmapping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held. To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
#if defined(CONFIG_MMU_NOTIFIER)
	if (obj->userptr.mmu_object == NULL)
		return 0;

	spin_lock(&obj->userptr.mmu_object->mn->lock);
	/* In order to serialise get_pages with an outstanding
	 * cancel_userptr, we must drop the struct_mutex and try again.
	 */
	if (!value)
		del_object(obj->userptr.mmu_object);
	else if (!work_pending(&obj->userptr.mmu_object->work))
		add_object(obj->userptr.mmu_object);
	else
		ret = -EAGAIN;
	spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

	return ret;
}
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	struct drm_device *dev = obj->base.dev;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;

		ret = -EFAULT;
		if (atomic_inc_not_zero(&mm->mm_users)) {
			down_read(&mm->mmap_sem);
			while (pinned < npages) {
				ret = get_user_pages_remote
					(work->task, mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 !obj->userptr.read_only, 0,
					 pvec + pinned, NULL);
				if (ret < 0)
					break;

				pinned += ret;
			}
			up_read(&mm->mmap_sem);
			mmput(mm);
		}
	}

	mutex_lock(&dev->struct_mutex);
	if (obj->userptr.work == &work->work) {
		if (pinned == npages) {
			ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
			if (ret == 0) {
				list_add_tail(&obj->global_list,
					      &to_i915(dev)->mm.unbound_list);
				obj->get_page.sg = obj->pages->sgl;
				obj->get_page.last = 0;
				pinned = 0;
			}
		}
		obj->userptr.work = ERR_PTR(ret);
	}

	obj->userptr.workers--;
	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	put_task_struct(work->task);
	kfree(work);
}
static int
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
				      bool *active)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 * (An illustrative caller-side retry sketch follows this function.)
	 */
	if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
		return -EAGAIN;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	obj->userptr.work = &work->work;
	obj->userptr.workers++;

	work->obj = i915_gem_object_get(obj);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	schedule_work(&work->work);

	*active = true;
	return -EAGAIN;
}
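/*
 * Illustrative only (not part of the driver): the repeat-on-EAGAIN behaviour
 * described above normally lives in the userspace caller, e.g. a libdrm-style
 * wrapper that reissues the ioctl while the kernel keeps returning -EAGAIN.
 * The names below (fd, request, arg) are assumptions for this sketch.
 *
 *	int ret;
 *	do {
 *		ret = ioctl(fd, request, arg);
 *	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
 */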
static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;
	bool active;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */

	if (obj->userptr.work) {
		/* active flag should still be held for the pending work */
		if (IS_ERR(obj->userptr.work))
			return PTR_ERR(obj->userptr.work);
		else
			return -EAGAIN;
	}

	/* Let the mmu-notifier know that we have begun and need cancellation */
	ret = __i915_gem_userptr_set_active(obj, true);
	if (ret)
		return ret;

	pvec = NULL;
	pinned = 0;
	if (obj->userptr.mm->mm == current->mm) {
		pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
				      GFP_TEMPORARY);
		if (pvec == NULL) {
			__i915_gem_userptr_set_active(obj, false);
			return -ENOMEM;
		}

		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
	}

	active = false;
	if (pinned < 0)
		ret = pinned, pinned = 0;
	else if (pinned < num_pages)
		ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
	else
		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
	if (ret) {
		__i915_gem_userptr_set_active(obj, active);
		release_pages(pvec, pinned, 0);
	}
	drm_free_large(pvec);
	return ret;
}
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	BUG_ON(obj->userptr.work != NULL);
	__i915_gem_userptr_set_active(obj, false);

	if (obj->madv != I915_MADV_WILLNEED)
		obj->dirty = 0;

	i915_gem_gtt_finish_object(obj);

	for_each_sgt_page(page, sgt_iter, obj->pages) {
		if (obj->dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}
static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}
static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.release = i915_gem_userptr_release,
};
/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
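/*
 * A minimal userspace sketch of the intended usage (illustrative only; it
 * assumes an already-open DRM fd, a page-aligned SIZE, the uAPI definitions
 * from <drm/i915_drm.h> and libdrm's drmIoctl(); use_gem_handle() is a
 * hypothetical consumer of the resulting handle):
 *
 *	struct drm_i915_gem_userptr arg = {};
 *	void *ptr;
 *
 *	posix_memalign(&ptr, 4096, SIZE);	// rule 1: page-aligned start/size
 *	arg.user_ptr = (uintptr_t)ptr;		// rule 2: normal system memory
 *	arg.user_size = SIZE;
 *	arg.flags = 0;
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		use_gem_handle(arg.handle);
 */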
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU that a
		 * page is readonly, so this is just a placeholder in the uAPI.
		 */
		return -ENODEV;
	}

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->cache_level = I915_CACHE_LLC;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}
void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);
}
);