/* drivers/gpu/drm/i915/i915_gem_userptr.c */
/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>

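/* One i915_mm_struct is shared by every userptr object created against the
 * same process address space. It is hashed by the mm_struct pointer in
 * dev_priv->mm_structs, refcounted via kref, and frees itself from a worker
 * so that the final mmdrop() never runs under struct_mutex.
 */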
struct i915_mm_struct {
        struct mm_struct *mm;
        struct drm_i915_private *i915;
        struct i915_mmu_notifier *mn;
        struct hlist_node node;
        struct kref kref;
        struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

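/* With CONFIG_MMU_NOTIFIER we register one notifier per mm and track every
 * userptr object in an interval tree indexed by its user address range, so
 * that invalidation of any part of that range can find and cancel the
 * affected objects.
 */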
struct i915_mmu_notifier {
        spinlock_t lock;
        struct hlist_node node;
        struct mmu_notifier mn;
        struct rb_root objects;
        struct workqueue_struct *wq;
};

struct i915_mmu_object {
        struct i915_mmu_notifier *mn;
        struct drm_i915_gem_object *obj;
        struct interval_tree_node it;
        struct list_head link;
        struct work_struct work;
        bool attached;
};

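/* Wait for any outstanding rendering on the object, without struct_mutex. */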
static void wait_rendering(struct drm_i915_gem_object *obj)
{
        unsigned long active = __I915_BO_ACTIVE(obj);
        int idx;

        for_each_active(active, idx)
                i915_gem_active_wait_unlocked(&obj->last_read[idx],
                                              0, NULL, NULL);
}

static void cancel_userptr(struct work_struct *work)
{
        struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
        struct drm_i915_gem_object *obj = mo->obj;
        struct drm_device *dev = obj->base.dev;

        wait_rendering(obj);

        mutex_lock(&dev->struct_mutex);
        /* Cancel any active worker and force us to re-evaluate gup */
        obj->userptr.work = NULL;

        if (obj->pages != NULL) {
                /* We are inside a kthread context and can't be interrupted */
                WARN_ON(i915_gem_object_unbind(obj));
                WARN_ON(i915_gem_object_put_pages(obj));
        }

        i915_gem_object_put(obj);
        mutex_unlock(&dev->struct_mutex);
}

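/* Attach/detach the object to/from the notifier's interval tree; the caller
 * must hold mn->lock.
 */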
static void add_object(struct i915_mmu_object *mo)
{
        if (mo->attached)
                return;

        interval_tree_insert(&mo->it, &mo->mn->objects);
        mo->attached = true;
}

static void del_object(struct i915_mmu_object *mo)
{
        if (!mo->attached)
                return;

        interval_tree_remove(&mo->it, &mo->mn->objects);
        mo->attached = false;
}

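/* For every object overlapping the invalidated range, take a reference (if
 * the object is not already being freed), queue cancel_userptr() and detach
 * it from the tree. The flush_workqueue() then ensures all cancellations
 * have completed before the invalidation proceeds.
 */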
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
                                                       struct mm_struct *mm,
                                                       unsigned long start,
                                                       unsigned long end)
{
        struct i915_mmu_notifier *mn =
                container_of(_mn, struct i915_mmu_notifier, mn);
        struct i915_mmu_object *mo;
        struct interval_tree_node *it;
        LIST_HEAD(cancelled);

        if (RB_EMPTY_ROOT(&mn->objects))
                return;

        /* interval ranges are inclusive, but invalidate range is exclusive */
        end--;

        spin_lock(&mn->lock);
        it = interval_tree_iter_first(&mn->objects, start, end);
        while (it) {
                /* The mmu_object is released late when destroying the
                 * GEM object so it is entirely possible to gain a
                 * reference on an object in the process of being freed
                 * since our serialisation is via the spinlock and not
                 * the struct_mutex - and consequently use it after it
                 * is freed and then double free it. To prevent that
                 * use-after-free we only acquire a reference on the
                 * object if it is not in the process of being destroyed.
                 */
                mo = container_of(it, struct i915_mmu_object, it);
                if (kref_get_unless_zero(&mo->obj->base.refcount))
                        queue_work(mn->wq, &mo->work);

                list_add(&mo->link, &cancelled);
                it = interval_tree_iter_next(it, start, end);
        }
        list_for_each_entry(mo, &cancelled, link)
                del_object(mo);
        spin_unlock(&mn->lock);

        flush_workqueue(mn->wq);
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
        .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
        struct i915_mmu_notifier *mn;
        int ret;

        mn = kmalloc(sizeof(*mn), GFP_KERNEL);
        if (mn == NULL)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&mn->lock);
        mn->mn.ops = &i915_gem_userptr_notifier;
        mn->objects = RB_ROOT;
        mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
        if (mn->wq == NULL) {
                kfree(mn);
                return ERR_PTR(-ENOMEM);
        }

        /* Protected by mmap_sem (write-lock) */
        ret = __mmu_notifier_register(&mn->mn, mm);
        if (ret) {
                destroy_workqueue(mn->wq);
                kfree(mn);
                return ERR_PTR(ret);
        }

        return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
        struct i915_mmu_object *mo;

        mo = obj->userptr.mmu_object;
        if (mo == NULL)
                return;

        spin_lock(&mo->mn->lock);
        del_object(mo);
        spin_unlock(&mo->mn->lock);
        kfree(mo);

        obj->userptr.mmu_object = NULL;
}

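/* Look up (or lazily create) the mmu_notifier for this mm. Creation takes
 * the mmap_sem write-lock (required by __mmu_notifier_register) as well as
 * dev_priv->mm_lock to serialise against concurrent lookups.
 */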
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
        struct i915_mmu_notifier *mn = mm->mn;

        if (mn)
                return mn;

        down_write(&mm->mm->mmap_sem);
        mutex_lock(&mm->i915->mm_lock);
        if ((mn = mm->mn) == NULL) {
                mn = i915_mmu_notifier_create(mm->mm);
                if (!IS_ERR(mn))
                        mm->mn = mn;
        }
        mutex_unlock(&mm->i915->mm_lock);
        up_write(&mm->mm->mmap_sem);

        return mn;
}

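/* Allocate the i915_mmu_object tracking this object's user range,
 * [ptr, ptr + size). It is inserted into the interval tree later, when the
 * object first acquires its pages (see __i915_gem_userptr_set_active()).
 */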
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
{
        struct i915_mmu_notifier *mn;
        struct i915_mmu_object *mo;

        if (flags & I915_USERPTR_UNSYNCHRONIZED)
                return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

        if (WARN_ON(obj->userptr.mm == NULL))
                return -EINVAL;

        mn = i915_mmu_notifier_find(obj->userptr.mm);
        if (IS_ERR(mn))
                return PTR_ERR(mn);

        mo = kzalloc(sizeof(*mo), GFP_KERNEL);
        if (mo == NULL)
                return -ENOMEM;

        mo->mn = mn;
        mo->obj = obj;
        mo->it.start = obj->userptr.ptr;
        mo->it.last = obj->userptr.ptr + obj->base.size - 1;
        INIT_WORK(&mo->work, cancel_userptr);

        obj->userptr.mmu_object = mo;
        return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                       struct mm_struct *mm)
{
        if (mn == NULL)
                return;

        mmu_notifier_unregister(&mn->mn, mm);
        destroy_workqueue(mn->wq);
        kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
{
        if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
                return -ENODEV;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                       struct mm_struct *mm)
{
}

#endif

static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
        struct i915_mm_struct *mm;

        /* Protected by dev_priv->mm_lock */
        hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
                if (mm->mm == real)
                        return mm;

        return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct i915_mm_struct *mm;
        int ret = 0;

        /* During release of the GEM object we hold the struct_mutex. This
         * precludes us from calling mmput() at that time as that may be
         * the last reference and so call exit_mmap(). exit_mmap() will
         * attempt to reap the vma, and if we were holding a GTT mmap
         * would then call drm_gem_vm_close() and attempt to reacquire
         * the struct_mutex. So in order to avoid that recursion, we have
         * to defer releasing the mm reference until after we drop the
         * struct_mutex, i.e. we need to schedule a worker to do the clean
         * up.
         */
        mutex_lock(&dev_priv->mm_lock);
        mm = __i915_mm_struct_find(dev_priv, current->mm);
        if (mm == NULL) {
                mm = kmalloc(sizeof(*mm), GFP_KERNEL);
                if (mm == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }

                kref_init(&mm->kref);
                mm->i915 = to_i915(obj->base.dev);

                mm->mm = current->mm;
                atomic_inc(&current->mm->mm_count);

                mm->mn = NULL;

                /* Protected by dev_priv->mm_lock */
                hash_add(dev_priv->mm_structs,
                         &mm->node, (unsigned long)mm->mm);
        } else
                kref_get(&mm->kref);

        obj->userptr.mm = mm;
out:
        mutex_unlock(&dev_priv->mm_lock);
        return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
        struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
        i915_mmu_notifier_free(mm->mn, mm->mm);
        mmdrop(mm->mm);
        kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
        struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

        /* Protected by dev_priv->mm_lock */
        hash_del(&mm->node);
        mutex_unlock(&mm->i915->mm_lock);

        INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
        schedule_work(&mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
        if (obj->userptr.mm == NULL)
                return;

        kref_put_mutex(&obj->userptr.mm->kref,
                       __i915_mm_struct_free,
                       &to_i915(obj->base.dev)->mm_lock);
        obj->userptr.mm = NULL;
}

struct get_pages_work {
        struct work_struct work;
        struct drm_i915_gem_object *obj;
        struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif

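/* Build the backing sg_table for the pinned user pages. When swiotlb is
 * active we keep one page per segment, as coalesced segments may exceed
 * what the bounce buffers can map; otherwise contiguous pages are merged
 * into larger DMA segments.
 */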
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
        struct scatterlist *sg;
        int ret, n;

        *st = kmalloc(sizeof(**st), GFP_KERNEL);
        if (*st == NULL)
                return -ENOMEM;

        if (swiotlb_active()) {
                ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
                if (ret)
                        goto err;

                for_each_sg((*st)->sgl, sg, num_pages, n)
                        sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
        } else {
                ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
                                                0, num_pages << PAGE_SHIFT,
                                                GFP_KERNEL);
                if (ret)
                        goto err;
        }

        return 0;

err:
        kfree(*st);
        *st = NULL;
        return ret;
}

static int
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
                             struct page **pvec, int num_pages)
{
        int ret;

        ret = st_set_pages(&obj->pages, pvec, num_pages);
        if (ret)
                return ret;

        ret = i915_gem_gtt_prepare_object(obj);
        if (ret) {
                sg_free_table(obj->pages);
                kfree(obj->pages);
                obj->pages = NULL;
        }

        return ret;
}

static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
                              bool value)
{
        int ret = 0;

        /* During mm_invalidate_range we need to cancel any userptr that
         * overlaps the range being invalidated. Doing so requires the
         * struct_mutex, and that risks recursion. In order to cause
         * recursion, the user must alias the userptr address space with
         * a GTT mmapping (possible with a MAP_FIXED) - then when we have
         * to invalidate that mmapping, mm_invalidate_range is called with
         * the userptr address *and* the struct_mutex held. To prevent that
         * we set a flag under the i915_mmu_notifier spinlock to indicate
         * whether this object is valid.
         */
#if defined(CONFIG_MMU_NOTIFIER)
        if (obj->userptr.mmu_object == NULL)
                return 0;

        spin_lock(&obj->userptr.mmu_object->mn->lock);
        /* In order to serialise get_pages with an outstanding
         * cancel_userptr, we must drop the struct_mutex and try again.
         */
        if (!value)
                del_object(obj->userptr.mmu_object);
        else if (!work_pending(&obj->userptr.mmu_object->work))
                add_object(obj->userptr.mmu_object);
        else
                ret = -EAGAIN;
        spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

        return ret;
}

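/* Worker to pin the user pages on behalf of i915_gem_userptr_get_pages():
 * it takes a reference on the mm, pins the pages with get_user_pages_remote()
 * under mmap_sem, and then, provided the work has not been cancelled or
 * superseded in the meantime, installs the resulting sg_table on the object.
 */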
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
        struct get_pages_work *work = container_of(_work, typeof(*work), work);
        struct drm_i915_gem_object *obj = work->obj;
        struct drm_device *dev = obj->base.dev;
        const int npages = obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        int pinned, ret;

        ret = -ENOMEM;
        pinned = 0;

        pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
        if (pvec != NULL) {
                struct mm_struct *mm = obj->userptr.mm->mm;

                ret = -EFAULT;
                if (atomic_inc_not_zero(&mm->mm_users)) {
                        down_read(&mm->mmap_sem);
                        while (pinned < npages) {
                                ret = get_user_pages_remote
                                        (work->task, mm,
                                         obj->userptr.ptr + pinned * PAGE_SIZE,
                                         npages - pinned,
                                         !obj->userptr.read_only, 0,
                                         pvec + pinned, NULL);
                                if (ret < 0)
                                        break;

                                pinned += ret;
                        }
                        up_read(&mm->mmap_sem);
                        mmput(mm);
                }
        }

        mutex_lock(&dev->struct_mutex);
        if (obj->userptr.work == &work->work) {
                if (pinned == npages) {
                        ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
                        if (ret == 0) {
                                list_add_tail(&obj->global_list,
                                              &to_i915(dev)->mm.unbound_list);
                                obj->get_page.sg = obj->pages->sgl;
                                obj->get_page.last = 0;
                                pinned = 0;
                        }
                }
                obj->userptr.work = ERR_PTR(ret);
        }

        obj->userptr.workers--;
        i915_gem_object_put(obj);
        mutex_unlock(&dev->struct_mutex);

        release_pages(pvec, pinned, 0);
        drm_free_large(pvec);

        put_task_struct(work->task);
        kfree(work);
}

static int
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
                                      bool *active)
{
        struct get_pages_work *work;

        /* Spawn a worker so that we can acquire the
         * user pages without holding our mutex. Access
         * to the user pages requires mmap_sem, and we have
         * a strict lock ordering of mmap_sem, struct_mutex -
         * we already hold struct_mutex here and so cannot
         * call gup without encountering a lock inversion.
         *
         * Userspace will keep on repeating the operation
         * (thanks to EAGAIN) until either we hit the fast
         * path or the worker completes. If the worker is
         * cancelled or superseded, the task is still run
         * but the results ignored. (This leads to
         * complications that we may have a stray object
         * refcount that we need to be wary of when
         * checking for existing objects during creation.)
         * If the worker encounters an error, it reports
         * that error back to this function through
         * obj->userptr.work = ERR_PTR.
         */
        if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
                return -EAGAIN;

        work = kmalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL)
                return -ENOMEM;

        obj->userptr.work = &work->work;
        obj->userptr.workers++;

        work->obj = i915_gem_object_get(obj);

        work->task = current;
        get_task_struct(work->task);

        INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
        schedule_work(&work->work);

        *active = true;
        return -EAGAIN;
}

static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
        const int num_pages = obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        int pinned, ret;
        bool active;

        /* If userspace should engineer that these pages are replaced in
         * the vma between us binding this page into the GTT and completion
         * of rendering... Their loss. If they change the mapping of their
         * pages they need to create a new bo to point to the new vma.
         *
         * However, that still leaves open the possibility of the vma
         * being copied upon fork. Which falls under the same userspace
         * synchronisation issue as a regular bo, except that this time
         * the process may not be expecting that a particular piece of
         * memory is tied to the GPU.
         *
         * Fortunately, we can hook into the mmu_notifier in order to
         * discard the page references prior to anything nasty happening
         * to the vma (discard or cloning) which should prevent the more
         * egregious cases from causing harm.
         */

        if (obj->userptr.work) {
                /* active flag should still be held for the pending work */
                if (IS_ERR(obj->userptr.work))
                        return PTR_ERR(obj->userptr.work);
                else
                        return -EAGAIN;
        }

        /* Let the mmu-notifier know that we have begun and need cancellation */
        ret = __i915_gem_userptr_set_active(obj, true);
        if (ret)
                return ret;

        pvec = NULL;
        pinned = 0;
        if (obj->userptr.mm->mm == current->mm) {
                pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
                                      GFP_TEMPORARY);
                if (pvec == NULL) {
                        __i915_gem_userptr_set_active(obj, false);
                        return -ENOMEM;
                }

                pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
                                               !obj->userptr.read_only, pvec);
        }

        active = false;
        if (pinned < 0)
                ret = pinned, pinned = 0;
        else if (pinned < num_pages)
                ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
        else
                ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
        if (ret) {
                __i915_gem_userptr_set_active(obj, active);
                release_pages(pvec, pinned, 0);
        }
        drm_free_large(pvec);
        return ret;
}

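/* Drop our references on the user pages. Pages the GPU may have written are
 * marked dirty on the way out, unless userspace declared them DONTNEED.
 */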
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
        struct sgt_iter sgt_iter;
        struct page *page;

        BUG_ON(obj->userptr.work != NULL);
        __i915_gem_userptr_set_active(obj, false);

        if (obj->madv != I915_MADV_WILLNEED)
                obj->dirty = 0;

        i915_gem_gtt_finish_object(obj);

        for_each_sgt_page(page, sgt_iter, obj->pages) {
                if (obj->dirty)
                        set_page_dirty(page);

                mark_page_accessed(page);
                put_page(page);
        }
        obj->dirty = 0;

        sg_free_table(obj->pages);
        kfree(obj->pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
        i915_gem_userptr_release__mmu_notifier(obj);
        i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
        if (obj->userptr.mmu_object)
                return 0;

        return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
        .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
        .get_pages = i915_gem_userptr_get_pages,
        .put_pages = i915_gem_userptr_put_pages,
        .dmabuf_export = i915_gem_userptr_dmabuf_export,
        .release = i915_gem_userptr_release,
};

/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct drm_i915_gem_userptr *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
        u32 handle;

        if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) {
                /* We cannot support coherent userptr objects on hw that has
                 * neither an LLC nor snooping.
                 */
                return -ENODEV;
        }

        if (args->flags & ~(I915_USERPTR_READ_ONLY |
                            I915_USERPTR_UNSYNCHRONIZED))
                return -EINVAL;

        if (offset_in_page(args->user_ptr | args->user_size))
                return -EINVAL;

        if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
                       (char __user *)(unsigned long)args->user_ptr, args->user_size))
                return -EFAULT;

        if (args->flags & I915_USERPTR_READ_ONLY) {
                /* On almost all of the current hw, we cannot tell the GPU that a
                 * page is readonly, so this is just a placeholder in the uAPI.
                 */
                return -ENODEV;
        }

        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return -ENOMEM;

        drm_gem_private_object_init(dev, &obj->base, args->user_size);
        i915_gem_object_init(obj, &i915_gem_userptr_ops);
        obj->cache_level = I915_CACHE_LLC;
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;

        obj->userptr.ptr = args->user_ptr;
        obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

        /* And keep a pointer to the current->mm for resolving the user pages
         * at binding. This means that we need to hook into the mmu_notifier
         * in order to detect if the mmu is destroyed.
         */
        ret = i915_gem_userptr_init__mm_struct(obj);
        if (ret == 0)
                ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
        if (ret == 0)
                ret = drm_gem_handle_create(file, &obj->base, &handle);

        /* drop reference from allocate - handle holds it now */
        i915_gem_object_put_unlocked(obj);
        if (ret)
                return ret;

        args->handle = handle;
        return 0;
}

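/* One-time setup of the per-device userptr state: the lock and hash table
 * used to share i915_mm_struct instances between objects of the same mm.
 */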
void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
        mutex_init(&dev_priv->mm_lock);
        hash_init(dev_priv->mm_structs);
}