Commit | Line | Data |
---|---|---|
5cc9ed4b CW |
1 | /* |
2 | * Copyright © 2012-2014 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |
21 | * IN THE SOFTWARE. | |
22 | * | |
23 | */ | |
24 | ||
b588c92b ML |
25 | #include <drm/drmP.h> |
26 | #include <drm/i915_drm.h> | |
5cc9ed4b CW |
27 | #include "i915_drv.h" |
28 | #include "i915_trace.h" | |
29 | #include "intel_drv.h" | |
30 | #include <linux/mmu_context.h> | |
31 | #include <linux/mmu_notifier.h> | |
32 | #include <linux/mempolicy.h> | |
33 | #include <linux/swap.h> | |
34 | ||
ad46cb53 CW |
35 | struct i915_mm_struct { |
36 | struct mm_struct *mm; | |
f470b190 | 37 | struct drm_i915_private *i915; |
ad46cb53 CW |
38 | struct i915_mmu_notifier *mn; |
39 | struct hlist_node node; | |
40 | struct kref kref; | |
41 | struct work_struct work; | |
42 | }; | |
43 | ||
5cc9ed4b CW |
44 | #if defined(CONFIG_MMU_NOTIFIER) |
45 | #include <linux/interval_tree.h> | |
46 | ||
47 | struct i915_mmu_notifier { | |
48 | spinlock_t lock; | |
49 | struct hlist_node node; | |
50 | struct mmu_notifier mn; | |
51 | struct rb_root objects; | |
393afc2c | 52 | struct workqueue_struct *wq; |
5cc9ed4b CW |
53 | }; |
54 | ||
55 | struct i915_mmu_object { | |
ad46cb53 | 56 | struct i915_mmu_notifier *mn; |
768e159f | 57 | struct drm_i915_gem_object *obj; |
5cc9ed4b | 58 | struct interval_tree_node it; |
ec8b0dd5 | 59 | struct list_head link; |
380996aa | 60 | struct work_struct work; |
768e159f | 61 | bool attached; |
5cc9ed4b CW |
62 | }; |
63 | ||
393afc2c CW |
64 | static void wait_rendering(struct drm_i915_gem_object *obj) |
65 | { | |
66 | struct drm_device *dev = obj->base.dev; | |
67 | struct drm_i915_gem_request *requests[I915_NUM_ENGINES]; | |
393afc2c CW |
68 | int i, n; |
69 | ||
70 | if (!obj->active) | |
71 | return; | |
72 | ||
73 | n = 0; | |
74 | for (i = 0; i < I915_NUM_ENGINES; i++) { | |
75 | struct drm_i915_gem_request *req; | |
76 | ||
77 | req = obj->last_read_req[i]; | |
78 | if (req == NULL) | |
79 | continue; | |
80 | ||
81 | requests[n++] = i915_gem_request_reference(req); | |
82 | } | |
83 | ||
393afc2c CW |
84 | mutex_unlock(&dev->struct_mutex); |
85 | ||
86 | for (i = 0; i < n; i++) | |
299259a3 | 87 | __i915_wait_request(requests[i], false, NULL, NULL); |
393afc2c CW |
88 | |
89 | mutex_lock(&dev->struct_mutex); | |
90 | ||
91 | for (i = 0; i < n; i++) | |
92 | i915_gem_request_unreference(requests[i]); | |
93 | } | |
94 | ||
768e159f | 95 | static void cancel_userptr(struct work_struct *work) |
ec8b0dd5 | 96 | { |
380996aa CW |
97 | struct i915_mmu_object *mo = container_of(work, typeof(*mo), work); |
98 | struct drm_i915_gem_object *obj = mo->obj; | |
ec8b0dd5 | 99 | struct drm_device *dev = obj->base.dev; |
ec8b0dd5 CW |
100 | |
101 | mutex_lock(&dev->struct_mutex); | |
102 | /* Cancel any active worker and force us to re-evaluate gup */ | |
103 | obj->userptr.work = NULL; | |
104 | ||
105 | if (obj->pages != NULL) { | |
106 | struct drm_i915_private *dev_priv = to_i915(dev); | |
107 | struct i915_vma *vma, *tmp; | |
108 | bool was_interruptible; | |
109 | ||
393afc2c CW |
110 | wait_rendering(obj); |
111 | ||
ec8b0dd5 CW |
112 | was_interruptible = dev_priv->mm.interruptible; |
113 | dev_priv->mm.interruptible = false; | |
114 | ||
f4457ae7 CW |
115 | list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) |
116 | WARN_ON(i915_vma_unbind(vma)); | |
ec8b0dd5 CW |
117 | WARN_ON(i915_gem_object_put_pages(obj)); |
118 | ||
119 | dev_priv->mm.interruptible = was_interruptible; | |
120 | } | |
121 | ||
ec8b0dd5 CW |
122 | drm_gem_object_unreference(&obj->base); |
123 | mutex_unlock(&dev->struct_mutex); | |
ec8b0dd5 CW |
124 | } |
125 | ||
768e159f | 126 | static void add_object(struct i915_mmu_object *mo) |
ec8b0dd5 | 127 | { |
768e159f CW |
128 | if (mo->attached) |
129 | return; | |
ec8b0dd5 | 130 | |
768e159f CW |
131 | interval_tree_insert(&mo->it, &mo->mn->objects); |
132 | mo->attached = true; | |
133 | } | |
134 | ||
135 | static void del_object(struct i915_mmu_object *mo) | |
136 | { | |
137 | if (!mo->attached) | |
138 | return; | |
139 | ||
140 | interval_tree_remove(&mo->it, &mo->mn->objects); | |
141 | mo->attached = false; | |
ec8b0dd5 CW |
142 | } |
143 | ||
5cc9ed4b CW |
144 | static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, |
145 | struct mm_struct *mm, | |
146 | unsigned long start, | |
147 | unsigned long end) | |
148 | { | |
380996aa CW |
149 | struct i915_mmu_notifier *mn = |
150 | container_of(_mn, struct i915_mmu_notifier, mn); | |
151 | struct i915_mmu_object *mo; | |
768e159f CW |
152 | struct interval_tree_node *it; |
153 | LIST_HEAD(cancelled); | |
154 | ||
155 | if (RB_EMPTY_ROOT(&mn->objects)) | |
156 | return; | |
380996aa CW |
157 | |
158 | /* interval ranges are inclusive, but invalidate range is exclusive */ | |
159 | end--; | |
160 | ||
161 | spin_lock(&mn->lock); | |
768e159f CW |
162 | it = interval_tree_iter_first(&mn->objects, start, end); |
163 | while (it) { | |
164 | /* The mmu_object is released late when destroying the | |
165 | * GEM object so it is entirely possible to gain a | |
166 | * reference on an object in the process of being freed | |
167 | * since our serialisation is via the spinlock and not | |
168 | * the struct_mutex - and consequently use it after it | |
169 | * is freed and then double free it. To prevent that | |
170 | * use-after-free we only acquire a reference on the | |
171 | * object if it is not in the process of being destroyed. | |
172 | */ | |
173 | mo = container_of(it, struct i915_mmu_object, it); | |
174 | if (kref_get_unless_zero(&mo->obj->base.refcount)) | |
393afc2c | 175 | queue_work(mn->wq, &mo->work); |
5cc9ed4b | 176 | |
768e159f CW |
177 | list_add(&mo->link, &cancelled); |
178 | it = interval_tree_iter_next(it, start, end); | |
5cc9ed4b | 179 | } |
768e159f CW |
180 | list_for_each_entry(mo, &cancelled, link) |
181 | del_object(mo); | |
380996aa | 182 | spin_unlock(&mn->lock); |
393afc2c CW |
183 | |
184 | flush_workqueue(mn->wq); | |
5cc9ed4b CW |
185 | } |
186 | ||
187 | static const struct mmu_notifier_ops i915_gem_userptr_notifier = { | |
188 | .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start, | |
189 | }; | |
190 | ||
191 | static struct i915_mmu_notifier * | |
ad46cb53 | 192 | i915_mmu_notifier_create(struct mm_struct *mm) |
5cc9ed4b | 193 | { |
ad46cb53 | 194 | struct i915_mmu_notifier *mn; |
5cc9ed4b CW |
195 | int ret; |
196 | ||
ad46cb53 CW |
197 | mn = kmalloc(sizeof(*mn), GFP_KERNEL); |
198 | if (mn == NULL) | |
5cc9ed4b CW |
199 | return ERR_PTR(-ENOMEM); |
200 | ||
ad46cb53 CW |
201 | spin_lock_init(&mn->lock); |
202 | mn->mn.ops = &i915_gem_userptr_notifier; | |
203 | mn->objects = RB_ROOT; | |
393afc2c CW |
204 | mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0); |
205 | if (mn->wq == NULL) { | |
206 | kfree(mn); | |
207 | return ERR_PTR(-ENOMEM); | |
208 | } | |
ad46cb53 CW |
209 | |
210 | /* Protected by mmap_sem (write-lock) */ | |
211 | ret = __mmu_notifier_register(&mn->mn, mm); | |
5cc9ed4b | 212 | if (ret) { |
393afc2c | 213 | destroy_workqueue(mn->wq); |
ad46cb53 | 214 | kfree(mn); |
5cc9ed4b CW |
215 | return ERR_PTR(ret); |
216 | } | |
217 | ||
ad46cb53 | 218 | return mn; |
5cc9ed4b CW |
219 | } |
220 | ||
5cc9ed4b CW |
221 | static void |
222 | i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj) | |
223 | { | |
ad46cb53 | 224 | struct i915_mmu_object *mo; |
5cc9ed4b | 225 | |
ad46cb53 CW |
226 | mo = obj->userptr.mmu_object; |
227 | if (mo == NULL) | |
5cc9ed4b CW |
228 | return; |
229 | ||
768e159f CW |
230 | spin_lock(&mo->mn->lock); |
231 | del_object(mo); | |
232 | spin_unlock(&mo->mn->lock); | |
ad46cb53 CW |
233 | kfree(mo); |
234 | ||
235 | obj->userptr.mmu_object = NULL; | |
236 | } | |
237 | ||
238 | static struct i915_mmu_notifier * | |
239 | i915_mmu_notifier_find(struct i915_mm_struct *mm) | |
240 | { | |
e9681366 CW |
241 | struct i915_mmu_notifier *mn = mm->mn; |
242 | ||
243 | mn = mm->mn; | |
244 | if (mn) | |
245 | return mn; | |
246 | ||
247 | down_write(&mm->mm->mmap_sem); | |
f470b190 | 248 | mutex_lock(&mm->i915->mm_lock); |
e9681366 CW |
249 | if ((mn = mm->mn) == NULL) { |
250 | mn = i915_mmu_notifier_create(mm->mm); | |
251 | if (!IS_ERR(mn)) | |
252 | mm->mn = mn; | |
ad46cb53 | 253 | } |
f470b190 | 254 | mutex_unlock(&mm->i915->mm_lock); |
e9681366 CW |
255 | up_write(&mm->mm->mmap_sem); |
256 | ||
257 | return mn; | |
5cc9ed4b CW |
258 | } |
259 | ||
260 | static int | |
261 | i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj, | |
262 | unsigned flags) | |
263 | { | |
ad46cb53 CW |
264 | struct i915_mmu_notifier *mn; |
265 | struct i915_mmu_object *mo; | |
5cc9ed4b CW |
266 | |
267 | if (flags & I915_USERPTR_UNSYNCHRONIZED) | |
268 | return capable(CAP_SYS_ADMIN) ? 0 : -EPERM; | |
269 | ||
ad46cb53 CW |
270 | if (WARN_ON(obj->userptr.mm == NULL)) |
271 | return -EINVAL; | |
5cc9ed4b | 272 | |
ad46cb53 CW |
273 | mn = i915_mmu_notifier_find(obj->userptr.mm); |
274 | if (IS_ERR(mn)) | |
275 | return PTR_ERR(mn); | |
5cc9ed4b | 276 | |
ad46cb53 CW |
277 | mo = kzalloc(sizeof(*mo), GFP_KERNEL); |
278 | if (mo == NULL) | |
279 | return -ENOMEM; | |
5cc9ed4b | 280 | |
ad46cb53 | 281 | mo->mn = mn; |
ad46cb53 | 282 | mo->obj = obj; |
768e159f CW |
283 | mo->it.start = obj->userptr.ptr; |
284 | mo->it.last = obj->userptr.ptr + obj->base.size - 1; | |
285 | INIT_WORK(&mo->work, cancel_userptr); | |
ad46cb53 CW |
286 | |
287 | obj->userptr.mmu_object = mo; | |
5cc9ed4b | 288 | return 0; |
ad46cb53 CW |
289 | } |
290 | ||
291 | static void | |
292 | i915_mmu_notifier_free(struct i915_mmu_notifier *mn, | |
293 | struct mm_struct *mm) | |
294 | { | |
295 | if (mn == NULL) | |
296 | return; | |
5cc9ed4b | 297 | |
ad46cb53 | 298 | mmu_notifier_unregister(&mn->mn, mm); |
393afc2c | 299 | destroy_workqueue(mn->wq); |
5cc9ed4b | 300 | kfree(mn); |
5cc9ed4b CW |
301 | } |
302 | ||
303 | #else | |
304 | ||
305 | static void | |
306 | i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj) | |
307 | { | |
308 | } | |
309 | ||
310 | static int | |
311 | i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj, | |
312 | unsigned flags) | |
313 | { | |
314 | if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0) | |
315 | return -ENODEV; | |
316 | ||
317 | if (!capable(CAP_SYS_ADMIN)) | |
318 | return -EPERM; | |
319 | ||
320 | return 0; | |
321 | } | |
ad46cb53 CW |
322 | |
323 | static void | |
324 | i915_mmu_notifier_free(struct i915_mmu_notifier *mn, | |
325 | struct mm_struct *mm) | |
326 | { | |
327 | } | |
328 | ||
5cc9ed4b CW |
329 | #endif |
330 | ||
ad46cb53 CW |
331 | static struct i915_mm_struct * |
332 | __i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real) | |
333 | { | |
334 | struct i915_mm_struct *mm; | |
335 | ||
336 | /* Protected by dev_priv->mm_lock */ | |
337 | hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real) | |
338 | if (mm->mm == real) | |
339 | return mm; | |
340 | ||
341 | return NULL; | |
342 | } | |
343 | ||
344 | static int | |
345 | i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj) | |
346 | { | |
347 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); | |
348 | struct i915_mm_struct *mm; | |
349 | int ret = 0; | |
350 | ||
351 | /* During release of the GEM object we hold the struct_mutex. This | |
352 | * precludes us from calling mmput() at that time as that may be | |
353 | * the last reference and so call exit_mmap(). exit_mmap() will | |
354 | * attempt to reap the vma, and if we were holding a GTT mmap | |
355 | * would then call drm_gem_vm_close() and attempt to reacquire | |
356 | * the struct mutex. So in order to avoid that recursion, we have | |
357 | * to defer releasing the mm reference until after we drop the | |
358 | * struct_mutex, i.e. we need to schedule a worker to do the clean | |
359 | * up. | |
360 | */ | |
361 | mutex_lock(&dev_priv->mm_lock); | |
362 | mm = __i915_mm_struct_find(dev_priv, current->mm); | |
363 | if (mm == NULL) { | |
364 | mm = kmalloc(sizeof(*mm), GFP_KERNEL); | |
365 | if (mm == NULL) { | |
366 | ret = -ENOMEM; | |
367 | goto out; | |
368 | } | |
369 | ||
370 | kref_init(&mm->kref); | |
f470b190 | 371 | mm->i915 = to_i915(obj->base.dev); |
ad46cb53 CW |
372 | |
373 | mm->mm = current->mm; | |
374 | atomic_inc(&current->mm->mm_count); |
375 | ||
376 | mm->mn = NULL; | |
377 | ||
378 | /* Protected by dev_priv->mm_lock */ | |
379 | hash_add(dev_priv->mm_structs, | |
380 | &mm->node, (unsigned long)mm->mm); | |
381 | } else | |
382 | kref_get(&mm->kref); | |
383 | ||
384 | obj->userptr.mm = mm; | |
385 | out: | |
386 | mutex_unlock(&dev_priv->mm_lock); | |
387 | return ret; | |
388 | } | |
389 | ||
390 | static void | |
391 | __i915_mm_struct_free__worker(struct work_struct *work) | |
392 | { | |
393 | struct i915_mm_struct *mm = container_of(work, typeof(*mm), work); | |
394 | i915_mmu_notifier_free(mm->mn, mm->mm); | |
395 | mmdrop(mm->mm); | |
396 | kfree(mm); | |
397 | } | |
398 | ||
399 | static void | |
400 | __i915_mm_struct_free(struct kref *kref) | |
401 | { | |
402 | struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref); | |
403 | ||
404 | /* Protected by dev_priv->mm_lock */ | |
405 | hash_del(&mm->node); | |
f470b190 | 406 | mutex_unlock(&mm->i915->mm_lock); |
ad46cb53 CW |
407 | |
408 | INIT_WORK(&mm->work, __i915_mm_struct_free__worker); | |
409 | schedule_work(&mm->work); | |
410 | } | |
411 | ||
412 | static void | |
413 | i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj) | |
414 | { | |
415 | if (obj->userptr.mm == NULL) | |
416 | return; | |
417 | ||
418 | kref_put_mutex(&obj->userptr.mm->kref, | |
419 | __i915_mm_struct_free, | |
420 | &to_i915(obj->base.dev)->mm_lock); | |
421 | obj->userptr.mm = NULL; | |
422 | } | |
423 | ||
5cc9ed4b CW |
424 | struct get_pages_work { |
425 | struct work_struct work; | |
426 | struct drm_i915_gem_object *obj; | |
427 | struct task_struct *task; | |
428 | }; | |
429 | ||
5cc9ed4b CW |
430 | #if IS_ENABLED(CONFIG_SWIOTLB) |
431 | #define swiotlb_active() swiotlb_nr_tbl() | |
432 | #else | |
433 | #define swiotlb_active() 0 | |
434 | #endif | |
435 | ||
436 | static int | |
437 | st_set_pages(struct sg_table **st, struct page **pvec, int num_pages) | |
438 | { | |
439 | struct scatterlist *sg; | |
440 | int ret, n; | |
441 | ||
442 | *st = kmalloc(sizeof(**st), GFP_KERNEL); | |
443 | if (*st == NULL) | |
444 | return -ENOMEM; | |
445 | ||
446 | if (swiotlb_active()) { | |
447 | ret = sg_alloc_table(*st, num_pages, GFP_KERNEL); | |
448 | if (ret) | |
449 | goto err; | |
450 | ||
451 | for_each_sg((*st)->sgl, sg, num_pages, n) | |
452 | sg_set_page(sg, pvec[n], PAGE_SIZE, 0); | |
453 | } else { | |
454 | ret = sg_alloc_table_from_pages(*st, pvec, num_pages, | |
455 | 0, num_pages << PAGE_SHIFT, | |
456 | GFP_KERNEL); | |
457 | if (ret) | |
458 | goto err; | |
459 | } | |
460 | ||
461 | return 0; | |
462 | ||
463 | err: | |
464 | kfree(*st); | |
465 | *st = NULL; | |
466 | return ret; | |
467 | } | |
468 | ||
e2273302 ID |
469 | static int |
470 | __i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj, | |
471 | struct page **pvec, int num_pages) | |
472 | { | |
473 | int ret; | |
474 | ||
475 | ret = st_set_pages(&obj->pages, pvec, num_pages); | |
476 | if (ret) | |
477 | return ret; | |
478 | ||
479 | ret = i915_gem_gtt_prepare_object(obj); | |
480 | if (ret) { | |
481 | sg_free_table(obj->pages); | |
482 | kfree(obj->pages); | |
483 | obj->pages = NULL; | |
484 | } | |
485 | ||
486 | return ret; | |
487 | } | |
488 | ||
380996aa | 489 | static int |
e4b946bf CW |
490 | __i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, |
491 | bool value) | |
492 | { | |
380996aa CW |
493 | int ret = 0; |
494 | ||
e4b946bf CW |
495 | /* During mm_invalidate_range we need to cancel any userptr that |
496 | * overlaps the range being invalidated. Doing so requires the | |
497 | * struct_mutex, and that risks recursion. In order to cause | |
498 | * recursion, the user must alias the userptr address space with | |
499 | * a GTT mmapping (possible with a MAP_FIXED) - then when we have | |
500 | * to invalidate that mmapping, mm_invalidate_range is called with | |
501 | * the userptr address *and* the struct_mutex held. To prevent that | |
502 | * we set a flag under the i915_mmu_notifier spinlock to indicate | |
503 | * whether this object is valid. | |
504 | */ | |
505 | #if defined(CONFIG_MMU_NOTIFIER) | |
506 | if (obj->userptr.mmu_object == NULL) | |
380996aa | 507 | return 0; |
e4b946bf CW |
508 | |
509 | spin_lock(&obj->userptr.mmu_object->mn->lock); | |
380996aa CW |
510 | /* In order to serialise get_pages with an outstanding |
511 | * cancel_userptr, we must drop the struct_mutex and try again. | |
512 | */ | |
768e159f CW |
513 | if (!value) |
514 | del_object(obj->userptr.mmu_object); | |
515 | else if (!work_pending(&obj->userptr.mmu_object->work)) | |
516 | add_object(obj->userptr.mmu_object); | |
380996aa CW |
517 | else |
518 | ret = -EAGAIN; | |
e4b946bf CW |
519 | spin_unlock(&obj->userptr.mmu_object->mn->lock); |
520 | #endif | |
380996aa CW |
521 | |
522 | return ret; | |
e4b946bf CW |
523 | } |
524 | ||
5cc9ed4b CW |
525 | static void |
526 | __i915_gem_userptr_get_pages_worker(struct work_struct *_work) | |
527 | { | |
528 | struct get_pages_work *work = container_of(_work, typeof(*work), work); | |
529 | struct drm_i915_gem_object *obj = work->obj; | |
530 | struct drm_device *dev = obj->base.dev; | |
68d6c840 | 531 | const int npages = obj->base.size >> PAGE_SHIFT; |
5cc9ed4b CW |
532 | struct page **pvec; |
533 | int pinned, ret; | |
534 | ||
535 | ret = -ENOMEM; | |
536 | pinned = 0; | |
537 | ||
f2a85e19 | 538 | pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY); |
5cc9ed4b | 539 | if (pvec != NULL) { |
ad46cb53 | 540 | struct mm_struct *mm = obj->userptr.mm->mm; |
5cc9ed4b | 541 | |
40313f0c CW |
542 | ret = -EFAULT; |
543 | if (atomic_inc_not_zero(&mm->mm_users)) { | |
544 | down_read(&mm->mmap_sem); | |
545 | while (pinned < npages) { | |
546 | ret = get_user_pages_remote | |
547 | (work->task, mm, | |
548 | obj->userptr.ptr + pinned * PAGE_SIZE, | |
549 | npages - pinned, | |
550 | !obj->userptr.read_only, 0, | |
551 | pvec + pinned, NULL); | |
552 | if (ret < 0) | |
553 | break; | |
554 | ||
555 | pinned += ret; | |
556 | } | |
557 | up_read(&mm->mmap_sem); | |
558 | mmput(mm); | |
5cc9ed4b | 559 | } |
5cc9ed4b CW |
560 | } |
561 | ||
562 | mutex_lock(&dev->struct_mutex); | |
68d6c840 CW |
563 | if (obj->userptr.work == &work->work) { |
564 | if (pinned == npages) { | |
565 | ret = __i915_gem_userptr_set_pages(obj, pvec, npages); | |
566 | if (ret == 0) { | |
567 | list_add_tail(&obj->global_list, | |
568 | &to_i915(dev)->mm.unbound_list); | |
569 | obj->get_page.sg = obj->pages->sgl; | |
570 | obj->get_page.last = 0; | |
571 | pinned = 0; | |
572 | } | |
5cc9ed4b | 573 | } |
68d6c840 | 574 | obj->userptr.work = ERR_PTR(ret); |
e4b946bf CW |
575 | if (ret) |
576 | __i915_gem_userptr_set_active(obj, false); | |
5cc9ed4b CW |
577 | } |
578 | ||
5cc9ed4b CW |
579 | obj->userptr.workers--; |
580 | drm_gem_object_unreference(&obj->base); | |
581 | mutex_unlock(&dev->struct_mutex); | |
582 | ||
583 | release_pages(pvec, pinned, 0); | |
584 | drm_free_large(pvec); | |
585 | ||
586 | put_task_struct(work->task); | |
587 | kfree(work); | |
588 | } | |
589 | ||
e4b946bf CW |
590 | static int |
591 | __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj, | |
592 | bool *active) | |
593 | { | |
594 | struct get_pages_work *work; | |
595 | ||
596 | /* Spawn a worker so that we can acquire the | |
597 | * user pages without holding our mutex. Access | |
598 | * to the user pages requires mmap_sem, and we have | |
599 | * a strict lock ordering of mmap_sem, struct_mutex - | |
600 | * we already hold struct_mutex here and so cannot | |
601 | * call gup without encountering a lock inversion. | |
602 | * | |
603 | * Userspace will keep on repeating the operation | |
604 | * (thanks to EAGAIN) until either we hit the fast | |
605 | * path or the worker completes. If the worker is | |
606 | * cancelled or superseded, the task is still run | |
607 | * but the results ignored. (This leads to | |
608 | * complications that we may have a stray object | |
609 | * refcount that we need to be wary of when | |
610 | * checking for existing objects during creation.) | |
611 | * If the worker encounters an error, it reports | |
612 | * that error back to this function through | |
613 | * obj->userptr.work = ERR_PTR. | |
614 | */ | |
615 | if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS) | |
616 | return -EAGAIN; | |
617 | ||
618 | work = kmalloc(sizeof(*work), GFP_KERNEL); | |
619 | if (work == NULL) | |
620 | return -ENOMEM; | |
621 | ||
622 | obj->userptr.work = &work->work; | |
623 | obj->userptr.workers++; | |
624 | ||
625 | work->obj = obj; | |
626 | drm_gem_object_reference(&obj->base); | |
627 | ||
628 | work->task = current; | |
629 | get_task_struct(work->task); | |
630 | ||
631 | INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker); | |
632 | schedule_work(&work->work); | |
633 | ||
634 | *active = true; | |
635 | return -EAGAIN; | |
636 | } | |
637 | ||
5cc9ed4b CW |
638 | static int |
639 | i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) | |
640 | { | |
641 | const int num_pages = obj->base.size >> PAGE_SHIFT; | |
642 | struct page **pvec; | |
643 | int pinned, ret; | |
e4b946bf | 644 | bool active; |
5cc9ed4b CW |
645 | |
646 | /* If userspace should engineer that these pages are replaced in | |
647 | * the vma between us binding this page into the GTT and completion | |
648 | * of rendering... Their loss. If they change the mapping of their | |
649 | * pages they need to create a new bo to point to the new vma. | |
650 | * | |
651 | * However, that still leaves open the possibility of the vma | |
652 | * being copied upon fork. Which falls under the same userspace | |
653 | * synchronisation issue as a regular bo, except that this time | |
654 | * the process may not be expecting that a particular piece of | |
655 | * memory is tied to the GPU. | |
656 | * | |
657 | * Fortunately, we can hook into the mmu_notifier in order to | |
658 | * discard the page references prior to anything nasty happening | |
659 | * to the vma (discard or cloning) which should prevent the more | |
660 | * egregious cases from causing harm. | |
661 | */ | |
e4b946bf CW |
662 | if (IS_ERR(obj->userptr.work)) { |
663 | /* active flag will have been dropped already by the worker */ | |
664 | ret = PTR_ERR(obj->userptr.work); | |
665 | obj->userptr.work = NULL; | |
666 | return ret; | |
667 | } | |
668 | if (obj->userptr.work) | |
669 | /* active flag should still be held for the pending work */ | |
670 | return -EAGAIN; | |
671 | ||
672 | /* Let the mmu-notifier know that we have begun and need cancellation */ | |
380996aa CW |
673 | ret = __i915_gem_userptr_set_active(obj, true); |
674 | if (ret) | |
675 | return ret; | |
5cc9ed4b CW |
676 | |
677 | pvec = NULL; | |
678 | pinned = 0; | |
ad46cb53 | 679 | if (obj->userptr.mm->mm == current->mm) { |
f2a85e19 CW |
680 | pvec = drm_malloc_gfp(num_pages, sizeof(struct page *), |
681 | GFP_TEMPORARY); | |
5cc9ed4b | 682 | if (pvec == NULL) { |
f2a85e19 CW |
683 | __i915_gem_userptr_set_active(obj, false); |
684 | return -ENOMEM; | |
5cc9ed4b CW |
685 | } |
686 | ||
687 | pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages, | |
688 | !obj->userptr.read_only, pvec); | |
689 | } | |
e4b946bf CW |
690 | |
691 | active = false; | |
692 | if (pinned < 0) | |
693 | ret = pinned, pinned = 0; | |
694 | else if (pinned < num_pages) | |
695 | ret = __i915_gem_userptr_get_pages_schedule(obj, &active); | |
696 | else | |
e2273302 | 697 | ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages); |
e4b946bf CW |
698 | if (ret) { |
699 | __i915_gem_userptr_set_active(obj, active); | |
700 | release_pages(pvec, pinned, 0); | |
5cc9ed4b | 701 | } |
5cc9ed4b CW |
702 | drm_free_large(pvec); |
703 | return ret; | |
704 | } | |
705 | ||
706 | static void | |
707 | i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) | |
708 | { | |
c479f438 | 709 | struct sg_page_iter sg_iter; |
5cc9ed4b CW |
710 | |
711 | BUG_ON(obj->userptr.work != NULL); | |
e4b946bf | 712 | __i915_gem_userptr_set_active(obj, false); |
5cc9ed4b CW |
713 | |
714 | if (obj->madv != I915_MADV_WILLNEED) | |
715 | obj->dirty = 0; | |
716 | ||
e2273302 ID |
717 | i915_gem_gtt_finish_object(obj); |
718 | ||
c479f438 TU |
719 | for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { |
720 | struct page *page = sg_page_iter_page(&sg_iter); | |
5cc9ed4b CW |
721 | |
722 | if (obj->dirty) | |
723 | set_page_dirty(page); | |
724 | ||
725 | mark_page_accessed(page); | |
09cbfeaf | 726 | put_page(page); |
5cc9ed4b CW |
727 | } |
728 | obj->dirty = 0; | |
729 | ||
730 | sg_free_table(obj->pages); | |
731 | kfree(obj->pages); | |
732 | } | |
733 | ||
734 | static void | |
735 | i915_gem_userptr_release(struct drm_i915_gem_object *obj) | |
736 | { | |
737 | i915_gem_userptr_release__mmu_notifier(obj); | |
ad46cb53 | 738 | i915_gem_userptr_release__mm_struct(obj); |
5cc9ed4b CW |
739 | } |
740 | ||
741 | static int | |
742 | i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj) | |
743 | { | |
ad46cb53 | 744 | if (obj->userptr.mmu_object) |
5cc9ed4b CW |
745 | return 0; |
746 | ||
747 | return i915_gem_userptr_init__mmu_notifier(obj, 0); | |
748 | } | |
749 | ||
750 | static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { | |
de472664 | 751 | .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE, |
5cc9ed4b CW |
752 | .get_pages = i915_gem_userptr_get_pages, |
753 | .put_pages = i915_gem_userptr_put_pages, | |
de472664 | 754 | .dmabuf_export = i915_gem_userptr_dmabuf_export, |
5cc9ed4b CW |
755 | .release = i915_gem_userptr_release, |
756 | }; | |
757 | ||
758 | /** | |
759 | * Creates a new mm object that wraps some normal memory from the process | |
760 | * context - user memory. | |
761 | * | |
762 | * We impose several restrictions upon the memory being mapped | |
763 | * into the GPU. | |
764 | * 1. It must be page aligned (both start/end addresses, i.e. ptr and size). |
ec8b0dd5 | 765 | * 2. It must be normal system memory, not a pointer into another map of IO |
5cc9ed4b | 766 | * space (e.g. it must not be a GTT mmapping of another object). |
ec8b0dd5 | 767 | * 3. We only allow a bo as large as we could in theory map into the GTT, |
5cc9ed4b | 768 | * that is we limit the size to the total size of the GTT. |
ec8b0dd5 | 769 | * 4. The bo is marked as being snoopable. The backing pages are left |
5cc9ed4b CW |
770 | * accessible directly by the CPU, but reads and writes by the GPU may |
771 | * incur the cost of a snoop (unless you have an LLC architecture). | |
772 | * | |
773 | * Synchronisation between multiple users and the GPU is left to userspace | |
774 | * through the normal set-domain-ioctl. The kernel will enforce that the | |
775 | * GPU relinquishes the VMA before it is returned to the system |
776 | * i.e. upon free(), munmap() or process termination. However, the userspace | |
777 | * malloc() library may not immediately relinquish the VMA after free() and | |
778 | * instead reuse it whilst the GPU is still reading and writing to the VMA. | |
779 | * Caveat emptor. | |
780 | * | |
781 | * Also note that the object created here is not currently a "first class" |
782 | * object, in that several ioctls are banned. These are the CPU access | |
783 | * ioctls: mmap(), pwrite and pread. In practice, you are expected to use | |
cc917ab4 CW |
784 | * direct access via your pointer rather than use those ioctls. Another |
785 | * restriction is that we do not allow userptr surfaces to be pinned to the | |
786 | * hardware and so we reject any attempt to create a framebuffer out of a | |
787 | * userptr. | |
5cc9ed4b CW |
788 | * |
789 | * If you think this is a good interface to use to pass GPU memory between | |
790 | * drivers, please use dma-buf instead. In fact, wherever possible use | |
791 | * dma-buf instead. | |
792 | */ | |
793 | int | |
794 | i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |
795 | { | |
5cc9ed4b CW |
796 | struct drm_i915_gem_userptr *args = data; |
797 | struct drm_i915_gem_object *obj; | |
798 | int ret; | |
799 | u32 handle; | |
800 | ||
ca377809 TU |
801 | if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) { |
802 | /* We cannot support coherent userptr objects on hw without | |
803 | * LLC and with broken snooping. |
804 | */ | |
805 | return -ENODEV; | |
806 | } | |
807 | ||
5cc9ed4b CW |
808 | if (args->flags & ~(I915_USERPTR_READ_ONLY | |
809 | I915_USERPTR_UNSYNCHRONIZED)) | |
810 | return -EINVAL; | |
811 | ||
812 | if (offset_in_page(args->user_ptr | args->user_size)) | |
813 | return -EINVAL; | |
814 | ||
5cc9ed4b CW |
815 | if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE, |
816 | (char __user *)(unsigned long)args->user_ptr, args->user_size)) | |
817 | return -EFAULT; | |
818 | ||
819 | if (args->flags & I915_USERPTR_READ_ONLY) { | |
820 | /* On almost all of the current hw, we cannot tell the GPU that a | |
821 | * page is readonly, so this is just a placeholder in the uAPI. | |
822 | */ | |
823 | return -ENODEV; | |
824 | } | |
825 | ||
5cc9ed4b CW |
826 | obj = i915_gem_object_alloc(dev); |
827 | if (obj == NULL) | |
828 | return -ENOMEM; | |
829 | ||
830 | drm_gem_private_object_init(dev, &obj->base, args->user_size); | |
831 | i915_gem_object_init(obj, &i915_gem_userptr_ops); | |
832 | obj->cache_level = I915_CACHE_LLC; | |
833 | obj->base.write_domain = I915_GEM_DOMAIN_CPU; | |
834 | obj->base.read_domains = I915_GEM_DOMAIN_CPU; | |
835 | ||
836 | obj->userptr.ptr = args->user_ptr; | |
837 | obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY); | |
838 | ||
839 | /* And keep a pointer to the current->mm for resolving the user pages | |
840 | * at binding. This means that we need to hook into the mmu_notifier | |
841 | * in order to detect if the mmu is destroyed. | |
842 | */ | |
ad46cb53 CW |
843 | ret = i915_gem_userptr_init__mm_struct(obj); |
844 | if (ret == 0) | |
5cc9ed4b CW |
845 | ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags); |
846 | if (ret == 0) | |
847 | ret = drm_gem_handle_create(file, &obj->base, &handle); | |
848 | ||
849 | /* drop reference from allocate - handle holds it now */ | |
850 | drm_gem_object_unreference_unlocked(&obj->base); | |
851 | if (ret) | |
852 | return ret; | |
853 | ||
854 | args->handle = handle; | |
855 | return 0; | |
856 | } | |
857 | ||
858 | int | |
859 | i915_gem_init_userptr(struct drm_device *dev) | |
860 | { | |
5cc9ed4b | 861 | struct drm_i915_private *dev_priv = to_i915(dev); |
ad46cb53 CW |
862 | mutex_init(&dev_priv->mm_lock); |
863 | hash_init(dev_priv->mm_structs); | |
5cc9ed4b CW |
864 | return 0; |
865 | } |
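
The doc comment above `i915_gem_userptr_ioctl()` describes the userspace contract: a page-aligned pointer and size, normal system memory, and snoopable backing pages. As a minimal illustration of that contract, here is a hedged userspace sketch that drives the ioctl through libdrm's `drmIoctl()`; the render-node path, allocation size, and error handling are assumptions made for the example rather than anything defined in this file, and header locations may vary by distribution.

```c
/*
 * Minimal userspace sketch (illustrative only): create a userptr BO from an
 * anonymous, page-aligned allocation and receive back a GEM handle.
 * Assumes a libdrm development environment; the device node is an example.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xf86drm.h>        /* drmIoctl() from libdrm */
#include <drm/i915_drm.h>   /* struct drm_i915_gem_userptr, DRM_IOCTL_I915_GEM_USERPTR */

int main(void)
{
	const size_t size = 16 * 4096;	/* both ptr and size must be page aligned */
	struct drm_i915_gem_userptr arg = { 0 };
	void *ptr;
	int fd;

	fd = open("/dev/dri/renderD128", O_RDWR);	/* example render node */
	if (fd < 0)
		return 1;

	/* mmap() guarantees page alignment, unlike plain malloc(). */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ptr == MAP_FAILED)
		return 1;

	arg.user_ptr = (uintptr_t)ptr;
	arg.user_size = size;
	arg.flags = 0;	/* synchronized mode: exercises the mmu_notifier path above */

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg)) {
		perror("DRM_IOCTL_I915_GEM_USERPTR");
		return 1;
	}

	printf("userptr handle = %u\n", arg.handle);
	/* The handle can now be used with execbuffer; CPU access stays via ptr. */
	close(fd);
	return 0;
}
```

Leaving `flags` at zero selects the synchronized path: `i915_gem_userptr_init__mmu_notifier()` attaches an interval-tree entry so that `invalidate_range_start` can cancel the pinned pages if the backing VMA changes. Passing `I915_USERPTR_UNSYNCHRONIZED` skips that protection and, as the code above shows, is restricted to `CAP_SYS_ADMIN`.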