drm/i915: Store a i915 backpointer from engine, and use it
drivers/gpu/drm/i915/i915_gem.c
/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_mocs.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static void
i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
static void
i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);

static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	if (!i915_reset_in_progress(error))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_in_progress(error),
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}
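
/*
 * Editor's note (illustrative, not part of the original commit):
 * wait_event_interruptible_timeout() returns 0 if the timeout elapsed,
 * a negative errno (-ERESTARTSYS) if a signal arrived, and otherwise
 * the number of jiffies remaining when the condition became true -
 * hence the three-way branch above mapping those cases to -EIO, ret
 * and 0 respectively.
 */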

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (vma->pin_count)
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (vma->pin_count)
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	char *vaddr = obj->phys_handle->vaddr;
	struct sg_table *st;
	struct scatterlist *sg;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->pages = st;
	return 0;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	if (obj->dirty) {
		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->dirty = 0;
	}

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

static int
drop_pages(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma, *next;
	int ret;

	drm_gem_object_reference(&obj->base);
	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
		if (i915_vma_unbind(vma))
			break;

	ret = i915_gem_object_put_pages(obj);
	drm_gem_object_unreference(&obj->base);

	return ret;
}

int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
	int ret;

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
			return -EBUSY;

		return 0;
	}

	if (obj->madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	ret = drop_pages(obj);
	if (ret)
		return ret;

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	obj->phys_handle = phys;
	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_get_pages(obj);
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = to_user_ptr(args->data_ptr);
	int ret = 0;

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
	}

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(dev));

out:
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
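
/*
 * Editor's note (hypothetical userspace sketch, not from this commit):
 * the ioctl above is reached via DRM_IOCTL_I915_GEM_CREATE, e.g.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		use(create.handle);	// handle names a 4 KiB GEM object
 *
 * ("use()" is a placeholder.) The size is rounded up to a whole page
 * and the handle holds the sole reference once the allocation
 * reference is dropped in i915_gem_create().
 */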

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}
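
/*
 * Editor's note (worked example, not part of the original commit): with
 * bit-17 swizzling the hardware swaps each pair of adjacent 64-byte
 * cachelines, and "gpu_offset ^ 64" undoes that swap in software: bytes
 * the CPU expects at offsets 0..63 are read from 64..127 and vice
 * versa, so a 4-byte access at gpu_offset 10 is serviced from offset
 * 74. Clamping each iteration to a cacheline boundary keeps the XOR
 * constant across a single copy.
 */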

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
		return -EINVAL;

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	return ret;
}
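
/*
 * Editor's note (usage sketch): callers pair this with
 * i915_gem_object_unpin_pages() once the copy is complete - see
 * i915_gem_shmem_pread() below, which clflushes each page before
 * copying whenever *needs_clflush came back non-zero.
 */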

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	intel_fb_obj_invalidate(obj, ORIGIN_GTT);

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(ggtt->mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_flush;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_flush:
	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
out_unpin:
	i915_gem_object_ggtt_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire patch. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				needs_clflush_after = true;
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(to_i915(dev));
	else
		obj->cache_dirty = true;

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto put_rpm;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
put_rpm:
	intel_runtime_pm_put(dev_priv);

	return ret;
}

static int
i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
{
	if (__i915_terminally_wedged(reset_counter))
		return -EIO;

	if (__i915_reset_in_progress(reset_counter)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_engine_cs *engine)
{
	return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
}

static unsigned long local_clock_us(unsigned *cpu)
{
	unsigned long t;

	/* Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}

static bool busywait_stop(unsigned long timeout, unsigned cpu)
{
	unsigned this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}
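
/*
 * Editor's note (worked arithmetic, not part of the original commit):
 * the ">> 10" in local_clock_us() divides nanoseconds by 1024 rather
 * than 1000, so each "microsecond" here is really 1.024us. A budget of
 * local_clock_us(&cpu) + 5, as used below, is therefore roughly
 * 5 * 1.024 = 5.12us of wall time - close enough for a busywait
 * heuristic and much cheaper than a 64-bit division.
 */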

static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
{
	unsigned long timeout;
	unsigned cpu;

	/* When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quickly as possible. However,
	 * if it is a slow request, we want to sleep as quickly as possible.
	 * The tradeoff between waiting and sleeping is roughly the time it
	 * takes to sleep on a request, on the order of a microsecond.
	 */

	if (req->engine->irq_refcount)
		return -EBUSY;

	/* Only spin if we know the GPU is processing this request */
	if (!i915_gem_request_started(req, true))
		return -EAGAIN;

	timeout = local_clock_us(&cpu) + 5;
	while (!need_resched()) {
		if (i915_gem_request_completed(req, true))
			return 0;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout, cpu))
			break;

		cpu_relax_lowlatency();
	}

	if (i915_gem_request_completed(req, false))
		return 0;

	return -EAGAIN;
}

/**
 * __i915_wait_request - wait until execution of request has finished
 * @req: duh!
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the request was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
int __i915_wait_request(struct drm_i915_gem_request *req,
			bool interruptible,
			s64 *timeout,
			struct intel_rps_client *rps)
{
	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
	struct drm_i915_private *dev_priv = req->i915;
	const bool irq_test_in_progress =
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	DEFINE_WAIT(wait);
	unsigned long timeout_expire;
	s64 before = 0; /* Only to silence a compiler warning. */
	int ret;

	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");

	if (list_empty(&req->list))
		return 0;

	if (i915_gem_request_completed(req, true))
		return 0;

	timeout_expire = 0;
	if (timeout) {
		if (WARN_ON(*timeout < 0))
			return -EINVAL;

		if (*timeout == 0)
			return -ETIME;

		timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);

		/*
		 * Record current time in case interrupted by signal, or wedged.
		 */
		before = ktime_get_raw_ns();
	}

	if (INTEL_INFO(dev_priv)->gen >= 6)
		gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);

	trace_i915_gem_request_wait_begin(req);

	/* Optimistic spin for the next jiffie before touching IRQs */
	ret = __i915_spin_request(req, state);
	if (ret == 0)
		goto out;

	if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
		ret = -ENODEV;
		goto out;
	}

	for (;;) {
		struct timer_list timer;

		prepare_to_wait(&engine->irq_queue, &wait, state);

		/* We need to check whether any gpu reset happened in between
		 * the request being submitted and now. If a reset has occurred,
		 * the request is effectively complete (we either are in the
		 * process of or have discarded the rendering and completely
		 * reset the GPU. The results of the request are lost and we
		 * are free to continue on with the original operation.
		 */
		if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
			ret = 0;
			break;
		}

		if (i915_gem_request_completed(req, false)) {
			ret = 0;
			break;
		}

		if (signal_pending_state(state, current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout && time_after_eq(jiffies, timeout_expire)) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, engine)) {
			unsigned long expire;

			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
			mod_timer(&timer, expire);
		}

		io_schedule();

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
	if (!irq_test_in_progress)
		engine->irq_put(engine);

	finish_wait(&engine->irq_queue, &wait);

out:
	trace_i915_gem_request_wait_end(req);

	if (timeout) {
		s64 tres = *timeout - (ktime_get_raw_ns() - before);

		*timeout = tres < 0 ? 0 : tres;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
			*timeout = 0;
	}

	return ret;
}

int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;

	WARN_ON(!req || !file || req->file_priv);

	if (!req || !file)
		return -EINVAL;

	if (req->file_priv)
		return -EINVAL;

	file_priv = file->driver_priv;

	spin_lock(&file_priv->mm.lock);
	req->file_priv = file_priv;
	list_add_tail(&req->client_list, &file_priv->mm.request_list);
	spin_unlock(&file_priv->mm.lock);

	req->pid = get_pid(task_pid(current));

	return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);

	put_pid(request->pid);
	request->pid = NULL;
}

static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
	trace_i915_gem_request_retire(request);

	/* We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	request->ringbuf->last_retired_head = request->postfix;

	list_del_init(&request->list);
	i915_gem_request_remove_from_client(request);

	if (request->previous_context) {
		if (i915.enable_execlists)
			intel_lr_context_unpin(request->previous_context,
					       request->engine);
	}

	i915_gem_context_unreference(request->ctx);
	i915_gem_request_unreference(request);
}

static void
__i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_gem_request *tmp;

	lockdep_assert_held(&engine->i915->dev->struct_mutex);

	if (list_empty(&req->list))
		return;

	do {
		tmp = list_first_entry(&engine->request_list,
				       typeof(*tmp), list);

		i915_gem_request_retire(tmp);
	} while (tmp != req);

	WARN_ON(i915_verify_lists(engine->dev));
}

/**
 * Waits for a request to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_request(struct drm_i915_gem_request *req)
{
	struct drm_i915_private *dev_priv = req->i915;
	bool interruptible;
	int ret;

	interruptible = dev_priv->mm.interruptible;

	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	ret = __i915_wait_request(req, interruptible, NULL, NULL);
	if (ret)
		return ret;

	__i915_gem_request_retire__upto(req);
	return 0;
}
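
/*
 * Editor's note: i915_wait_request() above is the struct_mutex-holding
 * convenience wrapper - unlike a bare __i915_wait_request() it also
 * retires the request (and every request before it) on success.
 */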

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	int ret, i;

	if (!obj->active)
		return 0;

	if (readonly) {
		if (obj->last_write_req != NULL) {
			ret = i915_wait_request(obj->last_write_req);
			if (ret)
				return ret;

			i = obj->last_write_req->engine->id;
			if (obj->last_read_req[i] == obj->last_write_req)
				i915_gem_object_retire__read(obj, i);
			else
				i915_gem_object_retire__write(obj);
		}
	} else {
		for (i = 0; i < I915_NUM_ENGINES; i++) {
			if (obj->last_read_req[i] == NULL)
				continue;

			ret = i915_wait_request(obj->last_read_req[i]);
			if (ret)
				return ret;

			i915_gem_object_retire__read(obj, i);
		}
		GEM_BUG_ON(obj->active);
	}

	return 0;
}

static void
i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
			       struct drm_i915_gem_request *req)
{
	int ring = req->engine->id;

	if (obj->last_read_req[ring] == req)
		i915_gem_object_retire__read(obj, ring);
	else if (obj->last_write_req == req)
		i915_gem_object_retire__write(obj);

	__i915_gem_request_retire__upto(req);
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    struct intel_rps_client *rps,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
	int ret, i, n = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	if (!obj->active)
		return 0;

	if (readonly) {
		struct drm_i915_gem_request *req;

		req = obj->last_write_req;
		if (req == NULL)
			return 0;

		requests[n++] = i915_gem_request_reference(req);
	} else {
		for (i = 0; i < I915_NUM_ENGINES; i++) {
			struct drm_i915_gem_request *req;

			req = obj->last_read_req[i];
			if (req == NULL)
				continue;

			requests[n++] = i915_gem_request_reference(req);
		}
	}

	mutex_unlock(&dev->struct_mutex);
	ret = 0;
	for (i = 0; ret == 0 && i < n; i++)
		ret = __i915_wait_request(requests[i], true, NULL, rps);
	mutex_lock(&dev->struct_mutex);

	for (i = 0; i < n; i++) {
		if (ret == 0)
			i915_gem_object_retire_request(obj, requests[i]);
		i915_gem_request_unreference(requests[i]);
	}

	return ret;
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;
	return &fpriv->rps;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj,
							  to_rps_client(file),
							  !write_domain);
	if (ret)
		goto unref;

	if (read_domains & I915_GEM_DOMAIN_GTT)
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
	else
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);

	if (write_domain != 0)
		intel_fb_obj_invalidate(obj,
					write_domain == I915_GEM_DOMAIN_GTT ?
					ORIGIN_GTT : ORIGIN_CPU);

unref:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_display)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !cpu_has_pat)
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->filp) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		down_write(&mm->mmap_sem);
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);
	}
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
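
/*
 * Editor's note (illustrative, not from this commit): userspace opts in
 * to the write-combined mapping above by setting flags = I915_MMAP_WC
 * in struct drm_i915_gem_mmap before DRM_IOCTL_I915_GEM_MMAP; without
 * the flag the mapping stays cacheable and coherency is managed through
 * the set_domain ioctl as usual.
 */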

/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_ggtt_view view = i915_ggtt_view_normal;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	intel_runtime_pm_get(dev_priv);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon reacquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
	if (ret)
		goto unlock;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
		ret = -EFAULT;
		goto unlock;
	}

	/* Use a partial view if the object is bigger than the aperture. */
	if (obj->base.size >= ggtt->mappable_end &&
	    obj->tiling_mode == I915_TILING_NONE) {
		static const unsigned int chunk_size = 256; // 1 MiB

		memset(&view, 0, sizeof(view));
		view.type = I915_GGTT_VIEW_PARTIAL;
		view.params.partial.offset = rounddown(page_offset, chunk_size);
		view.params.partial.size =
			min_t(unsigned int,
			      chunk_size,
			      (vma->vm_end - vma->vm_start)/PAGE_SIZE -
			      view.params.partial.offset);
	}
1817
1818 /* Now pin it into the GTT if needed */
1819 ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
c9839303
CW
1820 if (ret)
1821 goto unlock;
4a684a41 1822
c9839303
CW
1823 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1824 if (ret)
1825 goto unpin;
74898d7e 1826
06d98131 1827 ret = i915_gem_object_get_fence(obj);
d9e86c0e 1828 if (ret)
c9839303 1829 goto unpin;
7d1c4804 1830
b90b91d8 1831 /* Finally, remap it using the new GTT offset */
72e96d64 1832 pfn = ggtt->mappable_base +
c5ad54cf 1833 i915_gem_obj_ggtt_offset_view(obj, &view);
f343c5f6 1834 pfn >>= PAGE_SHIFT;
de151cf6 1835
c5ad54cf
JL
1836 if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
1837 /* Overriding existing pages in partial view does not cause
1838 * us any trouble as TLBs are still valid because the fault
1839 * is due to userspace losing part of the mapping or never
1840 * having accessed it before (at this partial's range).
1841 */
1842 unsigned long base = vma->vm_start +
1843 (view.params.partial.offset << PAGE_SHIFT);
1844 unsigned int i;
b90b91d8 1845
c5ad54cf
JL
1846 for (i = 0; i < view.params.partial.size; i++) {
1847 ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
b90b91d8
CW
1848 if (ret)
1849 break;
1850 }
1851
1852 obj->fault_mappable = true;
c5ad54cf
JL
1853 } else {
1854 if (!obj->fault_mappable) {
1855 unsigned long size = min_t(unsigned long,
1856 vma->vm_end - vma->vm_start,
1857 obj->base.size);
1858 int i;
1859
1860 for (i = 0; i < size >> PAGE_SHIFT; i++) {
1861 ret = vm_insert_pfn(vma,
1862 (unsigned long)vma->vm_start + i * PAGE_SIZE,
1863 pfn + i);
1864 if (ret)
1865 break;
1866 }
1867
1868 obj->fault_mappable = true;
1869 } else
1870 ret = vm_insert_pfn(vma,
1871 (unsigned long)vmf->virtual_address,
1872 pfn + page_offset);
1873 }
c9839303 1874unpin:
c5ad54cf 1875 i915_gem_object_ggtt_unpin_view(obj, &view);
c715089f 1876unlock:
de151cf6 1877 mutex_unlock(&dev->struct_mutex);
d9bc7e9f 1878out:
de151cf6 1879 switch (ret) {
d9bc7e9f 1880 case -EIO:
2232f031
DV
1881 /*
1882 * We eat errors when the gpu is terminally wedged to avoid
1883 * userspace unduly crashing (gl has no provisions for mmaps to
1884 * fail). But any other -EIO isn't ours (e.g. swap in failure)
1885 * and so needs to be reported.
1886 */
1887 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
f65c9168
PZ
1888 ret = VM_FAULT_SIGBUS;
1889 break;
1890 }
045e769a 1891 case -EAGAIN:
571c608d
DV
1892 /*
1893 * EAGAIN means the gpu is hung and we'll wait for the error
1894 * handler to reset everything when re-faulting in
1895 * i915_mutex_lock_interruptible.
d9bc7e9f 1896 */
c715089f
CW
1897 case 0:
1898 case -ERESTARTSYS:
bed636ab 1899 case -EINTR:
e79e0fe3
DR
1900 case -EBUSY:
1901 /*
1902 * EBUSY is ok: this just means that another thread
1903 * already did the job.
1904 */
f65c9168
PZ
1905 ret = VM_FAULT_NOPAGE;
1906 break;
de151cf6 1907 case -ENOMEM:
f65c9168
PZ
1908 ret = VM_FAULT_OOM;
1909 break;
a7c2e1aa 1910 case -ENOSPC:
45d67817 1911 case -EFAULT:
f65c9168
PZ
1912 ret = VM_FAULT_SIGBUS;
1913 break;
de151cf6 1914 default:
a7c2e1aa 1915 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
f65c9168
PZ
1916 ret = VM_FAULT_SIGBUS;
1917 break;
de151cf6 1918 }
f65c9168
PZ
1919
1920 intel_runtime_pm_put(dev_priv);
1921 return ret;
de151cf6
JB
1922}
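
/*
 * Worked example (editor's note): with chunk_size = 256 pages (1 MiB) and a
 * fault at page_offset = 1000, the partial view above starts at page
 * rounddown(1000, 256) = 768 and spans at most 256 pages, so the faulting
 * page always lands inside the freshly bound 1 MiB window.
 */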
1923
901782b2
CW
1924/**
1925 * i915_gem_release_mmap - remove physical page mappings
1926 * @obj: obj in question
1927 *
af901ca1 1928 * Preserve the reservation of the mmapping with the DRM core code, but
901782b2
CW
1929 * relinquish ownership of the pages back to the system.
1930 *
1931 * It is vital that we remove the page mapping if we have mapped a tiled
1932 * object through the GTT and then lose the fence register due to
1933 * resource pressure. Similarly if the object has been moved out of the
1934 * aperture, then pages mapped into userspace must be revoked. Removing the
1935 * mapping will then trigger a page fault on the next user access, allowing
1936 * fixup by i915_gem_fault().
1937 */
d05ca301 1938void
05394f39 1939i915_gem_release_mmap(struct drm_i915_gem_object *obj)
901782b2 1940{
349f2ccf
CW
1941 /* Serialisation between user GTT access and our code depends upon
1942 * revoking the CPU's PTE whilst the mutex is held. The next user
1943 * pagefault then has to wait until we release the mutex.
1944 */
1945 lockdep_assert_held(&obj->base.dev->struct_mutex);
1946
6299f992
CW
1947 if (!obj->fault_mappable)
1948 return;
901782b2 1949
6796cb16
DH
1950 drm_vma_node_unmap(&obj->base.vma_node,
1951 obj->base.dev->anon_inode->i_mapping);
349f2ccf
CW
1952
1953 /* Ensure that the CPU's PTE are revoked and there are not outstanding
1954 * memory transactions from userspace before we return. The TLB
1955 * flushing implied above by changing the PTE above *should* be
1956 * sufficient, an extra barrier here just provides us with a bit
1957 * of paranoid documentation about our requirement to serialise
1958 * memory writes before touching registers / GSM.
1959 */
1960 wmb();
1961
6299f992 1962 obj->fault_mappable = false;
901782b2
CW
1963}
1964
eedd10f4
CW
1965void
1966i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1967{
1968 struct drm_i915_gem_object *obj;
1969
1970 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1971 i915_gem_release_mmap(obj);
1972}
1973
0fa87796 1974uint32_t
e28f8711 1975i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
92b88aeb 1976{
e28f8711 1977 uint32_t gtt_size;
92b88aeb
CW
1978
1979 if (INTEL_INFO(dev)->gen >= 4 ||
e28f8711
CW
1980 tiling_mode == I915_TILING_NONE)
1981 return size;
92b88aeb
CW
1982
1983 /* Previous chips need a power-of-two fence region when tiling */
1984 if (INTEL_INFO(dev)->gen == 3)
e28f8711 1985 gtt_size = 1024*1024;
92b88aeb 1986 else
e28f8711 1987 gtt_size = 512*1024;
92b88aeb 1988
e28f8711
CW
1989 while (gtt_size < size)
1990 gtt_size <<= 1;
92b88aeb 1991
e28f8711 1992 return gtt_size;
92b88aeb
CW
1993}
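
/*
 * Worked example (editor's note): on gen3 a 700KiB tiled object starts from
 * the 1MiB minimum and needs no doubling, so the fence region is 1MiB; a
 * 1.5MiB tiled object doubles once to 2MiB. On gen4+ or for untiled objects
 * the size is returned unchanged.
 */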
1994
de151cf6
JB
1995/**
1996 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1997 * @obj: object to check
1998 *
1999 * Return the required GTT alignment for an object, taking into account
5e783301 2000 * potential fence register mapping.
de151cf6 2001 */
d865110c
ID
2002uint32_t
2003i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
2004 int tiling_mode, bool fenced)
de151cf6 2005{
de151cf6
JB
2006 /*
2007 * Minimum alignment is 4k (GTT page size), but might be greater
2008 * if a fence register is needed for the object.
2009 */
d865110c 2010 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
e28f8711 2011 tiling_mode == I915_TILING_NONE)
de151cf6
JB
2012 return 4096;
2013
a00b10c3
CW
2014 /*
2015 * Previous chips need to be aligned to the size of the smallest
2016 * fence register that can contain the object.
2017 */
e28f8711 2018 return i915_gem_get_gtt_size(dev, size, tiling_mode);
a00b10c3
CW
2019}
2020
d8cb5086
CW
2021static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2022{
2023 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2024 int ret;
2025
0de23977 2026 if (drm_vma_node_has_offset(&obj->base.vma_node))
d8cb5086
CW
2027 return 0;
2028
da494d7c
DV
2029 dev_priv->mm.shrinker_no_lock_stealing = true;
2030
d8cb5086
CW
2031 ret = drm_gem_create_mmap_offset(&obj->base);
2032 if (ret != -ENOSPC)
da494d7c 2033 goto out;
d8cb5086
CW
2034
2035 /* Badly fragmented mmap space? The only way we can recover
2036 * space is by destroying unwanted objects. We can't randomly release
2037 * mmap_offsets as userspace expects them to be persistent for the
2038 * lifetime of the objects. The closest we can get is to release the
2039 * offsets on purgeable objects by truncating them and marking them purged,
2040 * which prevents userspace from ever using that object again.
2041 */
21ab4e74
CW
2042 i915_gem_shrink(dev_priv,
2043 obj->base.size >> PAGE_SHIFT,
2044 I915_SHRINK_BOUND |
2045 I915_SHRINK_UNBOUND |
2046 I915_SHRINK_PURGEABLE);
d8cb5086
CW
2047 ret = drm_gem_create_mmap_offset(&obj->base);
2048 if (ret != -ENOSPC)
da494d7c 2049 goto out;
d8cb5086
CW
2050
2051 i915_gem_shrink_all(dev_priv);
da494d7c
DV
2052 ret = drm_gem_create_mmap_offset(&obj->base);
2053out:
2054 dev_priv->mm.shrinker_no_lock_stealing = false;
2055
2056 return ret;
d8cb5086
CW
2057}
2058
2059static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2060{
d8cb5086
CW
2061 drm_gem_free_mmap_offset(&obj->base);
2062}
2063
da6b51d0 2064int
ff72145b
DA
2065i915_gem_mmap_gtt(struct drm_file *file,
2066 struct drm_device *dev,
da6b51d0 2067 uint32_t handle,
ff72145b 2068 uint64_t *offset)
de151cf6 2069{
05394f39 2070 struct drm_i915_gem_object *obj;
de151cf6
JB
2071 int ret;
2072
76c1dec1 2073 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 2074 if (ret)
76c1dec1 2075 return ret;
de151cf6 2076
ff72145b 2077 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
c8725226 2078 if (&obj->base == NULL) {
1d7cfea1
CW
2079 ret = -ENOENT;
2080 goto unlock;
2081 }
de151cf6 2082
05394f39 2083 if (obj->madv != I915_MADV_WILLNEED) {
bd9b6a4e 2084 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
8c99e57d 2085 ret = -EFAULT;
1d7cfea1 2086 goto out;
ab18282d
CW
2087 }
2088
d8cb5086
CW
2089 ret = i915_gem_object_create_mmap_offset(obj);
2090 if (ret)
2091 goto out;
de151cf6 2092
0de23977 2093 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
de151cf6 2094
1d7cfea1 2095out:
05394f39 2096 drm_gem_object_unreference(&obj->base);
1d7cfea1 2097unlock:
de151cf6 2098 mutex_unlock(&dev->struct_mutex);
1d7cfea1 2099 return ret;
de151cf6
JB
2100}
2101
ff72145b
DA
2102/**
2103 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2104 * @dev: DRM device
2105 * @data: GTT mapping ioctl data
2106 * @file: DRM file for the ioctl call
2107 *
2108 * Simply returns the fake offset to userspace so it can mmap it.
2109 * The mmap call will end up in drm_gem_mmap(), which will set things
2110 * up so we can get faults in the handler above.
2111 *
2112 * The fault handler will take care of binding the object into the GTT
2113 * (since it may have been evicted to make room for something), allocating
2114 * a fence register, and mapping the appropriate aperture address into
2115 * userspace.
2116 */
2117int
2118i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2119 struct drm_file *file)
2120{
2121 struct drm_i915_gem_mmap_gtt *args = data;
2122
da6b51d0 2123 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
ff72145b
DA
2124}
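
/*
 * Editor's sketch (not part of the driver): the two-step GTT mmap dance
 * described above, assuming an open DRM fd `fd` and GEM handle `handle`
 * as before.
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, arg.offset);	// faults serviced by i915_gem_fault()
 */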
2125
225067ee
DV
2126/* Immediately discard the backing storage */
2127static void
2128i915_gem_object_truncate(struct drm_i915_gem_object *obj)
e5281ccd 2129{
4d6294bf 2130 i915_gem_object_free_mmap_offset(obj);
1286ff73 2131
4d6294bf
CW
2132 if (obj->base.filp == NULL)
2133 return;
e5281ccd 2134
225067ee
DV
2135 /* Our goal here is to return as much of the memory as
2136 * possible back to the system, as we are called from OOM.
2137 * To do this we must instruct the shmfs to drop all of its
2138 * backing pages, *now*.
2139 */
5537252b 2140 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
225067ee
DV
2141 obj->madv = __I915_MADV_PURGED;
2142}
e5281ccd 2143
5537252b
CW
2144/* Try to discard unwanted pages */
2145static void
2146i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
225067ee 2147{
5537252b
CW
2148 struct address_space *mapping;
2149
2150 switch (obj->madv) {
2151 case I915_MADV_DONTNEED:
2152 i915_gem_object_truncate(obj);
2153 case __I915_MADV_PURGED:
2154 return;
2155 }
2156
2157 if (obj->base.filp == NULL)
2158 return;
2159
2160 mapping = file_inode(obj->base.filp)->i_mapping;
2161 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
e5281ccd
CW
2162}
2163
5cdf5881 2164static void
05394f39 2165i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
673a394b 2166{
90797e6d
ID
2167 struct sg_page_iter sg_iter;
2168 int ret;
1286ff73 2169
05394f39 2170 BUG_ON(obj->madv == __I915_MADV_PURGED);
673a394b 2171
6c085a72 2172 ret = i915_gem_object_set_to_cpu_domain(obj, true);
f4457ae7 2173 if (WARN_ON(ret)) {
6c085a72
CW
2174 /* In the event of a disaster, abandon all caches and
2175 * hope for the best.
2176 */
2c22569b 2177 i915_gem_clflush_object(obj, true);
6c085a72
CW
2178 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2179 }
2180
e2273302
ID
2181 i915_gem_gtt_finish_object(obj);
2182
6dacfd2f 2183 if (i915_gem_object_needs_bit17_swizzle(obj))
280b713b
EA
2184 i915_gem_object_save_bit_17_swizzle(obj);
2185
05394f39
CW
2186 if (obj->madv == I915_MADV_DONTNEED)
2187 obj->dirty = 0;
3ef94daa 2188
90797e6d 2189 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
2db76d7c 2190 struct page *page = sg_page_iter_page(&sg_iter);
9da3da66 2191
05394f39 2192 if (obj->dirty)
9da3da66 2193 set_page_dirty(page);
3ef94daa 2194
05394f39 2195 if (obj->madv == I915_MADV_WILLNEED)
9da3da66 2196 mark_page_accessed(page);
3ef94daa 2197
09cbfeaf 2198 put_page(page);
3ef94daa 2199 }
05394f39 2200 obj->dirty = 0;
673a394b 2201
9da3da66
CW
2202 sg_free_table(obj->pages);
2203 kfree(obj->pages);
37e680a1 2204}
6c085a72 2205
dd624afd 2206int
37e680a1
CW
2207i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2208{
2209 const struct drm_i915_gem_object_ops *ops = obj->ops;
2210
2f745ad3 2211 if (obj->pages == NULL)
37e680a1
CW
2212 return 0;
2213
a5570178
CW
2214 if (obj->pages_pin_count)
2215 return -EBUSY;
2216
9843877d 2217 BUG_ON(i915_gem_obj_bound_any(obj));
3e123027 2218
a2165e31
CW
2219 /* ->put_pages might need to allocate memory for the bit17 swizzle
2220 * array, hence protect them from being reaped by removing them from gtt
2221 * lists early. */
35c20a60 2222 list_del(&obj->global_list);
a2165e31 2223
0a798eb9 2224 if (obj->mapping) {
fb8621d3
CW
2225 if (is_vmalloc_addr(obj->mapping))
2226 vunmap(obj->mapping);
2227 else
2228 kunmap(kmap_to_page(obj->mapping));
0a798eb9
CW
2229 obj->mapping = NULL;
2230 }
2231
37e680a1 2232 ops->put_pages(obj);
05394f39 2233 obj->pages = NULL;
37e680a1 2234
5537252b 2235 i915_gem_object_invalidate(obj);
6c085a72
CW
2236
2237 return 0;
2238}
2239
37e680a1 2240static int
6c085a72 2241i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
e5281ccd 2242{
6c085a72 2243 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
e5281ccd
CW
2244 int page_count, i;
2245 struct address_space *mapping;
9da3da66
CW
2246 struct sg_table *st;
2247 struct scatterlist *sg;
90797e6d 2248 struct sg_page_iter sg_iter;
e5281ccd 2249 struct page *page;
90797e6d 2250 unsigned long last_pfn = 0; /* suppress gcc warning */
e2273302 2251 int ret;
6c085a72 2252 gfp_t gfp;
e5281ccd 2253
6c085a72
CW
2254 /* Assert that the object is not currently in any GPU domain. As it
2255 * wasn't in the GTT, there shouldn't be any way it could have been in
2256 * a GPU cache
2257 */
2258 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2259 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2260
9da3da66
CW
2261 st = kmalloc(sizeof(*st), GFP_KERNEL);
2262 if (st == NULL)
2263 return -ENOMEM;
2264
05394f39 2265 page_count = obj->base.size / PAGE_SIZE;
9da3da66 2266 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
9da3da66 2267 kfree(st);
e5281ccd 2268 return -ENOMEM;
9da3da66 2269 }
e5281ccd 2270
9da3da66
CW
2271 /* Get the list of pages out of our struct file. They'll be pinned
2272 * at this point until we release them.
2273 *
2274 * Fail silently without starting the shrinker
2275 */
496ad9aa 2276 mapping = file_inode(obj->base.filp)->i_mapping;
c62d2555 2277 gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
d0164adc 2278 gfp |= __GFP_NORETRY | __GFP_NOWARN;
90797e6d
ID
2279 sg = st->sgl;
2280 st->nents = 0;
2281 for (i = 0; i < page_count; i++) {
6c085a72
CW
2282 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2283 if (IS_ERR(page)) {
21ab4e74
CW
2284 i915_gem_shrink(dev_priv,
2285 page_count,
2286 I915_SHRINK_BOUND |
2287 I915_SHRINK_UNBOUND |
2288 I915_SHRINK_PURGEABLE);
6c085a72
CW
2289 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2290 }
2291 if (IS_ERR(page)) {
2292 /* We've tried hard to allocate the memory by reaping
2293 * our own buffer, now let the real VM do its job and
2294 * go down in flames if truly OOM.
2295 */
6c085a72 2296 i915_gem_shrink_all(dev_priv);
f461d1be 2297 page = shmem_read_mapping_page(mapping, i);
e2273302
ID
2298 if (IS_ERR(page)) {
2299 ret = PTR_ERR(page);
6c085a72 2300 goto err_pages;
e2273302 2301 }
6c085a72 2302 }
426729dc
KRW
2303#ifdef CONFIG_SWIOTLB
2304 if (swiotlb_nr_tbl()) {
2305 st->nents++;
2306 sg_set_page(sg, page, PAGE_SIZE, 0);
2307 sg = sg_next(sg);
2308 continue;
2309 }
2310#endif
90797e6d
ID
2311 if (!i || page_to_pfn(page) != last_pfn + 1) {
2312 if (i)
2313 sg = sg_next(sg);
2314 st->nents++;
2315 sg_set_page(sg, page, PAGE_SIZE, 0);
2316 } else {
2317 sg->length += PAGE_SIZE;
2318 }
2319 last_pfn = page_to_pfn(page);
3bbbe706
DV
2320
2321 /* Check that the i965g/gm workaround works. */
2322 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
e5281ccd 2323 }
426729dc
KRW
2324#ifdef CONFIG_SWIOTLB
2325 if (!swiotlb_nr_tbl())
2326#endif
2327 sg_mark_end(sg);
74ce6b6c
CW
2328 obj->pages = st;
2329
e2273302
ID
2330 ret = i915_gem_gtt_prepare_object(obj);
2331 if (ret)
2332 goto err_pages;
2333
6dacfd2f 2334 if (i915_gem_object_needs_bit17_swizzle(obj))
e5281ccd
CW
2335 i915_gem_object_do_bit_17_swizzle(obj);
2336
656bfa3a
DV
2337 if (obj->tiling_mode != I915_TILING_NONE &&
2338 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2339 i915_gem_object_pin_pages(obj);
2340
e5281ccd
CW
2341 return 0;
2342
2343err_pages:
90797e6d
ID
2344 sg_mark_end(sg);
2345 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
09cbfeaf 2346 put_page(sg_page_iter_page(&sg_iter));
9da3da66
CW
2347 sg_free_table(st);
2348 kfree(st);
0820baf3
CW
2349
2350 /* shmemfs first checks if there is enough memory to allocate the page
2351 * and reports ENOSPC should there be insufficient, along with the usual
2352 * ENOMEM for a genuine allocation failure.
2353 *
2354 * We use ENOSPC in our driver to mean that we have run out of aperture
2355 * space and so want to translate the error from shmemfs back to our
2356 * usual understanding of ENOMEM.
2357 */
e2273302
ID
2358 if (ret == -ENOSPC)
2359 ret = -ENOMEM;
2360
2361 return ret;
673a394b
EA
2362}
2363
37e680a1
CW
2364/* Ensure that the associated pages are gathered from the backing storage
2365 * and pinned into our object. i915_gem_object_get_pages() may be called
2366 * multiple times before they are released by a single call to
2367 * i915_gem_object_put_pages() - once the pages are no longer referenced
2368 * either as a result of memory pressure (reaping pages under the shrinker)
2369 * or as the object is itself released.
2370 */
2371int
2372i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2373{
2374 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2375 const struct drm_i915_gem_object_ops *ops = obj->ops;
2376 int ret;
2377
2f745ad3 2378 if (obj->pages)
37e680a1
CW
2379 return 0;
2380
43e28f09 2381 if (obj->madv != I915_MADV_WILLNEED) {
bd9b6a4e 2382 DRM_DEBUG("Attempting to obtain a purgeable object\n");
8c99e57d 2383 return -EFAULT;
43e28f09
CW
2384 }
2385
a5570178
CW
2386 BUG_ON(obj->pages_pin_count);
2387
37e680a1
CW
2388 ret = ops->get_pages(obj);
2389 if (ret)
2390 return ret;
2391
35c20a60 2392 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
ee286370
CW
2393
2394 obj->get_page.sg = obj->pages->sgl;
2395 obj->get_page.last = 0;
2396
37e680a1 2397 return 0;
673a394b
EA
2398}
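
/*
 * Editor's sketch (not part of the driver): the typical caller pattern for
 * the interface above, with struct_mutex held.
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret == 0) {
 *		i915_gem_object_pin_pages(obj);		// pages cannot be reaped now
 *		// ... walk obj->pages ...
 *		i915_gem_object_unpin_pages(obj);	// shrinker may reclaim again
 *	}
 */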
2399
0a798eb9
CW
2400void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
2401{
2402 int ret;
2403
2404 lockdep_assert_held(&obj->base.dev->struct_mutex);
2405
2406 ret = i915_gem_object_get_pages(obj);
2407 if (ret)
2408 return ERR_PTR(ret);
2409
2410 i915_gem_object_pin_pages(obj);
2411
2412 if (obj->mapping == NULL) {
0a798eb9 2413 struct page **pages;
0a798eb9 2414
fb8621d3
CW
2415 pages = NULL;
2416 if (obj->base.size == PAGE_SIZE)
2417 obj->mapping = kmap(sg_page(obj->pages->sgl));
2418 else
2419 pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
2420 sizeof(*pages),
2421 GFP_TEMPORARY);
0a798eb9 2422 if (pages != NULL) {
fb8621d3
CW
2423 struct sg_page_iter sg_iter;
2424 int n;
2425
0a798eb9
CW
2426 n = 0;
2427 for_each_sg_page(obj->pages->sgl, &sg_iter,
2428 obj->pages->nents, 0)
2429 pages[n++] = sg_page_iter_page(&sg_iter);
2430
2431 obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
2432 drm_free_large(pages);
2433 }
2434 if (obj->mapping == NULL) {
2435 i915_gem_object_unpin_pages(obj);
2436 return ERR_PTR(-ENOMEM);
2437 }
2438 }
2439
2440 return obj->mapping;
2441}
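
/*
 * Editor's sketch (not part of the driver): using the helper above; the
 * pointer stays valid for as long as the pages remain pinned.
 *
 *	void *vaddr = i915_gem_object_pin_map(obj);
 *	if (!IS_ERR(vaddr)) {
 *		memset(vaddr, 0, obj->base.size);	// contiguous CPU view
 *		i915_gem_object_unpin_pages(obj);	// balance the pin taken by pin_map
 *	}
 */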
2442
b4716185 2443void i915_vma_move_to_active(struct i915_vma *vma,
b2af0376 2444 struct drm_i915_gem_request *req)
673a394b 2445{
b4716185 2446 struct drm_i915_gem_object *obj = vma->obj;
e2f80391 2447 struct intel_engine_cs *engine;
b2af0376 2448
666796da 2449 engine = i915_gem_request_get_engine(req);
673a394b
EA
2450
2451 /* Add a reference if we're newly entering the active list. */
b4716185 2452 if (obj->active == 0)
05394f39 2453 drm_gem_object_reference(&obj->base);
666796da 2454 obj->active |= intel_engine_flag(engine);
e35a41de 2455
117897f4 2456 list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
e2f80391 2457 i915_gem_request_assign(&obj->last_read_req[engine->id], req);
caea7476 2458
1c7f4bca 2459 list_move_tail(&vma->vm_link, &vma->vm->active_list);
caea7476
CW
2460}
2461
b4716185
CW
2462static void
2463i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
e2d05a8b 2464{
d501b1d2
CW
2465 GEM_BUG_ON(obj->last_write_req == NULL);
2466 GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
b4716185
CW
2467
2468 i915_gem_request_assign(&obj->last_write_req, NULL);
de152b62 2469 intel_fb_obj_flush(obj, true, ORIGIN_CS);
e2d05a8b
BW
2470}
2471
caea7476 2472static void
b4716185 2473i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
ce44b0ea 2474{
feb822cf 2475 struct i915_vma *vma;
ce44b0ea 2476
d501b1d2
CW
2477 GEM_BUG_ON(obj->last_read_req[ring] == NULL);
2478 GEM_BUG_ON(!(obj->active & (1 << ring)));
b4716185 2479
117897f4 2480 list_del_init(&obj->engine_list[ring]);
b4716185
CW
2481 i915_gem_request_assign(&obj->last_read_req[ring], NULL);
2482
4a570db5 2483 if (obj->last_write_req && obj->last_write_req->engine->id == ring)
b4716185
CW
2484 i915_gem_object_retire__write(obj);
2485
2486 obj->active &= ~(1 << ring);
2487 if (obj->active)
2488 return;
caea7476 2489
6c246959
CW
2490 /* Bump our place on the bound list to keep it roughly in LRU order
2491 * so that we don't steal from recently used but inactive objects
2492 * (unless we are forced to ofc!)
2493 */
2494 list_move_tail(&obj->global_list,
2495 &to_i915(obj->base.dev)->mm.bound_list);
2496
1c7f4bca
CW
2497 list_for_each_entry(vma, &obj->vma_list, obj_link) {
2498 if (!list_empty(&vma->vm_link))
2499 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
feb822cf 2500 }
caea7476 2501
97b2a6a1 2502 i915_gem_request_assign(&obj->last_fenced_req, NULL);
caea7476 2503 drm_gem_object_unreference(&obj->base);
c8725f3d
CW
2504}
2505
9d773091 2506static int
c033666a 2507i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
53d227f2 2508{
e2f80391 2509 struct intel_engine_cs *engine;
29dcb570 2510 int ret;
53d227f2 2511
107f27a5 2512 /* Carefully retire all requests without writing to the rings */
b4ac5afc 2513 for_each_engine(engine, dev_priv) {
666796da 2514 ret = intel_engine_idle(engine);
107f27a5
CW
2515 if (ret)
2516 return ret;
9d773091 2517 }
c033666a 2518 i915_gem_retire_requests(dev_priv);
107f27a5
CW
2519
2520 /* Finally reset hw state */
29dcb570 2521 for_each_engine(engine, dev_priv)
e2f80391 2522 intel_ring_init_seqno(engine, seqno);
498d2ac1 2523
9d773091 2524 return 0;
53d227f2
DV
2525}
2526
fca26bb4
MK
2527int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2528{
2529 struct drm_i915_private *dev_priv = dev->dev_private;
2530 int ret;
2531
2532 if (seqno == 0)
2533 return -EINVAL;
2534
2535 /* HWS page needs to be set less than what we
2536 * will inject to ring
2537 */
c033666a 2538 ret = i915_gem_init_seqno(dev_priv, seqno - 1);
fca26bb4
MK
2539 if (ret)
2540 return ret;
2541
2542 /* Carefully set the last_seqno value so that wrap
2543 * detection still works
2544 */
2545 dev_priv->next_seqno = seqno;
2546 dev_priv->last_seqno = seqno - 1;
2547 if (dev_priv->last_seqno == 0)
2548 dev_priv->last_seqno--;
2549
2550 return 0;
2551}
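
/*
 * Worked example (editor's note): asking for seqno == 1 leaves next_seqno = 1
 * and last_seqno = 0; the check above then wraps last_seqno to 0xffffffff so
 * that wrap detection keeps treating the injected seqno as "newer".
 */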
2552
9d773091 2553int
c033666a 2554i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
53d227f2 2555{
9d773091
CW
2556 /* reserve 0 for non-seqno */
2557 if (dev_priv->next_seqno == 0) {
c033666a 2558 int ret = i915_gem_init_seqno(dev_priv, 0);
9d773091
CW
2559 if (ret)
2560 return ret;
53d227f2 2561
9d773091
CW
2562 dev_priv->next_seqno = 1;
2563 }
53d227f2 2564
f72b3435 2565 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
9d773091 2566 return 0;
53d227f2
DV
2567}
2568
bf7dc5b7
JH
2569/*
2570 * NB: This function is not allowed to fail. Doing so would mean the
2571 * request is not being tracked for completion but the work itself is
2572 * going to happen on the hardware. This would be a Bad Thing(tm).
2573 */
75289874 2574void __i915_add_request(struct drm_i915_gem_request *request,
5b4a60c2
JH
2575 struct drm_i915_gem_object *obj,
2576 bool flush_caches)
673a394b 2577{
e2f80391 2578 struct intel_engine_cs *engine;
75289874 2579 struct drm_i915_private *dev_priv;
48e29f55 2580 struct intel_ringbuffer *ringbuf;
6d3d8274 2581 u32 request_start;
0251a963 2582 u32 reserved_tail;
3cce469c
CW
2583 int ret;
2584
48e29f55 2585 if (WARN_ON(request == NULL))
bf7dc5b7 2586 return;
48e29f55 2587
4a570db5 2588 engine = request->engine;
39dabecd 2589 dev_priv = request->i915;
75289874
JH
2590 ringbuf = request->ringbuf;
2591
29b1b415
JH
2592 /*
2593 * To ensure that this call will not fail, space for its emissions
2594 * should already have been reserved in the ring buffer. Let the ring
2595 * know that it is time to use that space up.
2596 */
48e29f55 2597 request_start = intel_ring_get_tail(ringbuf);
0251a963
CW
2598 reserved_tail = request->reserved_space;
2599 request->reserved_space = 0;
2600
cc889e0f
DV
2601 /*
2602 * Emit any outstanding flushes - execbuf can fail to emit the flush
2603 * after having emitted the batchbuffer command. Hence we need to fix
2604 * things up similar to emitting the lazy request. The difference here
2605 * is that the flush _must_ happen before the next request, no matter
2606 * what.
2607 */
5b4a60c2
JH
2608 if (flush_caches) {
2609 if (i915.enable_execlists)
4866d729 2610 ret = logical_ring_flush_all_caches(request);
5b4a60c2 2611 else
4866d729 2612 ret = intel_ring_flush_all_caches(request);
5b4a60c2
JH
2613 /* Not allowed to fail! */
2614 WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
2615 }
cc889e0f 2616
7c90b7de
CW
2617 trace_i915_gem_request_add(request);
2618
2619 request->head = request_start;
2620
2621 /* Whilst this request exists, batch_obj will be on the
2622 * active_list, and so will hold the active reference. Only when this
2623 * request is retired will the batch_obj be moved onto the
2624 * inactive_list and lose its active reference. Hence we do not need
2625 * to explicitly hold another reference here.
2626 */
2627 request->batch_obj = obj;
2628
2629 /* Seal the request and mark it as pending execution. Note that
2630 * we may inspect this state, without holding any locks, during
2631 * hangcheck. Hence we apply the barrier to ensure that we do not
2632 * see a more recent value in the hws than we are tracking.
2633 */
2634 request->emitted_jiffies = jiffies;
2635 request->previous_seqno = engine->last_submitted_seqno;
2636 smp_store_mb(engine->last_submitted_seqno, request->seqno);
2637 list_add_tail(&request->list, &engine->request_list);
2638
a71d8d94
CW
2639 /* Record the position of the start of the request so that
2640 * should we detect the updated seqno part-way through the
2641 * GPU processing the request, we never over-estimate the
2642 * position of the head.
2643 */
6d3d8274 2644 request->postfix = intel_ring_get_tail(ringbuf);
a71d8d94 2645
bf7dc5b7 2646 if (i915.enable_execlists)
e2f80391 2647 ret = engine->emit_request(request);
bf7dc5b7 2648 else {
e2f80391 2649 ret = engine->add_request(request);
53292cdb
MT
2650
2651 request->tail = intel_ring_get_tail(ringbuf);
48e29f55 2652 }
bf7dc5b7
JH
2653 /* Not allowed to fail! */
2654 WARN(ret, "emit|add_request failed: %d!\n", ret);
673a394b 2655
c033666a 2656 i915_queue_hangcheck(engine->i915);
10cd45b6 2657
87255483
DV
2658 queue_delayed_work(dev_priv->wq,
2659 &dev_priv->mm.retire_work,
2660 round_jiffies_up_relative(HZ));
7d993739 2661 intel_mark_busy(dev_priv);
cc889e0f 2662
29b1b415 2663 /* Sanity check that the reserved size was large enough. */
0251a963
CW
2664 ret = intel_ring_get_tail(ringbuf) - request_start;
2665 if (ret < 0)
2666 ret += ringbuf->size;
2667 WARN_ONCE(ret > reserved_tail,
2668 "Not enough space reserved (%d bytes) "
2669 "for adding the request (%d bytes)\n",
2670 reserved_tail, ret);
673a394b
EA
2671}
2672
939fd762 2673static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
273497e5 2674 const struct intel_context *ctx)
be62acb4 2675{
44e2c070 2676 unsigned long elapsed;
be62acb4 2677
44e2c070
MK
2678 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2679
2680 if (ctx->hang_stats.banned)
be62acb4
MK
2681 return true;
2682
676fa572
CW
2683 if (ctx->hang_stats.ban_period_seconds &&
2684 elapsed <= ctx->hang_stats.ban_period_seconds) {
ccc7bed0 2685 if (!i915_gem_context_is_default(ctx)) {
3fac8978 2686 DRM_DEBUG("context hanging too fast, banning!\n");
ccc7bed0 2687 return true;
88b4aa87
MK
2688 } else if (i915_stop_ring_allow_ban(dev_priv)) {
2689 if (i915_stop_ring_allow_warn(dev_priv))
2690 DRM_ERROR("gpu hanging too fast, banning!\n");
ccc7bed0 2691 return true;
3fac8978 2692 }
be62acb4
MK
2693 }
2694
2695 return false;
2696}
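
/*
 * Worked example (editor's note): with a ban period of, say, the default
 * 6 seconds, a non-default context found guilty again 3 seconds after its
 * last recorded hang (elapsed <= ban_period_seconds) is banned; a hang
 * 10 seconds later merely records another guilty strike.
 */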
2697
939fd762 2698static void i915_set_reset_status(struct drm_i915_private *dev_priv,
273497e5 2699 struct intel_context *ctx,
b6b0fac0 2700 const bool guilty)
aa60c664 2701{
44e2c070
MK
2702 struct i915_ctx_hang_stats *hs;
2703
2704 if (WARN_ON(!ctx))
2705 return;
aa60c664 2706
44e2c070
MK
2707 hs = &ctx->hang_stats;
2708
2709 if (guilty) {
939fd762 2710 hs->banned = i915_context_is_banned(dev_priv, ctx);
44e2c070
MK
2711 hs->batch_active++;
2712 hs->guilty_ts = get_seconds();
2713 } else {
2714 hs->batch_pending++;
aa60c664
MK
2715 }
2716}
2717
abfe262a
JH
2718void i915_gem_request_free(struct kref *req_ref)
2719{
2720 struct drm_i915_gem_request *req = container_of(req_ref,
2721 typeof(*req), ref);
efab6d8d 2722 kmem_cache_free(req->i915->requests, req);
0e50e96b
MK
2723}
2724
26827088 2725static inline int
0bc40be8 2726__i915_gem_request_alloc(struct intel_engine_cs *engine,
26827088
DG
2727 struct intel_context *ctx,
2728 struct drm_i915_gem_request **req_out)
6689cb2b 2729{
c033666a 2730 struct drm_i915_private *dev_priv = engine->i915;
299259a3 2731 unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
eed29a5b 2732 struct drm_i915_gem_request *req;
6689cb2b 2733 int ret;
6689cb2b 2734
217e46b5
JH
2735 if (!req_out)
2736 return -EINVAL;
2737
bccca494 2738 *req_out = NULL;
6689cb2b 2739
f4457ae7
CW
2740 /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
2741 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
2742 * and restart.
2743 */
2744 ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
299259a3
CW
2745 if (ret)
2746 return ret;
2747
eed29a5b
DV
2748 req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
2749 if (req == NULL)
6689cb2b
JH
2750 return -ENOMEM;
2751
c033666a 2752 ret = i915_gem_get_seqno(engine->i915, &req->seqno);
9a0c1e27
CW
2753 if (ret)
2754 goto err;
6689cb2b 2755
40e895ce
JH
2756 kref_init(&req->ref);
2757 req->i915 = dev_priv;
4a570db5 2758 req->engine = engine;
299259a3 2759 req->reset_counter = reset_counter;
40e895ce
JH
2760 req->ctx = ctx;
2761 i915_gem_context_reference(req->ctx);
6689cb2b 2762
29b1b415
JH
2763 /*
2764 * Reserve space in the ring buffer for all the commands required to
2765 * eventually emit this request. This is to guarantee that the
2766 * i915_add_request() call can't fail. Note that the reserve may need
2767 * to be redone if the request is not actually submitted straight
2768 * away, e.g. because a GPU scheduler has deferred it.
29b1b415 2769 */
0251a963 2770 req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
bfa01200
CW
2771
2772 if (i915.enable_execlists)
2773 ret = intel_logical_ring_alloc_request_extras(req);
2774 else
2775 ret = intel_ring_alloc_request_extras(req);
2776 if (ret)
2777 goto err_ctx;
29b1b415 2778
bccca494 2779 *req_out = req;
6689cb2b 2780 return 0;
9a0c1e27 2781
bfa01200
CW
2782err_ctx:
2783 i915_gem_context_unreference(ctx);
9a0c1e27
CW
2784err:
2785 kmem_cache_free(dev_priv->requests, req);
2786 return ret;
0e50e96b
MK
2787}
2788
26827088
DG
2789/**
2790 * i915_gem_request_alloc - allocate a request structure
2791 *
2792 * @engine: engine that we wish to issue the request on.
2793 * @ctx: context that the request will be associated with.
2794 * This can be NULL if the request is not directly related to
2795 * any specific user context, in which case this function will
2796 * choose an appropriate context to use.
2797 *
2798 * Returns a pointer to the allocated request if successful,
2799 * or an error code if not.
2800 */
2801struct drm_i915_gem_request *
2802i915_gem_request_alloc(struct intel_engine_cs *engine,
2803 struct intel_context *ctx)
2804{
2805 struct drm_i915_gem_request *req;
2806 int err;
2807
2808 if (ctx == NULL)
c033666a 2809 ctx = engine->i915->kernel_context;
26827088
DG
2810 err = __i915_gem_request_alloc(engine, ctx, &req);
2811 return err ? ERR_PTR(err) : req;
2812}
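
/*
 * Editor's sketch (not part of the driver): minimal request lifecycle built
 * on the allocator above, assuming a valid engine; NULL picks the kernel
 * context.
 *
 *	struct drm_i915_gem_request *req = i915_gem_request_alloc(engine, NULL);
 *	if (!IS_ERR(req)) {
 *		// ... emit commands against req ...
 *		i915_add_request(req);	// seals and submits; must not fail
 *	}
 */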
2813
8d9fc7fd 2814struct drm_i915_gem_request *
0bc40be8 2815i915_gem_find_active_request(struct intel_engine_cs *engine)
9375e446 2816{
4db080f9
CW
2817 struct drm_i915_gem_request *request;
2818
0bc40be8 2819 list_for_each_entry(request, &engine->request_list, list) {
1b5a433a 2820 if (i915_gem_request_completed(request, false))
4db080f9 2821 continue;
aa60c664 2822
b6b0fac0 2823 return request;
4db080f9 2824 }
b6b0fac0
MK
2825
2826 return NULL;
2827}
2828
666796da 2829static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv,
0bc40be8 2830 struct intel_engine_cs *engine)
b6b0fac0
MK
2831{
2832 struct drm_i915_gem_request *request;
2833 bool ring_hung;
2834
0bc40be8 2835 request = i915_gem_find_active_request(engine);
b6b0fac0
MK
2836
2837 if (request == NULL)
2838 return;
2839
0bc40be8 2840 ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
b6b0fac0 2841
939fd762 2842 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
b6b0fac0 2843
0bc40be8 2844 list_for_each_entry_continue(request, &engine->request_list, list)
939fd762 2845 i915_set_reset_status(dev_priv, request->ctx, false);
4db080f9 2846}
aa60c664 2847
666796da 2848static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
0bc40be8 2849 struct intel_engine_cs *engine)
4db080f9 2850{
608c1a52
CW
2851 struct intel_ringbuffer *buffer;
2852
0bc40be8 2853 while (!list_empty(&engine->active_list)) {
05394f39 2854 struct drm_i915_gem_object *obj;
9375e446 2855
0bc40be8 2856 obj = list_first_entry(&engine->active_list,
05394f39 2857 struct drm_i915_gem_object,
117897f4 2858 engine_list[engine->id]);
9375e446 2859
0bc40be8 2860 i915_gem_object_retire__read(obj, engine->id);
673a394b 2861 }
1d62beea 2862
dcb4c12a
OM
2863 /*
2864 * Clear the execlists queue up before freeing the requests, as those
2865 * are the ones that keep the context and ringbuffer backing objects
2866 * pinned in place.
2867 */
dcb4c12a 2868
7de1691a 2869 if (i915.enable_execlists) {
27af5eea
TU
2870 /* Ensure irq handler finishes or is cancelled. */
2871 tasklet_kill(&engine->irq_tasklet);
1197b4f2 2872
e39d42fa 2873 intel_execlists_cancel_requests(engine);
dcb4c12a
OM
2874 }
2875
1d62beea
BW
2876 /*
2877 * We must free the requests after all the corresponding objects have
2878 * been moved off the active lists, which is the same order as the normal
2879 * retire_requests function uses. This is important if objects hold
2880 * implicit references on things like e.g. ppgtt address spaces through
2881 * the request.
2882 */
0bc40be8 2883 while (!list_empty(&engine->request_list)) {
1d62beea
BW
2884 struct drm_i915_gem_request *request;
2885
0bc40be8 2886 request = list_first_entry(&engine->request_list,
1d62beea
BW
2887 struct drm_i915_gem_request,
2888 list);
2889
b4716185 2890 i915_gem_request_retire(request);
1d62beea 2891 }
608c1a52
CW
2892
2893 /* Having flushed all requests from all queues, we know that all
2894 * ringbuffers must now be empty. However, since we do not reclaim
2895 * all space when retiring the request (to prevent HEADs colliding
2896 * with rapid ringbuffer wraparound) the amount of available space
2897 * upon reset is less than when we start. Do one more pass over
2898 * all the ringbuffers to reset last_retired_head.
2899 */
0bc40be8 2900 list_for_each_entry(buffer, &engine->buffers, link) {
608c1a52
CW
2901 buffer->last_retired_head = buffer->tail;
2902 intel_ring_update_space(buffer);
2903 }
2ed53a94
CW
2904
2905 intel_ring_init_seqno(engine, engine->last_submitted_seqno);
673a394b
EA
2906}
2907
069efc1d 2908void i915_gem_reset(struct drm_device *dev)
673a394b 2909{
77f01230 2910 struct drm_i915_private *dev_priv = dev->dev_private;
e2f80391 2911 struct intel_engine_cs *engine;
673a394b 2912
4db080f9
CW
2913 /*
2914 * Before we free the objects from the requests, we need to inspect
2915 * them for finding the guilty party. As the requests only borrow
2916 * their reference to the objects, the inspection must be done first.
2917 */
b4ac5afc 2918 for_each_engine(engine, dev_priv)
666796da 2919 i915_gem_reset_engine_status(dev_priv, engine);
4db080f9 2920
b4ac5afc 2921 for_each_engine(engine, dev_priv)
666796da 2922 i915_gem_reset_engine_cleanup(dev_priv, engine);
dfaae392 2923
acce9ffa
BW
2924 i915_gem_context_reset(dev);
2925
19b2dbde 2926 i915_gem_restore_fences(dev);
b4716185
CW
2927
2928 WARN_ON(i915_verify_lists(dev));
673a394b
EA
2929}
2930
2931/**
2932 * This function clears the request list as sequence numbers are passed.
2933 */
1cf0ba14 2934void
0bc40be8 2935i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
673a394b 2936{
0bc40be8 2937 WARN_ON(i915_verify_lists(engine->dev));
673a394b 2938
832a3aad
CW
2939 /* Retire requests first as we use it above for the early return.
2940 * If we retire requests last, we may use a later seqno and so clear
2941 * the requests lists without clearing the active list, leading to
2942 * confusion.
e9103038 2943 */
0bc40be8 2944 while (!list_empty(&engine->request_list)) {
673a394b 2945 struct drm_i915_gem_request *request;
673a394b 2946
0bc40be8 2947 request = list_first_entry(&engine->request_list,
673a394b
EA
2948 struct drm_i915_gem_request,
2949 list);
673a394b 2950
1b5a433a 2951 if (!i915_gem_request_completed(request, true))
b84d5f0c
CW
2952 break;
2953
b4716185 2954 i915_gem_request_retire(request);
b84d5f0c 2955 }
673a394b 2956
832a3aad
CW
2957 /* Move any buffers on the active list that are no longer referenced
2958 * by the ringbuffer to the flushing/inactive lists as appropriate,
2959 * before we free the context associated with the requests.
2960 */
0bc40be8 2961 while (!list_empty(&engine->active_list)) {
832a3aad
CW
2962 struct drm_i915_gem_object *obj;
2963
0bc40be8
TU
2964 obj = list_first_entry(&engine->active_list,
2965 struct drm_i915_gem_object,
117897f4 2966 engine_list[engine->id]);
832a3aad 2967
0bc40be8 2968 if (!list_empty(&obj->last_read_req[engine->id]->list))
832a3aad
CW
2969 break;
2970
0bc40be8 2971 i915_gem_object_retire__read(obj, engine->id);
832a3aad
CW
2972 }
2973
0bc40be8
TU
2974 if (unlikely(engine->trace_irq_req &&
2975 i915_gem_request_completed(engine->trace_irq_req, true))) {
2976 engine->irq_put(engine);
2977 i915_gem_request_assign(&engine->trace_irq_req, NULL);
9d34e5db 2978 }
23bc5982 2979
0bc40be8 2980 WARN_ON(i915_verify_lists(engine->dev));
673a394b
EA
2981}
2982
b29c19b6 2983bool
c033666a 2984i915_gem_retire_requests(struct drm_i915_private *dev_priv)
b09a1fec 2985{
e2f80391 2986 struct intel_engine_cs *engine;
b29c19b6 2987 bool idle = true;
b09a1fec 2988
b4ac5afc 2989 for_each_engine(engine, dev_priv) {
e2f80391
TU
2990 i915_gem_retire_requests_ring(engine);
2991 idle &= list_empty(&engine->request_list);
c86ee3a9 2992 if (i915.enable_execlists) {
27af5eea 2993 spin_lock_bh(&engine->execlist_lock);
e2f80391 2994 idle &= list_empty(&engine->execlist_queue);
27af5eea 2995 spin_unlock_bh(&engine->execlist_lock);
c86ee3a9 2996 }
b29c19b6
CW
2997 }
2998
2999 if (idle)
3000 mod_delayed_work(dev_priv->wq,
3001 &dev_priv->mm.idle_work,
3002 msecs_to_jiffies(100));
3003
3004 return idle;
b09a1fec
CW
3005}
3006
75ef9da2 3007static void
673a394b
EA
3008i915_gem_retire_work_handler(struct work_struct *work)
3009{
b29c19b6
CW
3010 struct drm_i915_private *dev_priv =
3011 container_of(work, typeof(*dev_priv), mm.retire_work.work);
3012 struct drm_device *dev = dev_priv->dev;
0a58705b 3013 bool idle;
673a394b 3014
891b48cf 3015 /* Come back later if the device is busy... */
b29c19b6
CW
3016 idle = false;
3017 if (mutex_trylock(&dev->struct_mutex)) {
c033666a 3018 idle = i915_gem_retire_requests(dev_priv);
b29c19b6 3019 mutex_unlock(&dev->struct_mutex);
673a394b 3020 }
b29c19b6 3021 if (!idle)
bcb45086
CW
3022 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
3023 round_jiffies_up_relative(HZ));
b29c19b6 3024}
0a58705b 3025
b29c19b6
CW
3026static void
3027i915_gem_idle_work_handler(struct work_struct *work)
3028{
3029 struct drm_i915_private *dev_priv =
3030 container_of(work, typeof(*dev_priv), mm.idle_work.work);
35c94185 3031 struct drm_device *dev = dev_priv->dev;
b4ac5afc 3032 struct intel_engine_cs *engine;
b29c19b6 3033
b4ac5afc
DG
3034 for_each_engine(engine, dev_priv)
3035 if (!list_empty(&engine->request_list))
423795cb 3036 return;
35c94185 3037
30ecad77 3038 /* we probably should sync with hangcheck here, using cancel_work_sync.
b4ac5afc 3039 * Also locking seems to be fubar here, engine->request_list is protected
30ecad77
DV
3040 * by dev->struct_mutex. */
3041
7d993739 3042 intel_mark_idle(dev_priv);
35c94185
CW
3043
3044 if (mutex_trylock(&dev->struct_mutex)) {
b4ac5afc 3045 for_each_engine(engine, dev_priv)
e2f80391 3046 i915_gem_batch_pool_fini(&engine->batch_pool);
b29c19b6 3047
35c94185
CW
3048 mutex_unlock(&dev->struct_mutex);
3049 }
673a394b
EA
3050}
3051
30dfebf3
DV
3052/**
3053 * Ensures that an object will eventually get non-busy by flushing any required
3054 * write domains, emitting any outstanding lazy request and retiring any
3055 * completed requests.
3056 */
3057static int
3058i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
3059{
a5ac0f90 3060 int i;
b4716185
CW
3061
3062 if (!obj->active)
3063 return 0;
30dfebf3 3064
666796da 3065 for (i = 0; i < I915_NUM_ENGINES; i++) {
b4716185 3066 struct drm_i915_gem_request *req;
41c52415 3067
b4716185
CW
3068 req = obj->last_read_req[i];
3069 if (req == NULL)
3070 continue;
3071
3072 if (list_empty(&req->list))
3073 goto retire;
3074
b4716185
CW
3075 if (i915_gem_request_completed(req, true)) {
3076 __i915_gem_request_retire__upto(req);
3077retire:
3078 i915_gem_object_retire__read(obj, i);
3079 }
30dfebf3
DV
3080 }
3081
3082 return 0;
3083}
3084
23ba4fd0
BW
3085/**
3086 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
3087 * @DRM_IOCTL_ARGS: standard ioctl arguments
3088 *
3089 * Returns 0 if successful, else an error is returned with the remaining time in
3090 * the timeout parameter.
3091 * -ETIME: object is still busy after timeout
3092 * -ERESTARTSYS: signal interrupted the wait
3093 * -ENOENT: object doesn't exist
3094 * Also possible, but rare:
3095 * -EAGAIN: GPU wedged
3096 * -ENOMEM: damn
3097 * -ENODEV: Internal IRQ fail
3098 * -E?: The add request failed
3099 *
3100 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
3101 * non-zero timeout parameter the wait ioctl will wait for the given number of
3102 * nanoseconds on an object becoming unbusy. Since the wait itself does so
3103 * without holding struct_mutex, the object may become re-busied before this
3104 * function completes. A similar but shorter race condition exists in the busy
3105 * ioctl.
3106 */
3107int
3108i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3109{
3110 struct drm_i915_gem_wait *args = data;
3111 struct drm_i915_gem_object *obj;
666796da 3112 struct drm_i915_gem_request *req[I915_NUM_ENGINES];
b4716185
CW
3113 int i, n = 0;
3114 int ret;
23ba4fd0 3115
11b5d511
DV
3116 if (args->flags != 0)
3117 return -EINVAL;
3118
23ba4fd0
BW
3119 ret = i915_mutex_lock_interruptible(dev);
3120 if (ret)
3121 return ret;
3122
3123 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
3124 if (&obj->base == NULL) {
3125 mutex_unlock(&dev->struct_mutex);
3126 return -ENOENT;
3127 }
3128
30dfebf3
DV
3129 /* Need to make sure the object gets inactive eventually. */
3130 ret = i915_gem_object_flush_active(obj);
23ba4fd0
BW
3131 if (ret)
3132 goto out;
3133
b4716185 3134 if (!obj->active)
97b2a6a1 3135 goto out;
23ba4fd0 3136
23ba4fd0 3137 /* Do this after OLR check to make sure we make forward progress polling
762e4583 3138 * on this IOCTL with a timeout == 0 (like busy ioctl)
23ba4fd0 3139 */
762e4583 3140 if (args->timeout_ns == 0) {
23ba4fd0
BW
3141 ret = -ETIME;
3142 goto out;
3143 }
3144
3145 drm_gem_object_unreference(&obj->base);
b4716185 3146
666796da 3147 for (i = 0; i < I915_NUM_ENGINES; i++) {
b4716185
CW
3148 if (obj->last_read_req[i] == NULL)
3149 continue;
3150
3151 req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
3152 }
3153
23ba4fd0
BW
3154 mutex_unlock(&dev->struct_mutex);
3155
b4716185
CW
3156 for (i = 0; i < n; i++) {
3157 if (ret == 0)
299259a3 3158 ret = __i915_wait_request(req[i], true,
b4716185 3159 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
b6aa0873 3160 to_rps_client(file));
73db04cf 3161 i915_gem_request_unreference(req[i]);
b4716185 3162 }
ff865885 3163 return ret;
23ba4fd0
BW
3164
3165out:
3166 drm_gem_object_unreference(&obj->base);
3167 mutex_unlock(&dev->struct_mutex);
3168 return ret;
3169}
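
/*
 * Editor's sketch (not part of the driver): userspace use of the wait ioctl
 * above, assuming an open DRM fd `fd` and GEM handle `handle`.
 *
 *	struct drm_i915_gem_wait wait = { .bo_handle = handle };
 *	wait.timeout_ns = 0;	// timeout 0: errno == ETIME means still busy
 *	int busy = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) != 0;
 *	wait.timeout_ns = 100000000;	// or block for up to 100ms
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 */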
3170
b4716185
CW
3171static int
3172__i915_gem_object_sync(struct drm_i915_gem_object *obj,
3173 struct intel_engine_cs *to,
91af127f
JH
3174 struct drm_i915_gem_request *from_req,
3175 struct drm_i915_gem_request **to_req)
b4716185
CW
3176{
3177 struct intel_engine_cs *from;
3178 int ret;
3179
666796da 3180 from = i915_gem_request_get_engine(from_req);
b4716185
CW
3181 if (to == from)
3182 return 0;
3183
91af127f 3184 if (i915_gem_request_completed(from_req, true))
b4716185
CW
3185 return 0;
3186
c033666a 3187 if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
a6f766f3 3188 struct drm_i915_private *i915 = to_i915(obj->base.dev);
91af127f 3189 ret = __i915_wait_request(from_req,
a6f766f3
CW
3190 i915->mm.interruptible,
3191 NULL,
3192 &i915->rps.semaphores);
b4716185
CW
3193 if (ret)
3194 return ret;
3195
91af127f 3196 i915_gem_object_retire_request(obj, from_req);
b4716185
CW
3197 } else {
3198 int idx = intel_ring_sync_index(from, to);
91af127f
JH
3199 u32 seqno = i915_gem_request_get_seqno(from_req);
3200
3201 WARN_ON(!to_req);
b4716185
CW
3202
3203 if (seqno <= from->semaphore.sync_seqno[idx])
3204 return 0;
3205
91af127f 3206 if (*to_req == NULL) {
26827088
DG
3207 struct drm_i915_gem_request *req;
3208
3209 req = i915_gem_request_alloc(to, NULL);
3210 if (IS_ERR(req))
3211 return PTR_ERR(req);
3212
3213 *to_req = req;
91af127f
JH
3214 }
3215
599d924c
JH
3216 trace_i915_gem_ring_sync_to(*to_req, from, from_req);
3217 ret = to->semaphore.sync_to(*to_req, from, seqno);
b4716185
CW
3218 if (ret)
3219 return ret;
3220
3221 /* We use last_read_req because sync_to()
3222 * might have just caused seqno wrap under
3223 * the radar.
3224 */
3225 from->semaphore.sync_seqno[idx] =
3226 i915_gem_request_get_seqno(obj->last_read_req[from->id]);
3227 }
3228
3229 return 0;
3230}
3231
5816d648
BW
3232/**
3233 * i915_gem_object_sync - sync an object to a ring.
3234 *
3235 * @obj: object which may be in use on another ring.
3236 * @to: ring we wish to use the object on. May be NULL.
91af127f
JH
3237 * @to_req: request we wish to use the object for. See below.
3238 * This will be allocated and returned if a request is
3239 * required but not passed in.
5816d648
BW
3240 *
3241 * This code is meant to abstract object synchronization with the GPU.
3242 * Calling with NULL implies synchronizing the object with the CPU
b4716185 3243 * rather than a particular GPU ring. Conceptually we serialise writes
91af127f 3244 * between engines inside the GPU. We only allow one engine to write
b4716185
CW
3245 * into a buffer at any time, but multiple readers. To ensure each has
3246 * a coherent view of memory, we must:
3247 *
3248 * - If there is an outstanding write request to the object, the new
3249 * request must wait for it to complete (either CPU or in hw, requests
3250 * on the same ring will be naturally ordered).
3251 *
3252 * - If we are a write request (pending_write_domain is set), the new
3253 * request must wait for outstanding read requests to complete.
5816d648 3254 *
91af127f
JH
3255 * For CPU synchronisation (NULL to) no request is required. For syncing with
3256 * rings to_req must be non-NULL. However, a request does not have to be
3257 * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
3258 * request will be allocated automatically and returned through *to_req. Note
3259 * that it is not guaranteed that commands will be emitted (because the system
3260 * might already be idle). Hence there is no need to create a request that
3261 * might never have any work submitted. Note further that if a request is
3262 * returned in *to_req, it is the responsibility of the caller to submit
3263 * that request (after potentially adding more work to it).
3264 *
5816d648
BW
3265 * Returns 0 if successful, else propagates up the lower layer error.
3266 */
2911a35b
BW
3267int
3268i915_gem_object_sync(struct drm_i915_gem_object *obj,
91af127f
JH
3269 struct intel_engine_cs *to,
3270 struct drm_i915_gem_request **to_req)
2911a35b 3271{
b4716185 3272 const bool readonly = obj->base.pending_write_domain == 0;
666796da 3273 struct drm_i915_gem_request *req[I915_NUM_ENGINES];
b4716185 3274 int ret, i, n;
41c52415 3275
b4716185 3276 if (!obj->active)
2911a35b
BW
3277 return 0;
3278
b4716185
CW
3279 if (to == NULL)
3280 return i915_gem_object_wait_rendering(obj, readonly);
2911a35b 3281
b4716185
CW
3282 n = 0;
3283 if (readonly) {
3284 if (obj->last_write_req)
3285 req[n++] = obj->last_write_req;
3286 } else {
666796da 3287 for (i = 0; i < I915_NUM_ENGINES; i++)
b4716185
CW
3288 if (obj->last_read_req[i])
3289 req[n++] = obj->last_read_req[i];
3290 }
3291 for (i = 0; i < n; i++) {
91af127f 3292 ret = __i915_gem_object_sync(obj, to, req[i], to_req);
b4716185
CW
3293 if (ret)
3294 return ret;
3295 }
2911a35b 3296
b4716185 3297 return 0;
2911a35b
BW
3298}
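
/*
 * Editor's sketch (not part of the driver): inter-ring sync as documented
 * above, letting the helper allocate a request only if a semaphore actually
 * needs to be emitted.
 *
 *	struct drm_i915_gem_request *to_req = NULL;
 *	ret = i915_gem_object_sync(obj, to, &to_req);
 *	if (ret == 0 && to_req)
 *		i915_add_request(to_req);	// caller submits what was allocated
 */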
3299
b5ffc9bc
CW
3300static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
3301{
3302 u32 old_write_domain, old_read_domains;
3303
b5ffc9bc
CW
3304 /* Force a pagefault for domain tracking on next user access */
3305 i915_gem_release_mmap(obj);
3306
b97c3d9c
KP
3307 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3308 return;
3309
b5ffc9bc
CW
3310 old_read_domains = obj->base.read_domains;
3311 old_write_domain = obj->base.write_domain;
3312
3313 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
3314 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
3315
3316 trace_i915_gem_object_change_domain(obj,
3317 old_read_domains,
3318 old_write_domain);
3319}
3320
8ef8561f
CW
3321static void __i915_vma_iounmap(struct i915_vma *vma)
3322{
3323 GEM_BUG_ON(vma->pin_count);
3324
3325 if (vma->iomap == NULL)
3326 return;
3327
3328 io_mapping_unmap(vma->iomap);
3329 vma->iomap = NULL;
3330}
3331
e9f24d5f 3332static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
673a394b 3333{
07fe0b12 3334 struct drm_i915_gem_object *obj = vma->obj;
3e31c6c0 3335 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
43e28f09 3336 int ret;
673a394b 3337
1c7f4bca 3338 if (list_empty(&vma->obj_link))
673a394b
EA
3339 return 0;
3340
0ff501cb
DV
3341 if (!drm_mm_node_allocated(&vma->node)) {
3342 i915_gem_vma_destroy(vma);
0ff501cb
DV
3343 return 0;
3344 }
433544bd 3345
d7f46fc4 3346 if (vma->pin_count)
31d8d651 3347 return -EBUSY;
673a394b 3348
c4670ad0
CW
3349 BUG_ON(obj->pages == NULL);
3350
e9f24d5f
TU
3351 if (wait) {
3352 ret = i915_gem_object_wait_rendering(obj, false);
3353 if (ret)
3354 return ret;
3355 }
a8198eea 3356
596c5923 3357 if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
8b1bc9b4 3358 i915_gem_object_finish_gtt(obj);
5323fd04 3359
8b1bc9b4
DV
3360 /* release the fence reg _after_ flushing */
3361 ret = i915_gem_object_put_fence(obj);
3362 if (ret)
3363 return ret;
8ef8561f
CW
3364
3365 __i915_vma_iounmap(vma);
8b1bc9b4 3366 }
96b47b65 3367
07fe0b12 3368 trace_i915_vma_unbind(vma);
db53a302 3369
777dc5bb 3370 vma->vm->unbind_vma(vma);
5e562f1d 3371 vma->bound = 0;
6f65e29a 3372
1c7f4bca 3373 list_del_init(&vma->vm_link);
596c5923 3374 if (vma->is_ggtt) {
fe14d5f4
TU
3375 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3376 obj->map_and_fenceable = false;
3377 } else if (vma->ggtt_view.pages) {
3378 sg_free_table(vma->ggtt_view.pages);
3379 kfree(vma->ggtt_view.pages);
fe14d5f4 3380 }
016a65a3 3381 vma->ggtt_view.pages = NULL;
fe14d5f4 3382 }
673a394b 3383
2f633156
BW
3384 drm_mm_remove_node(&vma->node);
3385 i915_gem_vma_destroy(vma);
3386
3387 /* Since the unbound list is global, only move to that list if
b93dab6e 3388 * no more VMAs exist. */
e2273302 3389 if (list_empty(&obj->vma_list))
2f633156 3390 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
673a394b 3391
70903c3b
CW
3392 /* And finally now the object is completely decoupled from this vma,
3393 * we can drop its hold on the backing storage and allow it to be
3394 * reaped by the shrinker.
3395 */
3396 i915_gem_object_unpin_pages(obj);
3397
88241785 3398 return 0;
54cf91dc
CW
3399}
3400
e9f24d5f
TU
3401int i915_vma_unbind(struct i915_vma *vma)
3402{
3403 return __i915_vma_unbind(vma, true);
3404}
3405
3406int __i915_vma_unbind_no_wait(struct i915_vma *vma)
3407{
3408 return __i915_vma_unbind(vma, false);
3409}
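
/*
 * Hedged usage sketch (illustration only, not driver code): unbinding
 * normally waits for outstanding rendering via i915_vma_unbind(); the
 * _no_wait variant is for callers that have already ensured idleness
 * or must not block. Assumes struct_mutex is held, as for all unbind
 * paths in this file.
 *
 *	ret = i915_vma_unbind(vma);
 *	if (ret == -EBUSY)
 *		return ret;	(VMA still pinned, cannot unbind)
 */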

int i915_gpu_idle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int ret;

	/* Flush everything onto the inactive list. */
	for_each_engine(engine, dev_priv) {
		if (!i915.enable_execlists) {
			struct drm_i915_gem_request *req;

			req = i915_gem_request_alloc(engine, NULL);
			if (IS_ERR(req))
				return PTR_ERR(req);

			ret = i915_switch_context(req);
			i915_add_request_no_flush(req);
			if (ret)
				return ret;
		}

		ret = intel_engine_idle(engine);
		if (ret)
			return ret;
	}

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
				     unsigned long cache_level)
{
	struct drm_mm_node *gtt_space = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	if (!drm_mm_node_allocated(gtt_space))
		return true;

	if (list_empty(&gtt_space->node_list))
		return true;

	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
	if (other->allocated && !other->hole_follows && other->color != cache_level)
		return false;

	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
		return false;

	return true;
}
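
/*
 * Hedged illustration (not driver code) of the colouring rule above: a
 * node is only valid if any allocated neighbour of a different
 * cache-level colour is separated from it by a hole. Assuming nodes A
 * and B are adjacent:
 *
 *	| A (snooped) | B (uncached) |		-> rejected,
 *						   prefetcher may cross
 *	| A (snooped) | hole | B (uncached) |	-> accepted
 */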

/**
 * Finds free space in the GTT aperture and binds the object or a view of it
 * there.
 */
static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
			   const struct i915_ggtt_view *ggtt_view,
			   unsigned alignment,
			   uint64_t flags)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 fence_alignment, unfenced_alignment;
	u32 search_flag, alloc_flag;
	u64 start, end;
	u64 size, fence_size;
	struct i915_vma *vma;
	int ret;

	if (i915_is_ggtt(vm)) {
		u32 view_size;

		if (WARN_ON(!ggtt_view))
			return ERR_PTR(-EINVAL);

		view_size = i915_ggtt_view_size(obj, ggtt_view);

		fence_size = i915_gem_get_gtt_size(dev,
						   view_size,
						   obj->tiling_mode);
		fence_alignment = i915_gem_get_gtt_alignment(dev,
							     view_size,
							     obj->tiling_mode,
							     true);
		unfenced_alignment = i915_gem_get_gtt_alignment(dev,
								view_size,
								obj->tiling_mode,
								false);
		size = flags & PIN_MAPPABLE ? fence_size : view_size;
	} else {
		fence_size = i915_gem_get_gtt_size(dev,
						   obj->base.size,
						   obj->tiling_mode);
		fence_alignment = i915_gem_get_gtt_alignment(dev,
							     obj->base.size,
							     obj->tiling_mode,
							     true);
		unfenced_alignment =
			i915_gem_get_gtt_alignment(dev,
						   obj->base.size,
						   obj->tiling_mode,
						   false);
		size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
	}

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	end = vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, ggtt->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);

	if (alignment == 0)
		alignment = flags & PIN_MAPPABLE ? fence_alignment :
						   unfenced_alignment;
	if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
		DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
			  ggtt_view ? ggtt_view->type : 0,
			  alignment);
		return ERR_PTR(-EINVAL);
	}

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n",
			  ggtt_view ? ggtt_view->type : 0,
			  size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return ERR_PTR(-E2BIG);
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	i915_gem_object_pin_pages(obj);

	vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
			  i915_gem_obj_lookup_or_create_vma(obj, vm);

	if (IS_ERR(vma))
		goto err_unpin;

	if (flags & PIN_OFFSET_FIXED) {
		uint64_t offset = flags & PIN_OFFSET_MASK;

		if (offset & (alignment - 1) || offset + size > end) {
			ret = -EINVAL;
			goto err_free_vma;
		}
		vma->node.start = offset;
		vma->node.size = size;
		vma->node.color = obj->cache_level;
		ret = drm_mm_reserve_node(&vm->mm, &vma->node);
		if (ret) {
			ret = i915_gem_evict_for_vma(vma);
			if (ret == 0)
				ret = drm_mm_reserve_node(&vm->mm, &vma->node);
		}
		if (ret)
			goto err_free_vma;
	} else {
		if (flags & PIN_HIGH) {
			search_flag = DRM_MM_SEARCH_BELOW;
			alloc_flag = DRM_MM_CREATE_TOP;
		} else {
			search_flag = DRM_MM_SEARCH_DEFAULT;
			alloc_flag = DRM_MM_CREATE_DEFAULT;
		}

search_free:
		ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
							  size, alignment,
							  obj->cache_level,
							  start, end,
							  search_flag,
							  alloc_flag);
		if (ret) {
			ret = i915_gem_evict_something(dev, vm, size, alignment,
						       obj->cache_level,
						       start, end,
						       flags);
			if (ret == 0)
				goto search_free;

			goto err_free_vma;
		}
	}
	if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
		ret = -EINVAL;
		goto err_remove_node;
	}

	trace_i915_vma_bind(vma, flags);
	ret = i915_vma_bind(vma, obj->cache_level, flags);
	if (ret)
		goto err_remove_node;

	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
	list_add_tail(&vma->vm_link, &vm->inactive_list);

	return vma;

err_remove_node:
	drm_mm_remove_node(&vma->node);
err_free_vma:
	i915_gem_vma_destroy(vma);
	vma = ERR_PTR(ret);
err_unpin:
	i915_gem_object_unpin_pages(obj);
	return vma;
}
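
/*
 * Hedged sketch (illustration only): how the PIN_* placement flags
 * above combine. PIN_OFFSET_FIXED reserves an exact node, everything
 * else searches; PIN_MAPPABLE additionally clamps the range to the
 * CPU-visible aperture. Assumes struct_mutex held.
 *
 *	flags = PIN_MAPPABLE | PIN_OFFSET_BIAS | (1ull << 20);
 *	vma = i915_gem_object_bind_to_vm(obj, vm, &i915_ggtt_view_normal,
 *					 0, flags);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */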

bool
i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			bool force)
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj->pages == NULL)
		return false;

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 */
	if (obj->stolen || obj->phys_handle)
		return false;

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines. However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated. As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
		obj->cache_dirty = true;
		return false;
	}

	trace_i915_gem_object_clflush(obj);
	drm_clflush_sg(obj->pages);
	obj->cache_dirty = false;

	return true;
}
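
/*
 * Hedged sketch (illustration only): the usual pairing in this file is
 * a clflush followed by a chipset flush, so dirty data is pushed past
 * the last level of CPU caching before the GPU reads it:
 *
 *	if (i915_gem_clflush_object(obj, false))
 *		i915_gem_chipset_flush(to_i915(obj->base.dev));
 */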

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain. Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush. It also doesn't land in render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 */
	wmb();

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	intel_fb_obj_flush(obj, false, ORIGIN_GTT);

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		return;

	if (i915_gem_clflush_object(obj, obj->pin_display))
		i915_gem_chipset_flush(to_i915(obj->base.dev));

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t old_write_domain, old_read_domains;
	struct i915_vma *vma;
	int ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
		obj->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	/* And bump the LRU for this access */
	vma = i915_gem_obj_to_ggtt(obj);
	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
		list_move_tail(&vma->vm_link,
			       &ggtt->base.inactive_list);

	return 0;
}
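
/*
 * Hedged sketch (illustration only): a typical GTT-access sequence as
 * used elsewhere in the driver. Assumes struct_mutex held and the
 * object already bound into the global GTT.
 *
 *	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *	if (ret)
 *		return ret;
 *	... write through the mappable aperture ...
 */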

/**
 * Changes the cache-level of an object across all VMA.
 *
 * After this function returns, the object will be in the new cache-level
 * across all GTT and the contents of the backing storage will be coherent,
 * with respect to the new cache-level. In order to keep the backing storage
 * coherent for all users, we only allow a single cache level to be set
 * globally on the object and prevent it from being changed whilst the
 * hardware is reading from the object. That is if the object is currently
 * on the scanout it will be set to uncached (or equivalent display
 * cache coherency) and all non-MOCS GPU access will also be uncached so
 * that all direct access to the scanout remains coherent.
 */
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct i915_vma *vma, *next;
	bool bound = false;
	int ret = 0;

	if (obj->cache_level == cache_level)
		goto out;

	/* Inspect the list of currently bound VMA and unbind any that would
	 * be invalid given the new cache-level. This is principally to
	 * catch the issue of the CS prefetch crossing page boundaries and
	 * reading an invalid PTE on older architectures.
	 */
	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (vma->pin_count) {
			DRM_DEBUG("cannot change the cache level of pinned objects\n");
			return -EBUSY;
		}

		if (!i915_gem_valid_gtt_space(vma, cache_level)) {
			ret = i915_vma_unbind(vma);
			if (ret)
				return ret;
		} else
			bound = true;
	}

	/* We can reuse the existing drm_mm nodes but need to change the
	 * cache-level on the PTE. We could simply unbind them all and
	 * rebind with the correct cache-level on next use. However since
	 * we already have a valid slot, dma mapping, pages etc, we may as
	 * well rewrite the PTE in the belief that doing so tramples upon
	 * less state and so involves less work.
	 */
	if (bound) {
		/* Before we change the PTE, the GPU must not be accessing it.
		 * If we wait upon the object, we know that all the bound
		 * VMA are no longer active.
		 */
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;

		if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
			/* Access to snoopable pages through the GTT is
			 * incoherent and on some machines causes a hard
			 * lockup. Relinquish the CPU mmapping to force
			 * userspace to refault in the pages and we can
			 * then double check if the GTT mapping is still
			 * valid for that pointer access.
			 */
			i915_gem_release_mmap(obj);

			/* As we no longer need a fence for GTT access,
			 * we can relinquish it now (and so prevent having
			 * to steal a fence from someone else on the next
			 * fence request). Note GPU activity would have
			 * dropped the fence as all snoopable access is
			 * supposed to be linear.
			 */
			ret = i915_gem_object_put_fence(obj);
			if (ret)
				return ret;
		} else {
			/* We either have incoherent backing store and
			 * so no GTT access or the architecture is fully
			 * coherent. In such cases, existing GTT mmaps
			 * ignore the cache bit in the PTE and we can
			 * rewrite it without confusing the GPU or having
			 * to force userspace to fault back in its mmaps.
			 */
		}

		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
			if (ret)
				return ret;
		}
	}

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		vma->node.color = cache_level;
	obj->cache_level = cache_level;

out:
	/* Flush the dirty CPU caches to the backing storage so that the
	 * object is now coherent at its new cache level (with respect
	 * to the access domain).
	 */
	if (obj->cache_dirty &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		if (i915_gem_clflush_object(obj, true))
			i915_gem_chipset_flush(to_i915(obj->base.dev));
	}

	return 0;
}
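
/*
 * Hedged sketch (illustration only): the scanout path later in this
 * file relies on this helper to make a buffer display-coherent before
 * pinning it:
 *
 *	ret = i915_gem_object_set_cache_level(obj,
 *			HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE);
 *
 * which matches the uncached/write-through rule described in the
 * kerneldoc above.
 */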

int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

	default:
		args->caching = I915_CACHING_NONE;
		break;
	}

	drm_gem_object_unreference_unlocked(&obj->base);
	return 0;
}

int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret;

	switch (args->caching) {
	case I915_CACHING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
		/*
		 * Due to a HW issue on BXT A stepping, GPU stores via a
		 * snooped mapping may leave stale data in a corresponding CPU
		 * cacheline, whereas normally such cachelines would get
		 * invalidated.
		 */
		if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
			return -ENODEV;

		level = I915_CACHE_LLC;
		break;
	case I915_CACHING_DISPLAY:
		level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
		break;
	default:
		return -EINVAL;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto rpm_put;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(obj, level);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
rpm_put:
	intel_runtime_pm_put(dev_priv);

	return ret;
}
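
/*
 * Hedged userspace sketch (illustration only, error handling elided):
 * the ioctl pair above is exercised from userspace roughly as:
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */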

/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 */
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view)
{
	u32 old_read_domains, old_write_domain;
	int ret;

	/* Mark the pin_display early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
	obj->pin_display++;

	/* The display engine is not coherent with the LLC cache on gen6. As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is lowest common denominator for all
	 * chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
	ret = i915_gem_object_set_cache_level(obj,
					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
	if (ret)
		goto err_unpin_display;

	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers.
	 */
	ret = i915_gem_object_ggtt_pin(obj, view, alignment,
				       view->type == I915_GGTT_VIEW_NORMAL ?
				       PIN_MAPPABLE : 0);
	if (ret)
		goto err_unpin_display;

	i915_gem_object_flush_cpu_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	obj->base.write_domain = 0;
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;

err_unpin_display:
	obj->pin_display--;
	return ret;
}

void
i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
					 const struct i915_ggtt_view *view)
{
	if (WARN_ON(obj->pin_display == 0))
		return;

	i915_gem_object_ggtt_unpin_view(obj, view);

	obj->pin_display--;
}
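
/*
 * Hedged sketch (illustration only): the modeset code is expected to
 * bracket scanout use with this pair, keeping pin_display balanced:
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, alignment, view);
 *	if (ret)
 *		return ret;
 *	... scan out from the buffer ...
 *	i915_gem_object_unpin_from_display_plane(obj, view);
 */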

/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj, false);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
	struct drm_i915_gem_request *request, *target = NULL;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	/* ABI: return -EIO if already wedged */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return -EIO;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		/*
		 * Note that the request might not have been submitted yet.
		 * In which case emitted_jiffies will be zero.
		 */
		if (!request->emitted_jiffies)
			continue;

		target = request;
	}
	if (target)
		i915_gem_request_reference(target);
	spin_unlock(&file_priv->mm.lock);

	if (target == NULL)
		return 0;

	ret = __i915_wait_request(target, true, NULL, NULL);
	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

	i915_gem_request_unreference(target);

	return ret;
}
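
/*
 * Hedged worked example (illustration only): with the 20ms window
 * documented above, a client that queued requests at t-30ms, t-10ms
 * and t-2ms blocks only on the t-30ms request; the two younger
 * requests stay in flight, which is the CPU/GPU parallelism the
 * comment describes.
 */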

static bool
i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
{
	struct drm_i915_gem_object *obj = vma->obj;

	if (alignment &&
	    vma->node.start & (alignment - 1))
		return true;

	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	bool mappable, fenceable;
	u32 fence_size, fence_alignment;

	fence_size = i915_gem_get_gtt_size(obj->base.dev,
					   obj->base.size,
					   obj->tiling_mode);
	fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
						     obj->base.size,
						     obj->tiling_mode,
						     true);

	fenceable = (vma->node.size == fence_size &&
		     (vma->node.start & (fence_alignment - 1)) == 0);

	mappable = (vma->node.start + fence_size <=
		    to_i915(obj->base.dev)->ggtt.mappable_end);

	obj->map_and_fenceable = mappable && fenceable;
}

static int
i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
		       struct i915_address_space *vm,
		       const struct i915_ggtt_view *ggtt_view,
		       uint32_t alignment,
		       uint64_t flags)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct i915_vma *vma;
	unsigned bound;
	int ret;

	if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
		return -ENODEV;

	if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
		return -EINVAL;

	if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
		return -EINVAL;

	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
		return -EINVAL;

	vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
			  i915_gem_obj_to_vma(obj, vm);

	if (vma) {
		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
			return -EBUSY;

		if (i915_vma_misplaced(vma, alignment, flags)) {
			WARN(vma->pin_count,
			     "bo is already pinned in %s with incorrect alignment:"
			     " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
			     " obj->map_and_fenceable=%d\n",
			     ggtt_view ? "ggtt" : "ppgtt",
			     upper_32_bits(vma->node.start),
			     lower_32_bits(vma->node.start),
			     alignment,
			     !!(flags & PIN_MAPPABLE),
			     obj->map_and_fenceable);
			ret = i915_vma_unbind(vma);
			if (ret)
				return ret;

			vma = NULL;
		}
	}

	bound = vma ? vma->bound : 0;
	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
		vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
						 flags);
		if (IS_ERR(vma))
			return PTR_ERR(vma);
	} else {
		ret = i915_vma_bind(vma, obj->cache_level, flags);
		if (ret)
			return ret;
	}

	if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
	    (bound ^ vma->bound) & GLOBAL_BIND) {
		__i915_vma_set_map_and_fenceable(vma);
		WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
	}

	vma->pin_count++;
	return 0;
}

int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm,
		    uint32_t alignment,
		    uint64_t flags)
{
	return i915_gem_object_do_pin(obj, vm,
				      i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
				      alignment, flags);
}

int
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 uint32_t alignment,
			 uint64_t flags)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	BUG_ON(!view);

	return i915_gem_object_do_pin(obj, &ggtt->base, view,
				      alignment, flags | PIN_GLOBAL);
}
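
/*
 * Hedged sketch (illustration only): pins must be balanced by unpins
 * on the same view, mirroring the pattern used throughout the driver:
 *
 *	ret = i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal,
 *				       0, PIN_MAPPABLE);
 *	if (ret)
 *		return ret;
 *	...
 *	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
 */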

void
i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
				const struct i915_ggtt_view *view)
{
	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);

	WARN_ON(vma->pin_count == 0);
	WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));

	--vma->pin_count;
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Count all active objects as busy, even if they are currently not used
	 * by the gpu. Users of this interface expect objects to eventually
	 * become non-busy without any further actions, therefore emit any
	 * necessary flushes here.
	 */
	ret = i915_gem_object_flush_active(obj);
	if (ret)
		goto unref;

	args->busy = 0;
	if (obj->active) {
		int i;

		for (i = 0; i < I915_NUM_ENGINES; i++) {
			struct drm_i915_gem_request *req;

			req = obj->last_read_req[i];
			if (req)
				args->busy |= 1 << (16 + req->engine->exec_id);
		}
		if (obj->last_write_req)
			args->busy |= obj->last_write_req->engine->exec_id;
	}

unref:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
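
/*
 * Hedged note on the busy encoding above (illustration only): the low
 * 16 bits carry the exec_id of the engine with the outstanding write,
 * and the high 16 bits carry one bit per engine with outstanding
 * reads, so userspace can decode it roughly as:
 *
 *	write_engine = busy & 0xffff;
 *	read_engines = busy >> 16;
 */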

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (i915_gem_obj_is_pinned(obj)) {
		ret = -EINVAL;
		goto out;
	}

	if (obj->pages &&
	    obj->tiling_mode != I915_TILING_NONE &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->madv == I915_MADV_WILLNEED)
			i915_gem_object_unpin_pages(obj);
		if (args->madv == I915_MADV_WILLNEED)
			i915_gem_object_pin_pages(obj);
	}

	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj->madv != __I915_MADV_PURGED;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
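
/*
 * Hedged userspace sketch (illustration only): marking a cached buffer
 * purgeable and checking whether it survived, via the uapi mirrored by
 * the ioctl above:
 *
 *	struct drm_i915_gem_madvise arg = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 *	if (!arg.retained)
 *		... contents were discarded, reupload ...
 */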

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	int i;

	INIT_LIST_HEAD(&obj->global_list);
	for (i = 0; i < I915_NUM_ENGINES; i++)
		INIT_LIST_HEAD(&obj->engine_list[i]);
	INIT_LIST_HEAD(&obj->obj_exec_link);
	INIT_LIST_HEAD(&obj->vma_list);
	INIT_LIST_HEAD(&obj->batch_pool_link);

	obj->ops = ops;

	obj->fence_reg = I915_FENCE_REG_NONE;
	obj->madv = I915_MADV_WILLNEED;

	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};

struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
						   size_t size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	gfp_t mask;
	int ret;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &obj->base, size);
	if (ret)
		goto fail;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = file_inode(obj->base.filp)->i_mapping;
	mapping_set_gfp_mask(mapping, mask);

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	trace_i915_gem_object_create(obj);

	return obj;

fail:
	i915_gem_object_free(obj);

	return ERR_PTR(ret);
}
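
/*
 * Hedged sketch (illustration only): callers must use the ERR_PTR
 * convention rather than checking for NULL:
 *
 *	obj = i915_gem_object_create(dev, PAGE_SIZE);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */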

static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */

	if (obj->madv != I915_MADV_WILLNEED)
		return false;

	if (obj->base.filp == NULL)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return atomic_long_read(&obj->base.filp->f_count) == 1;
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_vma *vma, *next;

	intel_runtime_pm_get(dev_priv);

	trace_i915_gem_object_destroy(obj);

	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
		int ret;

		vma->pin_count = 0;
		ret = i915_vma_unbind(vma);
		if (WARN_ON(ret == -ERESTARTSYS)) {
			bool was_interruptible;

			was_interruptible = dev_priv->mm.interruptible;
			dev_priv->mm.interruptible = false;

			WARN_ON(i915_vma_unbind(vma));

			dev_priv->mm.interruptible = was_interruptible;
		}
	}

	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
	 * before progressing.
	 */
	if (obj->stolen)
		i915_gem_object_unpin_pages(obj);

	WARN_ON(obj->frontbuffer_bits);

	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
	    obj->tiling_mode != I915_TILING_NONE)
		i915_gem_object_unpin_pages(obj);

	if (WARN_ON(obj->pages_pin_count))
		obj->pages_pin_count = 0;
	if (discard_backing_storage(obj))
		obj->madv = I915_MADV_DONTNEED;
	i915_gem_object_put_pages(obj);
	i915_gem_object_free_mmap_offset(obj);

	BUG_ON(obj->pages);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	if (obj->ops->release)
		obj->ops->release(obj);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	kfree(obj->bit_17);
	i915_gem_object_free(obj);

	intel_runtime_pm_put(dev_priv);
}

struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
{
	struct i915_vma *vma;
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
		    vma->vm == vm)
			return vma;
	}
	return NULL;
}

struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
					   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!view);

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
			return vma;
	return NULL;
}

void i915_gem_vma_destroy(struct i915_vma *vma)
{
	WARN_ON(vma->node.allocated);

	/* Keep the vma as a placeholder in the execbuffer reservation lists */
	if (!list_empty(&vma->exec_list))
		return;

	if (!vma->is_ggtt)
		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

	list_del(&vma->obj_link);

	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}

static void
i915_gem_stop_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		dev_priv->gt.stop_engine(engine);
}

int
i915_gem_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gpu_idle(dev);
	if (ret)
		goto err;

	i915_gem_retire_requests(dev_priv);

	i915_gem_stop_engines(dev);
	i915_gem_context_lost(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	flush_delayed_work(&dev_priv->mm.idle_work);

	/* Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
	WARN_ON(dev_priv->mm.busy);

	return 0;

err:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

void i915_gem_init_swizzling(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN8(dev))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		BUG();
}

static void init_unused_ring(struct drm_device *dev, u32 base)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

static void init_unused_rings(struct drm_device *dev)
{
	if (IS_I830(dev)) {
		init_unused_ring(dev, PRB1_BASE);
		init_unused_ring(dev, SRB0_BASE);
		init_unused_ring(dev, SRB1_BASE);
		init_unused_ring(dev, SRB2_BASE);
		init_unused_ring(dev, SRB3_BASE);
	} else if (IS_GEN2(dev)) {
		init_unused_ring(dev, SRB0_BASE);
		init_unused_ring(dev, SRB1_BASE);
	} else if (IS_GEN3(dev)) {
		init_unused_ring(dev, PRB1_BASE);
		init_unused_ring(dev, PRB2_BASE);
	}
}

int i915_gem_init_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = intel_init_vebox_ring_buffer(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = intel_init_bsd2_ring_buffer(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	return 0;

cleanup_vebox_ring:
	intel_cleanup_engine(&dev_priv->engine[VECS]);
cleanup_blt_ring:
	intel_cleanup_engine(&dev_priv->engine[BCS]);
cleanup_bsd_ring:
	intel_cleanup_engine(&dev_priv->engine[VCS]);
cleanup_render_ring:
	intel_cleanup_engine(&dev_priv->engine[RCS]);

	return ret;
}

int
i915_gem_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int ret;

	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
		return -EIO;

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	if (HAS_PCH_NOP(dev)) {
		if (IS_IVYBRIDGE(dev)) {
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
		} else if (INTEL_INFO(dev)->gen >= 7) {
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
	}

	i915_gem_init_swizzling(dev);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(dev);

	BUG_ON(!dev_priv->kernel_context);

	ret = i915_ppgtt_init_hw(dev);
	if (ret) {
		DRM_ERROR("PPGTT enable HW failed %d\n", ret);
		goto out;
	}

	/* Need to do basic initialisation of all rings first: */
	for_each_engine(engine, dev_priv) {
		ret = engine->init_hw(engine);
		if (ret)
			goto out;
	}

	intel_mocs_init_l3cc_table(dev);

	/* We can't enable contexts until all firmware is loaded */
	if (HAS_GUC_UCODE(dev)) {
		ret = intel_guc_ucode_load(dev);
		if (ret) {
			DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
			ret = -EIO;
			goto out;
		}
	}

	/*
	 * Increment the next seqno by 0x100 so we have a visible break
	 * on re-initialisation
	 */
	ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	return ret;
}

int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (!i915.enable_execlists) {
		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
		dev_priv->gt.init_engines = i915_gem_init_engines;
		dev_priv->gt.cleanup_engine = intel_cleanup_engine;
		dev_priv->gt.stop_engine = intel_stop_engine;
	} else {
		dev_priv->gt.execbuf_submit = intel_execlists_submission;
		dev_priv->gt.init_engines = intel_logical_rings_init;
		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
		dev_priv->gt.stop_engine = intel_logical_ring_stop;
	}

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	ret = i915_gem_init_userptr(dev);
	if (ret)
		goto out_unlock;

	i915_gem_init_ggtt(dev);

	ret = i915_gem_context_init(dev);
	if (ret)
		goto out_unlock;

	ret = dev_priv->gt.init_engines(dev);
	if (ret)
		goto out_unlock;

	ret = i915_gem_init_hw(dev);
	if (ret == -EIO) {
		/* Allow ring initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		ret = 0;
	}

out_unlock:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

void
i915_gem_cleanup_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		dev_priv->gt.cleanup_engine(engine);
}

static void
init_engine_lists(struct intel_engine_cs *engine)
{
	INIT_LIST_HEAD(&engine->active_list);
	INIT_LIST_HEAD(&engine->request_list);
}

void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
		 IS_I945GM(dev_priv) || IS_G33(dev_priv))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	if (intel_vgpu_active(dev_priv))
		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
	i915_gem_restore_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
}

void
i915_gem_load_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	dev_priv->objects =
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->vmas =
		kmem_cache_create("i915_gem_vma",
				  sizeof(struct i915_vma), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->requests =
		kmem_cache_create("i915_gem_request",
				  sizeof(struct drm_i915_gem_request), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	INIT_LIST_HEAD(&dev_priv->vm_list);
	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_ENGINES; i++)
		init_engine_lists(&dev_priv->engine[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	/*
	 * Set initial sequence number for requests.
	 * Using this number allows the wraparound to happen early,
	 * catching any obvious problems.
	 */
	dev_priv->next_seqno = ((u32)~0 - 0x1100);
	dev_priv->last_seqno = ((u32)~0 - 0x1101);

	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	mutex_init(&dev_priv->fb_tracking.lock);
}
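/*
 * A minimal sketch of how the slab caches created above are intended to be
 * used: request/vma/object allocations come from their dedicated caches and
 * must be returned to the same cache. The helpers are hypothetical; the real
 * allocation sites are i915_gem_request_alloc() and i915_gem_object_create().
 */
static struct drm_i915_gem_request *
example_request_alloc(struct drm_i915_private *dev_priv)
{
	return kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
}

static void example_request_free(struct drm_i915_private *dev_priv,
				 struct drm_i915_gem_request *req)
{
	kmem_cache_free(dev_priv->requests, req);
}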

void i915_gem_load_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);

	if (!list_empty(&file_priv->rps.link)) {
		spin_lock(&to_i915(dev)->rps.client_lock);
		list_del(&file_priv->rps.link);
		spin_unlock(&to_i915(dev)->rps.client_lock);
	}
}
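/*
 * Hedged sketch of the other side of this handshake: when a request retires
 * while its file is still open, it unlinks itself under the same
 * file_priv->mm.lock, so the loop above and retirement never race on
 * client_list. Condensed, hypothetical form of what
 * i915_gem_request_remove_from_client() relies on:
 */
static void example_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)	/* already unlinked by i915_gem_release() */
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}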

int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = dev->dev_private;
	file_priv->file = file;
	INIT_LIST_HEAD(&file_priv->rps.link);

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_ring = -1;

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing
 * them from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	if (old) {
		WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
		old->frontbuffer_bits &= ~frontbuffer_bits;
	}

	if (new) {
		WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
		WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
		new->frontbuffer_bits |= frontbuffer_bits;
	}
}
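/*
 * Minimal usage sketch (hypothetical caller, not from this file): a page
 * flip moving a pipe's primary plane from old_obj to new_obj hands over that
 * plane's frontbuffer bit. The real callers are the frontbuffer-tracking
 * hooks in intel_frontbuffer.c.
 */
static void example_flip_track_fb(struct drm_i915_gem_object *old_obj,
				  struct drm_i915_gem_object *new_obj,
				  enum pipe pipe)
{
	struct drm_device *dev = new_obj->base.dev;

	mutex_lock(&dev->struct_mutex);	/* tracking asserts struct_mutex */
	i915_gem_track_fb(old_obj, new_obj, INTEL_FRONTBUFFER_PRIMARY(pipe));
	mutex_unlock(&dev->struct_mutex);
}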

/* All the new VM stuff */
u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (vma->is_ggtt &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm)
			return vma->node.start;
	}

	WARN(1, "%s vma for this object not found.\n",
	     i915_is_ggtt(vm) ? "global" : "ppgtt");
	return -1;
}
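/*
 * Hedged example of a typical lookup through the helper above: asking for an
 * object's address in the global GTT by passing the GGTT address space. The
 * wrapper below is illustrative only; i915_drv.h provides the real
 * i915_gem_obj_ggtt_offset() convenience.
 */
static u64 example_ggtt_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	return i915_gem_obj_offset(obj, &dev_priv->ggtt.base);
}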

u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
			return vma->node.start;

	WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
	return -1;
}

bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (vma->is_ggtt &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;
	}

	return false;
}

bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (vma->is_ggtt &&
		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
		    drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	GEM_BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (vma->is_ggtt &&
		    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
			return vma->node.size;
	}

	return 0;
}

bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (vma->pin_count > 0)
			return true;

	return false;
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
{
	struct page *page;

	/* Only default objects have per-page dirty tracking */
	if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
		return NULL;

	page = i915_gem_object_get_page(obj, n);
	set_page_dirty(page);
	return page;
}
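/*
 * Illustrative pairing (hypothetical helper): writers map the page returned
 * above and rely on set_page_dirty() having marked it so the shmem backing
 * store is written back, much as the command parser does when patching
 * batches.
 */
static void example_poke_byte(struct drm_i915_gem_object *obj,
			      int page_index, int offset, u8 val)
{
	struct page *page = i915_gem_object_get_dirty_page(obj, page_index);
	u8 *vaddr;

	if (!page)	/* object without struct-page backing */
		return;

	vaddr = kmap(page);
	vaddr[offset] = val;
	kunmap(page);
}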

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
				 const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct sg_table *sg;
	size_t bytes;
	int ret;

	obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		goto fail;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto fail;

	i915_gem_object_pin_pages(obj);
	sg = obj->pages;
	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
	obj->dirty = 1;		/* Backing store is now out of date */
	i915_gem_object_unpin_pages(obj);

	if (WARN_ON(bytes != size)) {
		DRM_ERROR("Incomplete copy, wrote %zu of %zu\n", bytes, size);
		ret = -EFAULT;
		goto fail;
	}

	return obj;

fail:
	drm_gem_object_unreference(&obj->base);
	return ERR_PTR(ret);
}
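/*
 * Hedged usage sketch: this helper exists for one-shot uploads such as
 * firmware images (the GuC loader is a real consumer). Names below are
 * hypothetical, struct firmware comes from <linux/firmware.h>, and error
 * handling follows the ERR_PTR convention used above.
 */
static struct drm_i915_gem_object *
example_upload_firmware(struct drm_device *dev, const struct firmware *fw)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
	if (IS_ERR(obj))
		return obj;	/* caller checks with IS_ERR()/PTR_ERR() */

	/* obj now owns a copy of fw->data in its shmem backing store */
	return obj;
}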