drm/i915: Segregate memory domains in the GTT using coloring
[deliverable/linux.git] / drivers / gpu / drm / i915 / i915_gem.c
CommitLineData
673a394b
EA
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30#include "i915_drm.h"
31#include "i915_drv.h"
1c5d22f7 32#include "i915_trace.h"
652c393a 33#include "intel_drv.h"
5949eac4 34#include <linux/shmem_fs.h>
5a0e3ad6 35#include <linux/slab.h>
673a394b 36#include <linux/swap.h>
79e53945 37#include <linux/pci.h>
1286ff73 38#include <linux/dma-buf.h>
673a394b 39
05394f39
CW
40static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
41static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
88241785
CW
42static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
43 unsigned alignment,
44 bool map_and_fenceable);
05394f39
CW
45static int i915_gem_phys_pwrite(struct drm_device *dev,
46 struct drm_i915_gem_object *obj,
71acb5eb 47 struct drm_i915_gem_pwrite *args,
05394f39 48 struct drm_file *file);
673a394b 49
61050808
CW
50static void i915_gem_write_fence(struct drm_device *dev, int reg,
51 struct drm_i915_gem_object *obj);
52static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
53 struct drm_i915_fence_reg *fence,
54 bool enable);
55
17250b71 56static int i915_gem_inactive_shrink(struct shrinker *shrinker,
1495f230 57 struct shrink_control *sc);
8c59967c 58static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
31169714 59
61050808
CW
60static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
61{
62 if (obj->tiling_mode)
63 i915_gem_release_mmap(obj);
64
65 /* As we do not have an associated fence register, we will force
66 * a tiling change if we ever need to acquire one.
67 */
5d82e3e6 68 obj->fence_dirty = false;
61050808
CW
69 obj->fence_reg = I915_FENCE_REG_NONE;
70}
71
73aa808f
CW
72/* some bookkeeping */
73static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
74 size_t size)
75{
76 dev_priv->mm.object_count++;
77 dev_priv->mm.object_memory += size;
78}
79
80static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
81 size_t size)
82{
83 dev_priv->mm.object_count--;
84 dev_priv->mm.object_memory -= size;
85}
86
21dd3734
CW
87static int
88i915_gem_wait_for_error(struct drm_device *dev)
30dbf0c0
CW
89{
90 struct drm_i915_private *dev_priv = dev->dev_private;
91 struct completion *x = &dev_priv->error_completion;
92 unsigned long flags;
93 int ret;
94
95 if (!atomic_read(&dev_priv->mm.wedged))
96 return 0;
97
0a6759c6
DV
98 /*
99 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
100 * userspace. If it takes that long something really bad is going on and
101 * we should simply try to bail out and fail as gracefully as possible.
102 */
103 ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
104 if (ret == 0) {
105 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
106 return -EIO;
107 } else if (ret < 0) {
30dbf0c0 108 return ret;
0a6759c6 109 }
30dbf0c0 110
21dd3734
CW
111 if (atomic_read(&dev_priv->mm.wedged)) {
112 /* GPU is hung, bump the completion count to account for
113 * the token we just consumed so that we never hit zero and
114 * end up waiting upon a subsequent completion event that
115 * will never happen.
116 */
117 spin_lock_irqsave(&x->wait.lock, flags);
118 x->done++;
119 spin_unlock_irqrestore(&x->wait.lock, flags);
120 }
121 return 0;
30dbf0c0
CW
122}
123
54cf91dc 124int i915_mutex_lock_interruptible(struct drm_device *dev)
76c1dec1 125{
76c1dec1
CW
126 int ret;
127
21dd3734 128 ret = i915_gem_wait_for_error(dev);
76c1dec1
CW
129 if (ret)
130 return ret;
131
132 ret = mutex_lock_interruptible(&dev->struct_mutex);
133 if (ret)
134 return ret;
135
23bc5982 136 WARN_ON(i915_verify_lists(dev));
76c1dec1
CW
137 return 0;
138}
30dbf0c0 139
7d1c4804 140static inline bool
05394f39 141i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
7d1c4804 142{
1b50247a 143 return !obj->active;
7d1c4804
CW
144}
145
79e53945
JB
146int
147i915_gem_init_ioctl(struct drm_device *dev, void *data,
05394f39 148 struct drm_file *file)
79e53945
JB
149{
150 struct drm_i915_gem_init *args = data;
2021746e 151
7bb6fb8d
DV
152 if (drm_core_check_feature(dev, DRIVER_MODESET))
153 return -ENODEV;
154
2021746e
CW
155 if (args->gtt_start >= args->gtt_end ||
156 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
157 return -EINVAL;
79e53945 158
f534bc0b
DV
159 /* GEM with user mode setting was never supported on ilk and later. */
160 if (INTEL_INFO(dev)->gen >= 5)
161 return -ENODEV;
162
79e53945 163 mutex_lock(&dev->struct_mutex);
644ec02b
DV
164 i915_gem_init_global_gtt(dev, args->gtt_start,
165 args->gtt_end, args->gtt_end);
673a394b
EA
166 mutex_unlock(&dev->struct_mutex);
167
2021746e 168 return 0;
673a394b
EA
169}
170
5a125c3c
EA
171int
172i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
05394f39 173 struct drm_file *file)
5a125c3c 174{
73aa808f 175 struct drm_i915_private *dev_priv = dev->dev_private;
5a125c3c 176 struct drm_i915_gem_get_aperture *args = data;
6299f992
CW
177 struct drm_i915_gem_object *obj;
178 size_t pinned;
5a125c3c 179
6299f992 180 pinned = 0;
73aa808f 181 mutex_lock(&dev->struct_mutex);
1b50247a
CW
182 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
183 if (obj->pin_count)
184 pinned += obj->gtt_space->size;
73aa808f 185 mutex_unlock(&dev->struct_mutex);
5a125c3c 186
6299f992 187 args->aper_size = dev_priv->mm.gtt_total;
0206e353 188 args->aper_available_size = args->aper_size - pinned;
6299f992 189
5a125c3c
EA
190 return 0;
191}
192
ff72145b
DA
193static int
194i915_gem_create(struct drm_file *file,
195 struct drm_device *dev,
196 uint64_t size,
197 uint32_t *handle_p)
673a394b 198{
05394f39 199 struct drm_i915_gem_object *obj;
a1a2d1d3
PP
200 int ret;
201 u32 handle;
673a394b 202
ff72145b 203 size = roundup(size, PAGE_SIZE);
8ffc0246
CW
204 if (size == 0)
205 return -EINVAL;
673a394b
EA
206
207 /* Allocate the new object */
ff72145b 208 obj = i915_gem_alloc_object(dev, size);
673a394b
EA
209 if (obj == NULL)
210 return -ENOMEM;
211
05394f39 212 ret = drm_gem_handle_create(file, &obj->base, &handle);
1dfd9754 213 if (ret) {
05394f39
CW
214 drm_gem_object_release(&obj->base);
215 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
202f2fef 216 kfree(obj);
673a394b 217 return ret;
1dfd9754 218 }
673a394b 219
202f2fef 220 /* drop reference from allocate - handle holds it now */
05394f39 221 drm_gem_object_unreference(&obj->base);
202f2fef
CW
222 trace_i915_gem_object_create(obj);
223
ff72145b 224 *handle_p = handle;
673a394b
EA
225 return 0;
226}
227
ff72145b
DA
228int
229i915_gem_dumb_create(struct drm_file *file,
230 struct drm_device *dev,
231 struct drm_mode_create_dumb *args)
232{
233 /* have to work out size/pitch and return them */
ed0291fd 234 args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
ff72145b
DA
235 args->size = args->pitch * args->height;
236 return i915_gem_create(file, dev,
237 args->size, &args->handle);
238}
239
240int i915_gem_dumb_destroy(struct drm_file *file,
241 struct drm_device *dev,
242 uint32_t handle)
243{
244 return drm_gem_handle_delete(file, handle);
245}
246
247/**
248 * Creates a new mm object and returns a handle to it.
249 */
250int
251i915_gem_create_ioctl(struct drm_device *dev, void *data,
252 struct drm_file *file)
253{
254 struct drm_i915_gem_create *args = data;
63ed2cb2 255
ff72145b
DA
256 return i915_gem_create(file, dev,
257 args->size, &args->handle);
258}
259
05394f39 260static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
280b713b 261{
05394f39 262 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
280b713b
EA
263
264 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
05394f39 265 obj->tiling_mode != I915_TILING_NONE;
280b713b
EA
266}
267
8461d226
DV
268static inline int
269__copy_to_user_swizzled(char __user *cpu_vaddr,
270 const char *gpu_vaddr, int gpu_offset,
271 int length)
272{
273 int ret, cpu_offset = 0;
274
275 while (length > 0) {
276 int cacheline_end = ALIGN(gpu_offset + 1, 64);
277 int this_length = min(cacheline_end - gpu_offset, length);
278 int swizzled_gpu_offset = gpu_offset ^ 64;
279
280 ret = __copy_to_user(cpu_vaddr + cpu_offset,
281 gpu_vaddr + swizzled_gpu_offset,
282 this_length);
283 if (ret)
284 return ret + length;
285
286 cpu_offset += this_length;
287 gpu_offset += this_length;
288 length -= this_length;
289 }
290
291 return 0;
292}
293
8c59967c 294static inline int
4f0c7cfb
BW
295__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
296 const char __user *cpu_vaddr,
8c59967c
DV
297 int length)
298{
299 int ret, cpu_offset = 0;
300
301 while (length > 0) {
302 int cacheline_end = ALIGN(gpu_offset + 1, 64);
303 int this_length = min(cacheline_end - gpu_offset, length);
304 int swizzled_gpu_offset = gpu_offset ^ 64;
305
306 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
307 cpu_vaddr + cpu_offset,
308 this_length);
309 if (ret)
310 return ret + length;
311
312 cpu_offset += this_length;
313 gpu_offset += this_length;
314 length -= this_length;
315 }
316
317 return 0;
318}
319
d174bd64
DV
320/* Per-page copy function for the shmem pread fastpath.
321 * Flushes invalid cachelines before reading the target if
322 * needs_clflush is set. */
eb01459f 323static int
d174bd64
DV
324shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
325 char __user *user_data,
326 bool page_do_bit17_swizzling, bool needs_clflush)
327{
328 char *vaddr;
329 int ret;
330
e7e58eb5 331 if (unlikely(page_do_bit17_swizzling))
d174bd64
DV
332 return -EINVAL;
333
334 vaddr = kmap_atomic(page);
335 if (needs_clflush)
336 drm_clflush_virt_range(vaddr + shmem_page_offset,
337 page_length);
338 ret = __copy_to_user_inatomic(user_data,
339 vaddr + shmem_page_offset,
340 page_length);
341 kunmap_atomic(vaddr);
342
343 return ret;
344}
345
23c18c71
DV
346static void
347shmem_clflush_swizzled_range(char *addr, unsigned long length,
348 bool swizzled)
349{
e7e58eb5 350 if (unlikely(swizzled)) {
23c18c71
DV
351 unsigned long start = (unsigned long) addr;
352 unsigned long end = (unsigned long) addr + length;
353
354 /* For swizzling simply ensure that we always flush both
355 * channels. Lame, but simple and it works. Swizzled
356 * pwrite/pread is far from a hotpath - current userspace
357 * doesn't use it at all. */
358 start = round_down(start, 128);
359 end = round_up(end, 128);
360
361 drm_clflush_virt_range((void *)start, end - start);
362 } else {
363 drm_clflush_virt_range(addr, length);
364 }
365
366}
367
d174bd64
DV
368/* Only difference to the fast-path function is that this can handle bit17
369 * and uses non-atomic copy and kmap functions. */
370static int
371shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
372 char __user *user_data,
373 bool page_do_bit17_swizzling, bool needs_clflush)
374{
375 char *vaddr;
376 int ret;
377
378 vaddr = kmap(page);
379 if (needs_clflush)
23c18c71
DV
380 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
381 page_length,
382 page_do_bit17_swizzling);
d174bd64
DV
383
384 if (page_do_bit17_swizzling)
385 ret = __copy_to_user_swizzled(user_data,
386 vaddr, shmem_page_offset,
387 page_length);
388 else
389 ret = __copy_to_user(user_data,
390 vaddr + shmem_page_offset,
391 page_length);
392 kunmap(page);
393
394 return ret;
395}
396
eb01459f 397static int
dbf7bff0
DV
398i915_gem_shmem_pread(struct drm_device *dev,
399 struct drm_i915_gem_object *obj,
400 struct drm_i915_gem_pread *args,
401 struct drm_file *file)
eb01459f 402{
05394f39 403 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
8461d226 404 char __user *user_data;
eb01459f 405 ssize_t remain;
8461d226 406 loff_t offset;
eb2c0c81 407 int shmem_page_offset, page_length, ret = 0;
8461d226 408 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
dbf7bff0 409 int hit_slowpath = 0;
96d79b52 410 int prefaulted = 0;
8489731c 411 int needs_clflush = 0;
692a576b 412 int release_page;
eb01459f 413
8461d226 414 user_data = (char __user *) (uintptr_t) args->data_ptr;
eb01459f
EA
415 remain = args->size;
416
8461d226 417 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
eb01459f 418
8489731c
DV
419 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
420 /* If we're not in the cpu read domain, set ourself into the gtt
421 * read domain and manually flush cachelines (if required). This
422 * optimizes for the case when the gpu will dirty the data
423 * anyway again before the next pread happens. */
424 if (obj->cache_level == I915_CACHE_NONE)
425 needs_clflush = 1;
426 ret = i915_gem_object_set_to_gtt_domain(obj, false);
427 if (ret)
428 return ret;
429 }
eb01459f 430
8461d226 431 offset = args->offset;
eb01459f
EA
432
433 while (remain > 0) {
e5281ccd
CW
434 struct page *page;
435
eb01459f
EA
436 /* Operation in this page
437 *
eb01459f 438 * shmem_page_offset = offset within page in shmem file
eb01459f
EA
439 * page_length = bytes to copy for this page
440 */
c8cbbb8b 441 shmem_page_offset = offset_in_page(offset);
eb01459f
EA
442 page_length = remain;
443 if ((shmem_page_offset + page_length) > PAGE_SIZE)
444 page_length = PAGE_SIZE - shmem_page_offset;
eb01459f 445
692a576b
DV
446 if (obj->pages) {
447 page = obj->pages[offset >> PAGE_SHIFT];
448 release_page = 0;
449 } else {
450 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
451 if (IS_ERR(page)) {
452 ret = PTR_ERR(page);
453 goto out;
454 }
455 release_page = 1;
b65552f0 456 }
e5281ccd 457
8461d226
DV
458 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
459 (page_to_phys(page) & (1 << 17)) != 0;
460
d174bd64
DV
461 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
462 user_data, page_do_bit17_swizzling,
463 needs_clflush);
464 if (ret == 0)
465 goto next_page;
dbf7bff0
DV
466
467 hit_slowpath = 1;
692a576b 468 page_cache_get(page);
dbf7bff0
DV
469 mutex_unlock(&dev->struct_mutex);
470
96d79b52 471 if (!prefaulted) {
f56f821f 472 ret = fault_in_multipages_writeable(user_data, remain);
96d79b52
DV
473 /* Userspace is tricking us, but we've already clobbered
474 * its pages with the prefault and promised to write the
475 * data up to the first fault. Hence ignore any errors
476 * and just continue. */
477 (void)ret;
478 prefaulted = 1;
479 }
eb01459f 480
d174bd64
DV
481 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
482 user_data, page_do_bit17_swizzling,
483 needs_clflush);
eb01459f 484
dbf7bff0 485 mutex_lock(&dev->struct_mutex);
e5281ccd 486 page_cache_release(page);
dbf7bff0 487next_page:
e5281ccd 488 mark_page_accessed(page);
692a576b
DV
489 if (release_page)
490 page_cache_release(page);
e5281ccd 491
8461d226
DV
492 if (ret) {
493 ret = -EFAULT;
494 goto out;
495 }
496
eb01459f 497 remain -= page_length;
8461d226 498 user_data += page_length;
eb01459f
EA
499 offset += page_length;
500 }
501
4f27b75d 502out:
dbf7bff0
DV
503 if (hit_slowpath) {
504 /* Fixup: Kill any reinstated backing storage pages */
505 if (obj->madv == __I915_MADV_PURGED)
506 i915_gem_object_truncate(obj);
507 }
eb01459f
EA
508
509 return ret;
510}
511
673a394b
EA
512/**
513 * Reads data from the object referenced by handle.
514 *
515 * On error, the contents of *data are undefined.
516 */
517int
518i915_gem_pread_ioctl(struct drm_device *dev, void *data,
05394f39 519 struct drm_file *file)
673a394b
EA
520{
521 struct drm_i915_gem_pread *args = data;
05394f39 522 struct drm_i915_gem_object *obj;
35b62a89 523 int ret = 0;
673a394b 524
51311d0a
CW
525 if (args->size == 0)
526 return 0;
527
528 if (!access_ok(VERIFY_WRITE,
529 (char __user *)(uintptr_t)args->data_ptr,
530 args->size))
531 return -EFAULT;
532
4f27b75d 533 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 534 if (ret)
4f27b75d 535 return ret;
673a394b 536
05394f39 537 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 538 if (&obj->base == NULL) {
1d7cfea1
CW
539 ret = -ENOENT;
540 goto unlock;
4f27b75d 541 }
673a394b 542
7dcd2499 543 /* Bounds check source. */
05394f39
CW
544 if (args->offset > obj->base.size ||
545 args->size > obj->base.size - args->offset) {
ce9d419d 546 ret = -EINVAL;
35b62a89 547 goto out;
ce9d419d
CW
548 }
549
1286ff73
DV
550 /* prime objects have no backing filp to GEM pread/pwrite
551 * pages from.
552 */
553 if (!obj->base.filp) {
554 ret = -EINVAL;
555 goto out;
556 }
557
db53a302
CW
558 trace_i915_gem_object_pread(obj, args->offset, args->size);
559
dbf7bff0 560 ret = i915_gem_shmem_pread(dev, obj, args, file);
673a394b 561
35b62a89 562out:
05394f39 563 drm_gem_object_unreference(&obj->base);
1d7cfea1 564unlock:
4f27b75d 565 mutex_unlock(&dev->struct_mutex);
eb01459f 566 return ret;
673a394b
EA
567}
568
0839ccb8
KP
569/* This is the fast write path which cannot handle
570 * page faults in the source data
9b7530cc 571 */
0839ccb8
KP
572
573static inline int
574fast_user_write(struct io_mapping *mapping,
575 loff_t page_base, int page_offset,
576 char __user *user_data,
577 int length)
9b7530cc 578{
4f0c7cfb
BW
579 void __iomem *vaddr_atomic;
580 void *vaddr;
0839ccb8 581 unsigned long unwritten;
9b7530cc 582
3e4d3af5 583 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
4f0c7cfb
BW
584 /* We can use the cpu mem copy function because this is X86. */
585 vaddr = (void __force*)vaddr_atomic + page_offset;
586 unwritten = __copy_from_user_inatomic_nocache(vaddr,
0839ccb8 587 user_data, length);
3e4d3af5 588 io_mapping_unmap_atomic(vaddr_atomic);
fbd5a26d 589 return unwritten;
0839ccb8
KP
590}
591
3de09aa3
EA
592/**
593 * This is the fast pwrite path, where we copy the data directly from the
594 * user into the GTT, uncached.
595 */
673a394b 596static int
05394f39
CW
597i915_gem_gtt_pwrite_fast(struct drm_device *dev,
598 struct drm_i915_gem_object *obj,
3de09aa3 599 struct drm_i915_gem_pwrite *args,
05394f39 600 struct drm_file *file)
673a394b 601{
0839ccb8 602 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b 603 ssize_t remain;
0839ccb8 604 loff_t offset, page_base;
673a394b 605 char __user *user_data;
935aaa69
DV
606 int page_offset, page_length, ret;
607
608 ret = i915_gem_object_pin(obj, 0, true);
609 if (ret)
610 goto out;
611
612 ret = i915_gem_object_set_to_gtt_domain(obj, true);
613 if (ret)
614 goto out_unpin;
615
616 ret = i915_gem_object_put_fence(obj);
617 if (ret)
618 goto out_unpin;
673a394b
EA
619
620 user_data = (char __user *) (uintptr_t) args->data_ptr;
621 remain = args->size;
673a394b 622
05394f39 623 offset = obj->gtt_offset + args->offset;
673a394b
EA
624
625 while (remain > 0) {
626 /* Operation in this page
627 *
0839ccb8
KP
628 * page_base = page offset within aperture
629 * page_offset = offset within page
630 * page_length = bytes to copy for this page
673a394b 631 */
c8cbbb8b
CW
632 page_base = offset & PAGE_MASK;
633 page_offset = offset_in_page(offset);
0839ccb8
KP
634 page_length = remain;
635 if ((page_offset + remain) > PAGE_SIZE)
636 page_length = PAGE_SIZE - page_offset;
637
0839ccb8 638 /* If we get a fault while copying data, then (presumably) our
3de09aa3
EA
639 * source page isn't available. Return the error and we'll
640 * retry in the slow path.
0839ccb8 641 */
fbd5a26d 642 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
935aaa69
DV
643 page_offset, user_data, page_length)) {
644 ret = -EFAULT;
645 goto out_unpin;
646 }
673a394b 647
0839ccb8
KP
648 remain -= page_length;
649 user_data += page_length;
650 offset += page_length;
673a394b 651 }
673a394b 652
935aaa69
DV
653out_unpin:
654 i915_gem_object_unpin(obj);
655out:
3de09aa3 656 return ret;
673a394b
EA
657}
658
d174bd64
DV
659/* Per-page copy function for the shmem pwrite fastpath.
660 * Flushes invalid cachelines before writing to the target if
661 * needs_clflush_before is set and flushes out any written cachelines after
662 * writing if needs_clflush is set. */
3043c60c 663static int
d174bd64
DV
664shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
665 char __user *user_data,
666 bool page_do_bit17_swizzling,
667 bool needs_clflush_before,
668 bool needs_clflush_after)
673a394b 669{
d174bd64 670 char *vaddr;
673a394b 671 int ret;
3de09aa3 672
e7e58eb5 673 if (unlikely(page_do_bit17_swizzling))
d174bd64 674 return -EINVAL;
3de09aa3 675
d174bd64
DV
676 vaddr = kmap_atomic(page);
677 if (needs_clflush_before)
678 drm_clflush_virt_range(vaddr + shmem_page_offset,
679 page_length);
680 ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
681 user_data,
682 page_length);
683 if (needs_clflush_after)
684 drm_clflush_virt_range(vaddr + shmem_page_offset,
685 page_length);
686 kunmap_atomic(vaddr);
3de09aa3
EA
687
688 return ret;
689}
690
d174bd64
DV
691/* Only difference to the fast-path function is that this can handle bit17
692 * and uses non-atomic copy and kmap functions. */
3043c60c 693static int
d174bd64
DV
694shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
695 char __user *user_data,
696 bool page_do_bit17_swizzling,
697 bool needs_clflush_before,
698 bool needs_clflush_after)
673a394b 699{
d174bd64
DV
700 char *vaddr;
701 int ret;
e5281ccd 702
d174bd64 703 vaddr = kmap(page);
e7e58eb5 704 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
23c18c71
DV
705 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
706 page_length,
707 page_do_bit17_swizzling);
d174bd64
DV
708 if (page_do_bit17_swizzling)
709 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
e5281ccd
CW
710 user_data,
711 page_length);
d174bd64
DV
712 else
713 ret = __copy_from_user(vaddr + shmem_page_offset,
714 user_data,
715 page_length);
716 if (needs_clflush_after)
23c18c71
DV
717 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
718 page_length,
719 page_do_bit17_swizzling);
d174bd64 720 kunmap(page);
40123c1f 721
d174bd64 722 return ret;
40123c1f
EA
723}
724
40123c1f 725static int
e244a443
DV
726i915_gem_shmem_pwrite(struct drm_device *dev,
727 struct drm_i915_gem_object *obj,
728 struct drm_i915_gem_pwrite *args,
729 struct drm_file *file)
40123c1f 730{
05394f39 731 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
40123c1f 732 ssize_t remain;
8c59967c
DV
733 loff_t offset;
734 char __user *user_data;
eb2c0c81 735 int shmem_page_offset, page_length, ret = 0;
8c59967c 736 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
e244a443 737 int hit_slowpath = 0;
58642885
DV
738 int needs_clflush_after = 0;
739 int needs_clflush_before = 0;
692a576b 740 int release_page;
40123c1f 741
8c59967c 742 user_data = (char __user *) (uintptr_t) args->data_ptr;
40123c1f
EA
743 remain = args->size;
744
8c59967c 745 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
40123c1f 746
58642885
DV
747 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
748 /* If we're not in the cpu write domain, set ourself into the gtt
749 * write domain and manually flush cachelines (if required). This
750 * optimizes for the case when the gpu will use the data
751 * right away and we therefore have to clflush anyway. */
752 if (obj->cache_level == I915_CACHE_NONE)
753 needs_clflush_after = 1;
754 ret = i915_gem_object_set_to_gtt_domain(obj, true);
755 if (ret)
756 return ret;
757 }
758 /* Same trick applies for invalidate partially written cachelines before
759 * writing. */
760 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
761 && obj->cache_level == I915_CACHE_NONE)
762 needs_clflush_before = 1;
763
673a394b 764 offset = args->offset;
05394f39 765 obj->dirty = 1;
673a394b 766
40123c1f 767 while (remain > 0) {
e5281ccd 768 struct page *page;
58642885 769 int partial_cacheline_write;
e5281ccd 770
40123c1f
EA
771 /* Operation in this page
772 *
40123c1f 773 * shmem_page_offset = offset within page in shmem file
40123c1f
EA
774 * page_length = bytes to copy for this page
775 */
c8cbbb8b 776 shmem_page_offset = offset_in_page(offset);
40123c1f
EA
777
778 page_length = remain;
779 if ((shmem_page_offset + page_length) > PAGE_SIZE)
780 page_length = PAGE_SIZE - shmem_page_offset;
40123c1f 781
58642885
DV
782 /* If we don't overwrite a cacheline completely we need to be
783 * careful to have up-to-date data by first clflushing. Don't
784 * overcomplicate things and flush the entire patch. */
785 partial_cacheline_write = needs_clflush_before &&
786 ((shmem_page_offset | page_length)
787 & (boot_cpu_data.x86_clflush_size - 1));
788
692a576b
DV
789 if (obj->pages) {
790 page = obj->pages[offset >> PAGE_SHIFT];
791 release_page = 0;
792 } else {
793 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
794 if (IS_ERR(page)) {
795 ret = PTR_ERR(page);
796 goto out;
797 }
798 release_page = 1;
e5281ccd
CW
799 }
800
8c59967c
DV
801 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
802 (page_to_phys(page) & (1 << 17)) != 0;
803
d174bd64
DV
804 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
805 user_data, page_do_bit17_swizzling,
806 partial_cacheline_write,
807 needs_clflush_after);
808 if (ret == 0)
809 goto next_page;
e244a443
DV
810
811 hit_slowpath = 1;
692a576b 812 page_cache_get(page);
e244a443
DV
813 mutex_unlock(&dev->struct_mutex);
814
d174bd64
DV
815 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
816 user_data, page_do_bit17_swizzling,
817 partial_cacheline_write,
818 needs_clflush_after);
40123c1f 819
e244a443 820 mutex_lock(&dev->struct_mutex);
692a576b 821 page_cache_release(page);
e244a443 822next_page:
e5281ccd
CW
823 set_page_dirty(page);
824 mark_page_accessed(page);
692a576b
DV
825 if (release_page)
826 page_cache_release(page);
e5281ccd 827
8c59967c
DV
828 if (ret) {
829 ret = -EFAULT;
830 goto out;
831 }
832
40123c1f 833 remain -= page_length;
8c59967c 834 user_data += page_length;
40123c1f 835 offset += page_length;
673a394b
EA
836 }
837
fbd5a26d 838out:
e244a443
DV
839 if (hit_slowpath) {
840 /* Fixup: Kill any reinstated backing storage pages */
841 if (obj->madv == __I915_MADV_PURGED)
842 i915_gem_object_truncate(obj);
843 /* and flush dirty cachelines in case the object isn't in the cpu write
844 * domain anymore. */
845 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
846 i915_gem_clflush_object(obj);
847 intel_gtt_chipset_flush();
848 }
8c59967c 849 }
673a394b 850
58642885
DV
851 if (needs_clflush_after)
852 intel_gtt_chipset_flush();
853
40123c1f 854 return ret;
673a394b
EA
855}
856
857/**
858 * Writes data to the object referenced by handle.
859 *
860 * On error, the contents of the buffer that were to be modified are undefined.
861 */
862int
863i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
fbd5a26d 864 struct drm_file *file)
673a394b
EA
865{
866 struct drm_i915_gem_pwrite *args = data;
05394f39 867 struct drm_i915_gem_object *obj;
51311d0a
CW
868 int ret;
869
870 if (args->size == 0)
871 return 0;
872
873 if (!access_ok(VERIFY_READ,
874 (char __user *)(uintptr_t)args->data_ptr,
875 args->size))
876 return -EFAULT;
877
f56f821f
DV
878 ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
879 args->size);
51311d0a
CW
880 if (ret)
881 return -EFAULT;
673a394b 882
fbd5a26d 883 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 884 if (ret)
fbd5a26d 885 return ret;
1d7cfea1 886
05394f39 887 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 888 if (&obj->base == NULL) {
1d7cfea1
CW
889 ret = -ENOENT;
890 goto unlock;
fbd5a26d 891 }
673a394b 892
7dcd2499 893 /* Bounds check destination. */
05394f39
CW
894 if (args->offset > obj->base.size ||
895 args->size > obj->base.size - args->offset) {
ce9d419d 896 ret = -EINVAL;
35b62a89 897 goto out;
ce9d419d
CW
898 }
899
1286ff73
DV
900 /* prime objects have no backing filp to GEM pread/pwrite
901 * pages from.
902 */
903 if (!obj->base.filp) {
904 ret = -EINVAL;
905 goto out;
906 }
907
db53a302
CW
908 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
909
935aaa69 910 ret = -EFAULT;
673a394b
EA
911 /* We can only do the GTT pwrite on untiled buffers, as otherwise
912 * it would end up going through the fenced access, and we'll get
913 * different detiling behavior between reading and writing.
914 * pread/pwrite currently are reading and writing from the CPU
915 * perspective, requiring manual detiling by the client.
916 */
5c0480f2 917 if (obj->phys_obj) {
fbd5a26d 918 ret = i915_gem_phys_pwrite(dev, obj, args, file);
5c0480f2
DV
919 goto out;
920 }
921
922 if (obj->gtt_space &&
3ae53783 923 obj->cache_level == I915_CACHE_NONE &&
c07496fa 924 obj->tiling_mode == I915_TILING_NONE &&
ffc62976 925 obj->map_and_fenceable &&
5c0480f2 926 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
fbd5a26d 927 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
935aaa69
DV
928 /* Note that the gtt paths might fail with non-page-backed user
929 * pointers (e.g. gtt mappings when moving data between
930 * textures). Fallback to the shmem path in that case. */
fbd5a26d 931 }
673a394b 932
5c0480f2 933 if (ret == -EFAULT)
935aaa69 934 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
5c0480f2 935
35b62a89 936out:
05394f39 937 drm_gem_object_unreference(&obj->base);
1d7cfea1 938unlock:
fbd5a26d 939 mutex_unlock(&dev->struct_mutex);
673a394b
EA
940 return ret;
941}
942
943/**
2ef7eeaa
EA
944 * Called when user space prepares to use an object with the CPU, either
945 * through the mmap ioctl's mapping or a GTT mapping.
673a394b
EA
946 */
947int
948i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
05394f39 949 struct drm_file *file)
673a394b
EA
950{
951 struct drm_i915_gem_set_domain *args = data;
05394f39 952 struct drm_i915_gem_object *obj;
2ef7eeaa
EA
953 uint32_t read_domains = args->read_domains;
954 uint32_t write_domain = args->write_domain;
673a394b
EA
955 int ret;
956
2ef7eeaa 957 /* Only handle setting domains to types used by the CPU. */
21d509e3 958 if (write_domain & I915_GEM_GPU_DOMAINS)
2ef7eeaa
EA
959 return -EINVAL;
960
21d509e3 961 if (read_domains & I915_GEM_GPU_DOMAINS)
2ef7eeaa
EA
962 return -EINVAL;
963
964 /* Having something in the write domain implies it's in the read
965 * domain, and only that read domain. Enforce that in the request.
966 */
967 if (write_domain != 0 && read_domains != write_domain)
968 return -EINVAL;
969
76c1dec1 970 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 971 if (ret)
76c1dec1 972 return ret;
1d7cfea1 973
05394f39 974 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 975 if (&obj->base == NULL) {
1d7cfea1
CW
976 ret = -ENOENT;
977 goto unlock;
76c1dec1 978 }
673a394b 979
2ef7eeaa
EA
980 if (read_domains & I915_GEM_DOMAIN_GTT) {
981 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
02354392
EA
982
983 /* Silently promote "you're not bound, there was nothing to do"
984 * to success, since the client was just asking us to
985 * make sure everything was done.
986 */
987 if (ret == -EINVAL)
988 ret = 0;
2ef7eeaa 989 } else {
e47c68e9 990 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
2ef7eeaa
EA
991 }
992
05394f39 993 drm_gem_object_unreference(&obj->base);
1d7cfea1 994unlock:
673a394b
EA
995 mutex_unlock(&dev->struct_mutex);
996 return ret;
997}
998
999/**
1000 * Called when user space has done writes to this buffer
1001 */
1002int
1003i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
05394f39 1004 struct drm_file *file)
673a394b
EA
1005{
1006 struct drm_i915_gem_sw_finish *args = data;
05394f39 1007 struct drm_i915_gem_object *obj;
673a394b
EA
1008 int ret = 0;
1009
76c1dec1 1010 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 1011 if (ret)
76c1dec1 1012 return ret;
1d7cfea1 1013
05394f39 1014 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 1015 if (&obj->base == NULL) {
1d7cfea1
CW
1016 ret = -ENOENT;
1017 goto unlock;
673a394b
EA
1018 }
1019
673a394b 1020 /* Pinned buffers may be scanout, so flush the cache */
05394f39 1021 if (obj->pin_count)
e47c68e9
EA
1022 i915_gem_object_flush_cpu_write_domain(obj);
1023
05394f39 1024 drm_gem_object_unreference(&obj->base);
1d7cfea1 1025unlock:
673a394b
EA
1026 mutex_unlock(&dev->struct_mutex);
1027 return ret;
1028}
1029
1030/**
1031 * Maps the contents of an object, returning the address it is mapped
1032 * into.
1033 *
1034 * While the mapping holds a reference on the contents of the object, it doesn't
1035 * imply a ref on the object itself.
1036 */
1037int
1038i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
05394f39 1039 struct drm_file *file)
673a394b
EA
1040{
1041 struct drm_i915_gem_mmap *args = data;
1042 struct drm_gem_object *obj;
673a394b
EA
1043 unsigned long addr;
1044
05394f39 1045 obj = drm_gem_object_lookup(dev, file, args->handle);
673a394b 1046 if (obj == NULL)
bf79cb91 1047 return -ENOENT;
673a394b 1048
1286ff73
DV
1049 /* prime objects have no backing filp to GEM mmap
1050 * pages from.
1051 */
1052 if (!obj->filp) {
1053 drm_gem_object_unreference_unlocked(obj);
1054 return -EINVAL;
1055 }
1056
6be5ceb0 1057 addr = vm_mmap(obj->filp, 0, args->size,
673a394b
EA
1058 PROT_READ | PROT_WRITE, MAP_SHARED,
1059 args->offset);
bc9025bd 1060 drm_gem_object_unreference_unlocked(obj);
673a394b
EA
1061 if (IS_ERR((void *)addr))
1062 return addr;
1063
1064 args->addr_ptr = (uint64_t) addr;
1065
1066 return 0;
1067}
1068
de151cf6
JB
1069/**
1070 * i915_gem_fault - fault a page into the GTT
1071 * vma: VMA in question
1072 * vmf: fault info
1073 *
1074 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1075 * from userspace. The fault handler takes care of binding the object to
1076 * the GTT (if needed), allocating and programming a fence register (again,
1077 * only if needed based on whether the old reg is still valid or the object
1078 * is tiled) and inserting a new PTE into the faulting process.
1079 *
1080 * Note that the faulting process may involve evicting existing objects
1081 * from the GTT and/or fence registers to make room. So performance may
1082 * suffer if the GTT working set is large or there are few fence registers
1083 * left.
1084 */
1085int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1086{
05394f39
CW
1087 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1088 struct drm_device *dev = obj->base.dev;
7d1c4804 1089 drm_i915_private_t *dev_priv = dev->dev_private;
de151cf6
JB
1090 pgoff_t page_offset;
1091 unsigned long pfn;
1092 int ret = 0;
0f973f27 1093 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
de151cf6
JB
1094
1095 /* We don't use vmf->pgoff since that has the fake offset */
1096 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1097 PAGE_SHIFT;
1098
d9bc7e9f
CW
1099 ret = i915_mutex_lock_interruptible(dev);
1100 if (ret)
1101 goto out;
a00b10c3 1102
db53a302
CW
1103 trace_i915_gem_object_fault(obj, page_offset, true, write);
1104
d9bc7e9f 1105 /* Now bind it into the GTT if needed */
919926ae
CW
1106 if (!obj->map_and_fenceable) {
1107 ret = i915_gem_object_unbind(obj);
1108 if (ret)
1109 goto unlock;
a00b10c3 1110 }
05394f39 1111 if (!obj->gtt_space) {
75e9e915 1112 ret = i915_gem_object_bind_to_gtt(obj, 0, true);
c715089f
CW
1113 if (ret)
1114 goto unlock;
de151cf6 1115
e92d03bf
EA
1116 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1117 if (ret)
1118 goto unlock;
1119 }
4a684a41 1120
74898d7e
DV
1121 if (!obj->has_global_gtt_mapping)
1122 i915_gem_gtt_bind_object(obj, obj->cache_level);
1123
06d98131 1124 ret = i915_gem_object_get_fence(obj);
d9e86c0e
CW
1125 if (ret)
1126 goto unlock;
de151cf6 1127
05394f39
CW
1128 if (i915_gem_object_is_inactive(obj))
1129 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
7d1c4804 1130
6299f992
CW
1131 obj->fault_mappable = true;
1132
dd2757f8 1133 pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) +
de151cf6
JB
1134 page_offset;
1135
1136 /* Finally, remap it using the new GTT offset */
1137 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
c715089f 1138unlock:
de151cf6 1139 mutex_unlock(&dev->struct_mutex);
d9bc7e9f 1140out:
de151cf6 1141 switch (ret) {
d9bc7e9f 1142 case -EIO:
a9340cca
DV
1143 /* If this -EIO is due to a gpu hang, give the reset code a
1144 * chance to clean up the mess. Otherwise return the proper
1145 * SIGBUS. */
1146 if (!atomic_read(&dev_priv->mm.wedged))
1147 return VM_FAULT_SIGBUS;
045e769a 1148 case -EAGAIN:
d9bc7e9f
CW
1149 /* Give the error handler a chance to run and move the
1150 * objects off the GPU active list. Next time we service the
1151 * fault, we should be able to transition the page into the
1152 * GTT without touching the GPU (and so avoid further
1153 * EIO/EGAIN). If the GPU is wedged, then there is no issue
1154 * with coherency, just lost writes.
1155 */
045e769a 1156 set_need_resched();
c715089f
CW
1157 case 0:
1158 case -ERESTARTSYS:
bed636ab 1159 case -EINTR:
c715089f 1160 return VM_FAULT_NOPAGE;
de151cf6 1161 case -ENOMEM:
de151cf6 1162 return VM_FAULT_OOM;
de151cf6 1163 default:
c715089f 1164 return VM_FAULT_SIGBUS;
de151cf6
JB
1165 }
1166}
1167
901782b2
CW
1168/**
1169 * i915_gem_release_mmap - remove physical page mappings
1170 * @obj: obj in question
1171 *
af901ca1 1172 * Preserve the reservation of the mmapping with the DRM core code, but
901782b2
CW
1173 * relinquish ownership of the pages back to the system.
1174 *
1175 * It is vital that we remove the page mapping if we have mapped a tiled
1176 * object through the GTT and then lose the fence register due to
1177 * resource pressure. Similarly if the object has been moved out of the
1178 * aperture, than pages mapped into userspace must be revoked. Removing the
1179 * mapping will then trigger a page fault on the next user access, allowing
1180 * fixup by i915_gem_fault().
1181 */
d05ca301 1182void
05394f39 1183i915_gem_release_mmap(struct drm_i915_gem_object *obj)
901782b2 1184{
6299f992
CW
1185 if (!obj->fault_mappable)
1186 return;
901782b2 1187
f6e47884
CW
1188 if (obj->base.dev->dev_mapping)
1189 unmap_mapping_range(obj->base.dev->dev_mapping,
1190 (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1191 obj->base.size, 1);
fb7d516a 1192
6299f992 1193 obj->fault_mappable = false;
901782b2
CW
1194}
1195
92b88aeb 1196static uint32_t
e28f8711 1197i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
92b88aeb 1198{
e28f8711 1199 uint32_t gtt_size;
92b88aeb
CW
1200
1201 if (INTEL_INFO(dev)->gen >= 4 ||
e28f8711
CW
1202 tiling_mode == I915_TILING_NONE)
1203 return size;
92b88aeb
CW
1204
1205 /* Previous chips need a power-of-two fence region when tiling */
1206 if (INTEL_INFO(dev)->gen == 3)
e28f8711 1207 gtt_size = 1024*1024;
92b88aeb 1208 else
e28f8711 1209 gtt_size = 512*1024;
92b88aeb 1210
e28f8711
CW
1211 while (gtt_size < size)
1212 gtt_size <<= 1;
92b88aeb 1213
e28f8711 1214 return gtt_size;
92b88aeb
CW
1215}
1216
de151cf6
JB
1217/**
1218 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1219 * @obj: object to check
1220 *
1221 * Return the required GTT alignment for an object, taking into account
5e783301 1222 * potential fence register mapping.
de151cf6
JB
1223 */
1224static uint32_t
e28f8711
CW
1225i915_gem_get_gtt_alignment(struct drm_device *dev,
1226 uint32_t size,
1227 int tiling_mode)
de151cf6 1228{
de151cf6
JB
1229 /*
1230 * Minimum alignment is 4k (GTT page size), but might be greater
1231 * if a fence register is needed for the object.
1232 */
a00b10c3 1233 if (INTEL_INFO(dev)->gen >= 4 ||
e28f8711 1234 tiling_mode == I915_TILING_NONE)
de151cf6
JB
1235 return 4096;
1236
a00b10c3
CW
1237 /*
1238 * Previous chips need to be aligned to the size of the smallest
1239 * fence register that can contain the object.
1240 */
e28f8711 1241 return i915_gem_get_gtt_size(dev, size, tiling_mode);
a00b10c3
CW
1242}
1243
5e783301
DV
1244/**
1245 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1246 * unfenced object
e28f8711
CW
1247 * @dev: the device
1248 * @size: size of the object
1249 * @tiling_mode: tiling mode of the object
5e783301
DV
1250 *
1251 * Return the required GTT alignment for an object, only taking into account
1252 * unfenced tiled surface requirements.
1253 */
467cffba 1254uint32_t
e28f8711
CW
1255i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1256 uint32_t size,
1257 int tiling_mode)
5e783301 1258{
5e783301
DV
1259 /*
1260 * Minimum alignment is 4k (GTT page size) for sane hw.
1261 */
1262 if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
e28f8711 1263 tiling_mode == I915_TILING_NONE)
5e783301
DV
1264 return 4096;
1265
e28f8711
CW
1266 /* Previous hardware however needs to be aligned to a power-of-two
1267 * tile height. The simplest method for determining this is to reuse
1268 * the power-of-tile object size.
5e783301 1269 */
e28f8711 1270 return i915_gem_get_gtt_size(dev, size, tiling_mode);
5e783301
DV
1271}
1272
de151cf6 1273int
ff72145b
DA
1274i915_gem_mmap_gtt(struct drm_file *file,
1275 struct drm_device *dev,
1276 uint32_t handle,
1277 uint64_t *offset)
de151cf6 1278{
da761a6e 1279 struct drm_i915_private *dev_priv = dev->dev_private;
05394f39 1280 struct drm_i915_gem_object *obj;
de151cf6
JB
1281 int ret;
1282
76c1dec1 1283 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 1284 if (ret)
76c1dec1 1285 return ret;
de151cf6 1286
ff72145b 1287 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
c8725226 1288 if (&obj->base == NULL) {
1d7cfea1
CW
1289 ret = -ENOENT;
1290 goto unlock;
1291 }
de151cf6 1292
05394f39 1293 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
da761a6e 1294 ret = -E2BIG;
ff56b0bc 1295 goto out;
da761a6e
CW
1296 }
1297
05394f39 1298 if (obj->madv != I915_MADV_WILLNEED) {
ab18282d 1299 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1d7cfea1
CW
1300 ret = -EINVAL;
1301 goto out;
ab18282d
CW
1302 }
1303
05394f39 1304 if (!obj->base.map_list.map) {
b464e9a2 1305 ret = drm_gem_create_mmap_offset(&obj->base);
1d7cfea1
CW
1306 if (ret)
1307 goto out;
de151cf6
JB
1308 }
1309
ff72145b 1310 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
de151cf6 1311
1d7cfea1 1312out:
05394f39 1313 drm_gem_object_unreference(&obj->base);
1d7cfea1 1314unlock:
de151cf6 1315 mutex_unlock(&dev->struct_mutex);
1d7cfea1 1316 return ret;
de151cf6
JB
1317}
1318
ff72145b
DA
1319/**
1320 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1321 * @dev: DRM device
1322 * @data: GTT mapping ioctl data
1323 * @file: GEM object info
1324 *
1325 * Simply returns the fake offset to userspace so it can mmap it.
1326 * The mmap call will end up in drm_gem_mmap(), which will set things
1327 * up so we can get faults in the handler above.
1328 *
1329 * The fault handler will take care of binding the object into the GTT
1330 * (since it may have been evicted to make room for something), allocating
1331 * a fence register, and mapping the appropriate aperture address into
1332 * userspace.
1333 */
1334int
1335i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1336 struct drm_file *file)
1337{
1338 struct drm_i915_gem_mmap_gtt *args = data;
1339
ff72145b
DA
1340 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1341}
1342
1286ff73 1343int
05394f39 1344i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
e5281ccd
CW
1345 gfp_t gfpmask)
1346{
e5281ccd
CW
1347 int page_count, i;
1348 struct address_space *mapping;
1349 struct inode *inode;
1350 struct page *page;
1351
1286ff73
DV
1352 if (obj->pages || obj->sg_table)
1353 return 0;
1354
e5281ccd
CW
1355 /* Get the list of pages out of our struct file. They'll be pinned
1356 * at this point until we release them.
1357 */
05394f39
CW
1358 page_count = obj->base.size / PAGE_SIZE;
1359 BUG_ON(obj->pages != NULL);
1360 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1361 if (obj->pages == NULL)
e5281ccd
CW
1362 return -ENOMEM;
1363
05394f39 1364 inode = obj->base.filp->f_path.dentry->d_inode;
e5281ccd 1365 mapping = inode->i_mapping;
5949eac4
HD
1366 gfpmask |= mapping_gfp_mask(mapping);
1367
e5281ccd 1368 for (i = 0; i < page_count; i++) {
5949eac4 1369 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
e5281ccd
CW
1370 if (IS_ERR(page))
1371 goto err_pages;
1372
05394f39 1373 obj->pages[i] = page;
e5281ccd
CW
1374 }
1375
6dacfd2f 1376 if (i915_gem_object_needs_bit17_swizzle(obj))
e5281ccd
CW
1377 i915_gem_object_do_bit_17_swizzle(obj);
1378
1379 return 0;
1380
1381err_pages:
1382 while (i--)
05394f39 1383 page_cache_release(obj->pages[i]);
e5281ccd 1384
05394f39
CW
1385 drm_free_large(obj->pages);
1386 obj->pages = NULL;
e5281ccd
CW
1387 return PTR_ERR(page);
1388}
1389
5cdf5881 1390static void
05394f39 1391i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
673a394b 1392{
05394f39 1393 int page_count = obj->base.size / PAGE_SIZE;
673a394b
EA
1394 int i;
1395
1286ff73
DV
1396 if (!obj->pages)
1397 return;
1398
05394f39 1399 BUG_ON(obj->madv == __I915_MADV_PURGED);
673a394b 1400
6dacfd2f 1401 if (i915_gem_object_needs_bit17_swizzle(obj))
280b713b
EA
1402 i915_gem_object_save_bit_17_swizzle(obj);
1403
05394f39
CW
1404 if (obj->madv == I915_MADV_DONTNEED)
1405 obj->dirty = 0;
3ef94daa
CW
1406
1407 for (i = 0; i < page_count; i++) {
05394f39
CW
1408 if (obj->dirty)
1409 set_page_dirty(obj->pages[i]);
3ef94daa 1410
05394f39
CW
1411 if (obj->madv == I915_MADV_WILLNEED)
1412 mark_page_accessed(obj->pages[i]);
3ef94daa 1413
05394f39 1414 page_cache_release(obj->pages[i]);
3ef94daa 1415 }
05394f39 1416 obj->dirty = 0;
673a394b 1417
05394f39
CW
1418 drm_free_large(obj->pages);
1419 obj->pages = NULL;
673a394b
EA
1420}
1421
54cf91dc 1422void
05394f39 1423i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1ec14ad3
CW
1424 struct intel_ring_buffer *ring,
1425 u32 seqno)
673a394b 1426{
05394f39 1427 struct drm_device *dev = obj->base.dev;
69dc4987 1428 struct drm_i915_private *dev_priv = dev->dev_private;
617dbe27 1429
852835f3 1430 BUG_ON(ring == NULL);
05394f39 1431 obj->ring = ring;
673a394b
EA
1432
1433 /* Add a reference if we're newly entering the active list. */
05394f39
CW
1434 if (!obj->active) {
1435 drm_gem_object_reference(&obj->base);
1436 obj->active = 1;
673a394b 1437 }
e35a41de 1438
673a394b 1439 /* Move from whatever list we were on to the tail of execution. */
05394f39
CW
1440 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1441 list_move_tail(&obj->ring_list, &ring->active_list);
caea7476 1442
0201f1ec 1443 obj->last_read_seqno = seqno;
caea7476 1444
7dd49065 1445 if (obj->fenced_gpu_access) {
caea7476 1446 obj->last_fenced_seqno = seqno;
caea7476 1447
7dd49065
CW
1448 /* Bump MRU to take account of the delayed flush */
1449 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1450 struct drm_i915_fence_reg *reg;
1451
1452 reg = &dev_priv->fence_regs[obj->fence_reg];
1453 list_move_tail(&reg->lru_list,
1454 &dev_priv->mm.fence_list);
1455 }
caea7476
CW
1456 }
1457}
1458
caea7476
CW
1459static void
1460i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1461{
1462 struct drm_device *dev = obj->base.dev;
1463 struct drm_i915_private *dev_priv = dev->dev_private;
1464
65ce3027 1465 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
caea7476 1466 BUG_ON(!obj->active);
65ce3027 1467
f047e395
CW
1468 if (obj->pin_count) /* are we a framebuffer? */
1469 intel_mark_fb_idle(obj);
1470
1471 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1472
65ce3027 1473 list_del_init(&obj->ring_list);
caea7476
CW
1474 obj->ring = NULL;
1475
65ce3027
CW
1476 obj->last_read_seqno = 0;
1477 obj->last_write_seqno = 0;
1478 obj->base.write_domain = 0;
1479
1480 obj->last_fenced_seqno = 0;
caea7476 1481 obj->fenced_gpu_access = false;
caea7476
CW
1482
1483 obj->active = 0;
1484 drm_gem_object_unreference(&obj->base);
1485
1486 WARN_ON(i915_verify_lists(dev));
ce44b0ea 1487}
673a394b 1488
963b4836
CW
1489/* Immediately discard the backing storage */
1490static void
05394f39 1491i915_gem_object_truncate(struct drm_i915_gem_object *obj)
963b4836 1492{
bb6baf76 1493 struct inode *inode;
963b4836 1494
ae9fed6b
CW
1495 /* Our goal here is to return as much of the memory as
1496 * is possible back to the system as we are called from OOM.
1497 * To do this we must instruct the shmfs to drop all of its
e2377fe0 1498 * backing pages, *now*.
ae9fed6b 1499 */
05394f39 1500 inode = obj->base.filp->f_path.dentry->d_inode;
e2377fe0 1501 shmem_truncate_range(inode, 0, (loff_t)-1);
bb6baf76 1502
a14917ee
CW
1503 if (obj->base.map_list.map)
1504 drm_gem_free_mmap_offset(&obj->base);
1505
05394f39 1506 obj->madv = __I915_MADV_PURGED;
963b4836
CW
1507}
1508
1509static inline int
05394f39 1510i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
963b4836 1511{
05394f39 1512 return obj->madv == I915_MADV_DONTNEED;
963b4836
CW
1513}
1514
53d227f2
DV
1515static u32
1516i915_gem_get_seqno(struct drm_device *dev)
1517{
1518 drm_i915_private_t *dev_priv = dev->dev_private;
1519 u32 seqno = dev_priv->next_seqno;
1520
1521 /* reserve 0 for non-seqno */
1522 if (++dev_priv->next_seqno == 0)
1523 dev_priv->next_seqno = 1;
1524
1525 return seqno;
1526}
1527
1528u32
1529i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
1530{
1531 if (ring->outstanding_lazy_request == 0)
1532 ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
1533
1534 return ring->outstanding_lazy_request;
1535}
1536
3cce469c 1537int
db53a302 1538i915_add_request(struct intel_ring_buffer *ring,
f787a5f5 1539 struct drm_file *file,
db53a302 1540 struct drm_i915_gem_request *request)
673a394b 1541{
db53a302 1542 drm_i915_private_t *dev_priv = ring->dev->dev_private;
673a394b 1543 uint32_t seqno;
a71d8d94 1544 u32 request_ring_position;
673a394b 1545 int was_empty;
3cce469c
CW
1546 int ret;
1547
cc889e0f
DV
1548 /*
1549 * Emit any outstanding flushes - execbuf can fail to emit the flush
1550 * after having emitted the batchbuffer command. Hence we need to fix
1551 * things up similar to emitting the lazy request. The difference here
1552 * is that the flush _must_ happen before the next request, no matter
1553 * what.
1554 */
a7b9761d
CW
1555 ret = intel_ring_flush_all_caches(ring);
1556 if (ret)
1557 return ret;
cc889e0f 1558
3bb73aba
CW
1559 if (request == NULL) {
1560 request = kmalloc(sizeof(*request), GFP_KERNEL);
1561 if (request == NULL)
1562 return -ENOMEM;
1563 }
1564
53d227f2 1565 seqno = i915_gem_next_request_seqno(ring);
673a394b 1566
a71d8d94
CW
1567 /* Record the position of the start of the request so that
1568 * should we detect the updated seqno part-way through the
1569 * GPU processing the request, we never over-estimate the
1570 * position of the head.
1571 */
1572 request_ring_position = intel_ring_get_tail(ring);
1573
3cce469c 1574 ret = ring->add_request(ring, &seqno);
3bb73aba
CW
1575 if (ret) {
1576 kfree(request);
1577 return ret;
1578 }
673a394b 1579
db53a302 1580 trace_i915_gem_request_add(ring, seqno);
673a394b
EA
1581
1582 request->seqno = seqno;
852835f3 1583 request->ring = ring;
a71d8d94 1584 request->tail = request_ring_position;
673a394b 1585 request->emitted_jiffies = jiffies;
852835f3
ZN
1586 was_empty = list_empty(&ring->request_list);
1587 list_add_tail(&request->list, &ring->request_list);
3bb73aba 1588 request->file_priv = NULL;
852835f3 1589
db53a302
CW
1590 if (file) {
1591 struct drm_i915_file_private *file_priv = file->driver_priv;
1592
1c25595f 1593 spin_lock(&file_priv->mm.lock);
f787a5f5 1594 request->file_priv = file_priv;
b962442e 1595 list_add_tail(&request->client_list,
f787a5f5 1596 &file_priv->mm.request_list);
1c25595f 1597 spin_unlock(&file_priv->mm.lock);
b962442e 1598 }
673a394b 1599
5391d0cf 1600 ring->outstanding_lazy_request = 0;
db53a302 1601
f65d9421 1602 if (!dev_priv->mm.suspended) {
3e0dc6b0
BW
1603 if (i915_enable_hangcheck) {
1604 mod_timer(&dev_priv->hangcheck_timer,
1605 jiffies +
1606 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1607 }
f047e395 1608 if (was_empty) {
b3b079db
CW
1609 queue_delayed_work(dev_priv->wq,
1610 &dev_priv->mm.retire_work, HZ);
f047e395
CW
1611 intel_mark_busy(dev_priv->dev);
1612 }
f65d9421 1613 }
cc889e0f 1614
3cce469c 1615 return 0;
673a394b
EA
1616}
1617
f787a5f5
CW
1618static inline void
1619i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
673a394b 1620{
1c25595f 1621 struct drm_i915_file_private *file_priv = request->file_priv;
673a394b 1622
1c25595f
CW
1623 if (!file_priv)
1624 return;
1c5d22f7 1625
1c25595f 1626 spin_lock(&file_priv->mm.lock);
09bfa517
HRK
1627 if (request->file_priv) {
1628 list_del(&request->client_list);
1629 request->file_priv = NULL;
1630 }
1c25595f 1631 spin_unlock(&file_priv->mm.lock);
673a394b 1632}
673a394b 1633
dfaae392
CW
1634static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1635 struct intel_ring_buffer *ring)
9375e446 1636{
dfaae392
CW
1637 while (!list_empty(&ring->request_list)) {
1638 struct drm_i915_gem_request *request;
673a394b 1639
dfaae392
CW
1640 request = list_first_entry(&ring->request_list,
1641 struct drm_i915_gem_request,
1642 list);
de151cf6 1643
dfaae392 1644 list_del(&request->list);
f787a5f5 1645 i915_gem_request_remove_from_client(request);
dfaae392
CW
1646 kfree(request);
1647 }
673a394b 1648
dfaae392 1649 while (!list_empty(&ring->active_list)) {
05394f39 1650 struct drm_i915_gem_object *obj;
9375e446 1651
05394f39
CW
1652 obj = list_first_entry(&ring->active_list,
1653 struct drm_i915_gem_object,
1654 ring_list);
9375e446 1655
05394f39 1656 i915_gem_object_move_to_inactive(obj);
673a394b
EA
1657 }
1658}
1659
312817a3
CW
1660static void i915_gem_reset_fences(struct drm_device *dev)
1661{
1662 struct drm_i915_private *dev_priv = dev->dev_private;
1663 int i;
1664
4b9de737 1665 for (i = 0; i < dev_priv->num_fence_regs; i++) {
312817a3 1666 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
7d2cb39c 1667
ada726c7 1668 i915_gem_write_fence(dev, i, NULL);
7d2cb39c 1669
ada726c7
CW
1670 if (reg->obj)
1671 i915_gem_object_fence_lost(reg->obj);
7d2cb39c 1672
ada726c7
CW
1673 reg->pin_count = 0;
1674 reg->obj = NULL;
1675 INIT_LIST_HEAD(&reg->lru_list);
312817a3 1676 }
ada726c7
CW
1677
1678 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
312817a3
CW
1679}
1680
069efc1d 1681void i915_gem_reset(struct drm_device *dev)
673a394b 1682{
77f01230 1683 struct drm_i915_private *dev_priv = dev->dev_private;
05394f39 1684 struct drm_i915_gem_object *obj;
b4519513 1685 struct intel_ring_buffer *ring;
1ec14ad3 1686 int i;
673a394b 1687
b4519513
CW
1688 for_each_ring(ring, dev_priv, i)
1689 i915_gem_reset_ring_lists(dev_priv, ring);
dfaae392 1690
dfaae392
CW
1691 /* Move everything out of the GPU domains to ensure we do any
1692 * necessary invalidation upon reuse.
1693 */
05394f39 1694 list_for_each_entry(obj,
77f01230 1695 &dev_priv->mm.inactive_list,
69dc4987 1696 mm_list)
77f01230 1697 {
05394f39 1698 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
77f01230 1699 }
069efc1d
CW
1700
1701 /* The fence registers are invalidated so clear them out */
312817a3 1702 i915_gem_reset_fences(dev);
673a394b
EA
1703}
1704
1705/**
1706 * This function clears the request list as sequence numbers are passed.
1707 */
a71d8d94 1708void
db53a302 1709i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
673a394b 1710{
673a394b 1711 uint32_t seqno;
1ec14ad3 1712 int i;
673a394b 1713
db53a302 1714 if (list_empty(&ring->request_list))
6c0594a3
KW
1715 return;
1716
db53a302 1717 WARN_ON(i915_verify_lists(ring->dev));
673a394b 1718
78501eac 1719 seqno = ring->get_seqno(ring);
1ec14ad3 1720
076e2c0e 1721 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
1ec14ad3
CW
1722 if (seqno >= ring->sync_seqno[i])
1723 ring->sync_seqno[i] = 0;
1724
852835f3 1725 while (!list_empty(&ring->request_list)) {
673a394b 1726 struct drm_i915_gem_request *request;
673a394b 1727
852835f3 1728 request = list_first_entry(&ring->request_list,
673a394b
EA
1729 struct drm_i915_gem_request,
1730 list);
673a394b 1731
dfaae392 1732 if (!i915_seqno_passed(seqno, request->seqno))
b84d5f0c
CW
1733 break;
1734
db53a302 1735 trace_i915_gem_request_retire(ring, request->seqno);
a71d8d94
CW
1736 /* We know the GPU must have read the request to have
1737 * sent us the seqno + interrupt, so use the position
 1738 * of the tail of the request to update the last known position
1739 * of the GPU head.
1740 */
1741 ring->last_retired_head = request->tail;
b84d5f0c
CW
1742
1743 list_del(&request->list);
f787a5f5 1744 i915_gem_request_remove_from_client(request);
b84d5f0c
CW
1745 kfree(request);
1746 }
673a394b 1747
b84d5f0c
CW
1748 /* Move any buffers on the active list that are no longer referenced
1749 * by the ringbuffer to the flushing/inactive lists as appropriate.
1750 */
1751 while (!list_empty(&ring->active_list)) {
05394f39 1752 struct drm_i915_gem_object *obj;
b84d5f0c 1753
0206e353 1754 obj = list_first_entry(&ring->active_list,
05394f39
CW
1755 struct drm_i915_gem_object,
1756 ring_list);
673a394b 1757
0201f1ec 1758 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
673a394b 1759 break;
b84d5f0c 1760
65ce3027 1761 i915_gem_object_move_to_inactive(obj);
673a394b 1762 }
9d34e5db 1763
db53a302
CW
1764 if (unlikely(ring->trace_irq_seqno &&
1765 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
1ec14ad3 1766 ring->irq_put(ring);
db53a302 1767 ring->trace_irq_seqno = 0;
9d34e5db 1768 }
23bc5982 1769
db53a302 1770 WARN_ON(i915_verify_lists(ring->dev));
673a394b
EA
1771}
1772
b09a1fec
CW
1773void
1774i915_gem_retire_requests(struct drm_device *dev)
1775{
1776 drm_i915_private_t *dev_priv = dev->dev_private;
b4519513 1777 struct intel_ring_buffer *ring;
1ec14ad3 1778 int i;
b09a1fec 1779
b4519513
CW
1780 for_each_ring(ring, dev_priv, i)
1781 i915_gem_retire_requests_ring(ring);
b09a1fec
CW
1782}
1783
75ef9da2 1784static void
673a394b
EA
1785i915_gem_retire_work_handler(struct work_struct *work)
1786{
1787 drm_i915_private_t *dev_priv;
1788 struct drm_device *dev;
b4519513 1789 struct intel_ring_buffer *ring;
0a58705b
CW
1790 bool idle;
1791 int i;
673a394b
EA
1792
1793 dev_priv = container_of(work, drm_i915_private_t,
1794 mm.retire_work.work);
1795 dev = dev_priv->dev;
1796
891b48cf
CW
1797 /* Come back later if the device is busy... */
1798 if (!mutex_trylock(&dev->struct_mutex)) {
1799 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1800 return;
1801 }
1802
b09a1fec 1803 i915_gem_retire_requests(dev);
d1b851fc 1804
0a58705b
CW
1805 /* Send a periodic flush down the ring so we don't hold onto GEM
1806 * objects indefinitely.
1807 */
1808 idle = true;
b4519513 1809 for_each_ring(ring, dev_priv, i) {
3bb73aba
CW
1810 if (ring->gpu_caches_dirty)
1811 i915_add_request(ring, NULL, NULL);
0a58705b
CW
1812
1813 idle &= list_empty(&ring->request_list);
1814 }
1815
1816 if (!dev_priv->mm.suspended && !idle)
9c9fe1f8 1817 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
f047e395
CW
1818 if (idle)
1819 intel_mark_idle(dev);
0a58705b 1820
673a394b
EA
1821 mutex_unlock(&dev->struct_mutex);
1822}
1823
d6b2c790
DV
1824int
1825i915_gem_check_wedge(struct drm_i915_private *dev_priv,
1826 bool interruptible)
b4aca010 1827{
b4aca010
BW
1828 if (atomic_read(&dev_priv->mm.wedged)) {
1829 struct completion *x = &dev_priv->error_completion;
1830 bool recovery_complete;
1831 unsigned long flags;
1832
1833 /* Give the error handler a chance to run. */
1834 spin_lock_irqsave(&x->wait.lock, flags);
1835 recovery_complete = x->done > 0;
1836 spin_unlock_irqrestore(&x->wait.lock, flags);
1837
d6b2c790
DV
1838 /* Non-interruptible callers can't handle -EAGAIN, hence return
1839 * -EIO unconditionally for these. */
1840 if (!interruptible)
1841 return -EIO;
1842
1843 /* Recovery complete, but still wedged means reset failure. */
1844 if (recovery_complete)
1845 return -EIO;
1846
1847 return -EAGAIN;
b4aca010
BW
1848 }
1849
1850 return 0;
1851}
1852
1853/*
1854 * Compare seqno against outstanding lazy request. Emit a request if they are
1855 * equal.
1856 */
1857static int
1858i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
1859{
3bb73aba 1860 int ret;
b4aca010
BW
1861
1862 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
1863
3bb73aba
CW
1864 ret = 0;
1865 if (seqno == ring->outstanding_lazy_request)
1866 ret = i915_add_request(ring, NULL, NULL);
b4aca010
BW
1867
1868 return ret;
1869}
1870
5c81fe85
BW
1871/**
1872 * __wait_seqno - wait until execution of seqno has finished
1873 * @ring: the ring expected to report seqno
 1874 * @seqno: the sequence number we are waiting to be signalled
1875 * @interruptible: do an interruptible wait (normally yes)
1876 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1877 *
 1878 * Returns 0 if the seqno was found within the allotted time. Else returns the
 1879 * errno, with the remaining time filled into the timeout argument.
1880 */
604dd3ec 1881static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
5c81fe85 1882 bool interruptible, struct timespec *timeout)
604dd3ec
BW
1883{
1884 drm_i915_private_t *dev_priv = ring->dev->dev_private;
5c81fe85
BW
1885 struct timespec before, now, wait_time={1,0};
1886 unsigned long timeout_jiffies;
1887 long end;
1888 bool wait_forever = true;
d6b2c790 1889 int ret;
604dd3ec
BW
1890
1891 if (i915_seqno_passed(ring->get_seqno(ring), seqno))
1892 return 0;
1893
1894 trace_i915_gem_request_wait_begin(ring, seqno);
5c81fe85
BW
1895
1896 if (timeout != NULL) {
1897 wait_time = *timeout;
1898 wait_forever = false;
1899 }
1900
1901 timeout_jiffies = timespec_to_jiffies(&wait_time);
1902
604dd3ec
BW
1903 if (WARN_ON(!ring->irq_get(ring)))
1904 return -ENODEV;
1905
5c81fe85
BW
 1906	/* Record the current time in case we are interrupted by a signal, or wedged */
1907 getrawmonotonic(&before);
1908
604dd3ec
BW
1909#define EXIT_COND \
1910 (i915_seqno_passed(ring->get_seqno(ring), seqno) || \
1911 atomic_read(&dev_priv->mm.wedged))
5c81fe85
BW
1912 do {
1913 if (interruptible)
1914 end = wait_event_interruptible_timeout(ring->irq_queue,
1915 EXIT_COND,
1916 timeout_jiffies);
1917 else
1918 end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1919 timeout_jiffies);
604dd3ec 1920
d6b2c790
DV
1921 ret = i915_gem_check_wedge(dev_priv, interruptible);
1922 if (ret)
1923 end = ret;
5c81fe85
BW
1924 } while (end == 0 && wait_forever);
1925
1926 getrawmonotonic(&now);
604dd3ec
BW
1927
1928 ring->irq_put(ring);
1929 trace_i915_gem_request_wait_end(ring, seqno);
1930#undef EXIT_COND
1931
5c81fe85
BW
1932 if (timeout) {
1933 struct timespec sleep_time = timespec_sub(now, before);
1934 *timeout = timespec_sub(*timeout, sleep_time);
1935 }
1936
1937 switch (end) {
eeef9b38 1938 case -EIO:
5c81fe85
BW
1939 case -EAGAIN: /* Wedged */
1940 case -ERESTARTSYS: /* Signal */
1941 return (int)end;
1942 case 0: /* Timeout */
1943 if (timeout)
1944 set_normalized_timespec(timeout, 0, 0);
1945 return -ETIME;
1946 default: /* Completed */
1947 WARN_ON(end < 0); /* We're not aware of other errors */
1948 return 0;
1949 }
604dd3ec
BW
1950}
1951
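/*
 * Illustrative sketch, not part of the original driver: how a caller inside
 * this file could use __wait_seqno() for a bounded, interruptible wait. The
 * helper name and the 10ms budget are hypothetical example values.
 */
static int __maybe_unused
example_wait_seqno_bounded(struct intel_ring_buffer *ring, u32 seqno)
{
	struct timespec timeout = ns_to_timespec(10 * NSEC_PER_MSEC);
	int ret;

	/* Interruptible wait; on return, timeout holds the time remaining. */
	ret = __wait_seqno(ring, seqno, true, &timeout);
	if (ret == -ETIME)
		DRM_DEBUG("seqno %u not signalled within 10ms\n", seqno);

	return ret;
}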
db53a302
CW
1952/**
1953 * Waits for a sequence number to be signaled, and cleans up the
1954 * request and object lists appropriately for that event.
1955 */
5a5a0c64 1956int
199b2bc2 1957i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
673a394b 1958{
db53a302 1959 drm_i915_private_t *dev_priv = ring->dev->dev_private;
673a394b
EA
1960 int ret = 0;
1961
1962 BUG_ON(seqno == 0);
1963
d6b2c790 1964 ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
b4aca010
BW
1965 if (ret)
1966 return ret;
3cce469c 1967
b4aca010
BW
1968 ret = i915_gem_check_olr(ring, seqno);
1969 if (ret)
1970 return ret;
ffed1d09 1971
5c81fe85 1972 ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);
673a394b 1973
673a394b
EA
1974 return ret;
1975}
1976
673a394b
EA
1977/**
1978 * Ensures that all rendering to the object has completed and the object is
1979 * safe to unbind from the GTT or access from the CPU.
1980 */
0201f1ec
CW
1981static __must_check int
1982i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1983 bool readonly)
673a394b 1984{
0201f1ec 1985 u32 seqno;
673a394b
EA
1986 int ret;
1987
673a394b
EA
1988 /* If there is rendering queued on the buffer being evicted, wait for
1989 * it.
1990 */
0201f1ec
CW
1991 if (readonly)
1992 seqno = obj->last_write_seqno;
1993 else
1994 seqno = obj->last_read_seqno;
1995 if (seqno == 0)
1996 return 0;
1997
1998 ret = i915_wait_seqno(obj->ring, seqno);
1999 if (ret)
2000 return ret;
2001
2002 /* Manually manage the write flush as we may have not yet retired
2003 * the buffer.
2004 */
2005 if (obj->last_write_seqno &&
2006 i915_seqno_passed(seqno, obj->last_write_seqno)) {
2007 obj->last_write_seqno = 0;
2008 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
673a394b
EA
2009 }
2010
0201f1ec 2011 i915_gem_retire_requests_ring(obj->ring);
673a394b
EA
2012 return 0;
2013}
2014
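/*
 * Illustrative sketch, not part of the original driver: the readonly flag
 * above lets a caller that only needs to read an object block on the last
 * GPU write rather than on every outstanding access. The helper names are
 * hypothetical.
 */
static int __maybe_unused
example_wait_before_cpu_read(struct drm_i915_gem_object *obj)
{
	/* Outstanding GPU reads may keep running; only the last write matters. */
	return i915_gem_object_wait_rendering(obj, true);
}

static int __maybe_unused
example_wait_before_cpu_write(struct drm_i915_gem_object *obj)
{
	/* A CPU write must wait for every outstanding GPU access. */
	return i915_gem_object_wait_rendering(obj, false);
}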
30dfebf3
DV
2015/**
2016 * Ensures that an object will eventually get non-busy by flushing any required
 2017 * write domains, emitting any outstanding lazy request and retiring any
 2018 * completed requests.
2019 */
2020static int
2021i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2022{
2023 int ret;
2024
2025 if (obj->active) {
0201f1ec 2026 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
30dfebf3
DV
2027 if (ret)
2028 return ret;
0201f1ec 2029
30dfebf3
DV
2030 i915_gem_retire_requests_ring(obj->ring);
2031 }
2032
2033 return 0;
2034}
2035
23ba4fd0
BW
2036/**
2037 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2038 * @DRM_IOCTL_ARGS: standard ioctl arguments
2039 *
2040 * Returns 0 if successful, else an error is returned with the remaining time in
2041 * the timeout parameter.
2042 * -ETIME: object is still busy after timeout
2043 * -ERESTARTSYS: signal interrupted the wait
 2044 * -ENOENT: object doesn't exist
2045 * Also possible, but rare:
2046 * -EAGAIN: GPU wedged
2047 * -ENOMEM: damn
2048 * -ENODEV: Internal IRQ fail
2049 * -E?: The add request failed
2050 *
2051 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2052 * non-zero timeout parameter the wait ioctl will wait for the given number of
2053 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2054 * without holding struct_mutex the object may become re-busied before this
 2055 * function completes. A similar but shorter race condition exists in the busy
 2056 * ioctl.
2057 */
2058int
2059i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2060{
2061 struct drm_i915_gem_wait *args = data;
2062 struct drm_i915_gem_object *obj;
2063 struct intel_ring_buffer *ring = NULL;
eac1f14f 2064 struct timespec timeout_stack, *timeout = NULL;
23ba4fd0
BW
2065 u32 seqno = 0;
2066 int ret = 0;
2067
eac1f14f
BW
2068 if (args->timeout_ns >= 0) {
2069 timeout_stack = ns_to_timespec(args->timeout_ns);
2070 timeout = &timeout_stack;
2071 }
23ba4fd0
BW
2072
2073 ret = i915_mutex_lock_interruptible(dev);
2074 if (ret)
2075 return ret;
2076
2077 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2078 if (&obj->base == NULL) {
2079 mutex_unlock(&dev->struct_mutex);
2080 return -ENOENT;
2081 }
2082
30dfebf3
DV
2083 /* Need to make sure the object gets inactive eventually. */
2084 ret = i915_gem_object_flush_active(obj);
23ba4fd0
BW
2085 if (ret)
2086 goto out;
2087
2088 if (obj->active) {
0201f1ec 2089 seqno = obj->last_read_seqno;
23ba4fd0
BW
2090 ring = obj->ring;
2091 }
2092
2093 if (seqno == 0)
2094 goto out;
2095
23ba4fd0
BW
2096 /* Do this after OLR check to make sure we make forward progress polling
2097 * on this IOCTL with a 0 timeout (like busy ioctl)
2098 */
2099 if (!args->timeout_ns) {
2100 ret = -ETIME;
2101 goto out;
2102 }
2103
2104 drm_gem_object_unreference(&obj->base);
2105 mutex_unlock(&dev->struct_mutex);
2106
eac1f14f
BW
2107 ret = __wait_seqno(ring, seqno, true, timeout);
2108 if (timeout) {
2109 WARN_ON(!timespec_valid(timeout));
2110 args->timeout_ns = timespec_to_ns(timeout);
2111 }
23ba4fd0
BW
2112 return ret;
2113
2114out:
2115 drm_gem_object_unreference(&obj->base);
2116 mutex_unlock(&dev->struct_mutex);
2117 return ret;
2118}
2119
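/*
 * Illustrative userspace usage of the wait ioctl above, kept inside a
 * comment so this file is unaffected. It is a sketch only and assumes
 * libdrm's drmIoctl() wrapper plus a valid GEM handle in 'handle':
 *
 *	struct drm_i915_gem_wait wait;
 *	int ret;
 *
 *	memset(&wait, 0, sizeof(wait));
 *	wait.bo_handle = handle;
 *	wait.timeout_ns = 100 * 1000 * 1000;	// wait up to 100ms
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *	// ret == 0: object idle, wait.timeout_ns holds the unused budget
 *	// ret == -1 && errno == ETIME: object still busy after 100ms
 */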
5816d648
BW
2120/**
2121 * i915_gem_object_sync - sync an object to a ring.
2122 *
2123 * @obj: object which may be in use on another ring.
2124 * @to: ring we wish to use the object on. May be NULL.
2125 *
2126 * This code is meant to abstract object synchronization with the GPU.
2127 * Calling with NULL implies synchronizing the object with the CPU
2128 * rather than a particular GPU ring.
2129 *
2130 * Returns 0 if successful, else propagates up the lower layer error.
2131 */
2911a35b
BW
2132int
2133i915_gem_object_sync(struct drm_i915_gem_object *obj,
2134 struct intel_ring_buffer *to)
2135{
2136 struct intel_ring_buffer *from = obj->ring;
2137 u32 seqno;
2138 int ret, idx;
2139
2140 if (from == NULL || to == from)
2141 return 0;
2142
5816d648 2143 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
0201f1ec 2144 return i915_gem_object_wait_rendering(obj, false);
2911a35b
BW
2145
2146 idx = intel_ring_sync_index(from, to);
2147
0201f1ec 2148 seqno = obj->last_read_seqno;
2911a35b
BW
2149 if (seqno <= from->sync_seqno[idx])
2150 return 0;
2151
b4aca010
BW
2152 ret = i915_gem_check_olr(obj->ring, seqno);
2153 if (ret)
2154 return ret;
2911a35b 2155
1500f7ea 2156 ret = to->sync_to(to, from, seqno);
e3a5a225
BW
2157 if (!ret)
2158 from->sync_seqno[idx] = seqno;
2911a35b 2159
e3a5a225 2160 return ret;
2911a35b
BW
2161}
2162
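/*
 * Illustrative sketch, not part of the original driver: a caller that is
 * about to use 'obj' on ring 'to' first makes the object's previous
 * rendering visible, via a GPU semaphore when available or otherwise by
 * blocking on the CPU. The helper name is hypothetical.
 */
static int __maybe_unused
example_acquire_for_ring(struct drm_i915_gem_object *obj,
			 struct intel_ring_buffer *to)
{
	/* No-op when obj is idle or was last used on 'to' itself. */
	return i915_gem_object_sync(obj, to);
}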
b5ffc9bc
CW
2163static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2164{
2165 u32 old_write_domain, old_read_domains;
2166
b5ffc9bc
CW
 2167	/* Act as a barrier for all accesses through the GTT */
2168 mb();
2169
2170 /* Force a pagefault for domain tracking on next user access */
2171 i915_gem_release_mmap(obj);
2172
b97c3d9c
KP
2173 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2174 return;
2175
b5ffc9bc
CW
2176 old_read_domains = obj->base.read_domains;
2177 old_write_domain = obj->base.write_domain;
2178
2179 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2180 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2181
2182 trace_i915_gem_object_change_domain(obj,
2183 old_read_domains,
2184 old_write_domain);
2185}
2186
673a394b
EA
2187/**
2188 * Unbinds an object from the GTT aperture.
2189 */
0f973f27 2190int
05394f39 2191i915_gem_object_unbind(struct drm_i915_gem_object *obj)
673a394b 2192{
7bddb01f 2193 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
673a394b
EA
2194 int ret = 0;
2195
05394f39 2196 if (obj->gtt_space == NULL)
673a394b
EA
2197 return 0;
2198
31d8d651
CW
2199 if (obj->pin_count)
2200 return -EBUSY;
673a394b 2201
a8198eea 2202 ret = i915_gem_object_finish_gpu(obj);
1488fc08 2203 if (ret)
a8198eea
CW
2204 return ret;
 2205	/* Continue on if we fail due to EIO; the GPU is hung, so we
 2206	 * should be safe, and we need to clean up or else we might
2207 * cause memory corruption through use-after-free.
2208 */
2209
b5ffc9bc 2210 i915_gem_object_finish_gtt(obj);
5323fd04 2211
673a394b
EA
2212 /* Move the object to the CPU domain to ensure that
2213 * any possible CPU writes while it's not in the GTT
a8198eea 2214 * are flushed when we go to remap it.
673a394b 2215 */
a8198eea
CW
2216 if (ret == 0)
2217 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
8dc1775d 2218 if (ret == -ERESTARTSYS)
673a394b 2219 return ret;
812ed492 2220 if (ret) {
a8198eea
CW
2221 /* In the event of a disaster, abandon all caches and
2222 * hope for the best.
2223 */
812ed492 2224 i915_gem_clflush_object(obj);
05394f39 2225 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
812ed492 2226 }
673a394b 2227
96b47b65 2228 /* release the fence reg _after_ flushing */
d9e86c0e 2229 ret = i915_gem_object_put_fence(obj);
1488fc08 2230 if (ret)
d9e86c0e 2231 return ret;
96b47b65 2232
db53a302
CW
2233 trace_i915_gem_object_unbind(obj);
2234
74898d7e
DV
2235 if (obj->has_global_gtt_mapping)
2236 i915_gem_gtt_unbind_object(obj);
7bddb01f
DV
2237 if (obj->has_aliasing_ppgtt_mapping) {
2238 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2239 obj->has_aliasing_ppgtt_mapping = 0;
2240 }
74163907 2241 i915_gem_gtt_finish_object(obj);
7bddb01f 2242
e5281ccd 2243 i915_gem_object_put_pages_gtt(obj);
673a394b 2244
6299f992 2245 list_del_init(&obj->gtt_list);
05394f39 2246 list_del_init(&obj->mm_list);
75e9e915 2247 /* Avoid an unnecessary call to unbind on rebind. */
05394f39 2248 obj->map_and_fenceable = true;
673a394b 2249
05394f39
CW
2250 drm_mm_put_block(obj->gtt_space);
2251 obj->gtt_space = NULL;
2252 obj->gtt_offset = 0;
673a394b 2253
05394f39 2254 if (i915_gem_object_is_purgeable(obj))
963b4836
CW
2255 i915_gem_object_truncate(obj);
2256
8dc1775d 2257 return ret;
673a394b
EA
2258}
2259
b2da9fe5 2260static int i915_ring_idle(struct intel_ring_buffer *ring)
a56ba56c 2261{
69c2fc89 2262 if (list_empty(&ring->active_list))
64193406
CW
2263 return 0;
2264
199b2bc2 2265 return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
a56ba56c
CW
2266}
2267
b2da9fe5 2268int i915_gpu_idle(struct drm_device *dev)
4df2faf4
DV
2269{
2270 drm_i915_private_t *dev_priv = dev->dev_private;
b4519513 2271 struct intel_ring_buffer *ring;
1ec14ad3 2272 int ret, i;
4df2faf4 2273
4df2faf4 2274 /* Flush everything onto the inactive list. */
b4519513
CW
2275 for_each_ring(ring, dev_priv, i) {
2276 ret = i915_ring_idle(ring);
1ec14ad3
CW
2277 if (ret)
2278 return ret;
b4519513 2279
f2ef6eb1
BW
2280 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2281 if (ret)
2282 return ret;
1ec14ad3 2283 }
4df2faf4 2284
8a1a49f9 2285 return 0;
4df2faf4
DV
2286}
2287
9ce079e4
CW
2288static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
2289 struct drm_i915_gem_object *obj)
4e901fdc 2290{
4e901fdc 2291 drm_i915_private_t *dev_priv = dev->dev_private;
4e901fdc
EA
2292 uint64_t val;
2293
9ce079e4
CW
2294 if (obj) {
2295 u32 size = obj->gtt_space->size;
4e901fdc 2296
9ce079e4
CW
2297 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2298 0xfffff000) << 32;
2299 val |= obj->gtt_offset & 0xfffff000;
2300 val |= (uint64_t)((obj->stride / 128) - 1) <<
2301 SANDYBRIDGE_FENCE_PITCH_SHIFT;
4e901fdc 2302
9ce079e4
CW
2303 if (obj->tiling_mode == I915_TILING_Y)
2304 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2305 val |= I965_FENCE_REG_VALID;
2306 } else
2307 val = 0;
c6642782 2308
9ce079e4
CW
2309 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
2310 POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
4e901fdc
EA
2311}
2312
9ce079e4
CW
2313static void i965_write_fence_reg(struct drm_device *dev, int reg,
2314 struct drm_i915_gem_object *obj)
de151cf6 2315{
de151cf6 2316 drm_i915_private_t *dev_priv = dev->dev_private;
de151cf6
JB
2317 uint64_t val;
2318
9ce079e4
CW
2319 if (obj) {
2320 u32 size = obj->gtt_space->size;
de151cf6 2321
9ce079e4
CW
2322 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2323 0xfffff000) << 32;
2324 val |= obj->gtt_offset & 0xfffff000;
2325 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2326 if (obj->tiling_mode == I915_TILING_Y)
2327 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2328 val |= I965_FENCE_REG_VALID;
2329 } else
2330 val = 0;
c6642782 2331
9ce079e4
CW
2332 I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
2333 POSTING_READ(FENCE_REG_965_0 + reg * 8);
de151cf6
JB
2334}
2335
9ce079e4
CW
2336static void i915_write_fence_reg(struct drm_device *dev, int reg,
2337 struct drm_i915_gem_object *obj)
de151cf6 2338{
de151cf6 2339 drm_i915_private_t *dev_priv = dev->dev_private;
9ce079e4 2340 u32 val;
de151cf6 2341
9ce079e4
CW
2342 if (obj) {
2343 u32 size = obj->gtt_space->size;
2344 int pitch_val;
2345 int tile_width;
c6642782 2346
9ce079e4
CW
2347 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2348 (size & -size) != size ||
2349 (obj->gtt_offset & (size - 1)),
2350 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2351 obj->gtt_offset, obj->map_and_fenceable, size);
c6642782 2352
9ce079e4
CW
2353 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2354 tile_width = 128;
2355 else
2356 tile_width = 512;
2357
2358 /* Note: pitch better be a power of two tile widths */
2359 pitch_val = obj->stride / tile_width;
2360 pitch_val = ffs(pitch_val) - 1;
2361
2362 val = obj->gtt_offset;
2363 if (obj->tiling_mode == I915_TILING_Y)
2364 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2365 val |= I915_FENCE_SIZE_BITS(size);
2366 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2367 val |= I830_FENCE_REG_VALID;
2368 } else
2369 val = 0;
2370
2371 if (reg < 8)
2372 reg = FENCE_REG_830_0 + reg * 4;
2373 else
2374 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2375
2376 I915_WRITE(reg, val);
2377 POSTING_READ(reg);
de151cf6
JB
2378}
2379
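/*
 * Worked example for the pre-965 pitch encoding above (illustrative numbers
 * only): an X-tiled surface with a 2048 byte stride uses 512 byte wide
 * tiles, so pitch_val = ffs(2048 / 512) - 1 = ffs(4) - 1 = 2; the register
 * field stores log2 of the stride measured in tile widths. The same stride
 * with 128 byte wide Y tiles would encode as ffs(2048 / 128) - 1 = 4.
 */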
9ce079e4
CW
2380static void i830_write_fence_reg(struct drm_device *dev, int reg,
2381 struct drm_i915_gem_object *obj)
de151cf6 2382{
de151cf6 2383 drm_i915_private_t *dev_priv = dev->dev_private;
de151cf6 2384 uint32_t val;
de151cf6 2385
9ce079e4
CW
2386 if (obj) {
2387 u32 size = obj->gtt_space->size;
2388 uint32_t pitch_val;
de151cf6 2389
9ce079e4
CW
2390 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2391 (size & -size) != size ||
2392 (obj->gtt_offset & (size - 1)),
2393 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2394 obj->gtt_offset, size);
e76a16de 2395
9ce079e4
CW
2396 pitch_val = obj->stride / 128;
2397 pitch_val = ffs(pitch_val) - 1;
de151cf6 2398
9ce079e4
CW
2399 val = obj->gtt_offset;
2400 if (obj->tiling_mode == I915_TILING_Y)
2401 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2402 val |= I830_FENCE_SIZE_BITS(size);
2403 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2404 val |= I830_FENCE_REG_VALID;
2405 } else
2406 val = 0;
c6642782 2407
9ce079e4
CW
2408 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2409 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2410}
2411
2412static void i915_gem_write_fence(struct drm_device *dev, int reg,
2413 struct drm_i915_gem_object *obj)
2414{
2415 switch (INTEL_INFO(dev)->gen) {
2416 case 7:
2417 case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
2418 case 5:
2419 case 4: i965_write_fence_reg(dev, reg, obj); break;
2420 case 3: i915_write_fence_reg(dev, reg, obj); break;
2421 case 2: i830_write_fence_reg(dev, reg, obj); break;
2422 default: break;
2423 }
de151cf6
JB
2424}
2425
61050808
CW
2426static inline int fence_number(struct drm_i915_private *dev_priv,
2427 struct drm_i915_fence_reg *fence)
2428{
2429 return fence - dev_priv->fence_regs;
2430}
2431
2432static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2433 struct drm_i915_fence_reg *fence,
2434 bool enable)
2435{
2436 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2437 int reg = fence_number(dev_priv, fence);
2438
2439 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2440
2441 if (enable) {
2442 obj->fence_reg = reg;
2443 fence->obj = obj;
2444 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2445 } else {
2446 obj->fence_reg = I915_FENCE_REG_NONE;
2447 fence->obj = NULL;
2448 list_del_init(&fence->lru_list);
2449 }
2450}
2451
d9e86c0e 2452static int
a360bb1a 2453i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
d9e86c0e 2454{
1c293ea3 2455 if (obj->last_fenced_seqno) {
86d5bc37 2456 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
18991845
CW
2457 if (ret)
2458 return ret;
d9e86c0e
CW
2459
2460 obj->last_fenced_seqno = 0;
d9e86c0e
CW
2461 }
2462
63256ec5
CW
2463 /* Ensure that all CPU reads are completed before installing a fence
2464 * and all writes before removing the fence.
2465 */
2466 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2467 mb();
2468
86d5bc37 2469 obj->fenced_gpu_access = false;
d9e86c0e
CW
2470 return 0;
2471}
2472
2473int
2474i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2475{
61050808 2476 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
d9e86c0e
CW
2477 int ret;
2478
a360bb1a 2479 ret = i915_gem_object_flush_fence(obj);
d9e86c0e
CW
2480 if (ret)
2481 return ret;
2482
61050808
CW
2483 if (obj->fence_reg == I915_FENCE_REG_NONE)
2484 return 0;
d9e86c0e 2485
61050808
CW
2486 i915_gem_object_update_fence(obj,
2487 &dev_priv->fence_regs[obj->fence_reg],
2488 false);
2489 i915_gem_object_fence_lost(obj);
d9e86c0e
CW
2490
2491 return 0;
2492}
2493
2494static struct drm_i915_fence_reg *
a360bb1a 2495i915_find_fence_reg(struct drm_device *dev)
ae3db24a 2496{
ae3db24a 2497 struct drm_i915_private *dev_priv = dev->dev_private;
8fe301ad 2498 struct drm_i915_fence_reg *reg, *avail;
d9e86c0e 2499 int i;
ae3db24a
DV
2500
2501 /* First try to find a free reg */
d9e86c0e 2502 avail = NULL;
ae3db24a
DV
2503 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2504 reg = &dev_priv->fence_regs[i];
2505 if (!reg->obj)
d9e86c0e 2506 return reg;
ae3db24a 2507
1690e1eb 2508 if (!reg->pin_count)
d9e86c0e 2509 avail = reg;
ae3db24a
DV
2510 }
2511
d9e86c0e
CW
2512 if (avail == NULL)
2513 return NULL;
ae3db24a
DV
2514
2515 /* None available, try to steal one or wait for a user to finish */
d9e86c0e 2516 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
1690e1eb 2517 if (reg->pin_count)
ae3db24a
DV
2518 continue;
2519
8fe301ad 2520 return reg;
ae3db24a
DV
2521 }
2522
8fe301ad 2523 return NULL;
ae3db24a
DV
2524}
2525
de151cf6 2526/**
9a5a53b3 2527 * i915_gem_object_get_fence - set up fencing for an object
de151cf6
JB
2528 * @obj: object to map through a fence reg
2529 *
2530 * When mapping objects through the GTT, userspace wants to be able to write
2531 * to them without having to worry about swizzling if the object is tiled.
de151cf6
JB
2532 * This function walks the fence regs looking for a free one for @obj,
2533 * stealing one if it can't find any.
2534 *
2535 * It then sets up the reg based on the object's properties: address, pitch
2536 * and tiling format.
9a5a53b3
CW
2537 *
2538 * For an untiled surface, this removes any existing fence.
de151cf6 2539 */
8c4b8c3f 2540int
06d98131 2541i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
de151cf6 2542{
05394f39 2543 struct drm_device *dev = obj->base.dev;
79e53945 2544 struct drm_i915_private *dev_priv = dev->dev_private;
14415745 2545 bool enable = obj->tiling_mode != I915_TILING_NONE;
d9e86c0e 2546 struct drm_i915_fence_reg *reg;
ae3db24a 2547 int ret;
de151cf6 2548
14415745
CW
2549 /* Have we updated the tiling parameters upon the object and so
2550 * will need to serialise the write to the associated fence register?
2551 */
5d82e3e6 2552 if (obj->fence_dirty) {
14415745
CW
2553 ret = i915_gem_object_flush_fence(obj);
2554 if (ret)
2555 return ret;
2556 }
9a5a53b3 2557
d9e86c0e 2558 /* Just update our place in the LRU if our fence is getting reused. */
05394f39
CW
2559 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2560 reg = &dev_priv->fence_regs[obj->fence_reg];
5d82e3e6 2561 if (!obj->fence_dirty) {
14415745
CW
2562 list_move_tail(&reg->lru_list,
2563 &dev_priv->mm.fence_list);
2564 return 0;
2565 }
2566 } else if (enable) {
2567 reg = i915_find_fence_reg(dev);
2568 if (reg == NULL)
2569 return -EDEADLK;
d9e86c0e 2570
14415745
CW
2571 if (reg->obj) {
2572 struct drm_i915_gem_object *old = reg->obj;
2573
2574 ret = i915_gem_object_flush_fence(old);
29c5a587
CW
2575 if (ret)
2576 return ret;
2577
14415745 2578 i915_gem_object_fence_lost(old);
29c5a587 2579 }
14415745 2580 } else
a09ba7fa 2581 return 0;
a09ba7fa 2582
14415745 2583 i915_gem_object_update_fence(obj, reg, enable);
5d82e3e6 2584 obj->fence_dirty = false;
14415745 2585
9ce079e4 2586 return 0;
de151cf6
JB
2587}
2588
42d6ab48
CW
2589static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2590 struct drm_mm_node *gtt_space,
2591 unsigned long cache_level)
2592{
2593 struct drm_mm_node *other;
2594
2595 /* On non-LLC machines we have to be careful when putting differing
2596 * types of snoopable memory together to avoid the prefetcher
 2597	 * crossing memory domains and dying.
2598 */
2599 if (HAS_LLC(dev))
2600 return true;
2601
2602 if (gtt_space == NULL)
2603 return true;
2604
2605 if (list_empty(&gtt_space->node_list))
2606 return true;
2607
2608 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2609 if (other->allocated && !other->hole_follows && other->color != cache_level)
2610 return false;
2611
2612 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2613 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2614 return false;
2615
2616 return true;
2617}
2618
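/*
 * Concrete example of the rule above (illustrative only): on a machine
 * without an LLC, a snooped object (cache_level != I915_CACHE_NONE) must not
 * be placed so that it directly abuts an uncached neighbour; the placement
 * is accepted only if the allocator left a hole between the two nodes
 * (hole_follows). On LLC machines every placement is accepted as-is.
 */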
2619static void i915_gem_verify_gtt(struct drm_device *dev)
2620{
2621#if WATCH_GTT
2622 struct drm_i915_private *dev_priv = dev->dev_private;
2623 struct drm_i915_gem_object *obj;
2624 int err = 0;
2625
2626 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
2627 if (obj->gtt_space == NULL) {
2628 printk(KERN_ERR "object found on GTT list with no space reserved\n");
2629 err++;
2630 continue;
2631 }
2632
2633 if (obj->cache_level != obj->gtt_space->color) {
2634 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
2635 obj->gtt_space->start,
2636 obj->gtt_space->start + obj->gtt_space->size,
2637 obj->cache_level,
2638 obj->gtt_space->color);
2639 err++;
2640 continue;
2641 }
2642
2643 if (!i915_gem_valid_gtt_space(dev,
2644 obj->gtt_space,
2645 obj->cache_level)) {
2646 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
2647 obj->gtt_space->start,
2648 obj->gtt_space->start + obj->gtt_space->size,
2649 obj->cache_level);
2650 err++;
2651 continue;
2652 }
2653 }
2654
2655 WARN_ON(err);
2656#endif
2657}
2658
673a394b
EA
2659/**
2660 * Finds free space in the GTT aperture and binds the object there.
2661 */
2662static int
05394f39 2663i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
920afa77 2664 unsigned alignment,
75e9e915 2665 bool map_and_fenceable)
673a394b 2666{
05394f39 2667 struct drm_device *dev = obj->base.dev;
673a394b 2668 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b 2669 struct drm_mm_node *free_space;
a00b10c3 2670 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
5e783301 2671 u32 size, fence_size, fence_alignment, unfenced_alignment;
75e9e915 2672 bool mappable, fenceable;
07f73f69 2673 int ret;
673a394b 2674
05394f39 2675 if (obj->madv != I915_MADV_WILLNEED) {
3ef94daa
CW
2676 DRM_ERROR("Attempting to bind a purgeable object\n");
2677 return -EINVAL;
2678 }
2679
e28f8711
CW
2680 fence_size = i915_gem_get_gtt_size(dev,
2681 obj->base.size,
2682 obj->tiling_mode);
2683 fence_alignment = i915_gem_get_gtt_alignment(dev,
2684 obj->base.size,
2685 obj->tiling_mode);
2686 unfenced_alignment =
2687 i915_gem_get_unfenced_gtt_alignment(dev,
2688 obj->base.size,
2689 obj->tiling_mode);
a00b10c3 2690
673a394b 2691 if (alignment == 0)
5e783301
DV
2692 alignment = map_and_fenceable ? fence_alignment :
2693 unfenced_alignment;
75e9e915 2694 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
673a394b
EA
2695 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2696 return -EINVAL;
2697 }
2698
05394f39 2699 size = map_and_fenceable ? fence_size : obj->base.size;
a00b10c3 2700
654fc607
CW
2701 /* If the object is bigger than the entire aperture, reject it early
2702 * before evicting everything in a vain attempt to find space.
2703 */
05394f39 2704 if (obj->base.size >
75e9e915 2705 (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
654fc607
CW
2706 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2707 return -E2BIG;
2708 }
2709
673a394b 2710 search_free:
75e9e915 2711 if (map_and_fenceable)
920afa77 2712 free_space =
42d6ab48
CW
2713 drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
2714 size, alignment, obj->cache_level,
2715 0, dev_priv->mm.gtt_mappable_end,
2716 false);
920afa77 2717 else
42d6ab48
CW
2718 free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
2719 size, alignment, obj->cache_level,
2720 false);
920afa77
DV
2721
2722 if (free_space != NULL) {
75e9e915 2723 if (map_and_fenceable)
05394f39 2724 obj->gtt_space =
920afa77 2725 drm_mm_get_block_range_generic(free_space,
42d6ab48 2726 size, alignment, obj->cache_level,
6b9d89b4 2727 0, dev_priv->mm.gtt_mappable_end,
42d6ab48 2728 false);
920afa77 2729 else
05394f39 2730 obj->gtt_space =
42d6ab48
CW
2731 drm_mm_get_block_generic(free_space,
2732 size, alignment, obj->cache_level,
2733 false);
920afa77 2734 }
05394f39 2735 if (obj->gtt_space == NULL) {
673a394b
EA
2736 /* If the gtt is empty and we're still having trouble
2737 * fitting our object in, we're out of memory.
2738 */
75e9e915 2739 ret = i915_gem_evict_something(dev, size, alignment,
42d6ab48 2740 obj->cache_level,
75e9e915 2741 map_and_fenceable);
9731129c 2742 if (ret)
673a394b 2743 return ret;
9731129c 2744
673a394b
EA
2745 goto search_free;
2746 }
42d6ab48
CW
2747 if (WARN_ON(!i915_gem_valid_gtt_space(dev,
2748 obj->gtt_space,
2749 obj->cache_level))) {
2750 drm_mm_put_block(obj->gtt_space);
2751 obj->gtt_space = NULL;
2752 return -EINVAL;
2753 }
673a394b 2754
e5281ccd 2755 ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
673a394b 2756 if (ret) {
05394f39
CW
2757 drm_mm_put_block(obj->gtt_space);
2758 obj->gtt_space = NULL;
07f73f69
CW
2759
2760 if (ret == -ENOMEM) {
809b6334
CW
2761 /* first try to reclaim some memory by clearing the GTT */
2762 ret = i915_gem_evict_everything(dev, false);
07f73f69 2763 if (ret) {
07f73f69 2764 /* now try to shrink everyone else */
4bdadb97
CW
2765 if (gfpmask) {
2766 gfpmask = 0;
2767 goto search_free;
07f73f69
CW
2768 }
2769
809b6334 2770 return -ENOMEM;
07f73f69
CW
2771 }
2772
2773 goto search_free;
2774 }
2775
673a394b
EA
2776 return ret;
2777 }
2778
74163907 2779 ret = i915_gem_gtt_prepare_object(obj);
7c2e6fdf 2780 if (ret) {
e5281ccd 2781 i915_gem_object_put_pages_gtt(obj);
05394f39
CW
2782 drm_mm_put_block(obj->gtt_space);
2783 obj->gtt_space = NULL;
07f73f69 2784
809b6334 2785 if (i915_gem_evict_everything(dev, false))
07f73f69 2786 return ret;
07f73f69
CW
2787
2788 goto search_free;
673a394b 2789 }
673a394b 2790
0ebb9829
DV
2791 if (!dev_priv->mm.aliasing_ppgtt)
2792 i915_gem_gtt_bind_object(obj, obj->cache_level);
673a394b 2793
6299f992 2794 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
05394f39 2795 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
bf1a1092 2796
673a394b
EA
2797 /* Assert that the object is not currently in any GPU domain. As it
2798 * wasn't in the GTT, there shouldn't be any way it could have been in
2799 * a GPU cache
2800 */
05394f39
CW
2801 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2802 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
673a394b 2803
6299f992 2804 obj->gtt_offset = obj->gtt_space->start;
1c5d22f7 2805
75e9e915 2806 fenceable =
05394f39 2807 obj->gtt_space->size == fence_size &&
0206e353 2808 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
a00b10c3 2809
75e9e915 2810 mappable =
05394f39 2811 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
a00b10c3 2812
05394f39 2813 obj->map_and_fenceable = mappable && fenceable;
75e9e915 2814
db53a302 2815 trace_i915_gem_object_bind(obj, map_and_fenceable);
42d6ab48 2816 i915_gem_verify_gtt(dev);
673a394b
EA
2817 return 0;
2818}
2819
2820void
05394f39 2821i915_gem_clflush_object(struct drm_i915_gem_object *obj)
673a394b 2822{
673a394b
EA
2823 /* If we don't have a page list set up, then we're not pinned
2824 * to GPU, and we can ignore the cache flush because it'll happen
2825 * again at bind time.
2826 */
05394f39 2827 if (obj->pages == NULL)
673a394b
EA
2828 return;
2829
9c23f7fc
CW
2830 /* If the GPU is snooping the contents of the CPU cache,
2831 * we do not need to manually clear the CPU cache lines. However,
2832 * the caches are only snooped when the render cache is
2833 * flushed/invalidated. As we always have to emit invalidations
2834 * and flushes when moving into and out of the RENDER domain, correct
2835 * snooping behaviour occurs naturally as the result of our domain
2836 * tracking.
2837 */
2838 if (obj->cache_level != I915_CACHE_NONE)
2839 return;
2840
1c5d22f7 2841 trace_i915_gem_object_clflush(obj);
cfa16a0d 2842
05394f39 2843 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
673a394b
EA
2844}
2845
e47c68e9
EA
2846/** Flushes the GTT write domain for the object if it's dirty. */
2847static void
05394f39 2848i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
e47c68e9 2849{
1c5d22f7
CW
2850 uint32_t old_write_domain;
2851
05394f39 2852 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
e47c68e9
EA
2853 return;
2854
63256ec5 2855 /* No actual flushing is required for the GTT write domain. Writes
e47c68e9
EA
2856 * to it immediately go to main memory as far as we know, so there's
2857 * no chipset flush. It also doesn't land in render cache.
63256ec5
CW
2858 *
2859 * However, we do have to enforce the order so that all writes through
2860 * the GTT land before any writes to the device, such as updates to
2861 * the GATT itself.
e47c68e9 2862 */
63256ec5
CW
2863 wmb();
2864
05394f39
CW
2865 old_write_domain = obj->base.write_domain;
2866 obj->base.write_domain = 0;
1c5d22f7
CW
2867
2868 trace_i915_gem_object_change_domain(obj,
05394f39 2869 obj->base.read_domains,
1c5d22f7 2870 old_write_domain);
e47c68e9
EA
2871}
2872
2873/** Flushes the CPU write domain for the object if it's dirty. */
2874static void
05394f39 2875i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
e47c68e9 2876{
1c5d22f7 2877 uint32_t old_write_domain;
e47c68e9 2878
05394f39 2879 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
e47c68e9
EA
2880 return;
2881
2882 i915_gem_clflush_object(obj);
40ce6575 2883 intel_gtt_chipset_flush();
05394f39
CW
2884 old_write_domain = obj->base.write_domain;
2885 obj->base.write_domain = 0;
1c5d22f7
CW
2886
2887 trace_i915_gem_object_change_domain(obj,
05394f39 2888 obj->base.read_domains,
1c5d22f7 2889 old_write_domain);
e47c68e9
EA
2890}
2891
2ef7eeaa
EA
2892/**
2893 * Moves a single object to the GTT read, and possibly write domain.
2894 *
2895 * This function returns when the move is complete, including waiting on
2896 * flushes to occur.
2897 */
79e53945 2898int
2021746e 2899i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2ef7eeaa 2900{
8325a09d 2901 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
1c5d22f7 2902 uint32_t old_write_domain, old_read_domains;
e47c68e9 2903 int ret;
2ef7eeaa 2904
02354392 2905 /* Not valid to be called on unbound objects. */
05394f39 2906 if (obj->gtt_space == NULL)
02354392
EA
2907 return -EINVAL;
2908
8d7e3de1
CW
2909 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2910 return 0;
2911
0201f1ec
CW
2912 ret = i915_gem_object_wait_rendering(obj, !write);
2913 if (ret)
2914 return ret;
2dafb1e0 2915
7213342d 2916 i915_gem_object_flush_cpu_write_domain(obj);
1c5d22f7 2917
05394f39
CW
2918 old_write_domain = obj->base.write_domain;
2919 old_read_domains = obj->base.read_domains;
1c5d22f7 2920
e47c68e9
EA
2921 /* It should now be out of any other write domains, and we can update
2922 * the domain values for our changes.
2923 */
05394f39
CW
2924 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2925 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
e47c68e9 2926 if (write) {
05394f39
CW
2927 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2928 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2929 obj->dirty = 1;
2ef7eeaa
EA
2930 }
2931
1c5d22f7
CW
2932 trace_i915_gem_object_change_domain(obj,
2933 old_read_domains,
2934 old_write_domain);
2935
8325a09d
CW
2936 /* And bump the LRU for this access */
2937 if (i915_gem_object_is_inactive(obj))
2938 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2939
e47c68e9
EA
2940 return 0;
2941}
2942
e4ffd173
CW
2943int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2944 enum i915_cache_level cache_level)
2945{
7bddb01f
DV
2946 struct drm_device *dev = obj->base.dev;
2947 drm_i915_private_t *dev_priv = dev->dev_private;
e4ffd173
CW
2948 int ret;
2949
2950 if (obj->cache_level == cache_level)
2951 return 0;
2952
2953 if (obj->pin_count) {
 2954		DRM_DEBUG("cannot change the cache level of pinned objects\n");
2955 return -EBUSY;
2956 }
2957
42d6ab48
CW
2958 if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
2959 ret = i915_gem_object_unbind(obj);
2960 if (ret)
2961 return ret;
2962 }
2963
e4ffd173
CW
2964 if (obj->gtt_space) {
2965 ret = i915_gem_object_finish_gpu(obj);
2966 if (ret)
2967 return ret;
2968
2969 i915_gem_object_finish_gtt(obj);
2970
2971 /* Before SandyBridge, you could not use tiling or fence
2972 * registers with snooped memory, so relinquish any fences
2973 * currently pointing to our region in the aperture.
2974 */
42d6ab48 2975 if (INTEL_INFO(dev)->gen < 6) {
e4ffd173
CW
2976 ret = i915_gem_object_put_fence(obj);
2977 if (ret)
2978 return ret;
2979 }
2980
74898d7e
DV
2981 if (obj->has_global_gtt_mapping)
2982 i915_gem_gtt_bind_object(obj, cache_level);
7bddb01f
DV
2983 if (obj->has_aliasing_ppgtt_mapping)
2984 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
2985 obj, cache_level);
42d6ab48
CW
2986
2987 obj->gtt_space->color = cache_level;
e4ffd173
CW
2988 }
2989
2990 if (cache_level == I915_CACHE_NONE) {
2991 u32 old_read_domains, old_write_domain;
2992
2993 /* If we're coming from LLC cached, then we haven't
2994 * actually been tracking whether the data is in the
2995 * CPU cache or not, since we only allow one bit set
2996 * in obj->write_domain and have been skipping the clflushes.
2997 * Just set it to the CPU cache for now.
2998 */
2999 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3000 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3001
3002 old_read_domains = obj->base.read_domains;
3003 old_write_domain = obj->base.write_domain;
3004
3005 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3006 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3007
3008 trace_i915_gem_object_change_domain(obj,
3009 old_read_domains,
3010 old_write_domain);
3011 }
3012
3013 obj->cache_level = cache_level;
42d6ab48 3014 i915_gem_verify_gtt(dev);
e4ffd173
CW
3015 return 0;
3016}
3017
b9241ea3 3018/*
2da3b9b9
CW
3019 * Prepare buffer for display plane (scanout, cursors, etc).
3020 * Can be called from an uninterruptible phase (modesetting) and allows
3021 * any flushes to be pipelined (for pageflips).
b9241ea3
ZW
3022 */
3023int
2da3b9b9
CW
3024i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3025 u32 alignment,
919926ae 3026 struct intel_ring_buffer *pipelined)
b9241ea3 3027{
2da3b9b9 3028 u32 old_read_domains, old_write_domain;
b9241ea3
ZW
3029 int ret;
3030
0be73284 3031 if (pipelined != obj->ring) {
2911a35b
BW
3032 ret = i915_gem_object_sync(obj, pipelined);
3033 if (ret)
b9241ea3
ZW
3034 return ret;
3035 }
3036
a7ef0640
EA
3037 /* The display engine is not coherent with the LLC cache on gen6. As
3038 * a result, we make sure that the pinning that is about to occur is
 3039	 * done with uncached PTEs. This is the lowest common denominator for all
3040 * chipsets.
3041 *
3042 * However for gen6+, we could do better by using the GFDT bit instead
3043 * of uncaching, which would allow us to flush all the LLC-cached data
3044 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3045 */
3046 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3047 if (ret)
3048 return ret;
3049
2da3b9b9
CW
3050 /* As the user may map the buffer once pinned in the display plane
3051 * (e.g. libkms for the bootup splash), we have to ensure that we
3052 * always use map_and_fenceable for all scanout buffers.
3053 */
3054 ret = i915_gem_object_pin(obj, alignment, true);
3055 if (ret)
3056 return ret;
3057
b118c1e3
CW
3058 i915_gem_object_flush_cpu_write_domain(obj);
3059
2da3b9b9 3060 old_write_domain = obj->base.write_domain;
05394f39 3061 old_read_domains = obj->base.read_domains;
2da3b9b9
CW
3062
3063 /* It should now be out of any other write domains, and we can update
3064 * the domain values for our changes.
3065 */
e5f1d962 3066 obj->base.write_domain = 0;
05394f39 3067 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
b9241ea3
ZW
3068
3069 trace_i915_gem_object_change_domain(obj,
3070 old_read_domains,
2da3b9b9 3071 old_write_domain);
b9241ea3
ZW
3072
3073 return 0;
3074}
3075
85345517 3076int
a8198eea 3077i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
85345517 3078{
88241785
CW
3079 int ret;
3080
a8198eea 3081 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
85345517
CW
3082 return 0;
3083
0201f1ec 3084 ret = i915_gem_object_wait_rendering(obj, false);
c501ae7f
CW
3085 if (ret)
3086 return ret;
3087
a8198eea
CW
3088 /* Ensure that we invalidate the GPU's caches and TLBs. */
3089 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
c501ae7f 3090 return 0;
85345517
CW
3091}
3092
e47c68e9
EA
3093/**
3094 * Moves a single object to the CPU read, and possibly write domain.
3095 *
3096 * This function returns when the move is complete, including waiting on
3097 * flushes to occur.
3098 */
dabdfe02 3099int
919926ae 3100i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
e47c68e9 3101{
1c5d22f7 3102 uint32_t old_write_domain, old_read_domains;
e47c68e9
EA
3103 int ret;
3104
8d7e3de1
CW
3105 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3106 return 0;
3107
0201f1ec
CW
3108 ret = i915_gem_object_wait_rendering(obj, !write);
3109 if (ret)
3110 return ret;
2ef7eeaa 3111
e47c68e9 3112 i915_gem_object_flush_gtt_write_domain(obj);
2ef7eeaa 3113
05394f39
CW
3114 old_write_domain = obj->base.write_domain;
3115 old_read_domains = obj->base.read_domains;
1c5d22f7 3116
e47c68e9 3117 /* Flush the CPU cache if it's still invalid. */
05394f39 3118 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2ef7eeaa 3119 i915_gem_clflush_object(obj);
2ef7eeaa 3120
05394f39 3121 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
2ef7eeaa
EA
3122 }
3123
3124 /* It should now be out of any other write domains, and we can update
3125 * the domain values for our changes.
3126 */
05394f39 3127 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
e47c68e9
EA
3128
3129 /* If we're writing through the CPU, then the GPU read domains will
3130 * need to be invalidated at next use.
3131 */
3132 if (write) {
05394f39
CW
3133 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3134 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
e47c68e9 3135 }
2ef7eeaa 3136
1c5d22f7
CW
3137 trace_i915_gem_object_change_domain(obj,
3138 old_read_domains,
3139 old_write_domain);
3140
2ef7eeaa
EA
3141 return 0;
3142}
3143
673a394b
EA
3144/* Throttle our rendering by waiting until the ring has completed our requests
3145 * emitted over 20 msec ago.
3146 *
b962442e
EA
3147 * Note that if we were to use the current jiffies each time around the loop,
3148 * we wouldn't escape the function with any frames outstanding if the time to
3149 * render a frame was over 20ms.
3150 *
673a394b
EA
3151 * This should get us reasonable parallelism between CPU and GPU but also
3152 * relatively low latency when blocking on a particular request to finish.
3153 */
40a5f0de 3154static int
f787a5f5 3155i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
40a5f0de 3156{
f787a5f5
CW
3157 struct drm_i915_private *dev_priv = dev->dev_private;
3158 struct drm_i915_file_private *file_priv = file->driver_priv;
b962442e 3159 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
f787a5f5
CW
3160 struct drm_i915_gem_request *request;
3161 struct intel_ring_buffer *ring = NULL;
3162 u32 seqno = 0;
3163 int ret;
93533c29 3164
e110e8d6
CW
3165 if (atomic_read(&dev_priv->mm.wedged))
3166 return -EIO;
3167
1c25595f 3168 spin_lock(&file_priv->mm.lock);
f787a5f5 3169 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
b962442e
EA
3170 if (time_after_eq(request->emitted_jiffies, recent_enough))
3171 break;
40a5f0de 3172
f787a5f5
CW
3173 ring = request->ring;
3174 seqno = request->seqno;
b962442e 3175 }
1c25595f 3176 spin_unlock(&file_priv->mm.lock);
40a5f0de 3177
f787a5f5
CW
3178 if (seqno == 0)
3179 return 0;
2bc43b5c 3180
5c81fe85 3181 ret = __wait_seqno(ring, seqno, true, NULL);
f787a5f5
CW
3182 if (ret == 0)
3183 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
40a5f0de
EA
3184
3185 return ret;
3186}
3187
673a394b 3188int
05394f39
CW
3189i915_gem_object_pin(struct drm_i915_gem_object *obj,
3190 uint32_t alignment,
75e9e915 3191 bool map_and_fenceable)
673a394b 3192{
673a394b
EA
3193 int ret;
3194
05394f39 3195 BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
ac0c6b5a 3196
05394f39
CW
3197 if (obj->gtt_space != NULL) {
3198 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3199 (map_and_fenceable && !obj->map_and_fenceable)) {
3200 WARN(obj->pin_count,
ae7d49d8 3201 "bo is already pinned with incorrect alignment:"
75e9e915
DV
3202 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3203 " obj->map_and_fenceable=%d\n",
05394f39 3204 obj->gtt_offset, alignment,
75e9e915 3205 map_and_fenceable,
05394f39 3206 obj->map_and_fenceable);
ac0c6b5a
CW
3207 ret = i915_gem_object_unbind(obj);
3208 if (ret)
3209 return ret;
3210 }
3211 }
3212
05394f39 3213 if (obj->gtt_space == NULL) {
a00b10c3 3214 ret = i915_gem_object_bind_to_gtt(obj, alignment,
75e9e915 3215 map_and_fenceable);
9731129c 3216 if (ret)
673a394b 3217 return ret;
22c344e9 3218 }
76446cac 3219
74898d7e
DV
3220 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3221 i915_gem_gtt_bind_object(obj, obj->cache_level);
3222
1b50247a 3223 obj->pin_count++;
6299f992 3224 obj->pin_mappable |= map_and_fenceable;
673a394b
EA
3225
3226 return 0;
3227}
3228
3229void
05394f39 3230i915_gem_object_unpin(struct drm_i915_gem_object *obj)
673a394b 3231{
05394f39
CW
3232 BUG_ON(obj->pin_count == 0);
3233 BUG_ON(obj->gtt_space == NULL);
673a394b 3234
1b50247a 3235 if (--obj->pin_count == 0)
6299f992 3236 obj->pin_mappable = false;
673a394b
EA
3237}
3238
3239int
3240i915_gem_pin_ioctl(struct drm_device *dev, void *data,
05394f39 3241 struct drm_file *file)
673a394b
EA
3242{
3243 struct drm_i915_gem_pin *args = data;
05394f39 3244 struct drm_i915_gem_object *obj;
673a394b
EA
3245 int ret;
3246
1d7cfea1
CW
3247 ret = i915_mutex_lock_interruptible(dev);
3248 if (ret)
3249 return ret;
673a394b 3250
05394f39 3251 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 3252 if (&obj->base == NULL) {
1d7cfea1
CW
3253 ret = -ENOENT;
3254 goto unlock;
673a394b 3255 }
673a394b 3256
05394f39 3257 if (obj->madv != I915_MADV_WILLNEED) {
bb6baf76 3258 DRM_ERROR("Attempting to pin a purgeable buffer\n");
1d7cfea1
CW
3259 ret = -EINVAL;
3260 goto out;
3ef94daa
CW
3261 }
3262
05394f39 3263 if (obj->pin_filp != NULL && obj->pin_filp != file) {
79e53945
JB
3264 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3265 args->handle);
1d7cfea1
CW
3266 ret = -EINVAL;
3267 goto out;
79e53945
JB
3268 }
3269
05394f39
CW
3270 obj->user_pin_count++;
3271 obj->pin_filp = file;
3272 if (obj->user_pin_count == 1) {
75e9e915 3273 ret = i915_gem_object_pin(obj, args->alignment, true);
1d7cfea1
CW
3274 if (ret)
3275 goto out;
673a394b
EA
3276 }
3277
3278 /* XXX - flush the CPU caches for pinned objects
3279 * as the X server doesn't manage domains yet
3280 */
e47c68e9 3281 i915_gem_object_flush_cpu_write_domain(obj);
05394f39 3282 args->offset = obj->gtt_offset;
1d7cfea1 3283out:
05394f39 3284 drm_gem_object_unreference(&obj->base);
1d7cfea1 3285unlock:
673a394b 3286 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3287 return ret;
673a394b
EA
3288}
3289
3290int
3291i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
05394f39 3292 struct drm_file *file)
673a394b
EA
3293{
3294 struct drm_i915_gem_pin *args = data;
05394f39 3295 struct drm_i915_gem_object *obj;
76c1dec1 3296 int ret;
673a394b 3297
1d7cfea1
CW
3298 ret = i915_mutex_lock_interruptible(dev);
3299 if (ret)
3300 return ret;
673a394b 3301
05394f39 3302 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 3303 if (&obj->base == NULL) {
1d7cfea1
CW
3304 ret = -ENOENT;
3305 goto unlock;
673a394b 3306 }
76c1dec1 3307
05394f39 3308 if (obj->pin_filp != file) {
79e53945
JB
3309 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3310 args->handle);
1d7cfea1
CW
3311 ret = -EINVAL;
3312 goto out;
79e53945 3313 }
05394f39
CW
3314 obj->user_pin_count--;
3315 if (obj->user_pin_count == 0) {
3316 obj->pin_filp = NULL;
79e53945
JB
3317 i915_gem_object_unpin(obj);
3318 }
673a394b 3319
1d7cfea1 3320out:
05394f39 3321 drm_gem_object_unreference(&obj->base);
1d7cfea1 3322unlock:
673a394b 3323 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3324 return ret;
673a394b
EA
3325}
3326
3327int
3328i915_gem_busy_ioctl(struct drm_device *dev, void *data,
05394f39 3329 struct drm_file *file)
673a394b
EA
3330{
3331 struct drm_i915_gem_busy *args = data;
05394f39 3332 struct drm_i915_gem_object *obj;
30dbf0c0
CW
3333 int ret;
3334
76c1dec1 3335 ret = i915_mutex_lock_interruptible(dev);
1d7cfea1 3336 if (ret)
76c1dec1 3337 return ret;
673a394b 3338
05394f39 3339 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
c8725226 3340 if (&obj->base == NULL) {
1d7cfea1
CW
3341 ret = -ENOENT;
3342 goto unlock;
673a394b 3343 }
d1b851fc 3344
0be555b6
CW
3345 /* Count all active objects as busy, even if they are currently not used
3346 * by the gpu. Users of this interface expect objects to eventually
3347 * become non-busy without any further actions, therefore emit any
3348 * necessary flushes here.
c4de0a5d 3349 */
30dfebf3 3350 ret = i915_gem_object_flush_active(obj);
0be555b6 3351
30dfebf3 3352 args->busy = obj->active;
e9808edd
CW
3353 if (obj->ring) {
3354 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3355 args->busy |= intel_ring_flag(obj->ring) << 16;
3356 }
673a394b 3357
05394f39 3358 drm_gem_object_unreference(&obj->base);
1d7cfea1 3359unlock:
673a394b 3360 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3361 return ret;
673a394b
EA
3362}
3363
3364int
3365i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3366 struct drm_file *file_priv)
3367{
0206e353 3368 return i915_gem_ring_throttle(dev, file_priv);
673a394b
EA
3369}
3370
3ef94daa
CW
3371int
3372i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3373 struct drm_file *file_priv)
3374{
3375 struct drm_i915_gem_madvise *args = data;
05394f39 3376 struct drm_i915_gem_object *obj;
76c1dec1 3377 int ret;
3ef94daa
CW
3378
3379 switch (args->madv) {
3380 case I915_MADV_DONTNEED:
3381 case I915_MADV_WILLNEED:
3382 break;
3383 default:
3384 return -EINVAL;
3385 }
3386
1d7cfea1
CW
3387 ret = i915_mutex_lock_interruptible(dev);
3388 if (ret)
3389 return ret;
3390
05394f39 3391 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
c8725226 3392 if (&obj->base == NULL) {
1d7cfea1
CW
3393 ret = -ENOENT;
3394 goto unlock;
3ef94daa 3395 }
3ef94daa 3396
05394f39 3397 if (obj->pin_count) {
1d7cfea1
CW
3398 ret = -EINVAL;
3399 goto out;
3ef94daa
CW
3400 }
3401
05394f39
CW
3402 if (obj->madv != __I915_MADV_PURGED)
3403 obj->madv = args->madv;
3ef94daa 3404
2d7ef395 3405 /* if the object is no longer bound, discard its backing storage */
05394f39
CW
3406 if (i915_gem_object_is_purgeable(obj) &&
3407 obj->gtt_space == NULL)
2d7ef395
CW
3408 i915_gem_object_truncate(obj);
3409
05394f39 3410 args->retained = obj->madv != __I915_MADV_PURGED;
bb6baf76 3411
1d7cfea1 3412out:
05394f39 3413 drm_gem_object_unreference(&obj->base);
1d7cfea1 3414unlock:
3ef94daa 3415 mutex_unlock(&dev->struct_mutex);
1d7cfea1 3416 return ret;
3ef94daa
CW
3417}
3418
05394f39
CW
3419struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3420 size_t size)
ac52bc56 3421{
73aa808f 3422 struct drm_i915_private *dev_priv = dev->dev_private;
c397b908 3423 struct drm_i915_gem_object *obj;
5949eac4 3424 struct address_space *mapping;
bed1ea95 3425 u32 mask;
ac52bc56 3426
c397b908
DV
3427 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3428 if (obj == NULL)
3429 return NULL;
673a394b 3430
c397b908
DV
3431 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3432 kfree(obj);
3433 return NULL;
3434 }
673a394b 3435
bed1ea95
CW
3436 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
3437 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
3438 /* 965gm cannot relocate objects above 4GiB. */
3439 mask &= ~__GFP_HIGHMEM;
3440 mask |= __GFP_DMA32;
3441 }
3442
5949eac4 3443 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
bed1ea95 3444 mapping_set_gfp_mask(mapping, mask);
5949eac4 3445
73aa808f
CW
3446 i915_gem_info_add_obj(dev_priv, size);
3447
c397b908
DV
3448 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3449 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
673a394b 3450
3d29b842
ED
3451 if (HAS_LLC(dev)) {
3452 /* On some devices, we can have the GPU use the LLC (the CPU
a1871112
EA
3453 * cache) for about a 10% performance improvement
3454 * compared to uncached. Graphics requests other than
3455 * display scanout are coherent with the CPU in
3456 * accessing this cache. This means in this mode we
3457 * don't need to clflush on the CPU side, and on the
3458 * GPU side we only need to flush internal caches to
3459 * get data visible to the CPU.
3460 *
3461 * However, we maintain the display planes as UC, and so
3462 * need to rebind when first used as such.
3463 */
3464 obj->cache_level = I915_CACHE_LLC;
3465 } else
3466 obj->cache_level = I915_CACHE_NONE;
3467
62b8b215 3468 obj->base.driver_private = NULL;
c397b908 3469 obj->fence_reg = I915_FENCE_REG_NONE;
69dc4987 3470 INIT_LIST_HEAD(&obj->mm_list);
93a37f20 3471 INIT_LIST_HEAD(&obj->gtt_list);
69dc4987 3472 INIT_LIST_HEAD(&obj->ring_list);
432e58ed 3473 INIT_LIST_HEAD(&obj->exec_list);
c397b908 3474 obj->madv = I915_MADV_WILLNEED;
75e9e915
DV
3475 /* Avoid an unnecessary call to unbind on the first bind. */
3476 obj->map_and_fenceable = true;
de151cf6 3477
05394f39 3478 return obj;
c397b908
DV
3479}
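A hypothetical in-kernel caller, to show the contract the allocator establishes (names and size are illustrative; error handling trimmed):

	struct drm_i915_gem_object *obj;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL)
		return -ENOMEM;
	/* The new object starts CPU-coherent (CPU read and write domains),
	 * is marked WILLNEED, holds no fence register, and obj->cache_level
	 * reflects the HAS_LLC() choice made above. */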
3480
3481int i915_gem_init_object(struct drm_gem_object *obj)
3482{
3483 BUG();
de151cf6 3484
673a394b
EA
3485 return 0;
3486}
3487
1488fc08 3488void i915_gem_free_object(struct drm_gem_object *gem_obj)
673a394b 3489{
1488fc08 3490 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
05394f39 3491 struct drm_device *dev = obj->base.dev;
be72615b 3492 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b 3493
26e12f89
CW
3494 trace_i915_gem_object_destroy(obj);
3495
1286ff73
DV
3496 if (gem_obj->import_attach)
3497 drm_prime_gem_destroy(gem_obj, obj->sg_table);
3498
1488fc08
CW
3499 if (obj->phys_obj)
3500 i915_gem_detach_phys_object(dev, obj);
3501
3502 obj->pin_count = 0;
3503 if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
3504 bool was_interruptible;
3505
3506 was_interruptible = dev_priv->mm.interruptible;
3507 dev_priv->mm.interruptible = false;
3508
3509 WARN_ON(i915_gem_object_unbind(obj));
3510
3511 dev_priv->mm.interruptible = was_interruptible;
3512 }
3513
05394f39 3514 if (obj->base.map_list.map)
b464e9a2 3515 drm_gem_free_mmap_offset(&obj->base);
de151cf6 3516
05394f39
CW
3517 drm_gem_object_release(&obj->base);
3518 i915_gem_info_remove_obj(dev_priv, obj->base.size);
c397b908 3519
05394f39
CW
3520 kfree(obj->bit_17);
3521 kfree(obj);
673a394b
EA
3522}
3523
29105ccc
CW
3524int
3525i915_gem_idle(struct drm_device *dev)
3526{
3527 drm_i915_private_t *dev_priv = dev->dev_private;
3528 int ret;
28dfe52a 3529
29105ccc 3530 mutex_lock(&dev->struct_mutex);
1c5d22f7 3531
87acb0a5 3532 if (dev_priv->mm.suspended) {
29105ccc
CW
3533 mutex_unlock(&dev->struct_mutex);
3534 return 0;
28dfe52a
EA
3535 }
3536
b2da9fe5 3537 ret = i915_gpu_idle(dev);
6dbe2772
KP
3538 if (ret) {
3539 mutex_unlock(&dev->struct_mutex);
673a394b 3540 return ret;
6dbe2772 3541 }
b2da9fe5 3542 i915_gem_retire_requests(dev);
673a394b 3543
29105ccc 3544 /* Under UMS, be paranoid and evict. */
a39d7efc
CW
3545 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3546 i915_gem_evict_everything(dev, false);
29105ccc 3547
312817a3
CW
3548 i915_gem_reset_fences(dev);
3549
29105ccc
CW
3550 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3551 * We need to replace this with a semaphore, or something.
3552 * And not confound mm.suspended!
3553 */
3554 dev_priv->mm.suspended = 1;
bc0c7f14 3555 del_timer_sync(&dev_priv->hangcheck_timer);
29105ccc
CW
3556
3557 i915_kernel_lost_context(dev);
6dbe2772 3558 i915_gem_cleanup_ringbuffer(dev);
29105ccc 3559
6dbe2772
KP
3560 mutex_unlock(&dev->struct_mutex);
3561
29105ccc
CW
3562 /* Cancel the retire work handler, which should be idle now. */
3563 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3564
673a394b
EA
3565 return 0;
3566}
3567
b9524a1e
BW
3568void i915_gem_l3_remap(struct drm_device *dev)
3569{
3570 drm_i915_private_t *dev_priv = dev->dev_private;
3571 u32 misccpctl;
3572 int i;
3573
3574 if (!IS_IVYBRIDGE(dev))
3575 return;
3576
3577 if (!dev_priv->mm.l3_remap_info)
3578 return;
3579
3580 misccpctl = I915_READ(GEN7_MISCCPCTL);
3581 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
3582 POSTING_READ(GEN7_MISCCPCTL);
3583
3584 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
3585 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
3586 if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
3587 DRM_DEBUG("0x%x was already programmed to %x\n",
3588 GEN7_L3LOG_BASE + i, remap);
3589 if (remap && !dev_priv->mm.l3_remap_info[i/4])
3590 DRM_DEBUG_DRIVER("Clearing remapped register\n");
3591 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
3592 }
3593
3594 /* Make sure all the writes land before disabling dop clock gating */
3595 POSTING_READ(GEN7_L3LOG_BASE);
3596
3597 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
3598}
3599
f691e2f4
DV
3600void i915_gem_init_swizzling(struct drm_device *dev)
3601{
3602 drm_i915_private_t *dev_priv = dev->dev_private;
3603
11782b02 3604 if (INTEL_INFO(dev)->gen < 5 ||
f691e2f4
DV
3605 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
3606 return;
3607
3608 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
3609 DISP_TILE_SURFACE_SWIZZLING);
3610
11782b02
DV
3611 if (IS_GEN5(dev))
3612 return;
3613
f691e2f4
DV
3614 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3615 if (IS_GEN6(dev))
6b26c86d 3616 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
f691e2f4 3617 else
6b26c86d 3618 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
f691e2f4 3619}
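The ARB_MODE and GFX_MODE writes here and below rely on the masked-bit register convention: the top 16 bits select which bits the write is allowed to change, the bottom 16 bits supply the new values. A sketch of the helpers (the canonical _MASKED_BIT_* macros live in i915_reg.h):

/* Sketch only -- see i915_reg.h for the real definitions. */
#define SKETCH_MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define SKETCH_MASKED_BIT_DISABLE(a)	((a) << 16)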
e21af88d
DV
3620
3621void i915_gem_init_ppgtt(struct drm_device *dev)
3622{
3623 drm_i915_private_t *dev_priv = dev->dev_private;
3624 uint32_t pd_offset;
3625 struct intel_ring_buffer *ring;
55a254ac
DV
3626 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
3627 uint32_t __iomem *pd_addr;
3628 uint32_t pd_entry;
e21af88d
DV
3629 int i;
3630
3631 if (!dev_priv->mm.aliasing_ppgtt)
3632 return;
3633
55a254ac
DV
3634
3635 pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
3636 for (i = 0; i < ppgtt->num_pd_entries; i++) {
3637 dma_addr_t pt_addr;
3638
3639 if (dev_priv->mm.gtt->needs_dmar)
3640 pt_addr = ppgtt->pt_dma_addr[i];
3641 else
3642 pt_addr = page_to_phys(ppgtt->pt_pages[i]);
3643
3644 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
3645 pd_entry |= GEN6_PDE_VALID;
3646
3647 writel(pd_entry, pd_addr + i);
3648 }
3649 readl(pd_addr);
3650
3651 pd_offset = ppgtt->pd_offset;
e21af88d
DV
 3652	pd_offset /= 64; /* in cachelines */
3653 pd_offset <<= 16;
3654
3655 if (INTEL_INFO(dev)->gen == 6) {
48ecfa10
DV
3656 uint32_t ecochk, gab_ctl, ecobits;
3657
3658 ecobits = I915_READ(GAC_ECO_BITS);
3659 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
be901a5a
DV
3660
3661 gab_ctl = I915_READ(GAB_CTL);
3662 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
3663
3664 ecochk = I915_READ(GAM_ECOCHK);
e21af88d
DV
3665 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
3666 ECOCHK_PPGTT_CACHE64B);
6b26c86d 3667 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
e21af88d
DV
3668 } else if (INTEL_INFO(dev)->gen >= 7) {
3669 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
3670 /* GFX_MODE is per-ring on gen7+ */
3671 }
3672
b4519513 3673 for_each_ring(ring, dev_priv, i) {
e21af88d
DV
3674 if (INTEL_INFO(dev)->gen >= 7)
3675 I915_WRITE(RING_MODE_GEN7(ring),
6b26c86d 3676 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
e21af88d
DV
3677
3678 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
3679 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
3680 }
3681}
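For concreteness, a worked example of the RING_PP_DIR_BASE encoding above, using a hypothetical directory location (the real offset comes from ppgtt->pd_offset):

	uint32_t pd_offset = 0x10000;	/* hypothetical: 64 KiB into the GTT */

	pd_offset /= 64;		/* 0x400 cachelines */
	pd_offset <<= 16;		/* 0x04000000, the value each ring is given */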
3682
67b1b571
CW
3683static bool
3684intel_enable_blt(struct drm_device *dev)
3685{
3686 if (!HAS_BLT(dev))
3687 return false;
3688
3689 /* The blitter was dysfunctional on early prototypes */
3690 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
3691 DRM_INFO("BLT not supported on this pre-production hardware;"
3692 " graphics performance will be degraded.\n");
3693 return false;
3694 }
3695
3696 return true;
3697}
3698
8187a2b7 3699int
f691e2f4 3700i915_gem_init_hw(struct drm_device *dev)
8187a2b7
ZN
3701{
3702 drm_i915_private_t *dev_priv = dev->dev_private;
3703 int ret;
68f95ba9 3704
8ecd1a66
DV
3705 if (!intel_enable_gtt())
3706 return -EIO;
3707
b9524a1e
BW
3708 i915_gem_l3_remap(dev);
3709
f691e2f4
DV
3710 i915_gem_init_swizzling(dev);
3711
5c1143bb 3712 ret = intel_init_render_ring_buffer(dev);
68f95ba9 3713 if (ret)
b6913e4b 3714 return ret;
68f95ba9
CW
3715
3716 if (HAS_BSD(dev)) {
5c1143bb 3717 ret = intel_init_bsd_ring_buffer(dev);
68f95ba9
CW
3718 if (ret)
3719 goto cleanup_render_ring;
d1b851fc 3720 }
68f95ba9 3721
67b1b571 3722 if (intel_enable_blt(dev)) {
549f7365
CW
3723 ret = intel_init_blt_ring_buffer(dev);
3724 if (ret)
3725 goto cleanup_bsd_ring;
3726 }
3727
6f392d54
CW
3728 dev_priv->next_seqno = 1;
3729
254f965c
BW
3730 /*
 3731	 * XXX: There was a workaround (w/a) described somewhere suggesting loading
3732 * contexts before PPGTT.
3733 */
3734 i915_gem_context_init(dev);
e21af88d
DV
3735 i915_gem_init_ppgtt(dev);
3736
68f95ba9
CW
3737 return 0;
3738
549f7365 3739cleanup_bsd_ring:
1ec14ad3 3740 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
68f95ba9 3741cleanup_render_ring:
1ec14ad3 3742 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
8187a2b7
ZN
3743 return ret;
3744}
3745
1070a42b
CW
3746static bool
3747intel_enable_ppgtt(struct drm_device *dev)
3748{
3749 if (i915_enable_ppgtt >= 0)
3750 return i915_enable_ppgtt;
3751
3752#ifdef CONFIG_INTEL_IOMMU
3753 /* Disable ppgtt on SNB if VT-d is on. */
3754 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
3755 return false;
3756#endif
3757
3758 return true;
3759}
3760
3761int i915_gem_init(struct drm_device *dev)
3762{
3763 struct drm_i915_private *dev_priv = dev->dev_private;
3764 unsigned long gtt_size, mappable_size;
3765 int ret;
3766
3767 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
3768 mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
3769
3770 mutex_lock(&dev->struct_mutex);
3771 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
3772 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
3773 * aperture accordingly when using aliasing ppgtt. */
3774 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
3775
3776 i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
3777
3778 ret = i915_gem_init_aliasing_ppgtt(dev);
3779 if (ret) {
3780 mutex_unlock(&dev->struct_mutex);
3781 return ret;
3782 }
3783 } else {
 3784		/* Let GEM manage all of the aperture.
3785 *
3786 * However, leave one page at the end still bound to the scratch
3787 * page. There are a number of places where the hardware
3788 * apparently prefetches past the end of the object, and we've
3789 * seen multiple hangs with the GPU head pointer stuck in a
3790 * batchbuffer bound at the last page of the aperture. One page
3791 * should be enough to keep any prefetching inside of the
3792 * aperture.
3793 */
3794 i915_gem_init_global_gtt(dev, 0, mappable_size,
3795 gtt_size);
3796 }
3797
3798 ret = i915_gem_init_hw(dev);
3799 mutex_unlock(&dev->struct_mutex);
3800 if (ret) {
3801 i915_gem_cleanup_aliasing_ppgtt(dev);
3802 return ret;
3803 }
3804
53ca26ca
DV
3805 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
3806 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3807 dev_priv->dri1.allow_batchbuffer = 1;
1070a42b
CW
3808 return 0;
3809}
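A worked example of the sizing logic above, with illustrative numbers (a 2 GiB global GTT of 4 KiB pages, and assuming I915_PPGTT_PD_ENTRIES is 512, the gen6 value):

	unsigned long gtt_size = 524288UL << PAGE_SHIFT;	/* 2 GiB */

	/* Aliasing PPGTT steals one GTT page per page-directory entry: */
	gtt_size -= 512 * PAGE_SIZE;				/* 2 GiB - 2 MiB */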
3810
8187a2b7
ZN
3811void
3812i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3813{
3814 drm_i915_private_t *dev_priv = dev->dev_private;
b4519513 3815 struct intel_ring_buffer *ring;
1ec14ad3 3816 int i;
8187a2b7 3817
b4519513
CW
3818 for_each_ring(ring, dev_priv, i)
3819 intel_cleanup_ring_buffer(ring);
8187a2b7
ZN
3820}
3821
673a394b
EA
3822int
3823i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3824 struct drm_file *file_priv)
3825{
3826 drm_i915_private_t *dev_priv = dev->dev_private;
b4519513 3827 int ret;
673a394b 3828
79e53945
JB
3829 if (drm_core_check_feature(dev, DRIVER_MODESET))
3830 return 0;
3831
ba1234d1 3832 if (atomic_read(&dev_priv->mm.wedged)) {
673a394b 3833 DRM_ERROR("Reenabling wedged hardware, good luck\n");
ba1234d1 3834 atomic_set(&dev_priv->mm.wedged, 0);
673a394b
EA
3835 }
3836
673a394b 3837 mutex_lock(&dev->struct_mutex);
9bb2d6f9
EA
3838 dev_priv->mm.suspended = 0;
3839
f691e2f4 3840 ret = i915_gem_init_hw(dev);
d816f6ac
WF
3841 if (ret != 0) {
3842 mutex_unlock(&dev->struct_mutex);
9bb2d6f9 3843 return ret;
d816f6ac 3844 }
9bb2d6f9 3845
69dc4987 3846 BUG_ON(!list_empty(&dev_priv->mm.active_list));
673a394b 3847 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
673a394b 3848 mutex_unlock(&dev->struct_mutex);
dbb19d30 3849
5f35308b
CW
3850 ret = drm_irq_install(dev);
3851 if (ret)
3852 goto cleanup_ringbuffer;
dbb19d30 3853
673a394b 3854 return 0;
5f35308b
CW
3855
3856cleanup_ringbuffer:
3857 mutex_lock(&dev->struct_mutex);
3858 i915_gem_cleanup_ringbuffer(dev);
3859 dev_priv->mm.suspended = 1;
3860 mutex_unlock(&dev->struct_mutex);
3861
3862 return ret;
673a394b
EA
3863}
3864
3865int
3866i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3867 struct drm_file *file_priv)
3868{
79e53945
JB
3869 if (drm_core_check_feature(dev, DRIVER_MODESET))
3870 return 0;
3871
dbb19d30 3872 drm_irq_uninstall(dev);
e6890f6f 3873 return i915_gem_idle(dev);
673a394b
EA
3874}
3875
3876void
3877i915_gem_lastclose(struct drm_device *dev)
3878{
3879 int ret;
673a394b 3880
e806b495
EA
3881 if (drm_core_check_feature(dev, DRIVER_MODESET))
3882 return;
3883
6dbe2772
KP
3884 ret = i915_gem_idle(dev);
3885 if (ret)
3886 DRM_ERROR("failed to idle hardware: %d\n", ret);
673a394b
EA
3887}
3888
64193406
CW
3889static void
3890init_ring_lists(struct intel_ring_buffer *ring)
3891{
3892 INIT_LIST_HEAD(&ring->active_list);
3893 INIT_LIST_HEAD(&ring->request_list);
64193406
CW
3894}
3895
673a394b
EA
3896void
3897i915_gem_load(struct drm_device *dev)
3898{
b5aa8a0f 3899 int i;
673a394b
EA
3900 drm_i915_private_t *dev_priv = dev->dev_private;
3901
69dc4987 3902 INIT_LIST_HEAD(&dev_priv->mm.active_list);
673a394b 3903 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
a09ba7fa 3904 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
93a37f20 3905 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
1ec14ad3
CW
3906 for (i = 0; i < I915_NUM_RINGS; i++)
3907 init_ring_lists(&dev_priv->ring[i]);
4b9de737 3908 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
007cc8ac 3909 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
673a394b
EA
3910 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3911 i915_gem_retire_work_handler);
30dbf0c0 3912 init_completion(&dev_priv->error_completion);
31169714 3913
94400120
DA
3914 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3915 if (IS_GEN3(dev)) {
50743298
DV
3916 I915_WRITE(MI_ARB_STATE,
3917 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
94400120
DA
3918 }
3919
72bfa19c
CW
3920 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3921
de151cf6 3922 /* Old X drivers will take 0-2 for front, back, depth buffers */
b397c836
EA
3923 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3924 dev_priv->fence_reg_start = 3;
de151cf6 3925
a6c45cf0 3926 if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
de151cf6
JB
3927 dev_priv->num_fence_regs = 16;
3928 else
3929 dev_priv->num_fence_regs = 8;
3930
b5aa8a0f 3931 /* Initialize fence registers to zero */
ada726c7 3932 i915_gem_reset_fences(dev);
10ed13e4 3933
673a394b 3934 i915_gem_detect_bit_6_swizzle(dev);
6b95a207 3935 init_waitqueue_head(&dev_priv->pending_flip_queue);
17250b71 3936
ce453d81
CW
3937 dev_priv->mm.interruptible = true;
3938
17250b71
CW
3939 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3940 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3941 register_shrinker(&dev_priv->mm.inactive_shrinker);
673a394b 3942}
71acb5eb
DA
3943
3944/*
3945 * Create a physically contiguous memory object for this object
3946 * e.g. for cursor + overlay regs
3947 */
995b6762
CW
3948static int i915_gem_init_phys_object(struct drm_device *dev,
3949 int id, int size, int align)
71acb5eb
DA
3950{
3951 drm_i915_private_t *dev_priv = dev->dev_private;
3952 struct drm_i915_gem_phys_object *phys_obj;
3953 int ret;
3954
3955 if (dev_priv->mm.phys_objs[id - 1] || !size)
3956 return 0;
3957
9a298b2a 3958 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
71acb5eb
DA
3959 if (!phys_obj)
3960 return -ENOMEM;
3961
3962 phys_obj->id = id;
3963
6eeefaf3 3964 phys_obj->handle = drm_pci_alloc(dev, size, align);
71acb5eb
DA
3965 if (!phys_obj->handle) {
3966 ret = -ENOMEM;
3967 goto kfree_obj;
3968 }
3969#ifdef CONFIG_X86
3970 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3971#endif
3972
3973 dev_priv->mm.phys_objs[id - 1] = phys_obj;
3974
3975 return 0;
3976kfree_obj:
9a298b2a 3977 kfree(phys_obj);
71acb5eb
DA
3978 return ret;
3979}
3980
995b6762 3981static void i915_gem_free_phys_object(struct drm_device *dev, int id)
71acb5eb
DA
3982{
3983 drm_i915_private_t *dev_priv = dev->dev_private;
3984 struct drm_i915_gem_phys_object *phys_obj;
3985
3986 if (!dev_priv->mm.phys_objs[id - 1])
3987 return;
3988
3989 phys_obj = dev_priv->mm.phys_objs[id - 1];
3990 if (phys_obj->cur_obj) {
3991 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3992 }
3993
3994#ifdef CONFIG_X86
3995 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3996#endif
3997 drm_pci_free(dev, phys_obj->handle);
3998 kfree(phys_obj);
3999 dev_priv->mm.phys_objs[id - 1] = NULL;
4000}
4001
4002void i915_gem_free_all_phys_object(struct drm_device *dev)
4003{
4004 int i;
4005
260883c8 4006 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
71acb5eb
DA
4007 i915_gem_free_phys_object(dev, i);
4008}
4009
4010void i915_gem_detach_phys_object(struct drm_device *dev,
05394f39 4011 struct drm_i915_gem_object *obj)
71acb5eb 4012{
05394f39 4013 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
e5281ccd 4014 char *vaddr;
71acb5eb 4015 int i;
71acb5eb
DA
4016 int page_count;
4017
05394f39 4018 if (!obj->phys_obj)
71acb5eb 4019 return;
05394f39 4020 vaddr = obj->phys_obj->handle->vaddr;
71acb5eb 4021
05394f39 4022 page_count = obj->base.size / PAGE_SIZE;
71acb5eb 4023 for (i = 0; i < page_count; i++) {
5949eac4 4024 struct page *page = shmem_read_mapping_page(mapping, i);
e5281ccd
CW
4025 if (!IS_ERR(page)) {
4026 char *dst = kmap_atomic(page);
4027 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4028 kunmap_atomic(dst);
4029
4030 drm_clflush_pages(&page, 1);
4031
4032 set_page_dirty(page);
4033 mark_page_accessed(page);
4034 page_cache_release(page);
4035 }
71acb5eb 4036 }
40ce6575 4037 intel_gtt_chipset_flush();
d78b47b9 4038
05394f39
CW
4039 obj->phys_obj->cur_obj = NULL;
4040 obj->phys_obj = NULL;
71acb5eb
DA
4041}
4042
4043int
4044i915_gem_attach_phys_object(struct drm_device *dev,
05394f39 4045 struct drm_i915_gem_object *obj,
6eeefaf3
CW
4046 int id,
4047 int align)
71acb5eb 4048{
05394f39 4049 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
71acb5eb 4050 drm_i915_private_t *dev_priv = dev->dev_private;
71acb5eb
DA
4051 int ret = 0;
4052 int page_count;
4053 int i;
4054
4055 if (id > I915_MAX_PHYS_OBJECT)
4056 return -EINVAL;
4057
05394f39
CW
4058 if (obj->phys_obj) {
4059 if (obj->phys_obj->id == id)
71acb5eb
DA
4060 return 0;
4061 i915_gem_detach_phys_object(dev, obj);
4062 }
4063
71acb5eb
DA
4064 /* create a new object */
4065 if (!dev_priv->mm.phys_objs[id - 1]) {
4066 ret = i915_gem_init_phys_object(dev, id,
05394f39 4067 obj->base.size, align);
71acb5eb 4068 if (ret) {
05394f39
CW
4069 DRM_ERROR("failed to init phys object %d size: %zu\n",
4070 id, obj->base.size);
e5281ccd 4071 return ret;
71acb5eb
DA
4072 }
4073 }
4074
4075 /* bind to the object */
05394f39
CW
4076 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4077 obj->phys_obj->cur_obj = obj;
71acb5eb 4078
05394f39 4079 page_count = obj->base.size / PAGE_SIZE;
71acb5eb
DA
4080
4081 for (i = 0; i < page_count; i++) {
e5281ccd
CW
4082 struct page *page;
4083 char *dst, *src;
4084
5949eac4 4085 page = shmem_read_mapping_page(mapping, i);
e5281ccd
CW
4086 if (IS_ERR(page))
4087 return PTR_ERR(page);
71acb5eb 4088
ff75b9bc 4089 src = kmap_atomic(page);
05394f39 4090 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
71acb5eb 4091 memcpy(dst, src, PAGE_SIZE);
3e4d3af5 4092 kunmap_atomic(src);
71acb5eb 4093
e5281ccd
CW
4094 mark_page_accessed(page);
4095 page_cache_release(page);
4096 }
d78b47b9 4097
71acb5eb 4098 return 0;
71acb5eb
DA
4099}
4100
4101static int
05394f39
CW
4102i915_gem_phys_pwrite(struct drm_device *dev,
4103 struct drm_i915_gem_object *obj,
71acb5eb
DA
4104 struct drm_i915_gem_pwrite *args,
4105 struct drm_file *file_priv)
4106{
05394f39 4107 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
b47b30cc 4108 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
71acb5eb 4109
b47b30cc
CW
4110 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4111 unsigned long unwritten;
4112
4113 /* The physical object once assigned is fixed for the lifetime
4114 * of the obj, so we can safely drop the lock and continue
4115 * to access vaddr.
4116 */
4117 mutex_unlock(&dev->struct_mutex);
4118 unwritten = copy_from_user(vaddr, user_data, args->size);
4119 mutex_lock(&dev->struct_mutex);
4120 if (unwritten)
4121 return -EFAULT;
4122 }
71acb5eb 4123
40ce6575 4124 intel_gtt_chipset_flush();
71acb5eb
DA
4125 return 0;
4126}
b962442e 4127
f787a5f5 4128void i915_gem_release(struct drm_device *dev, struct drm_file *file)
b962442e 4129{
f787a5f5 4130 struct drm_i915_file_private *file_priv = file->driver_priv;
b962442e
EA
4131
4132 /* Clean up our request list when the client is going away, so that
4133 * later retire_requests won't dereference our soon-to-be-gone
4134 * file_priv.
4135 */
1c25595f 4136 spin_lock(&file_priv->mm.lock);
f787a5f5
CW
4137 while (!list_empty(&file_priv->mm.request_list)) {
4138 struct drm_i915_gem_request *request;
4139
4140 request = list_first_entry(&file_priv->mm.request_list,
4141 struct drm_i915_gem_request,
4142 client_list);
4143 list_del(&request->client_list);
4144 request->file_priv = NULL;
4145 }
1c25595f 4146 spin_unlock(&file_priv->mm.lock);
b962442e 4147}
31169714 4148
1637ef41
CW
4149static int
4150i915_gpu_is_active(struct drm_device *dev)
4151{
4152 drm_i915_private_t *dev_priv = dev->dev_private;
65ce3027 4153 return !list_empty(&dev_priv->mm.active_list);
1637ef41
CW
4154}
4155
31169714 4156static int
1495f230 4157i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
31169714 4158{
17250b71
CW
4159 struct drm_i915_private *dev_priv =
4160 container_of(shrinker,
4161 struct drm_i915_private,
4162 mm.inactive_shrinker);
4163 struct drm_device *dev = dev_priv->dev;
4164 struct drm_i915_gem_object *obj, *next;
1495f230 4165 int nr_to_scan = sc->nr_to_scan;
17250b71
CW
4166 int cnt;
4167
4168 if (!mutex_trylock(&dev->struct_mutex))
bbe2e11a 4169 return 0;
31169714
CW
4170
4171 /* "fast-path" to count number of available objects */
4172 if (nr_to_scan == 0) {
17250b71
CW
4173 cnt = 0;
4174 list_for_each_entry(obj,
4175 &dev_priv->mm.inactive_list,
4176 mm_list)
4177 cnt++;
4178 mutex_unlock(&dev->struct_mutex);
4179 return cnt / 100 * sysctl_vfs_cache_pressure;
31169714
CW
4180 }
4181
1637ef41 4182rescan:
31169714 4183 /* first scan for clean buffers */
17250b71 4184 i915_gem_retire_requests(dev);
31169714 4185
17250b71
CW
4186 list_for_each_entry_safe(obj, next,
4187 &dev_priv->mm.inactive_list,
4188 mm_list) {
4189 if (i915_gem_object_is_purgeable(obj)) {
2021746e
CW
4190 if (i915_gem_object_unbind(obj) == 0 &&
4191 --nr_to_scan == 0)
17250b71 4192 break;
31169714 4193 }
31169714
CW
4194 }
4195
4196 /* second pass, evict/count anything still on the inactive list */
17250b71
CW
4197 cnt = 0;
4198 list_for_each_entry_safe(obj, next,
4199 &dev_priv->mm.inactive_list,
4200 mm_list) {
2021746e
CW
4201 if (nr_to_scan &&
4202 i915_gem_object_unbind(obj) == 0)
17250b71 4203 nr_to_scan--;
2021746e 4204 else
17250b71
CW
4205 cnt++;
4206 }
4207
4208 if (nr_to_scan && i915_gpu_is_active(dev)) {
1637ef41
CW
4209 /*
4210 * We are desperate for pages, so as a last resort, wait
4211 * for the GPU to finish and discard whatever we can.
4212 * This has a dramatic impact to reduce the number of
4213 * OOM-killer events whilst running the GPU aggressively.
4214 */
b2da9fe5 4215 if (i915_gpu_idle(dev) == 0)
1637ef41
CW
4216 goto rescan;
4217 }
17250b71
CW
4218 mutex_unlock(&dev->struct_mutex);
4219 return cnt / 100 * sysctl_vfs_cache_pressure;
31169714 4220}
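A worked example of the return-value scaling used on both paths above:

/* With the default sysctl_vfs_cache_pressure of 100 and cnt == 250:
 *   250 / 100 * 100 == 200
 * The integer division runs first, so up to 99 objects can fall out of
 * the count reported back to the VM.
 */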