drm/i915: Move semaphore specific ring members to struct
drivers/gpu/drm/i915/i915_gem.c
1 /*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 #include <drm/drmP.h>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/shmem_fs.h>
35 #include <linux/slab.h>
36 #include <linux/swap.h>
37 #include <linux/pci.h>
38 #include <linux/dma-buf.h>
39
40 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
41 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
42 bool force);
43 static __must_check int
44 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
45 bool readonly);
46 static void
47 i915_gem_object_retire(struct drm_i915_gem_object *obj);
48
49 static int i915_gem_phys_pwrite(struct drm_device *dev,
50 struct drm_i915_gem_object *obj,
51 struct drm_i915_gem_pwrite *args,
52 struct drm_file *file);
53
54 static void i915_gem_write_fence(struct drm_device *dev, int reg,
55 struct drm_i915_gem_object *obj);
56 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
57 struct drm_i915_fence_reg *fence,
58 bool enable);
59
60 static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
61 struct shrink_control *sc);
62 static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
63 struct shrink_control *sc);
64 static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
65 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
66 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
67 static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
68
69 static bool cpu_cache_is_coherent(struct drm_device *dev,
70 enum i915_cache_level level)
71 {
72 return HAS_LLC(dev) || level != I915_CACHE_NONE;
73 }
74
75 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
76 {
77 if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
78 return true;
79
80 return obj->pin_display;
81 }
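/*
 * In other words, a CPU write needs a clflush either when the object is not
 * coherent with the CPU cache (no LLC and the object is uncached) or when it
 * is pinned for scanout, since the display engine does not snoop the CPU
 * cache.
 */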
82
83 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
84 {
85 if (obj->tiling_mode)
86 i915_gem_release_mmap(obj);
87
88 /* As we do not have an associated fence register, we will force
89 * a tiling change if we ever need to acquire one.
90 */
91 obj->fence_dirty = false;
92 obj->fence_reg = I915_FENCE_REG_NONE;
93 }
94
95 /* some bookkeeping */
96 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
97 size_t size)
98 {
99 spin_lock(&dev_priv->mm.object_stat_lock);
100 dev_priv->mm.object_count++;
101 dev_priv->mm.object_memory += size;
102 spin_unlock(&dev_priv->mm.object_stat_lock);
103 }
104
105 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
106 size_t size)
107 {
108 spin_lock(&dev_priv->mm.object_stat_lock);
109 dev_priv->mm.object_count--;
110 dev_priv->mm.object_memory -= size;
111 spin_unlock(&dev_priv->mm.object_stat_lock);
112 }
113
114 static int
115 i915_gem_wait_for_error(struct i915_gpu_error *error)
116 {
117 int ret;
118
119 #define EXIT_COND (!i915_reset_in_progress(error) || \
120 i915_terminally_wedged(error))
121 if (EXIT_COND)
122 return 0;
123
124 /*
125 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
126 * userspace. If it takes that long something really bad is going on and
127 * we should simply try to bail out and fail as gracefully as possible.
128 */
129 ret = wait_event_interruptible_timeout(error->reset_queue,
130 EXIT_COND,
131 10*HZ);
132 if (ret == 0) {
133 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
134 return -EIO;
135 } else if (ret < 0) {
136 return ret;
137 }
138 #undef EXIT_COND
139
140 return 0;
141 }
142
143 int i915_mutex_lock_interruptible(struct drm_device *dev)
144 {
145 struct drm_i915_private *dev_priv = dev->dev_private;
146 int ret;
147
148 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
149 if (ret)
150 return ret;
151
152 ret = mutex_lock_interruptible(&dev->struct_mutex);
153 if (ret)
154 return ret;
155
156 WARN_ON(i915_verify_lists(dev));
157 return 0;
158 }
159
160 static inline bool
161 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
162 {
163 return i915_gem_obj_bound_any(obj) && !obj->active;
164 }
165
166 int
167 i915_gem_init_ioctl(struct drm_device *dev, void *data,
168 struct drm_file *file)
169 {
170 struct drm_i915_private *dev_priv = dev->dev_private;
171 struct drm_i915_gem_init *args = data;
172
173 if (drm_core_check_feature(dev, DRIVER_MODESET))
174 return -ENODEV;
175
176 if (args->gtt_start >= args->gtt_end ||
177 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
178 return -EINVAL;
179
180 /* GEM with user mode setting was never supported on ilk and later. */
181 if (INTEL_INFO(dev)->gen >= 5)
182 return -ENODEV;
183
184 mutex_lock(&dev->struct_mutex);
185 i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
186 args->gtt_end);
187 dev_priv->gtt.mappable_end = args->gtt_end;
188 mutex_unlock(&dev->struct_mutex);
189
190 return 0;
191 }
192
193 int
194 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
195 struct drm_file *file)
196 {
197 struct drm_i915_private *dev_priv = dev->dev_private;
198 struct drm_i915_gem_get_aperture *args = data;
199 struct drm_i915_gem_object *obj;
200 size_t pinned;
201
202 pinned = 0;
203 mutex_lock(&dev->struct_mutex);
204 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
205 if (i915_gem_obj_is_pinned(obj))
206 pinned += i915_gem_obj_ggtt_size(obj);
207 mutex_unlock(&dev->struct_mutex);
208
209 args->aper_size = dev_priv->gtt.base.total;
210 args->aper_available_size = args->aper_size - pinned;
211
212 return 0;
213 }
214
215 void *i915_gem_object_alloc(struct drm_device *dev)
216 {
217 struct drm_i915_private *dev_priv = dev->dev_private;
218 return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
219 }
220
221 void i915_gem_object_free(struct drm_i915_gem_object *obj)
222 {
223 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
224 kmem_cache_free(dev_priv->slab, obj);
225 }
226
227 static int
228 i915_gem_create(struct drm_file *file,
229 struct drm_device *dev,
230 uint64_t size,
231 uint32_t *handle_p)
232 {
233 struct drm_i915_gem_object *obj;
234 int ret;
235 u32 handle;
236
237 size = roundup(size, PAGE_SIZE);
238 if (size == 0)
239 return -EINVAL;
240
241 /* Allocate the new object */
242 obj = i915_gem_alloc_object(dev, size);
243 if (obj == NULL)
244 return -ENOMEM;
245
246 ret = drm_gem_handle_create(file, &obj->base, &handle);
247 /* drop reference from allocate - handle holds it now */
248 drm_gem_object_unreference_unlocked(&obj->base);
249 if (ret)
250 return ret;
251
252 *handle_p = handle;
253 return 0;
254 }
255
256 int
257 i915_gem_dumb_create(struct drm_file *file,
258 struct drm_device *dev,
259 struct drm_mode_create_dumb *args)
260 {
261 /* have to work out size/pitch and return them */
262 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
263 args->size = args->pitch * args->height;
264 return i915_gem_create(file, dev,
265 args->size, &args->handle);
266 }
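/*
 * For example, a hypothetical 1920x1080 dumb buffer at 32 bpp gives
 *
 *   pitch = ALIGN(1920 * DIV_ROUND_UP(32, 8), 64) = ALIGN(7680, 64) = 7680
 *   size  = 7680 * 1080 = 8294400 bytes
 *
 * which i915_gem_create() then rounds up to a multiple of PAGE_SIZE (on
 * 4KiB pages this example is already page aligned).
 */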
267
268 /**
269 * Creates a new mm object and returns a handle to it.
270 */
271 int
272 i915_gem_create_ioctl(struct drm_device *dev, void *data,
273 struct drm_file *file)
274 {
275 struct drm_i915_gem_create *args = data;
276
277 return i915_gem_create(file, dev,
278 args->size, &args->handle);
279 }
280
281 static inline int
282 __copy_to_user_swizzled(char __user *cpu_vaddr,
283 const char *gpu_vaddr, int gpu_offset,
284 int length)
285 {
286 int ret, cpu_offset = 0;
287
288 while (length > 0) {
289 int cacheline_end = ALIGN(gpu_offset + 1, 64);
290 int this_length = min(cacheline_end - gpu_offset, length);
291 int swizzled_gpu_offset = gpu_offset ^ 64;
292
293 ret = __copy_to_user(cpu_vaddr + cpu_offset,
294 gpu_vaddr + swizzled_gpu_offset,
295 this_length);
296 if (ret)
297 return ret + length;
298
299 cpu_offset += this_length;
300 gpu_offset += this_length;
301 length -= this_length;
302 }
303
304 return 0;
305 }
306
307 static inline int
308 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
309 const char __user *cpu_vaddr,
310 int length)
311 {
312 int ret, cpu_offset = 0;
313
314 while (length > 0) {
315 int cacheline_end = ALIGN(gpu_offset + 1, 64);
316 int this_length = min(cacheline_end - gpu_offset, length);
317 int swizzled_gpu_offset = gpu_offset ^ 64;
318
319 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
320 cpu_vaddr + cpu_offset,
321 this_length);
322 if (ret)
323 return ret + length;
324
325 cpu_offset += this_length;
326 gpu_offset += this_length;
327 length -= this_length;
328 }
329
330 return 0;
331 }
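/*
 * Both helpers above undo bit-17 swizzling in software: on affected
 * chipsets the two 64-byte halves of each 128-byte block are swapped
 * whenever bit 17 of the page's physical address is set. Limiting each
 * copy to the next 64-byte cacheline boundary (ALIGN(gpu_offset + 1, 64))
 * keeps every chunk inside one half, so flipping bit 6 of the offset
 * (gpu_offset ^ 64) is enough to address the swapped location.
 */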
332
333 /*
334 * Pins the specified object's pages and synchronizes the object with
335 * GPU accesses. Sets needs_clflush to non-zero if the caller should
336 * flush the object from the CPU cache.
337 */
338 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
339 int *needs_clflush)
340 {
341 int ret;
342
343 *needs_clflush = 0;
344
345 if (!obj->base.filp)
346 return -EINVAL;
347
348 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
349 /* If we're not in the cpu read domain, set ourselves into the gtt
350 * read domain and manually flush cachelines (if required). This
351 * optimizes for the case when the gpu will dirty the data
352 * anyway again before the next pread happens. */
353 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
354 obj->cache_level);
355 ret = i915_gem_object_wait_rendering(obj, true);
356 if (ret)
357 return ret;
358
359 i915_gem_object_retire(obj);
360 }
361
362 ret = i915_gem_object_get_pages(obj);
363 if (ret)
364 return ret;
365
366 i915_gem_object_pin_pages(obj);
367
368 return ret;
369 }
370
371 /* Per-page copy function for the shmem pread fastpath.
372 * Flushes invalid cachelines before reading the target if
373 * needs_clflush is set. */
374 static int
375 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
376 char __user *user_data,
377 bool page_do_bit17_swizzling, bool needs_clflush)
378 {
379 char *vaddr;
380 int ret;
381
382 if (unlikely(page_do_bit17_swizzling))
383 return -EINVAL;
384
385 vaddr = kmap_atomic(page);
386 if (needs_clflush)
387 drm_clflush_virt_range(vaddr + shmem_page_offset,
388 page_length);
389 ret = __copy_to_user_inatomic(user_data,
390 vaddr + shmem_page_offset,
391 page_length);
392 kunmap_atomic(vaddr);
393
394 return ret ? -EFAULT : 0;
395 }
396
397 static void
398 shmem_clflush_swizzled_range(char *addr, unsigned long length,
399 bool swizzled)
400 {
401 if (unlikely(swizzled)) {
402 unsigned long start = (unsigned long) addr;
403 unsigned long end = (unsigned long) addr + length;
404
405 /* For swizzling simply ensure that we always flush both
406 * channels. Lame, but simple and it works. Swizzled
407 * pwrite/pread is far from a hotpath - current userspace
408 * doesn't use it at all. */
409 start = round_down(start, 128);
410 end = round_up(end, 128);
411
412 drm_clflush_virt_range((void *)start, end - start);
413 } else {
414 drm_clflush_virt_range(addr, length);
415 }
416
417 }
418
419 /* Only difference to the fast-path function is that this can handle bit17
420 * and uses non-atomic copy and kmap functions. */
421 static int
422 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
423 char __user *user_data,
424 bool page_do_bit17_swizzling, bool needs_clflush)
425 {
426 char *vaddr;
427 int ret;
428
429 vaddr = kmap(page);
430 if (needs_clflush)
431 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
432 page_length,
433 page_do_bit17_swizzling);
434
435 if (page_do_bit17_swizzling)
436 ret = __copy_to_user_swizzled(user_data,
437 vaddr, shmem_page_offset,
438 page_length);
439 else
440 ret = __copy_to_user(user_data,
441 vaddr + shmem_page_offset,
442 page_length);
443 kunmap(page);
444
445 return ret ? -EFAULT : 0;
446 }
447
448 static int
449 i915_gem_shmem_pread(struct drm_device *dev,
450 struct drm_i915_gem_object *obj,
451 struct drm_i915_gem_pread *args,
452 struct drm_file *file)
453 {
454 char __user *user_data;
455 ssize_t remain;
456 loff_t offset;
457 int shmem_page_offset, page_length, ret = 0;
458 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
459 int prefaulted = 0;
460 int needs_clflush = 0;
461 struct sg_page_iter sg_iter;
462
463 user_data = to_user_ptr(args->data_ptr);
464 remain = args->size;
465
466 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
467
468 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
469 if (ret)
470 return ret;
471
472 offset = args->offset;
473
474 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
475 offset >> PAGE_SHIFT) {
476 struct page *page = sg_page_iter_page(&sg_iter);
477
478 if (remain <= 0)
479 break;
480
481 /* Operation in this page
482 *
483 * shmem_page_offset = offset within page in shmem file
484 * page_length = bytes to copy for this page
485 */
486 shmem_page_offset = offset_in_page(offset);
487 page_length = remain;
488 if ((shmem_page_offset + page_length) > PAGE_SIZE)
489 page_length = PAGE_SIZE - shmem_page_offset;
490
491 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
492 (page_to_phys(page) & (1 << 17)) != 0;
493
494 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
495 user_data, page_do_bit17_swizzling,
496 needs_clflush);
497 if (ret == 0)
498 goto next_page;
499
500 mutex_unlock(&dev->struct_mutex);
501
502 if (likely(!i915.prefault_disable) && !prefaulted) {
503 ret = fault_in_multipages_writeable(user_data, remain);
504 /* Userspace is tricking us, but we've already clobbered
505 * its pages with the prefault and promised to write the
506 * data up to the first fault. Hence ignore any errors
507 * and just continue. */
508 (void)ret;
509 prefaulted = 1;
510 }
511
512 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
513 user_data, page_do_bit17_swizzling,
514 needs_clflush);
515
516 mutex_lock(&dev->struct_mutex);
517
518 if (ret)
519 goto out;
520
521 next_page:
522 remain -= page_length;
523 user_data += page_length;
524 offset += page_length;
525 }
526
527 out:
528 i915_gem_object_unpin_pages(obj);
529
530 return ret;
531 }
532
533 /**
534 * Reads data from the object referenced by handle.
535 *
536 * On error, the contents of *data are undefined.
537 */
538 int
539 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
540 struct drm_file *file)
541 {
542 struct drm_i915_gem_pread *args = data;
543 struct drm_i915_gem_object *obj;
544 int ret = 0;
545
546 if (args->size == 0)
547 return 0;
548
549 if (!access_ok(VERIFY_WRITE,
550 to_user_ptr(args->data_ptr),
551 args->size))
552 return -EFAULT;
553
554 ret = i915_mutex_lock_interruptible(dev);
555 if (ret)
556 return ret;
557
558 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
559 if (&obj->base == NULL) {
560 ret = -ENOENT;
561 goto unlock;
562 }
563
564 /* Bounds check source. */
565 if (args->offset > obj->base.size ||
566 args->size > obj->base.size - args->offset) {
567 ret = -EINVAL;
568 goto out;
569 }
570
571 /* prime objects have no backing filp to GEM pread/pwrite
572 * pages from.
573 */
574 if (!obj->base.filp) {
575 ret = -EINVAL;
576 goto out;
577 }
578
579 trace_i915_gem_object_pread(obj, args->offset, args->size);
580
581 ret = i915_gem_shmem_pread(dev, obj, args, file);
582
583 out:
584 drm_gem_object_unreference(&obj->base);
585 unlock:
586 mutex_unlock(&dev->struct_mutex);
587 return ret;
588 }
589
590 /* This is the fast write path which cannot handle
591 * page faults in the source data
592 */
593
594 static inline int
595 fast_user_write(struct io_mapping *mapping,
596 loff_t page_base, int page_offset,
597 char __user *user_data,
598 int length)
599 {
600 void __iomem *vaddr_atomic;
601 void *vaddr;
602 unsigned long unwritten;
603
604 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
605 /* We can use the cpu mem copy function because this is X86. */
606 vaddr = (void __force*)vaddr_atomic + page_offset;
607 unwritten = __copy_from_user_inatomic_nocache(vaddr,
608 user_data, length);
609 io_mapping_unmap_atomic(vaddr_atomic);
610 return unwritten;
611 }
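/*
 * This is the per-page copy for the GTT pwrite fast path: one aperture page
 * is mapped write-combined with an atomic kmap, the user data is copied with
 * the non-caching variant, and the number of bytes that could not be copied
 * is returned so the caller can fall back to the shmem path on a fault.
 */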
612
613 /**
614 * This is the fast pwrite path, where we copy the data directly from the
615 * user into the GTT, uncached.
616 */
617 static int
618 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
619 struct drm_i915_gem_object *obj,
620 struct drm_i915_gem_pwrite *args,
621 struct drm_file *file)
622 {
623 struct drm_i915_private *dev_priv = dev->dev_private;
624 ssize_t remain;
625 loff_t offset, page_base;
626 char __user *user_data;
627 int page_offset, page_length, ret;
628
629 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
630 if (ret)
631 goto out;
632
633 ret = i915_gem_object_set_to_gtt_domain(obj, true);
634 if (ret)
635 goto out_unpin;
636
637 ret = i915_gem_object_put_fence(obj);
638 if (ret)
639 goto out_unpin;
640
641 user_data = to_user_ptr(args->data_ptr);
642 remain = args->size;
643
644 offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
645
646 while (remain > 0) {
647 /* Operation in this page
648 *
649 * page_base = page offset within aperture
650 * page_offset = offset within page
651 * page_length = bytes to copy for this page
652 */
653 page_base = offset & PAGE_MASK;
654 page_offset = offset_in_page(offset);
655 page_length = remain;
656 if ((page_offset + remain) > PAGE_SIZE)
657 page_length = PAGE_SIZE - page_offset;
658
659 /* If we get a fault while copying data, then (presumably) our
660 * source page isn't available. Return the error and we'll
661 * retry in the slow path.
662 */
663 if (fast_user_write(dev_priv->gtt.mappable, page_base,
664 page_offset, user_data, page_length)) {
665 ret = -EFAULT;
666 goto out_unpin;
667 }
668
669 remain -= page_length;
670 user_data += page_length;
671 offset += page_length;
672 }
673
674 out_unpin:
675 i915_gem_object_ggtt_unpin(obj);
676 out:
677 return ret;
678 }
679
680 /* Per-page copy function for the shmem pwrite fastpath.
681 * Flushes invalid cachelines before writing to the target if
682 * needs_clflush_before is set and flushes out any written cachelines after
683 * writing if needs_clflush is set. */
684 static int
685 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
686 char __user *user_data,
687 bool page_do_bit17_swizzling,
688 bool needs_clflush_before,
689 bool needs_clflush_after)
690 {
691 char *vaddr;
692 int ret;
693
694 if (unlikely(page_do_bit17_swizzling))
695 return -EINVAL;
696
697 vaddr = kmap_atomic(page);
698 if (needs_clflush_before)
699 drm_clflush_virt_range(vaddr + shmem_page_offset,
700 page_length);
701 ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
702 user_data, page_length);
703 if (needs_clflush_after)
704 drm_clflush_virt_range(vaddr + shmem_page_offset,
705 page_length);
706 kunmap_atomic(vaddr);
707
708 return ret ? -EFAULT : 0;
709 }
710
711 /* Only difference to the fast-path function is that this can handle bit17
712 * and uses non-atomic copy and kmap functions. */
713 static int
714 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
715 char __user *user_data,
716 bool page_do_bit17_swizzling,
717 bool needs_clflush_before,
718 bool needs_clflush_after)
719 {
720 char *vaddr;
721 int ret;
722
723 vaddr = kmap(page);
724 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
725 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
726 page_length,
727 page_do_bit17_swizzling);
728 if (page_do_bit17_swizzling)
729 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
730 user_data,
731 page_length);
732 else
733 ret = __copy_from_user(vaddr + shmem_page_offset,
734 user_data,
735 page_length);
736 if (needs_clflush_after)
737 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
738 page_length,
739 page_do_bit17_swizzling);
740 kunmap(page);
741
742 return ret ? -EFAULT : 0;
743 }
744
745 static int
746 i915_gem_shmem_pwrite(struct drm_device *dev,
747 struct drm_i915_gem_object *obj,
748 struct drm_i915_gem_pwrite *args,
749 struct drm_file *file)
750 {
751 ssize_t remain;
752 loff_t offset;
753 char __user *user_data;
754 int shmem_page_offset, page_length, ret = 0;
755 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
756 int hit_slowpath = 0;
757 int needs_clflush_after = 0;
758 int needs_clflush_before = 0;
759 struct sg_page_iter sg_iter;
760
761 user_data = to_user_ptr(args->data_ptr);
762 remain = args->size;
763
764 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
765
766 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
767 /* If we're not in the cpu write domain, set ourselves into the gtt
768 * write domain and manually flush cachelines (if required). This
769 * optimizes for the case when the gpu will use the data
770 * right away and we therefore have to clflush anyway. */
771 needs_clflush_after = cpu_write_needs_clflush(obj);
772 ret = i915_gem_object_wait_rendering(obj, false);
773 if (ret)
774 return ret;
775
776 i915_gem_object_retire(obj);
777 }
778 /* Same trick applies to invalidate partially written cachelines read
779 * before writing. */
780 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
781 needs_clflush_before =
782 !cpu_cache_is_coherent(dev, obj->cache_level);
783
784 ret = i915_gem_object_get_pages(obj);
785 if (ret)
786 return ret;
787
788 i915_gem_object_pin_pages(obj);
789
790 offset = args->offset;
791 obj->dirty = 1;
792
793 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
794 offset >> PAGE_SHIFT) {
795 struct page *page = sg_page_iter_page(&sg_iter);
796 int partial_cacheline_write;
797
798 if (remain <= 0)
799 break;
800
801 /* Operation in this page
802 *
803 * shmem_page_offset = offset within page in shmem file
804 * page_length = bytes to copy for this page
805 */
806 shmem_page_offset = offset_in_page(offset);
807
808 page_length = remain;
809 if ((shmem_page_offset + page_length) > PAGE_SIZE)
810 page_length = PAGE_SIZE - shmem_page_offset;
811
812 /* If we don't overwrite a cacheline completely we need to be
813 * careful to have up-to-date data by first clflushing. Don't
814 * overcomplicate things and flush the entire patch. */
815 partial_cacheline_write = needs_clflush_before &&
816 ((shmem_page_offset | page_length)
817 & (boot_cpu_data.x86_clflush_size - 1));
818
819 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
820 (page_to_phys(page) & (1 << 17)) != 0;
821
822 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
823 user_data, page_do_bit17_swizzling,
824 partial_cacheline_write,
825 needs_clflush_after);
826 if (ret == 0)
827 goto next_page;
828
829 hit_slowpath = 1;
830 mutex_unlock(&dev->struct_mutex);
831 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
832 user_data, page_do_bit17_swizzling,
833 partial_cacheline_write,
834 needs_clflush_after);
835
836 mutex_lock(&dev->struct_mutex);
837
838 if (ret)
839 goto out;
840
841 next_page:
842 remain -= page_length;
843 user_data += page_length;
844 offset += page_length;
845 }
846
847 out:
848 i915_gem_object_unpin_pages(obj);
849
850 if (hit_slowpath) {
851 /*
852 * Fixup: Flush cpu caches in case we didn't flush the dirty
853 * cachelines in-line while writing and the object moved
854 * out of the cpu write domain while we've dropped the lock.
855 */
856 if (!needs_clflush_after &&
857 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
858 if (i915_gem_clflush_object(obj, obj->pin_display))
859 i915_gem_chipset_flush(dev);
860 }
861 }
862
863 if (needs_clflush_after)
864 i915_gem_chipset_flush(dev);
865
866 return ret;
867 }
868
869 /**
870 * Writes data to the object referenced by handle.
871 *
872 * On error, the contents of the buffer that were to be modified are undefined.
873 */
874 int
875 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
876 struct drm_file *file)
877 {
878 struct drm_i915_gem_pwrite *args = data;
879 struct drm_i915_gem_object *obj;
880 int ret;
881
882 if (args->size == 0)
883 return 0;
884
885 if (!access_ok(VERIFY_READ,
886 to_user_ptr(args->data_ptr),
887 args->size))
888 return -EFAULT;
889
890 if (likely(!i915.prefault_disable)) {
891 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
892 args->size);
893 if (ret)
894 return -EFAULT;
895 }
896
897 ret = i915_mutex_lock_interruptible(dev);
898 if (ret)
899 return ret;
900
901 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
902 if (&obj->base == NULL) {
903 ret = -ENOENT;
904 goto unlock;
905 }
906
907 /* Bounds check destination. */
908 if (args->offset > obj->base.size ||
909 args->size > obj->base.size - args->offset) {
910 ret = -EINVAL;
911 goto out;
912 }
913
914 /* prime objects have no backing filp to GEM pread/pwrite
915 * pages from.
916 */
917 if (!obj->base.filp) {
918 ret = -EINVAL;
919 goto out;
920 }
921
922 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
923
924 ret = -EFAULT;
925 /* We can only do the GTT pwrite on untiled buffers, as otherwise
926 * it would end up going through the fenced access, and we'll get
927 * different detiling behavior between reading and writing.
928 * pread/pwrite currently are reading and writing from the CPU
929 * perspective, requiring manual detiling by the client.
930 */
931 if (obj->phys_obj) {
932 ret = i915_gem_phys_pwrite(dev, obj, args, file);
933 goto out;
934 }
935
936 if (obj->tiling_mode == I915_TILING_NONE &&
937 obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
938 cpu_write_needs_clflush(obj)) {
939 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
940 /* Note that the gtt paths might fail with non-page-backed user
941 * pointers (e.g. gtt mappings when moving data between
942 * textures). Fallback to the shmem path in that case. */
943 }
944
945 if (ret == -EFAULT || ret == -ENOSPC)
946 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
947
948 out:
949 drm_gem_object_unreference(&obj->base);
950 unlock:
951 mutex_unlock(&dev->struct_mutex);
952 return ret;
953 }
954
955 int
956 i915_gem_check_wedge(struct i915_gpu_error *error,
957 bool interruptible)
958 {
959 if (i915_reset_in_progress(error)) {
960 /* Non-interruptible callers can't handle -EAGAIN, hence return
961 * -EIO unconditionally for these. */
962 if (!interruptible)
963 return -EIO;
964
965 /* Recovery complete, but the reset failed ... */
966 if (i915_terminally_wedged(error))
967 return -EIO;
968
969 return -EAGAIN;
970 }
971
972 return 0;
973 }
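/*
 * To summarise the contract: 0 means no reset is pending, -EAGAIN means a
 * reset is in progress and an interruptible caller should back off and
 * retry, and -EIO means either the caller cannot handle -EAGAIN or the GPU
 * is terminally wedged.
 */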
974
975 /*
976 * Compare seqno against outstanding lazy request. Emit a request if they are
977 * equal.
978 */
979 static int
980 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
981 {
982 int ret;
983
984 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
985
986 ret = 0;
987 if (seqno == ring->outstanding_lazy_seqno)
988 ret = i915_add_request(ring, NULL);
989
990 return ret;
991 }
992
993 static void fake_irq(unsigned long data)
994 {
995 wake_up_process((struct task_struct *)data);
996 }
997
998 static bool missed_irq(struct drm_i915_private *dev_priv,
999 struct intel_ring_buffer *ring)
1000 {
1001 return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
1002 }
1003
1004 static bool can_wait_boost(struct drm_i915_file_private *file_priv)
1005 {
1006 if (file_priv == NULL)
1007 return true;
1008
1009 return !atomic_xchg(&file_priv->rps_wait_boost, true);
1010 }
1011
1012 /**
1013 * __wait_seqno - wait until execution of seqno has finished
1014 * @ring: the ring expected to report seqno
1015 * @seqno: seqno to wait for
1016 * @reset_counter: reset sequence associated with the given seqno
1017 * @interruptible: do an interruptible wait (normally yes)
1018 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1019 *
1020 * Note: It is of utmost importance that the passed in seqno and reset_counter
1021 * values have been read by the caller in an smp safe manner. Where read-side
1022 * locks are involved, it is sufficient to read the reset_counter before
1023 * unlocking the lock that protects the seqno. For lockless tricks, the
1024 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
1025 * inserted.
1026 *
1027 * Returns 0 if the seqno was found within the allotted time. Else returns the
1028 * errno with remaining time filled in timeout argument.
1029 */
1030 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1031 unsigned reset_counter,
1032 bool interruptible,
1033 struct timespec *timeout,
1034 struct drm_i915_file_private *file_priv)
1035 {
1036 struct drm_device *dev = ring->dev;
1037 struct drm_i915_private *dev_priv = dev->dev_private;
1038 const bool irq_test_in_progress =
1039 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
1040 struct timespec before, now;
1041 DEFINE_WAIT(wait);
1042 unsigned long timeout_expire;
1043 int ret;
1044
1045 WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");
1046
1047 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1048 return 0;
1049
1050 timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
1051
1052 if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
1053 gen6_rps_boost(dev_priv);
1054 if (file_priv)
1055 mod_delayed_work(dev_priv->wq,
1056 &file_priv->mm.idle_work,
1057 msecs_to_jiffies(100));
1058 }
1059
1060 if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
1061 return -ENODEV;
1062
1063 /* Record current time in case interrupted by signal, or wedged */
1064 trace_i915_gem_request_wait_begin(ring, seqno);
1065 getrawmonotonic(&before);
1066 for (;;) {
1067 struct timer_list timer;
1068
1069 prepare_to_wait(&ring->irq_queue, &wait,
1070 interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
1071
1072 /* We need to check whether any gpu reset happened in between
1073 * the caller grabbing the seqno and now ... */
1074 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
1075 /* ... but upgrade the -EAGAIN to an -EIO if the gpu
1076 * is truly gone. */
1077 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1078 if (ret == 0)
1079 ret = -EAGAIN;
1080 break;
1081 }
1082
1083 if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
1084 ret = 0;
1085 break;
1086 }
1087
1088 if (interruptible && signal_pending(current)) {
1089 ret = -ERESTARTSYS;
1090 break;
1091 }
1092
1093 if (timeout && time_after_eq(jiffies, timeout_expire)) {
1094 ret = -ETIME;
1095 break;
1096 }
1097
1098 timer.function = NULL;
1099 if (timeout || missed_irq(dev_priv, ring)) {
1100 unsigned long expire;
1101
1102 setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
1103 expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
1104 mod_timer(&timer, expire);
1105 }
1106
1107 io_schedule();
1108
1109 if (timer.function) {
1110 del_singleshot_timer_sync(&timer);
1111 destroy_timer_on_stack(&timer);
1112 }
1113 }
1114 getrawmonotonic(&now);
1115 trace_i915_gem_request_wait_end(ring, seqno);
1116
1117 if (!irq_test_in_progress)
1118 ring->irq_put(ring);
1119
1120 finish_wait(&ring->irq_queue, &wait);
1121
1122 if (timeout) {
1123 struct timespec sleep_time = timespec_sub(now, before);
1124 *timeout = timespec_sub(*timeout, sleep_time);
1125 if (!timespec_valid(timeout)) /* i.e. negative time remains */
1126 set_normalized_timespec(timeout, 0, 0);
1127 }
1128
1129 return ret;
1130 }
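/*
 * Exit conditions for the wait loop above: 0 once the seqno has passed,
 * -EAGAIN if a GPU reset happened while waiting (upgraded to -EIO if the
 * GPU is terminally wedged), -ERESTARTSYS on a signal for interruptible
 * waits, and -ETIME when the optional timeout expires; the remaining time
 * is written back through *timeout whenever one was supplied.
 */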
1131
1132 /**
1133 * Waits for a sequence number to be signaled, and cleans up the
1134 * request and object lists appropriately for that event.
1135 */
1136 int
1137 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1138 {
1139 struct drm_device *dev = ring->dev;
1140 struct drm_i915_private *dev_priv = dev->dev_private;
1141 bool interruptible = dev_priv->mm.interruptible;
1142 int ret;
1143
1144 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1145 BUG_ON(seqno == 0);
1146
1147 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1148 if (ret)
1149 return ret;
1150
1151 ret = i915_gem_check_olr(ring, seqno);
1152 if (ret)
1153 return ret;
1154
1155 return __wait_seqno(ring, seqno,
1156 atomic_read(&dev_priv->gpu_error.reset_counter),
1157 interruptible, NULL, NULL);
1158 }
1159
1160 static int
1161 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
1162 struct intel_ring_buffer *ring)
1163 {
1164 if (!obj->active)
1165 return 0;
1166
1167 /* Manually manage the write flush as we may have not yet
1168 * retired the buffer.
1169 *
1170 * Note that the last_write_seqno is always the earlier of
1171 * the two (read/write) seqno, so if we have successfully waited,
1172 * we know we have passed the last write.
1173 */
1174 obj->last_write_seqno = 0;
1175
1176 return 0;
1177 }
1178
1179 /**
1180 * Ensures that all rendering to the object has completed and the object is
1181 * safe to unbind from the GTT or access from the CPU.
1182 */
1183 static __must_check int
1184 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1185 bool readonly)
1186 {
1187 struct intel_ring_buffer *ring = obj->ring;
1188 u32 seqno;
1189 int ret;
1190
1191 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1192 if (seqno == 0)
1193 return 0;
1194
1195 ret = i915_wait_seqno(ring, seqno);
1196 if (ret)
1197 return ret;
1198
1199 return i915_gem_object_wait_rendering__tail(obj, ring);
1200 }
1201
1202 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1203 * as the object state may change during this call.
1204 */
1205 static __must_check int
1206 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1207 struct drm_i915_file_private *file_priv,
1208 bool readonly)
1209 {
1210 struct drm_device *dev = obj->base.dev;
1211 struct drm_i915_private *dev_priv = dev->dev_private;
1212 struct intel_ring_buffer *ring = obj->ring;
1213 unsigned reset_counter;
1214 u32 seqno;
1215 int ret;
1216
1217 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1218 BUG_ON(!dev_priv->mm.interruptible);
1219
1220 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1221 if (seqno == 0)
1222 return 0;
1223
1224 ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1225 if (ret)
1226 return ret;
1227
1228 ret = i915_gem_check_olr(ring, seqno);
1229 if (ret)
1230 return ret;
1231
1232 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1233 mutex_unlock(&dev->struct_mutex);
1234 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
1235 mutex_lock(&dev->struct_mutex);
1236 if (ret)
1237 return ret;
1238
1239 return i915_gem_object_wait_rendering__tail(obj, ring);
1240 }
1241
1242 /**
1243 * Called when user space prepares to use an object with the CPU, either
1244 * through the mmap ioctl's mapping or a GTT mapping.
1245 */
1246 int
1247 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1248 struct drm_file *file)
1249 {
1250 struct drm_i915_gem_set_domain *args = data;
1251 struct drm_i915_gem_object *obj;
1252 uint32_t read_domains = args->read_domains;
1253 uint32_t write_domain = args->write_domain;
1254 int ret;
1255
1256 /* Only handle setting domains to types used by the CPU. */
1257 if (write_domain & I915_GEM_GPU_DOMAINS)
1258 return -EINVAL;
1259
1260 if (read_domains & I915_GEM_GPU_DOMAINS)
1261 return -EINVAL;
1262
1263 /* Having something in the write domain implies it's in the read
1264 * domain, and only that read domain. Enforce that in the request.
1265 */
1266 if (write_domain != 0 && read_domains != write_domain)
1267 return -EINVAL;
1268
1269 ret = i915_mutex_lock_interruptible(dev);
1270 if (ret)
1271 return ret;
1272
1273 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1274 if (&obj->base == NULL) {
1275 ret = -ENOENT;
1276 goto unlock;
1277 }
1278
1279 /* Try to flush the object off the GPU without holding the lock.
1280 * We will repeat the flush holding the lock in the normal manner
1281 * to catch cases where we are gazumped.
1282 */
1283 ret = i915_gem_object_wait_rendering__nonblocking(obj,
1284 file->driver_priv,
1285 !write_domain);
1286 if (ret)
1287 goto unref;
1288
1289 if (read_domains & I915_GEM_DOMAIN_GTT) {
1290 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1291
1292 /* Silently promote "you're not bound, there was nothing to do"
1293 * to success, since the client was just asking us to
1294 * make sure everything was done.
1295 */
1296 if (ret == -EINVAL)
1297 ret = 0;
1298 } else {
1299 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1300 }
1301
1302 unref:
1303 drm_gem_object_unreference(&obj->base);
1304 unlock:
1305 mutex_unlock(&dev->struct_mutex);
1306 return ret;
1307 }
1308
1309 /**
1310 * Called when user space has done writes to this buffer
1311 */
1312 int
1313 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1314 struct drm_file *file)
1315 {
1316 struct drm_i915_gem_sw_finish *args = data;
1317 struct drm_i915_gem_object *obj;
1318 int ret = 0;
1319
1320 ret = i915_mutex_lock_interruptible(dev);
1321 if (ret)
1322 return ret;
1323
1324 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1325 if (&obj->base == NULL) {
1326 ret = -ENOENT;
1327 goto unlock;
1328 }
1329
1330 /* Pinned buffers may be scanout, so flush the cache */
1331 if (obj->pin_display)
1332 i915_gem_object_flush_cpu_write_domain(obj, true);
1333
1334 drm_gem_object_unreference(&obj->base);
1335 unlock:
1336 mutex_unlock(&dev->struct_mutex);
1337 return ret;
1338 }
1339
1340 /**
1341 * Maps the contents of an object, returning the address it is mapped
1342 * into.
1343 *
1344 * While the mapping holds a reference on the contents of the object, it doesn't
1345 * imply a ref on the object itself.
1346 */
1347 int
1348 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1349 struct drm_file *file)
1350 {
1351 struct drm_i915_gem_mmap *args = data;
1352 struct drm_gem_object *obj;
1353 unsigned long addr;
1354
1355 obj = drm_gem_object_lookup(dev, file, args->handle);
1356 if (obj == NULL)
1357 return -ENOENT;
1358
1359 /* prime objects have no backing filp to GEM mmap
1360 * pages from.
1361 */
1362 if (!obj->filp) {
1363 drm_gem_object_unreference_unlocked(obj);
1364 return -EINVAL;
1365 }
1366
1367 addr = vm_mmap(obj->filp, 0, args->size,
1368 PROT_READ | PROT_WRITE, MAP_SHARED,
1369 args->offset);
1370 drm_gem_object_unreference_unlocked(obj);
1371 if (IS_ERR((void *)addr))
1372 return addr;
1373
1374 args->addr_ptr = (uint64_t) addr;
1375
1376 return 0;
1377 }
1378
1379 /**
1380 * i915_gem_fault - fault a page into the GTT
1381 * @vma: VMA in question
1382 * @vmf: fault info
1383 *
1384 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1385 * from userspace. The fault handler takes care of binding the object to
1386 * the GTT (if needed), allocating and programming a fence register (again,
1387 * only if needed based on whether the old reg is still valid or the object
1388 * is tiled) and inserting a new PTE into the faulting process.
1389 *
1390 * Note that the faulting process may involve evicting existing objects
1391 * from the GTT and/or fence registers to make room. So performance may
1392 * suffer if the GTT working set is large or there are few fence registers
1393 * left.
1394 */
1395 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1396 {
1397 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1398 struct drm_device *dev = obj->base.dev;
1399 struct drm_i915_private *dev_priv = dev->dev_private;
1400 pgoff_t page_offset;
1401 unsigned long pfn;
1402 int ret = 0;
1403 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1404
1405 intel_runtime_pm_get(dev_priv);
1406
1407 /* We don't use vmf->pgoff since that has the fake offset */
1408 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1409 PAGE_SHIFT;
1410
1411 ret = i915_mutex_lock_interruptible(dev);
1412 if (ret)
1413 goto out;
1414
1415 trace_i915_gem_object_fault(obj, page_offset, true, write);
1416
1417 /* Try to flush the object off the GPU first without holding the lock.
1418 * Upon reacquiring the lock, we will perform our sanity checks and then
1419 * repeat the flush holding the lock in the normal manner to catch cases
1420 * where we are gazumped.
1421 */
1422 ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1423 if (ret)
1424 goto unlock;
1425
1426 /* Access to snoopable pages through the GTT is incoherent. */
1427 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1428 ret = -EINVAL;
1429 goto unlock;
1430 }
1431
1432 /* Now bind it into the GTT if needed */
1433 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
1434 if (ret)
1435 goto unlock;
1436
1437 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1438 if (ret)
1439 goto unpin;
1440
1441 ret = i915_gem_object_get_fence(obj);
1442 if (ret)
1443 goto unpin;
1444
1445 obj->fault_mappable = true;
1446
1447 pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1448 pfn >>= PAGE_SHIFT;
1449 pfn += page_offset;
1450
1451 /* Finally, remap it using the new GTT offset */
1452 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1453 unpin:
1454 i915_gem_object_ggtt_unpin(obj);
1455 unlock:
1456 mutex_unlock(&dev->struct_mutex);
1457 out:
1458 switch (ret) {
1459 case -EIO:
1460 /* If this -EIO is due to a gpu hang, give the reset code a
1461 * chance to clean up the mess. Otherwise return the proper
1462 * SIGBUS. */
1463 if (i915_terminally_wedged(&dev_priv->gpu_error)) {
1464 ret = VM_FAULT_SIGBUS;
1465 break;
1466 }
1467 case -EAGAIN:
1468 /*
1469 * EAGAIN means the gpu is hung and we'll wait for the error
1470 * handler to reset everything when re-faulting in
1471 * i915_mutex_lock_interruptible.
1472 */
1473 case 0:
1474 case -ERESTARTSYS:
1475 case -EINTR:
1476 case -EBUSY:
1477 /*
1478 * EBUSY is ok: this just means that another thread
1479 * already did the job.
1480 */
1481 ret = VM_FAULT_NOPAGE;
1482 break;
1483 case -ENOMEM:
1484 ret = VM_FAULT_OOM;
1485 break;
1486 case -ENOSPC:
1487 case -EFAULT:
1488 ret = VM_FAULT_SIGBUS;
1489 break;
1490 default:
1491 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1492 ret = VM_FAULT_SIGBUS;
1493 break;
1494 }
1495
1496 intel_runtime_pm_put(dev_priv);
1497 return ret;
1498 }
1499
1500 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1501 {
1502 struct i915_vma *vma;
1503
1504 /*
1505 * Only the global gtt is relevant for gtt memory mappings, so restrict
1506 * list traversal to objects bound into the global address space. Note
1507 * that the active list should be empty, but better safe than sorry.
1508 */
1509 WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
1510 list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
1511 i915_gem_release_mmap(vma->obj);
1512 list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
1513 i915_gem_release_mmap(vma->obj);
1514 }
1515
1516 /**
1517 * i915_gem_release_mmap - remove physical page mappings
1518 * @obj: obj in question
1519 *
1520 * Preserve the reservation of the mmapping with the DRM core code, but
1521 * relinquish ownership of the pages back to the system.
1522 *
1523 * It is vital that we remove the page mapping if we have mapped a tiled
1524 * object through the GTT and then lose the fence register due to
1525 * resource pressure. Similarly if the object has been moved out of the
1526 * aperture, then pages mapped into userspace must be revoked. Removing the
1527 * mapping will then trigger a page fault on the next user access, allowing
1528 * fixup by i915_gem_fault().
1529 */
1530 void
1531 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1532 {
1533 if (!obj->fault_mappable)
1534 return;
1535
1536 drm_vma_node_unmap(&obj->base.vma_node,
1537 obj->base.dev->anon_inode->i_mapping);
1538 obj->fault_mappable = false;
1539 }
1540
1541 uint32_t
1542 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1543 {
1544 uint32_t gtt_size;
1545
1546 if (INTEL_INFO(dev)->gen >= 4 ||
1547 tiling_mode == I915_TILING_NONE)
1548 return size;
1549
1550 /* Previous chips need a power-of-two fence region when tiling */
1551 if (INTEL_INFO(dev)->gen == 3)
1552 gtt_size = 1024*1024;
1553 else
1554 gtt_size = 512*1024;
1555
1556 while (gtt_size < size)
1557 gtt_size <<= 1;
1558
1559 return gtt_size;
1560 }
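/*
 * For example, a hypothetical 1.5MiB tiled object on gen3 ends up with a
 * 2MiB fence region (the 1MiB minimum doubled once), whereas on gen4+ or
 * for untiled objects the object size is used directly.
 */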
1561
1562 /**
1563 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1564 * @obj: object to check
1565 *
1566 * Return the required GTT alignment for an object, taking into account
1567 * potential fence register mapping.
1568 */
1569 uint32_t
1570 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1571 int tiling_mode, bool fenced)
1572 {
1573 /*
1574 * Minimum alignment is 4k (GTT page size), but might be greater
1575 * if a fence register is needed for the object.
1576 */
1577 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1578 tiling_mode == I915_TILING_NONE)
1579 return 4096;
1580
1581 /*
1582 * Previous chips need to be aligned to the size of the smallest
1583 * fence register that can contain the object.
1584 */
1585 return i915_gem_get_gtt_size(dev, size, tiling_mode);
1586 }
1587
1588 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1589 {
1590 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1591 int ret;
1592
1593 if (drm_vma_node_has_offset(&obj->base.vma_node))
1594 return 0;
1595
1596 dev_priv->mm.shrinker_no_lock_stealing = true;
1597
1598 ret = drm_gem_create_mmap_offset(&obj->base);
1599 if (ret != -ENOSPC)
1600 goto out;
1601
1602 /* Badly fragmented mmap space? The only way we can recover
1603 * space is by destroying unwanted objects. We can't randomly release
1604 * mmap_offsets as userspace expects them to be persistent for the
1605 * lifetime of the objects. The closest we can do is to release the
1606 * offsets on purgeable objects by truncating them and marking them purged,
1607 * which prevents userspace from ever using that object again.
1608 */
1609 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1610 ret = drm_gem_create_mmap_offset(&obj->base);
1611 if (ret != -ENOSPC)
1612 goto out;
1613
1614 i915_gem_shrink_all(dev_priv);
1615 ret = drm_gem_create_mmap_offset(&obj->base);
1616 out:
1617 dev_priv->mm.shrinker_no_lock_stealing = false;
1618
1619 return ret;
1620 }
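/*
 * Note the deliberate escalation above: first try the plain allocation,
 * then destroy purgeable objects (which releases their mmap offsets along
 * with their backing pages), and only as a last resort evict and shrink
 * everything before the final attempt.
 */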
1621
1622 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1623 {
1624 drm_gem_free_mmap_offset(&obj->base);
1625 }
1626
1627 int
1628 i915_gem_mmap_gtt(struct drm_file *file,
1629 struct drm_device *dev,
1630 uint32_t handle,
1631 uint64_t *offset)
1632 {
1633 struct drm_i915_private *dev_priv = dev->dev_private;
1634 struct drm_i915_gem_object *obj;
1635 int ret;
1636
1637 ret = i915_mutex_lock_interruptible(dev);
1638 if (ret)
1639 return ret;
1640
1641 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1642 if (&obj->base == NULL) {
1643 ret = -ENOENT;
1644 goto unlock;
1645 }
1646
1647 if (obj->base.size > dev_priv->gtt.mappable_end) {
1648 ret = -E2BIG;
1649 goto out;
1650 }
1651
1652 if (obj->madv != I915_MADV_WILLNEED) {
1653 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
1654 ret = -EFAULT;
1655 goto out;
1656 }
1657
1658 ret = i915_gem_object_create_mmap_offset(obj);
1659 if (ret)
1660 goto out;
1661
1662 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
1663
1664 out:
1665 drm_gem_object_unreference(&obj->base);
1666 unlock:
1667 mutex_unlock(&dev->struct_mutex);
1668 return ret;
1669 }
1670
1671 /**
1672 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1673 * @dev: DRM device
1674 * @data: GTT mapping ioctl data
1675 * @file: GEM object info
1676 *
1677 * Simply returns the fake offset to userspace so it can mmap it.
1678 * The mmap call will end up in drm_gem_mmap(), which will set things
1679 * up so we can get faults in the handler above.
1680 *
1681 * The fault handler will take care of binding the object into the GTT
1682 * (since it may have been evicted to make room for something), allocating
1683 * a fence register, and mapping the appropriate aperture address into
1684 * userspace.
1685 */
1686 int
1687 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1688 struct drm_file *file)
1689 {
1690 struct drm_i915_gem_mmap_gtt *args = data;
1691
1692 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1693 }
1694
1695 /* Immediately discard the backing storage */
1696 static void
1697 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1698 {
1699 struct inode *inode;
1700
1701 i915_gem_object_free_mmap_offset(obj);
1702
1703 if (obj->base.filp == NULL)
1704 return;
1705
1706 /* Our goal here is to return as much of the memory as
1707 * is possible back to the system as we are called from OOM.
1708 * To do this we must instruct the shmfs to drop all of its
1709 * backing pages, *now*.
1710 */
1711 inode = file_inode(obj->base.filp);
1712 shmem_truncate_range(inode, 0, (loff_t)-1);
1713
1714 obj->madv = __I915_MADV_PURGED;
1715 }
1716
1717 static inline int
1718 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1719 {
1720 return obj->madv == I915_MADV_DONTNEED;
1721 }
1722
1723 static void
1724 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1725 {
1726 struct sg_page_iter sg_iter;
1727 int ret;
1728
1729 BUG_ON(obj->madv == __I915_MADV_PURGED);
1730
1731 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1732 if (ret) {
1733 /* In the event of a disaster, abandon all caches and
1734 * hope for the best.
1735 */
1736 WARN_ON(ret != -EIO);
1737 i915_gem_clflush_object(obj, true);
1738 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1739 }
1740
1741 if (i915_gem_object_needs_bit17_swizzle(obj))
1742 i915_gem_object_save_bit_17_swizzle(obj);
1743
1744 if (obj->madv == I915_MADV_DONTNEED)
1745 obj->dirty = 0;
1746
1747 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
1748 struct page *page = sg_page_iter_page(&sg_iter);
1749
1750 if (obj->dirty)
1751 set_page_dirty(page);
1752
1753 if (obj->madv == I915_MADV_WILLNEED)
1754 mark_page_accessed(page);
1755
1756 page_cache_release(page);
1757 }
1758 obj->dirty = 0;
1759
1760 sg_free_table(obj->pages);
1761 kfree(obj->pages);
1762 }
1763
1764 int
1765 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1766 {
1767 const struct drm_i915_gem_object_ops *ops = obj->ops;
1768
1769 if (obj->pages == NULL)
1770 return 0;
1771
1772 if (obj->pages_pin_count)
1773 return -EBUSY;
1774
1775 BUG_ON(i915_gem_obj_bound_any(obj));
1776
1777 /* ->put_pages might need to allocate memory for the bit17 swizzle
1778 * array, hence protect them from being reaped by removing them from gtt
1779 * lists early. */
1780 list_del(&obj->global_list);
1781
1782 ops->put_pages(obj);
1783 obj->pages = NULL;
1784
1785 if (i915_gem_object_is_purgeable(obj))
1786 i915_gem_object_truncate(obj);
1787
1788 return 0;
1789 }
1790
1791 static unsigned long
1792 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1793 bool purgeable_only)
1794 {
1795 struct list_head still_in_list;
1796 struct drm_i915_gem_object *obj;
1797 unsigned long count = 0;
1798
1799 /*
1800 * As we may completely rewrite the (un)bound list whilst unbinding
1801 * (due to retiring requests) we have to strictly process only
1802 * one element of the list at the time, and recheck the list
1803 * on every iteration.
1804 *
1805 * In particular, we must hold a reference whilst removing the
1806 * object as we may end up waiting for and/or retiring the objects.
1807 * This might release the final reference (held by the active list)
1808 * and result in the object being freed from under us. This is
1809 * similar to the precautions the eviction code must take whilst
1810 * removing objects.
1811 *
1812 * Also note that although these lists do not hold a reference to
1813 * the object we can safely grab one here: The final object
1814 * unreferencing and the bound_list are both protected by the
1815 * dev->struct_mutex and so we won't ever be able to observe an
1816 * object on the bound_list with a reference count equals 0.
1817 */
1818 INIT_LIST_HEAD(&still_in_list);
1819 while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
1820 obj = list_first_entry(&dev_priv->mm.unbound_list,
1821 typeof(*obj), global_list);
1822 list_move_tail(&obj->global_list, &still_in_list);
1823
1824 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1825 continue;
1826
1827 drm_gem_object_reference(&obj->base);
1828
1829 if (i915_gem_object_put_pages(obj) == 0)
1830 count += obj->base.size >> PAGE_SHIFT;
1831
1832 drm_gem_object_unreference(&obj->base);
1833 }
1834 list_splice(&still_in_list, &dev_priv->mm.unbound_list);
1835
1836 INIT_LIST_HEAD(&still_in_list);
1837 while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
1838 struct i915_vma *vma, *v;
1839
1840 obj = list_first_entry(&dev_priv->mm.bound_list,
1841 typeof(*obj), global_list);
1842 list_move_tail(&obj->global_list, &still_in_list);
1843
1844 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1845 continue;
1846
1847 drm_gem_object_reference(&obj->base);
1848
1849 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
1850 if (i915_vma_unbind(vma))
1851 break;
1852
1853 if (i915_gem_object_put_pages(obj) == 0)
1854 count += obj->base.size >> PAGE_SHIFT;
1855
1856 drm_gem_object_unreference(&obj->base);
1857 }
1858 list_splice(&still_in_list, &dev_priv->mm.bound_list);
1859
1860 return count;
1861 }
1862
1863 static unsigned long
1864 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1865 {
1866 return __i915_gem_shrink(dev_priv, target, true);
1867 }
1868
1869 static unsigned long
1870 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1871 {
1872 i915_gem_evict_everything(dev_priv->dev);
1873 return __i915_gem_shrink(dev_priv, LONG_MAX, false);
1874 }
1875
1876 static int
1877 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1878 {
1879 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1880 int page_count, i;
1881 struct address_space *mapping;
1882 struct sg_table *st;
1883 struct scatterlist *sg;
1884 struct sg_page_iter sg_iter;
1885 struct page *page;
1886 unsigned long last_pfn = 0; /* suppress gcc warning */
1887 gfp_t gfp;
1888
1889 /* Assert that the object is not currently in any GPU domain. As it
1890 * wasn't in the GTT, there shouldn't be any way it could have been in
1891 * a GPU cache
1892 */
1893 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1894 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1895
1896 st = kmalloc(sizeof(*st), GFP_KERNEL);
1897 if (st == NULL)
1898 return -ENOMEM;
1899
1900 page_count = obj->base.size / PAGE_SIZE;
1901 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
1902 kfree(st);
1903 return -ENOMEM;
1904 }
1905
1906 /* Get the list of pages out of our struct file. They'll be pinned
1907 * at this point until we release them.
1908 *
1909 * Fail silently without starting the shrinker
1910 */
1911 mapping = file_inode(obj->base.filp)->i_mapping;
1912 gfp = mapping_gfp_mask(mapping);
1913 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1914 gfp &= ~(__GFP_IO | __GFP_WAIT);
1915 sg = st->sgl;
1916 st->nents = 0;
1917 for (i = 0; i < page_count; i++) {
1918 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1919 if (IS_ERR(page)) {
1920 i915_gem_purge(dev_priv, page_count);
1921 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1922 }
1923 if (IS_ERR(page)) {
1924 /* We've tried hard to allocate the memory by reaping
1925 * our own buffers; now let the real VM do its job and
1926 * go down in flames if we are truly OOM.
1927 */
1928 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
1929 gfp |= __GFP_IO | __GFP_WAIT;
1930
1931 i915_gem_shrink_all(dev_priv);
1932 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1933 if (IS_ERR(page))
1934 goto err_pages;
1935
1936 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1937 gfp &= ~(__GFP_IO | __GFP_WAIT);
1938 }
1939 #ifdef CONFIG_SWIOTLB
1940 if (swiotlb_nr_tbl()) {
1941 st->nents++;
1942 sg_set_page(sg, page, PAGE_SIZE, 0);
1943 sg = sg_next(sg);
1944 continue;
1945 }
1946 #endif
1947 if (!i || page_to_pfn(page) != last_pfn + 1) {
1948 if (i)
1949 sg = sg_next(sg);
1950 st->nents++;
1951 sg_set_page(sg, page, PAGE_SIZE, 0);
1952 } else {
1953 sg->length += PAGE_SIZE;
1954 }
1955 last_pfn = page_to_pfn(page);
1956
1957 /* Check that the i965g/gm workaround works. */
1958 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
1959 }
1960 #ifdef CONFIG_SWIOTLB
1961 if (!swiotlb_nr_tbl())
1962 #endif
1963 sg_mark_end(sg);
1964 obj->pages = st;
1965
1966 if (i915_gem_object_needs_bit17_swizzle(obj))
1967 i915_gem_object_do_bit_17_swizzle(obj);
1968
1969 return 0;
1970
1971 err_pages:
1972 sg_mark_end(sg);
1973 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
1974 page_cache_release(sg_page_iter_page(&sg_iter));
1975 sg_free_table(st);
1976 kfree(st);
1977 return PTR_ERR(page);
1978 }
1979
1980 /* Ensure that the associated pages are gathered from the backing storage
1981 * and pinned into our object. i915_gem_object_get_pages() may be called
1982 * multiple times before they are released by a single call to
1983 * i915_gem_object_put_pages() - once the pages are no longer referenced
1984 * either as a result of memory pressure (reaping pages under the shrinker)
1985 * or as the object is itself released.
1986 */
1987 int
1988 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1989 {
1990 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1991 const struct drm_i915_gem_object_ops *ops = obj->ops;
1992 int ret;
1993
1994 if (obj->pages)
1995 return 0;
1996
1997 if (obj->madv != I915_MADV_WILLNEED) {
1998 DRM_DEBUG("Attempting to obtain a purgeable object\n");
1999 return -EFAULT;
2000 }
2001
2002 BUG_ON(obj->pages_pin_count);
2003
2004 ret = ops->get_pages(obj);
2005 if (ret)
2006 return ret;
2007
2008 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2009 return 0;
2010 }
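/*
 * Illustrative sketch, not part of the driver: callers are expected to pair
 * page acquisition with a pin so that the shrinker cannot reap the pages
 * while they are in use, following the pattern used by
 * i915_gem_object_bind_to_vm() later in this file:
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret)
 *		return ret;
 *	i915_gem_object_pin_pages(obj);
 *	... use obj->pages ...
 *	i915_gem_object_unpin_pages(obj);
 *
 * i915_gem_object_put_pages() is then free to release the pages once the
 * pin count drops back to zero.
 */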
2011
2012 static void
2013 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2014 struct intel_ring_buffer *ring)
2015 {
2016 struct drm_device *dev = obj->base.dev;
2017 struct drm_i915_private *dev_priv = dev->dev_private;
2018 u32 seqno = intel_ring_get_seqno(ring);
2019
2020 BUG_ON(ring == NULL);
2021 if (obj->ring != ring && obj->last_write_seqno) {
2022 /* Keep the seqno relative to the current ring */
2023 obj->last_write_seqno = seqno;
2024 }
2025 obj->ring = ring;
2026
2027 /* Add a reference if we're newly entering the active list. */
2028 if (!obj->active) {
2029 drm_gem_object_reference(&obj->base);
2030 obj->active = 1;
2031 }
2032
2033 list_move_tail(&obj->ring_list, &ring->active_list);
2034
2035 obj->last_read_seqno = seqno;
2036
2037 if (obj->fenced_gpu_access) {
2038 obj->last_fenced_seqno = seqno;
2039
2040 /* Bump MRU to take account of the delayed flush */
2041 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2042 struct drm_i915_fence_reg *reg;
2043
2044 reg = &dev_priv->fence_regs[obj->fence_reg];
2045 list_move_tail(&reg->lru_list,
2046 &dev_priv->mm.fence_list);
2047 }
2048 }
2049 }
2050
2051 void i915_vma_move_to_active(struct i915_vma *vma,
2052 struct intel_ring_buffer *ring)
2053 {
2054 list_move_tail(&vma->mm_list, &vma->vm->active_list);
2055 return i915_gem_object_move_to_active(vma->obj, ring);
2056 }
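/*
 * Illustrative sketch, not part of the driver: the execbuffer path is the
 * typical caller here; after emitting a batch it walks the VMAs used by the
 * request and marks each one active on the ring, roughly:
 *
 *	list_for_each_entry(vma, vmas, exec_list)
 *		i915_vma_move_to_active(vma, ring);
 *
 * The exact list and member names belong to the execbuffer code and are
 * shown only for orientation.
 */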
2057
2058 static void
2059 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2060 {
2061 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2062 struct i915_address_space *vm;
2063 struct i915_vma *vma;
2064
2065 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2066 BUG_ON(!obj->active);
2067
2068 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2069 vma = i915_gem_obj_to_vma(obj, vm);
2070 if (vma && !list_empty(&vma->mm_list))
2071 list_move_tail(&vma->mm_list, &vm->inactive_list);
2072 }
2073
2074 list_del_init(&obj->ring_list);
2075 obj->ring = NULL;
2076
2077 obj->last_read_seqno = 0;
2078 obj->last_write_seqno = 0;
2079 obj->base.write_domain = 0;
2080
2081 obj->last_fenced_seqno = 0;
2082 obj->fenced_gpu_access = false;
2083
2084 obj->active = 0;
2085 drm_gem_object_unreference(&obj->base);
2086
2087 WARN_ON(i915_verify_lists(dev));
2088 }
2089
2090 static void
2091 i915_gem_object_retire(struct drm_i915_gem_object *obj)
2092 {
2093 struct intel_ring_buffer *ring = obj->ring;
2094
2095 if (ring == NULL)
2096 return;
2097
2098 if (i915_seqno_passed(ring->get_seqno(ring, true),
2099 obj->last_read_seqno))
2100 i915_gem_object_move_to_inactive(obj);
2101 }
2102
2103 static int
2104 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2105 {
2106 struct drm_i915_private *dev_priv = dev->dev_private;
2107 struct intel_ring_buffer *ring;
2108 int ret, i, j;
2109
2110 /* Carefully retire all requests without writing to the rings */
2111 for_each_ring(ring, dev_priv, i) {
2112 ret = intel_ring_idle(ring);
2113 if (ret)
2114 return ret;
2115 }
2116 i915_gem_retire_requests(dev);
2117
2118 /* Finally reset hw state */
2119 for_each_ring(ring, dev_priv, i) {
2120 intel_ring_init_seqno(ring, seqno);
2121
2122 for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
2123 ring->semaphore.sync_seqno[j] = 0;
2124 }
2125
2126 return 0;
2127 }
2128
2129 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2130 {
2131 struct drm_i915_private *dev_priv = dev->dev_private;
2132 int ret;
2133
2134 if (seqno == 0)
2135 return -EINVAL;
2136
2137 /* The HWS page seqno needs to be set to less than the value we
2138 * will inject into the ring.
2139 */
2140 ret = i915_gem_init_seqno(dev, seqno - 1);
2141 if (ret)
2142 return ret;
2143
2144 /* Carefully set the last_seqno value so that wrap
2145 * detection still works
2146 */
2147 dev_priv->next_seqno = seqno;
2148 dev_priv->last_seqno = seqno - 1;
2149 if (dev_priv->last_seqno == 0)
2150 dev_priv->last_seqno--;
2151
2152 return 0;
2153 }
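/*
 * Worked example, not part of the driver: with the bookkeeping above,
 * i915_gem_set_seqno(dev, 1) leaves next_seqno == 1; last_seqno would be 0,
 * but since 0 is reserved it is decremented and wraps to 0xffffffff, which
 * keeps i915_seqno_passed()-style wrap detection working for the very first
 * request that is then emitted with seqno 1.
 */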
2154
2155 int
2156 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2157 {
2158 struct drm_i915_private *dev_priv = dev->dev_private;
2159
2160 /* reserve 0 for non-seqno */
2161 if (dev_priv->next_seqno == 0) {
2162 int ret = i915_gem_init_seqno(dev, 0);
2163 if (ret)
2164 return ret;
2165
2166 dev_priv->next_seqno = 1;
2167 }
2168
2169 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
2170 return 0;
2171 }
2172
2173 int __i915_add_request(struct intel_ring_buffer *ring,
2174 struct drm_file *file,
2175 struct drm_i915_gem_object *obj,
2176 u32 *out_seqno)
2177 {
2178 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2179 struct drm_i915_gem_request *request;
2180 u32 request_ring_position, request_start;
2181 int ret;
2182
2183 request_start = intel_ring_get_tail(ring);
2184 /*
2185 * Emit any outstanding flushes - execbuf can fail to emit the flush
2186 * after having emitted the batchbuffer command. Hence we need to fix
2187 * things up similar to emitting the lazy request. The difference here
2188 * is that the flush _must_ happen before the next request, no matter
2189 * what.
2190 */
2191 ret = intel_ring_flush_all_caches(ring);
2192 if (ret)
2193 return ret;
2194
2195 request = ring->preallocated_lazy_request;
2196 if (WARN_ON(request == NULL))
2197 return -ENOMEM;
2198
2199 /* Record the position of the start of the request so that
2200 * should we detect the updated seqno part-way through the
2201 * GPU processing the request, we never over-estimate the
2202 * position of the head.
2203 */
2204 request_ring_position = intel_ring_get_tail(ring);
2205
2206 ret = ring->add_request(ring);
2207 if (ret)
2208 return ret;
2209
2210 request->seqno = intel_ring_get_seqno(ring);
2211 request->ring = ring;
2212 request->head = request_start;
2213 request->tail = request_ring_position;
2214
2215 /* Whilst this request exists, batch_obj will be on the
2216 * active_list, and so will hold the active reference. Only when this
2217 * request is retired will the batch_obj be moved onto the
2218 * inactive_list and lose its active reference. Hence we do not need
2219 * to explicitly hold another reference here.
2220 */
2221 request->batch_obj = obj;
2222
2223 /* Hold a reference to the current context so that we can inspect
2224 * it later in case a hangcheck error event fires.
2225 */
2226 request->ctx = ring->last_context;
2227 if (request->ctx)
2228 i915_gem_context_reference(request->ctx);
2229
2230 request->emitted_jiffies = jiffies;
2231 list_add_tail(&request->list, &ring->request_list);
2232 request->file_priv = NULL;
2233
2234 if (file) {
2235 struct drm_i915_file_private *file_priv = file->driver_priv;
2236
2237 spin_lock(&file_priv->mm.lock);
2238 request->file_priv = file_priv;
2239 list_add_tail(&request->client_list,
2240 &file_priv->mm.request_list);
2241 spin_unlock(&file_priv->mm.lock);
2242 }
2243
2244 trace_i915_gem_request_add(ring, request->seqno);
2245 ring->outstanding_lazy_seqno = 0;
2246 ring->preallocated_lazy_request = NULL;
2247
2248 if (!dev_priv->ums.mm_suspended) {
2249 i915_queue_hangcheck(ring->dev);
2250
2251 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2252 queue_delayed_work(dev_priv->wq,
2253 &dev_priv->mm.retire_work,
2254 round_jiffies_up_relative(HZ));
2255 intel_mark_busy(dev_priv->dev);
2256 }
2257
2258 if (out_seqno)
2259 *out_seqno = request->seqno;
2260 return 0;
2261 }
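/*
 * Illustrative sketch, not part of the driver: submission paths finish a
 * batch by adding a request on the ring that executed it, simplified here
 * from the execbuffer/flush code:
 *
 *	ret = __i915_add_request(ring, file, batch_obj, NULL);
 *	if (ret)
 *		return ret;
 *
 * Passing a non-NULL out_seqno lets the caller remember the seqno that will
 * signal completion of everything emitted so far on this ring.
 */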
2262
2263 static inline void
2264 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2265 {
2266 struct drm_i915_file_private *file_priv = request->file_priv;
2267
2268 if (!file_priv)
2269 return;
2270
2271 spin_lock(&file_priv->mm.lock);
2272 list_del(&request->client_list);
2273 request->file_priv = NULL;
2274 spin_unlock(&file_priv->mm.lock);
2275 }
2276
2277 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2278 const struct i915_hw_context *ctx)
2279 {
2280 unsigned long elapsed;
2281
2282 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2283
2284 if (ctx->hang_stats.banned)
2285 return true;
2286
2287 if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
2288 if (!i915_gem_context_is_default(ctx)) {
2289 DRM_DEBUG("context hanging too fast, banning!\n");
2290 return true;
2291 } else if (i915_stop_ring_allow_ban(dev_priv)) {
2292 if (i915_stop_ring_allow_warn(dev_priv))
2293 DRM_ERROR("gpu hanging too fast, banning!\n");
2294 return true;
2295 }
2296 }
2297
2298 return false;
2299 }
2300
2301 static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2302 struct i915_hw_context *ctx,
2303 const bool guilty)
2304 {
2305 struct i915_ctx_hang_stats *hs;
2306
2307 if (WARN_ON(!ctx))
2308 return;
2309
2310 hs = &ctx->hang_stats;
2311
2312 if (guilty) {
2313 hs->banned = i915_context_is_banned(dev_priv, ctx);
2314 hs->batch_active++;
2315 hs->guilty_ts = get_seconds();
2316 } else {
2317 hs->batch_pending++;
2318 }
2319 }
2320
2321 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2322 {
2323 list_del(&request->list);
2324 i915_gem_request_remove_from_client(request);
2325
2326 if (request->ctx)
2327 i915_gem_context_unreference(request->ctx);
2328
2329 kfree(request);
2330 }
2331
2332 struct drm_i915_gem_request *
2333 i915_gem_find_active_request(struct intel_ring_buffer *ring)
2334 {
2335 struct drm_i915_gem_request *request;
2336 u32 completed_seqno;
2337
2338 completed_seqno = ring->get_seqno(ring, false);
2339
2340 list_for_each_entry(request, &ring->request_list, list) {
2341 if (i915_seqno_passed(completed_seqno, request->seqno))
2342 continue;
2343
2344 return request;
2345 }
2346
2347 return NULL;
2348 }
2349
2350 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2351 struct intel_ring_buffer *ring)
2352 {
2353 struct drm_i915_gem_request *request;
2354 bool ring_hung;
2355
2356 request = i915_gem_find_active_request(ring);
2357
2358 if (request == NULL)
2359 return;
2360
2361 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2362
2363 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
2364
2365 list_for_each_entry_continue(request, &ring->request_list, list)
2366 i915_set_reset_status(dev_priv, request->ctx, false);
2367 }
2368
2369 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2370 struct intel_ring_buffer *ring)
2371 {
2372 while (!list_empty(&ring->active_list)) {
2373 struct drm_i915_gem_object *obj;
2374
2375 obj = list_first_entry(&ring->active_list,
2376 struct drm_i915_gem_object,
2377 ring_list);
2378
2379 i915_gem_object_move_to_inactive(obj);
2380 }
2381
2382 /*
2383 * We must free the requests after all the corresponding objects have
2384 * been moved off the active lists, which is the same order the normal
2385 * retire_requests function uses. This is important if objects hold
2386 * implicit references on things such as ppgtt address spaces through
2387 * the request.
2388 */
2389 while (!list_empty(&ring->request_list)) {
2390 struct drm_i915_gem_request *request;
2391
2392 request = list_first_entry(&ring->request_list,
2393 struct drm_i915_gem_request,
2394 list);
2395
2396 i915_gem_free_request(request);
2397 }
2398
2399 /* These may not have been flushed before the reset, so do so now */
2400 kfree(ring->preallocated_lazy_request);
2401 ring->preallocated_lazy_request = NULL;
2402 ring->outstanding_lazy_seqno = 0;
2403 }
2404
2405 void i915_gem_restore_fences(struct drm_device *dev)
2406 {
2407 struct drm_i915_private *dev_priv = dev->dev_private;
2408 int i;
2409
2410 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2411 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2412
2413 /*
2414 * Commit delayed tiling changes if we have an object still
2415 * attached to the fence, otherwise just clear the fence.
2416 */
2417 if (reg->obj) {
2418 i915_gem_object_update_fence(reg->obj, reg,
2419 reg->obj->tiling_mode);
2420 } else {
2421 i915_gem_write_fence(dev, i, NULL);
2422 }
2423 }
2424 }
2425
2426 void i915_gem_reset(struct drm_device *dev)
2427 {
2428 struct drm_i915_private *dev_priv = dev->dev_private;
2429 struct intel_ring_buffer *ring;
2430 int i;
2431
2432 /*
2433 * Before we free the objects from the requests, we need to inspect
2434 * them for finding the guilty party. As the requests only borrow
2435 * their reference to the objects, the inspection must be done first.
2436 */
2437 for_each_ring(ring, dev_priv, i)
2438 i915_gem_reset_ring_status(dev_priv, ring);
2439
2440 for_each_ring(ring, dev_priv, i)
2441 i915_gem_reset_ring_cleanup(dev_priv, ring);
2442
2443 i915_gem_context_reset(dev);
2444
2445 i915_gem_restore_fences(dev);
2446 }
2447
2448 /**
2449 * This function clears the request list as sequence numbers are passed.
2450 */
2451 static void
2452 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2453 {
2454 uint32_t seqno;
2455
2456 if (list_empty(&ring->request_list))
2457 return;
2458
2459 WARN_ON(i915_verify_lists(ring->dev));
2460
2461 seqno = ring->get_seqno(ring, true);
2462
2463 /* Move any buffers on the active list that are no longer referenced
2464 * by the ringbuffer to the flushing/inactive lists as appropriate,
2465 * before we free the context associated with the requests.
2466 */
2467 while (!list_empty(&ring->active_list)) {
2468 struct drm_i915_gem_object *obj;
2469
2470 obj = list_first_entry(&ring->active_list,
2471 struct drm_i915_gem_object,
2472 ring_list);
2473
2474 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2475 break;
2476
2477 i915_gem_object_move_to_inactive(obj);
2478 }
2479
2480
2481 while (!list_empty(&ring->request_list)) {
2482 struct drm_i915_gem_request *request;
2483
2484 request = list_first_entry(&ring->request_list,
2485 struct drm_i915_gem_request,
2486 list);
2487
2488 if (!i915_seqno_passed(seqno, request->seqno))
2489 break;
2490
2491 trace_i915_gem_request_retire(ring, request->seqno);
2492 /* We know the GPU must have read the request to have
2493 * sent us the seqno + interrupt, so use the position
2494 * of the tail of the request to update the last known position
2495 * of the GPU head.
2496 */
2497 ring->last_retired_head = request->tail;
2498
2499 i915_gem_free_request(request);
2500 }
2501
2502 if (unlikely(ring->trace_irq_seqno &&
2503 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2504 ring->irq_put(ring);
2505 ring->trace_irq_seqno = 0;
2506 }
2507
2508 WARN_ON(i915_verify_lists(ring->dev));
2509 }
2510
2511 bool
2512 i915_gem_retire_requests(struct drm_device *dev)
2513 {
2514 struct drm_i915_private *dev_priv = dev->dev_private;
2515 struct intel_ring_buffer *ring;
2516 bool idle = true;
2517 int i;
2518
2519 for_each_ring(ring, dev_priv, i) {
2520 i915_gem_retire_requests_ring(ring);
2521 idle &= list_empty(&ring->request_list);
2522 }
2523
2524 if (idle)
2525 mod_delayed_work(dev_priv->wq,
2526 &dev_priv->mm.idle_work,
2527 msecs_to_jiffies(100));
2528
2529 return idle;
2530 }
2531
2532 static void
2533 i915_gem_retire_work_handler(struct work_struct *work)
2534 {
2535 struct drm_i915_private *dev_priv =
2536 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2537 struct drm_device *dev = dev_priv->dev;
2538 bool idle;
2539
2540 /* Come back later if the device is busy... */
2541 idle = false;
2542 if (mutex_trylock(&dev->struct_mutex)) {
2543 idle = i915_gem_retire_requests(dev);
2544 mutex_unlock(&dev->struct_mutex);
2545 }
2546 if (!idle)
2547 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2548 round_jiffies_up_relative(HZ));
2549 }
2550
2551 static void
2552 i915_gem_idle_work_handler(struct work_struct *work)
2553 {
2554 struct drm_i915_private *dev_priv =
2555 container_of(work, typeof(*dev_priv), mm.idle_work.work);
2556
2557 intel_mark_idle(dev_priv->dev);
2558 }
2559
2560 /**
2561 * Ensures that an object will eventually get non-busy by flushing any required
2562 * write domains, emitting any outstanding lazy request and retiring any
2563 * completed requests.
2564 */
2565 static int
2566 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2567 {
2568 int ret;
2569
2570 if (obj->active) {
2571 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2572 if (ret)
2573 return ret;
2574
2575 i915_gem_retire_requests_ring(obj->ring);
2576 }
2577
2578 return 0;
2579 }
2580
2581 /**
2582 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2583 * @DRM_IOCTL_ARGS: standard ioctl arguments
2584 *
2585 * Returns 0 if successful, else an error is returned with the remaining time in
2586 * the timeout parameter.
2587 * -ETIME: object is still busy after timeout
2588 * -ERESTARTSYS: signal interrupted the wait
2589 * -ENOENT: object doesn't exist
2590 * Also possible, but rare:
2591 * -EAGAIN: GPU wedged
2592 * -ENOMEM: damn
2593 * -ENODEV: Internal IRQ fail
2594 * -E?: The add request failed
2595 *
2596 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2597 * non-zero timeout parameter the wait ioctl will wait for the given number of
2598 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2599 * without holding struct_mutex the object may become re-busied before this
2600 * function completes. A similar but shorter race condition exists in the busy
2601 * ioctl.
2602 */
2603 int
2604 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2605 {
2606 struct drm_i915_private *dev_priv = dev->dev_private;
2607 struct drm_i915_gem_wait *args = data;
2608 struct drm_i915_gem_object *obj;
2609 struct intel_ring_buffer *ring = NULL;
2610 struct timespec timeout_stack, *timeout = NULL;
2611 unsigned reset_counter;
2612 u32 seqno = 0;
2613 int ret = 0;
2614
2615 if (args->timeout_ns >= 0) {
2616 timeout_stack = ns_to_timespec(args->timeout_ns);
2617 timeout = &timeout_stack;
2618 }
2619
2620 ret = i915_mutex_lock_interruptible(dev);
2621 if (ret)
2622 return ret;
2623
2624 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2625 if (&obj->base == NULL) {
2626 mutex_unlock(&dev->struct_mutex);
2627 return -ENOENT;
2628 }
2629
2630 /* Need to make sure the object gets inactive eventually. */
2631 ret = i915_gem_object_flush_active(obj);
2632 if (ret)
2633 goto out;
2634
2635 if (obj->active) {
2636 seqno = obj->last_read_seqno;
2637 ring = obj->ring;
2638 }
2639
2640 if (seqno == 0)
2641 goto out;
2642
2643 /* Do this after the OLR check to make sure we make forward progress polling
2644 * on this IOCTL with a 0 timeout (like the busy ioctl)
2645 */
2646 if (!args->timeout_ns) {
2647 ret = -ETIME;
2648 goto out;
2649 }
2650
2651 drm_gem_object_unreference(&obj->base);
2652 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2653 mutex_unlock(&dev->struct_mutex);
2654
2655 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
2656 if (timeout)
2657 args->timeout_ns = timespec_to_ns(timeout);
2658 return ret;
2659
2660 out:
2661 drm_gem_object_unreference(&obj->base);
2662 mutex_unlock(&dev->struct_mutex);
2663 return ret;
2664 }
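/*
 * Illustrative userspace sketch, not part of the driver, assuming a
 * libdrm-style drmIoctl() wrapper is available: waiting up to 10ms for a
 * buffer to go idle and reading back the unused budget could look like:
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 10 * 1000 * 1000,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0)
 *		remaining_ns = wait.timeout_ns;
 *
 * A return of -1 with errno == ETIME means the buffer was still busy when
 * the 10ms budget expired.
 */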
2665
2666 /**
2667 * i915_gem_object_sync - sync an object to a ring.
2668 *
2669 * @obj: object which may be in use on another ring.
2670 * @to: ring we wish to use the object on. May be NULL.
2671 *
2672 * This code is meant to abstract object synchronization with the GPU.
2673 * Calling with NULL implies synchronizing the object with the CPU
2674 * rather than a particular GPU ring.
2675 *
2676 * Returns 0 if successful, else propagates up the lower layer error.
2677 */
2678 int
2679 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2680 struct intel_ring_buffer *to)
2681 {
2682 struct intel_ring_buffer *from = obj->ring;
2683 u32 seqno;
2684 int ret, idx;
2685
2686 if (from == NULL || to == from)
2687 return 0;
2688
2689 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2690 return i915_gem_object_wait_rendering(obj, false);
2691
2692 idx = intel_ring_sync_index(from, to);
2693
2694 seqno = obj->last_read_seqno;
2695 if (seqno <= from->semaphore.sync_seqno[idx])
2696 return 0;
2697
2698 ret = i915_gem_check_olr(obj->ring, seqno);
2699 if (ret)
2700 return ret;
2701
2702 trace_i915_gem_ring_sync_to(from, to, seqno);
2703 ret = to->semaphore.sync_to(to, from, seqno);
2704 if (!ret)
2705 /* We use last_read_seqno because sync_to()
2706 * might have just caused seqno wrap under
2707 * the radar.
2708 */
2709 from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
2710
2711 return ret;
2712 }
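/*
 * Illustrative sketch, not part of the driver: a typical use is making a
 * buffer last touched by one ring safe to use on another before emitting
 * new commands, as the display pinning code does further below:
 *
 *	ret = i915_gem_object_sync(obj, ring);
 *	if (ret)
 *		return ret;
 *
 * With semaphores enabled this emits a GPU-side wait; otherwise it falls
 * back to a CPU wait for the object's outstanding rendering.
 */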
2713
2714 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2715 {
2716 u32 old_write_domain, old_read_domains;
2717
2718 /* Force a pagefault for domain tracking on next user access */
2719 i915_gem_release_mmap(obj);
2720
2721 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2722 return;
2723
2724 /* Wait for any direct GTT access to complete */
2725 mb();
2726
2727 old_read_domains = obj->base.read_domains;
2728 old_write_domain = obj->base.write_domain;
2729
2730 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2731 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2732
2733 trace_i915_gem_object_change_domain(obj,
2734 old_read_domains,
2735 old_write_domain);
2736 }
2737
2738 int i915_vma_unbind(struct i915_vma *vma)
2739 {
2740 struct drm_i915_gem_object *obj = vma->obj;
2741 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2742 int ret;
2743
2744 if (list_empty(&vma->vma_link))
2745 return 0;
2746
2747 if (!drm_mm_node_allocated(&vma->node)) {
2748 i915_gem_vma_destroy(vma);
2749 return 0;
2750 }
2751
2752 if (vma->pin_count)
2753 return -EBUSY;
2754
2755 BUG_ON(obj->pages == NULL);
2756
2757 ret = i915_gem_object_finish_gpu(obj);
2758 if (ret)
2759 return ret;
2760 /* Continue on if we fail due to EIO; the GPU is hung, so we
2761 * should be safe, and we need to clean up or else we might
2762 * cause memory corruption through use-after-free.
2763 */
2764
2765 i915_gem_object_finish_gtt(obj);
2766
2767 /* release the fence reg _after_ flushing */
2768 ret = i915_gem_object_put_fence(obj);
2769 if (ret)
2770 return ret;
2771
2772 trace_i915_vma_unbind(vma);
2773
2774 vma->unbind_vma(vma);
2775
2776 i915_gem_gtt_finish_object(obj);
2777
2778 list_del_init(&vma->mm_list);
2779 /* Avoid an unnecessary call to unbind on rebind. */
2780 if (i915_is_ggtt(vma->vm))
2781 obj->map_and_fenceable = true;
2782
2783 drm_mm_remove_node(&vma->node);
2784 i915_gem_vma_destroy(vma);
2785
2786 /* Since the unbound list is global, only move to that list if
2787 * no more VMAs exist. */
2788 if (list_empty(&obj->vma_list))
2789 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2790
2791 /* And finally now the object is completely decoupled from this vma,
2792 * we can drop its hold on the backing storage and allow it to be
2793 * reaped by the shrinker.
2794 */
2795 i915_gem_object_unpin_pages(obj);
2796
2797 return 0;
2798 }
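/*
 * Illustrative sketch, not part of the driver: to drop every GPU binding of
 * an object, callers walk its vma_list and unbind each VMA in turn, much as
 * the shrinker does earlier in this file:
 *
 *	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
 *		if (i915_vma_unbind(vma))
 *			break;
 */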
2799
2800 int i915_gpu_idle(struct drm_device *dev)
2801 {
2802 struct drm_i915_private *dev_priv = dev->dev_private;
2803 struct intel_ring_buffer *ring;
2804 int ret, i;
2805
2806 /* Flush everything onto the inactive list. */
2807 for_each_ring(ring, dev_priv, i) {
2808 ret = i915_switch_context(ring, ring->default_context);
2809 if (ret)
2810 return ret;
2811
2812 ret = intel_ring_idle(ring);
2813 if (ret)
2814 return ret;
2815 }
2816
2817 return 0;
2818 }
2819
2820 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2821 struct drm_i915_gem_object *obj)
2822 {
2823 struct drm_i915_private *dev_priv = dev->dev_private;
2824 int fence_reg;
2825 int fence_pitch_shift;
2826
2827 if (INTEL_INFO(dev)->gen >= 6) {
2828 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2829 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2830 } else {
2831 fence_reg = FENCE_REG_965_0;
2832 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2833 }
2834
2835 fence_reg += reg * 8;
2836
2837 /* To w/a incoherency with non-atomic 64-bit register updates,
2838 * we split the 64-bit update into two 32-bit writes. In order
2839 * for a partial fence not to be evaluated between writes, we
2840 * precede the update with a write to turn off the fence register,
2841 * and only enable the fence as the last step.
2842 *
2843 * For extra levels of paranoia, we make sure each step lands
2844 * before applying the next step.
2845 */
2846 I915_WRITE(fence_reg, 0);
2847 POSTING_READ(fence_reg);
2848
2849 if (obj) {
2850 u32 size = i915_gem_obj_ggtt_size(obj);
2851 uint64_t val;
2852
2853 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
2854 0xfffff000) << 32;
2855 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
2856 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2857 if (obj->tiling_mode == I915_TILING_Y)
2858 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2859 val |= I965_FENCE_REG_VALID;
2860
2861 I915_WRITE(fence_reg + 4, val >> 32);
2862 POSTING_READ(fence_reg + 4);
2863
2864 I915_WRITE(fence_reg + 0, val);
2865 POSTING_READ(fence_reg);
2866 } else {
2867 I915_WRITE(fence_reg + 4, 0);
2868 POSTING_READ(fence_reg + 4);
2869 }
2870 }
2871
2872 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2873 struct drm_i915_gem_object *obj)
2874 {
2875 struct drm_i915_private *dev_priv = dev->dev_private;
2876 u32 val;
2877
2878 if (obj) {
2879 u32 size = i915_gem_obj_ggtt_size(obj);
2880 int pitch_val;
2881 int tile_width;
2882
2883 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
2884 (size & -size) != size ||
2885 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2886 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2887 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
2888
2889 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2890 tile_width = 128;
2891 else
2892 tile_width = 512;
2893
2894 /* Note: pitch better be a power of two tile widths */
2895 pitch_val = obj->stride / tile_width;
2896 pitch_val = ffs(pitch_val) - 1;
2897
2898 val = i915_gem_obj_ggtt_offset(obj);
2899 if (obj->tiling_mode == I915_TILING_Y)
2900 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2901 val |= I915_FENCE_SIZE_BITS(size);
2902 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2903 val |= I830_FENCE_REG_VALID;
2904 } else
2905 val = 0;
2906
2907 if (reg < 8)
2908 reg = FENCE_REG_830_0 + reg * 4;
2909 else
2910 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2911
2912 I915_WRITE(reg, val);
2913 POSTING_READ(reg);
2914 }
2915
2916 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2917 struct drm_i915_gem_object *obj)
2918 {
2919 struct drm_i915_private *dev_priv = dev->dev_private;
2920 uint32_t val;
2921
2922 if (obj) {
2923 u32 size = i915_gem_obj_ggtt_size(obj);
2924 uint32_t pitch_val;
2925
2926 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
2927 (size & -size) != size ||
2928 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2929 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2930 i915_gem_obj_ggtt_offset(obj), size);
2931
2932 pitch_val = obj->stride / 128;
2933 pitch_val = ffs(pitch_val) - 1;
2934
2935 val = i915_gem_obj_ggtt_offset(obj);
2936 if (obj->tiling_mode == I915_TILING_Y)
2937 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2938 val |= I830_FENCE_SIZE_BITS(size);
2939 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2940 val |= I830_FENCE_REG_VALID;
2941 } else
2942 val = 0;
2943
2944 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2945 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2946 }
2947
2948 static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2949 {
2950 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2951 }
2952
2953 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2954 struct drm_i915_gem_object *obj)
2955 {
2956 struct drm_i915_private *dev_priv = dev->dev_private;
2957
2958 /* Ensure that all CPU reads are completed before installing a fence
2959 * and all writes before removing the fence.
2960 */
2961 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2962 mb();
2963
2964 WARN(obj && (!obj->stride || !obj->tiling_mode),
2965 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2966 obj->stride, obj->tiling_mode);
2967
2968 switch (INTEL_INFO(dev)->gen) {
2969 case 8:
2970 case 7:
2971 case 6:
2972 case 5:
2973 case 4: i965_write_fence_reg(dev, reg, obj); break;
2974 case 3: i915_write_fence_reg(dev, reg, obj); break;
2975 case 2: i830_write_fence_reg(dev, reg, obj); break;
2976 default: BUG();
2977 }
2978
2979 /* And similarly be paranoid that no direct access to this region
2980 * is reordered to before the fence is installed.
2981 */
2982 if (i915_gem_object_needs_mb(obj))
2983 mb();
2984 }
2985
2986 static inline int fence_number(struct drm_i915_private *dev_priv,
2987 struct drm_i915_fence_reg *fence)
2988 {
2989 return fence - dev_priv->fence_regs;
2990 }
2991
2992 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2993 struct drm_i915_fence_reg *fence,
2994 bool enable)
2995 {
2996 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2997 int reg = fence_number(dev_priv, fence);
2998
2999 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
3000
3001 if (enable) {
3002 obj->fence_reg = reg;
3003 fence->obj = obj;
3004 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3005 } else {
3006 obj->fence_reg = I915_FENCE_REG_NONE;
3007 fence->obj = NULL;
3008 list_del_init(&fence->lru_list);
3009 }
3010 obj->fence_dirty = false;
3011 }
3012
3013 static int
3014 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
3015 {
3016 if (obj->last_fenced_seqno) {
3017 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
3018 if (ret)
3019 return ret;
3020
3021 obj->last_fenced_seqno = 0;
3022 }
3023
3024 obj->fenced_gpu_access = false;
3025 return 0;
3026 }
3027
3028 int
3029 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3030 {
3031 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3032 struct drm_i915_fence_reg *fence;
3033 int ret;
3034
3035 ret = i915_gem_object_wait_fence(obj);
3036 if (ret)
3037 return ret;
3038
3039 if (obj->fence_reg == I915_FENCE_REG_NONE)
3040 return 0;
3041
3042 fence = &dev_priv->fence_regs[obj->fence_reg];
3043
3044 i915_gem_object_fence_lost(obj);
3045 i915_gem_object_update_fence(obj, fence, false);
3046
3047 return 0;
3048 }
3049
3050 static struct drm_i915_fence_reg *
3051 i915_find_fence_reg(struct drm_device *dev)
3052 {
3053 struct drm_i915_private *dev_priv = dev->dev_private;
3054 struct drm_i915_fence_reg *reg, *avail;
3055 int i;
3056
3057 /* First try to find a free reg */
3058 avail = NULL;
3059 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3060 reg = &dev_priv->fence_regs[i];
3061 if (!reg->obj)
3062 return reg;
3063
3064 if (!reg->pin_count)
3065 avail = reg;
3066 }
3067
3068 if (avail == NULL)
3069 goto deadlock;
3070
3071 /* None available, try to steal one or wait for a user to finish */
3072 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3073 if (reg->pin_count)
3074 continue;
3075
3076 return reg;
3077 }
3078
3079 deadlock:
3080 /* Wait for completion of pending flips which consume fences */
3081 if (intel_has_pending_fb_unpin(dev))
3082 return ERR_PTR(-EAGAIN);
3083
3084 return ERR_PTR(-EDEADLK);
3085 }
3086
3087 /**
3088 * i915_gem_object_get_fence - set up fencing for an object
3089 * @obj: object to map through a fence reg
3090 *
3091 * When mapping objects through the GTT, userspace wants to be able to write
3092 * to them without having to worry about swizzling if the object is tiled.
3093 * This function walks the fence regs looking for a free one for @obj,
3094 * stealing one if it can't find any.
3095 *
3096 * It then sets up the reg based on the object's properties: address, pitch
3097 * and tiling format.
3098 *
3099 * For an untiled surface, this removes any existing fence.
3100 */
3101 int
3102 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3103 {
3104 struct drm_device *dev = obj->base.dev;
3105 struct drm_i915_private *dev_priv = dev->dev_private;
3106 bool enable = obj->tiling_mode != I915_TILING_NONE;
3107 struct drm_i915_fence_reg *reg;
3108 int ret;
3109
3110 /* Have we updated the tiling parameters upon the object and so
3111 * will need to serialise the write to the associated fence register?
3112 */
3113 if (obj->fence_dirty) {
3114 ret = i915_gem_object_wait_fence(obj);
3115 if (ret)
3116 return ret;
3117 }
3118
3119 /* Just update our place in the LRU if our fence is getting reused. */
3120 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3121 reg = &dev_priv->fence_regs[obj->fence_reg];
3122 if (!obj->fence_dirty) {
3123 list_move_tail(&reg->lru_list,
3124 &dev_priv->mm.fence_list);
3125 return 0;
3126 }
3127 } else if (enable) {
3128 reg = i915_find_fence_reg(dev);
3129 if (IS_ERR(reg))
3130 return PTR_ERR(reg);
3131
3132 if (reg->obj) {
3133 struct drm_i915_gem_object *old = reg->obj;
3134
3135 ret = i915_gem_object_wait_fence(old);
3136 if (ret)
3137 return ret;
3138
3139 i915_gem_object_fence_lost(old);
3140 }
3141 } else
3142 return 0;
3143
3144 i915_gem_object_update_fence(obj, reg, enable);
3145
3146 return 0;
3147 }
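/*
 * Illustrative sketch, not part of the driver: tiled objects that will be
 * accessed through the mappable GTT are first pinned and then given a fence
 * so that CPU access through the aperture sees the expected detiled layout:
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
 *	if (ret)
 *		return ret;
 *
 *	ret = i915_gem_object_get_fence(obj);
 *	if (ret)
 *		goto unpin;
 */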
3148
3149 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3150 struct drm_mm_node *gtt_space,
3151 unsigned long cache_level)
3152 {
3153 struct drm_mm_node *other;
3154
3155 /* On non-LLC machines we have to be careful when putting differing
3156 * types of snoopable memory together to avoid the prefetcher
3157 * crossing memory domains and dying.
3158 */
3159 if (HAS_LLC(dev))
3160 return true;
3161
3162 if (!drm_mm_node_allocated(gtt_space))
3163 return true;
3164
3165 if (list_empty(&gtt_space->node_list))
3166 return true;
3167
3168 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3169 if (other->allocated && !other->hole_follows && other->color != cache_level)
3170 return false;
3171
3172 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3173 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3174 return false;
3175
3176 return true;
3177 }
3178
3179 static void i915_gem_verify_gtt(struct drm_device *dev)
3180 {
3181 #if WATCH_GTT
3182 struct drm_i915_private *dev_priv = dev->dev_private;
3183 struct drm_i915_gem_object *obj;
3184 int err = 0;
3185
3186 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
3187 if (obj->gtt_space == NULL) {
3188 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3189 err++;
3190 continue;
3191 }
3192
3193 if (obj->cache_level != obj->gtt_space->color) {
3194 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3195 i915_gem_obj_ggtt_offset(obj),
3196 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3197 obj->cache_level,
3198 obj->gtt_space->color);
3199 err++;
3200 continue;
3201 }
3202
3203 if (!i915_gem_valid_gtt_space(dev,
3204 obj->gtt_space,
3205 obj->cache_level)) {
3206 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3207 i915_gem_obj_ggtt_offset(obj),
3208 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3209 obj->cache_level);
3210 err++;
3211 continue;
3212 }
3213 }
3214
3215 WARN_ON(err);
3216 #endif
3217 }
3218
3219 /**
3220 * Finds free space in the GTT aperture and binds the object there.
3221 */
3222 static struct i915_vma *
3223 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3224 struct i915_address_space *vm,
3225 unsigned alignment,
3226 unsigned flags)
3227 {
3228 struct drm_device *dev = obj->base.dev;
3229 struct drm_i915_private *dev_priv = dev->dev_private;
3230 u32 size, fence_size, fence_alignment, unfenced_alignment;
3231 size_t gtt_max =
3232 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
3233 struct i915_vma *vma;
3234 int ret;
3235
3236 fence_size = i915_gem_get_gtt_size(dev,
3237 obj->base.size,
3238 obj->tiling_mode);
3239 fence_alignment = i915_gem_get_gtt_alignment(dev,
3240 obj->base.size,
3241 obj->tiling_mode, true);
3242 unfenced_alignment =
3243 i915_gem_get_gtt_alignment(dev,
3244 obj->base.size,
3245 obj->tiling_mode, false);
3246
3247 if (alignment == 0)
3248 alignment = flags & PIN_MAPPABLE ? fence_alignment :
3249 unfenced_alignment;
3250 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3251 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
3252 return ERR_PTR(-EINVAL);
3253 }
3254
3255 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3256
3257 /* If the object is bigger than the entire aperture, reject it early
3258 * before evicting everything in a vain attempt to find space.
3259 */
3260 if (obj->base.size > gtt_max) {
3261 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
3262 obj->base.size,
3263 flags & PIN_MAPPABLE ? "mappable" : "total",
3264 gtt_max);
3265 return ERR_PTR(-E2BIG);
3266 }
3267
3268 ret = i915_gem_object_get_pages(obj);
3269 if (ret)
3270 return ERR_PTR(ret);
3271
3272 i915_gem_object_pin_pages(obj);
3273
3274 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3275 if (IS_ERR(vma))
3276 goto err_unpin;
3277
3278 search_free:
3279 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3280 size, alignment,
3281 obj->cache_level, 0, gtt_max,
3282 DRM_MM_SEARCH_DEFAULT,
3283 DRM_MM_CREATE_DEFAULT);
3284 if (ret) {
3285 ret = i915_gem_evict_something(dev, vm, size, alignment,
3286 obj->cache_level, flags);
3287 if (ret == 0)
3288 goto search_free;
3289
3290 goto err_free_vma;
3291 }
3292 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
3293 obj->cache_level))) {
3294 ret = -EINVAL;
3295 goto err_remove_node;
3296 }
3297
3298 ret = i915_gem_gtt_prepare_object(obj);
3299 if (ret)
3300 goto err_remove_node;
3301
3302 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3303 list_add_tail(&vma->mm_list, &vm->inactive_list);
3304
3305 if (i915_is_ggtt(vm)) {
3306 bool mappable, fenceable;
3307
3308 fenceable = (vma->node.size == fence_size &&
3309 (vma->node.start & (fence_alignment - 1)) == 0);
3310
3311 mappable = (vma->node.start + obj->base.size <=
3312 dev_priv->gtt.mappable_end);
3313
3314 obj->map_and_fenceable = mappable && fenceable;
3315 }
3316
3317 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
3318
3319 trace_i915_vma_bind(vma, flags);
3320 vma->bind_vma(vma, obj->cache_level,
3321 flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
3322
3323 i915_gem_verify_gtt(dev);
3324 return vma;
3325
3326 err_remove_node:
3327 drm_mm_remove_node(&vma->node);
3328 err_free_vma:
3329 i915_gem_vma_destroy(vma);
3330 vma = ERR_PTR(ret);
3331 err_unpin:
3332 i915_gem_object_unpin_pages(obj);
3333 return vma;
3334 }
3335
3336 bool
3337 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3338 bool force)
3339 {
3340 /* If we don't have a page list set up, then we're not pinned
3341 * to the GPU, and we can ignore the cache flush because it'll happen
3342 * again at bind time.
3343 */
3344 if (obj->pages == NULL)
3345 return false;
3346
3347 /*
3348 * Stolen memory is always coherent with the GPU as it is explicitly
3349 * marked as wc by the system, or the system is cache-coherent.
3350 */
3351 if (obj->stolen)
3352 return false;
3353
3354 /* If the GPU is snooping the contents of the CPU cache,
3355 * we do not need to manually clear the CPU cache lines. However,
3356 * the caches are only snooped when the render cache is
3357 * flushed/invalidated. As we always have to emit invalidations
3358 * and flushes when moving into and out of the RENDER domain, correct
3359 * snooping behaviour occurs naturally as the result of our domain
3360 * tracking.
3361 */
3362 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3363 return false;
3364
3365 trace_i915_gem_object_clflush(obj);
3366 drm_clflush_sg(obj->pages);
3367
3368 return true;
3369 }
3370
3371 /** Flushes the GTT write domain for the object if it's dirty. */
3372 static void
3373 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3374 {
3375 uint32_t old_write_domain;
3376
3377 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3378 return;
3379
3380 /* No actual flushing is required for the GTT write domain. Writes
3381 * to it immediately go to main memory as far as we know, so there's
3382 * no chipset flush. It also doesn't land in the render cache.
3383 *
3384 * However, we do have to enforce the order so that all writes through
3385 * the GTT land before any writes to the device, such as updates to
3386 * the GATT itself.
3387 */
3388 wmb();
3389
3390 old_write_domain = obj->base.write_domain;
3391 obj->base.write_domain = 0;
3392
3393 trace_i915_gem_object_change_domain(obj,
3394 obj->base.read_domains,
3395 old_write_domain);
3396 }
3397
3398 /** Flushes the CPU write domain for the object if it's dirty. */
3399 static void
3400 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3401 bool force)
3402 {
3403 uint32_t old_write_domain;
3404
3405 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3406 return;
3407
3408 if (i915_gem_clflush_object(obj, force))
3409 i915_gem_chipset_flush(obj->base.dev);
3410
3411 old_write_domain = obj->base.write_domain;
3412 obj->base.write_domain = 0;
3413
3414 trace_i915_gem_object_change_domain(obj,
3415 obj->base.read_domains,
3416 old_write_domain);
3417 }
3418
3419 /**
3420 * Moves a single object to the GTT read, and possibly write domain.
3421 *
3422 * This function returns when the move is complete, including waiting on
3423 * flushes to occur.
3424 */
3425 int
3426 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3427 {
3428 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3429 uint32_t old_write_domain, old_read_domains;
3430 int ret;
3431
3432 /* Not valid to be called on unbound objects. */
3433 if (!i915_gem_obj_bound_any(obj))
3434 return -EINVAL;
3435
3436 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3437 return 0;
3438
3439 ret = i915_gem_object_wait_rendering(obj, !write);
3440 if (ret)
3441 return ret;
3442
3443 i915_gem_object_retire(obj);
3444 i915_gem_object_flush_cpu_write_domain(obj, false);
3445
3446 /* Serialise direct access to this object with the barriers for
3447 * coherent writes from the GPU, by effectively invalidating the
3448 * GTT domain upon first access.
3449 */
3450 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3451 mb();
3452
3453 old_write_domain = obj->base.write_domain;
3454 old_read_domains = obj->base.read_domains;
3455
3456 /* It should now be out of any other write domains, and we can update
3457 * the domain values for our changes.
3458 */
3459 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3460 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3461 if (write) {
3462 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3463 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3464 obj->dirty = 1;
3465 }
3466
3467 trace_i915_gem_object_change_domain(obj,
3468 old_read_domains,
3469 old_write_domain);
3470
3471 /* And bump the LRU for this access */
3472 if (i915_gem_object_is_inactive(obj)) {
3473 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3474 if (vma)
3475 list_move_tail(&vma->mm_list,
3476 &dev_priv->gtt.base.inactive_list);
3477
3478 }
3479
3480 return 0;
3481 }
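/*
 * Illustrative sketch, not part of the driver: the usual recipe for CPU
 * writes through the GTT aperture combines a mappable pin, the domain
 * change above and, for tiled buffers, a fence:
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
 *	...
 *	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *	...
 *	ret = i915_gem_object_get_fence(obj);
 *
 * after which the pages may be written through an ioremapped view of the
 * aperture, followed by i915_gem_object_ggtt_unpin(obj) when done.
 */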
3482
3483 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3484 enum i915_cache_level cache_level)
3485 {
3486 struct drm_device *dev = obj->base.dev;
3487 struct i915_vma *vma, *next;
3488 int ret;
3489
3490 if (obj->cache_level == cache_level)
3491 return 0;
3492
3493 if (i915_gem_obj_is_pinned(obj)) {
3494 DRM_DEBUG("can not change the cache level of pinned objects\n");
3495 return -EBUSY;
3496 }
3497
3498 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
3499 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
3500 ret = i915_vma_unbind(vma);
3501 if (ret)
3502 return ret;
3503 }
3504 }
3505
3506 if (i915_gem_obj_bound_any(obj)) {
3507 ret = i915_gem_object_finish_gpu(obj);
3508 if (ret)
3509 return ret;
3510
3511 i915_gem_object_finish_gtt(obj);
3512
3513 /* Before SandyBridge, you could not use tiling or fence
3514 * registers with snooped memory, so relinquish any fences
3515 * currently pointing to our region in the aperture.
3516 */
3517 if (INTEL_INFO(dev)->gen < 6) {
3518 ret = i915_gem_object_put_fence(obj);
3519 if (ret)
3520 return ret;
3521 }
3522
3523 list_for_each_entry(vma, &obj->vma_list, vma_link)
3524 if (drm_mm_node_allocated(&vma->node))
3525 vma->bind_vma(vma, cache_level,
3526 obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
3527 }
3528
3529 list_for_each_entry(vma, &obj->vma_list, vma_link)
3530 vma->node.color = cache_level;
3531 obj->cache_level = cache_level;
3532
3533 if (cpu_write_needs_clflush(obj)) {
3534 u32 old_read_domains, old_write_domain;
3535
3536 /* If we're coming from an LLC-cached level, then we haven't
3537 * actually been tracking whether the data is in the
3538 * CPU cache or not, since we only allow one bit set
3539 * in obj->write_domain and have been skipping the clflushes.
3540 * Just set it to the CPU cache for now.
3541 */
3542 i915_gem_object_retire(obj);
3543 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3544
3545 old_read_domains = obj->base.read_domains;
3546 old_write_domain = obj->base.write_domain;
3547
3548 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3549 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3550
3551 trace_i915_gem_object_change_domain(obj,
3552 old_read_domains,
3553 old_write_domain);
3554 }
3555
3556 i915_gem_verify_gtt(dev);
3557 return 0;
3558 }
3559
3560 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3561 struct drm_file *file)
3562 {
3563 struct drm_i915_gem_caching *args = data;
3564 struct drm_i915_gem_object *obj;
3565 int ret;
3566
3567 ret = i915_mutex_lock_interruptible(dev);
3568 if (ret)
3569 return ret;
3570
3571 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3572 if (&obj->base == NULL) {
3573 ret = -ENOENT;
3574 goto unlock;
3575 }
3576
3577 switch (obj->cache_level) {
3578 case I915_CACHE_LLC:
3579 case I915_CACHE_L3_LLC:
3580 args->caching = I915_CACHING_CACHED;
3581 break;
3582
3583 case I915_CACHE_WT:
3584 args->caching = I915_CACHING_DISPLAY;
3585 break;
3586
3587 default:
3588 args->caching = I915_CACHING_NONE;
3589 break;
3590 }
3591
3592 drm_gem_object_unreference(&obj->base);
3593 unlock:
3594 mutex_unlock(&dev->struct_mutex);
3595 return ret;
3596 }
3597
3598 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3599 struct drm_file *file)
3600 {
3601 struct drm_i915_gem_caching *args = data;
3602 struct drm_i915_gem_object *obj;
3603 enum i915_cache_level level;
3604 int ret;
3605
3606 switch (args->caching) {
3607 case I915_CACHING_NONE:
3608 level = I915_CACHE_NONE;
3609 break;
3610 case I915_CACHING_CACHED:
3611 level = I915_CACHE_LLC;
3612 break;
3613 case I915_CACHING_DISPLAY:
3614 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3615 break;
3616 default:
3617 return -EINVAL;
3618 }
3619
3620 ret = i915_mutex_lock_interruptible(dev);
3621 if (ret)
3622 return ret;
3623
3624 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3625 if (&obj->base == NULL) {
3626 ret = -ENOENT;
3627 goto unlock;
3628 }
3629
3630 ret = i915_gem_object_set_cache_level(obj, level);
3631
3632 drm_gem_object_unreference(&obj->base);
3633 unlock:
3634 mutex_unlock(&dev->struct_mutex);
3635 return ret;
3636 }
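/*
 * Illustrative userspace sketch, not part of the driver, assuming a
 * libdrm-style drmIoctl() wrapper: requesting LLC caching for a buffer
 * handle maps onto I915_CACHE_LLC via the switch above:
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */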
3637
3638 static bool is_pin_display(struct drm_i915_gem_object *obj)
3639 {
3640 /* There are 3 sources that pin objects:
3641 * 1. The display engine (scanouts, sprites, cursors);
3642 * 2. Reservations for execbuffer;
3643 * 3. The user.
3644 *
3645 * We can ignore reservations as we hold the struct_mutex and
3646 * are only called outside of the reservation path. The user
3647 * can only increment pin_count once, and so if after
3648 * subtracting the potential reference by the user, any pin_count
3649 * remains, it must be due to another use by the display engine.
3650 */
3651 return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
3652 }
3653
3654 /*
3655 * Prepare buffer for display plane (scanout, cursors, etc).
3656 * Can be called from an uninterruptible phase (modesetting) and allows
3657 * any flushes to be pipelined (for pageflips).
3658 */
3659 int
3660 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3661 u32 alignment,
3662 struct intel_ring_buffer *pipelined)
3663 {
3664 u32 old_read_domains, old_write_domain;
3665 int ret;
3666
3667 if (pipelined != obj->ring) {
3668 ret = i915_gem_object_sync(obj, pipelined);
3669 if (ret)
3670 return ret;
3671 }
3672
3673 /* Mark the pin_display early so that we account for the
3674 * display coherency whilst setting up the cache domains.
3675 */
3676 obj->pin_display = true;
3677
3678 /* The display engine is not coherent with the LLC cache on gen6. As
3679 * a result, we make sure that the pinning that is about to occur is
3680 * done with uncached PTEs. This is the lowest common denominator for all
3681 * chipsets.
3682 *
3683 * However for gen6+, we could do better by using the GFDT bit instead
3684 * of uncaching, which would allow us to flush all the LLC-cached data
3685 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3686 */
3687 ret = i915_gem_object_set_cache_level(obj,
3688 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3689 if (ret)
3690 goto err_unpin_display;
3691
3692 /* As the user may map the buffer once pinned in the display plane
3693 * (e.g. libkms for the bootup splash), we have to ensure that we
3694 * always use map_and_fenceable for all scanout buffers.
3695 */
3696 ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
3697 if (ret)
3698 goto err_unpin_display;
3699
3700 i915_gem_object_flush_cpu_write_domain(obj, true);
3701
3702 old_write_domain = obj->base.write_domain;
3703 old_read_domains = obj->base.read_domains;
3704
3705 /* It should now be out of any other write domains, and we can update
3706 * the domain values for our changes.
3707 */
3708 obj->base.write_domain = 0;
3709 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3710
3711 trace_i915_gem_object_change_domain(obj,
3712 old_read_domains,
3713 old_write_domain);
3714
3715 return 0;
3716
3717 err_unpin_display:
3718 obj->pin_display = is_pin_display(obj);
3719 return ret;
3720 }
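/*
 * Illustrative sketch, not part of the driver: the modeset code typically
 * pairs this with a fence for tiled scanout and drops the pin once the
 * framebuffer is no longer displayed, roughly:
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
 *	if (ret)
 *		return ret;
 *	ret = i915_gem_object_get_fence(obj);
 *	...
 *	i915_gem_object_unpin_from_display_plane(obj);
 */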
3721
3722 void
3723 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3724 {
3725 i915_gem_object_ggtt_unpin(obj);
3726 obj->pin_display = is_pin_display(obj);
3727 }
3728
3729 int
3730 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3731 {
3732 int ret;
3733
3734 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3735 return 0;
3736
3737 ret = i915_gem_object_wait_rendering(obj, false);
3738 if (ret)
3739 return ret;
3740
3741 /* Ensure that we invalidate the GPU's caches and TLBs. */
3742 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3743 return 0;
3744 }
3745
3746 /**
3747 * Moves a single object to the CPU read, and possibly write domain.
3748 *
3749 * This function returns when the move is complete, including waiting on
3750 * flushes to occur.
3751 */
3752 int
3753 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3754 {
3755 uint32_t old_write_domain, old_read_domains;
3756 int ret;
3757
3758 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3759 return 0;
3760
3761 ret = i915_gem_object_wait_rendering(obj, !write);
3762 if (ret)
3763 return ret;
3764
3765 i915_gem_object_retire(obj);
3766 i915_gem_object_flush_gtt_write_domain(obj);
3767
3768 old_write_domain = obj->base.write_domain;
3769 old_read_domains = obj->base.read_domains;
3770
3771 /* Flush the CPU cache if it's still invalid. */
3772 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3773 i915_gem_clflush_object(obj, false);
3774
3775 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3776 }
3777
3778 /* It should now be out of any other write domains, and we can update
3779 * the domain values for our changes.
3780 */
3781 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3782
3783 /* If we're writing through the CPU, then the GPU read domains will
3784 * need to be invalidated at next use.
3785 */
3786 if (write) {
3787 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3788 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3789 }
3790
3791 trace_i915_gem_object_change_domain(obj,
3792 old_read_domains,
3793 old_write_domain);
3794
3795 return 0;
3796 }
3797
3798 /* Throttle our rendering by waiting until the ring has completed our requests
3799 * emitted over 20 msec ago.
3800 *
3801 * Note that if we were to use the current jiffies each time around the loop,
3802 * we wouldn't escape the function with any frames outstanding if the time to
3803 * render a frame was over 20ms.
3804 *
3805 * This should get us reasonable parallelism between CPU and GPU but also
3806 * relatively low latency when blocking on a particular request to finish.
3807 */
3808 static int
3809 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3810 {
3811 struct drm_i915_private *dev_priv = dev->dev_private;
3812 struct drm_i915_file_private *file_priv = file->driver_priv;
3813 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3814 struct drm_i915_gem_request *request;
3815 struct intel_ring_buffer *ring = NULL;
3816 unsigned reset_counter;
3817 u32 seqno = 0;
3818 int ret;
3819
3820 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3821 if (ret)
3822 return ret;
3823
3824 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3825 if (ret)
3826 return ret;
3827
3828 spin_lock(&file_priv->mm.lock);
3829 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3830 if (time_after_eq(request->emitted_jiffies, recent_enough))
3831 break;
3832
3833 ring = request->ring;
3834 seqno = request->seqno;
3835 }
3836 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3837 spin_unlock(&file_priv->mm.lock);
3838
3839 if (seqno == 0)
3840 return 0;
3841
3842 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
3843 if (ret == 0)
3844 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3845
3846 return ret;
3847 }
3848
3849 int
3850 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3851 struct i915_address_space *vm,
3852 uint32_t alignment,
3853 unsigned flags)
3854 {
3855 struct i915_vma *vma;
3856 int ret;
3857
3858 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
3859 return -EINVAL;
3860
3861 vma = i915_gem_obj_to_vma(obj, vm);
3862 if (vma) {
3863 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3864 return -EBUSY;
3865
3866 if ((alignment &&
3867 vma->node.start & (alignment - 1)) ||
3868 (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
3869 WARN(vma->pin_count,
3870 "bo is already pinned with incorrect alignment:"
3871 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
3872 " obj->map_and_fenceable=%d\n",
3873 i915_gem_obj_offset(obj, vm), alignment,
3874 flags & PIN_MAPPABLE,
3875 obj->map_and_fenceable);
3876 ret = i915_vma_unbind(vma);
3877 if (ret)
3878 return ret;
3879
3880 vma = NULL;
3881 }
3882 }
3883
3884 if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
3885 vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
3886 if (IS_ERR(vma))
3887 return PTR_ERR(vma);
3888 }
3889
3890 if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
3891 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
3892
3893 vma->pin_count++;
3894 if (flags & PIN_MAPPABLE)
3895 obj->pin_mappable |= true;
3896
3897 return 0;
3898 }
3899
3900 void
3901 i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
3902 {
3903 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3904
3905 BUG_ON(!vma);
3906 BUG_ON(vma->pin_count == 0);
3907 BUG_ON(!i915_gem_obj_ggtt_bound(obj));
3908
3909 if (--vma->pin_count == 0)
3910 obj->pin_mappable = false;
3911 }
3912
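/* Legacy pin ioctl, only honoured on gen < 6: pin the object into the
 * mappable GGTT on behalf of userspace (the first user pin takes the actual
 * GGTT pin) and report the GGTT offset back through args->offset.
 */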
3913 int
3914 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3915 struct drm_file *file)
3916 {
3917 struct drm_i915_gem_pin *args = data;
3918 struct drm_i915_gem_object *obj;
3919 int ret;
3920
3921 if (INTEL_INFO(dev)->gen >= 6)
3922 return -ENODEV;
3923
3924 ret = i915_mutex_lock_interruptible(dev);
3925 if (ret)
3926 return ret;
3927
3928 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3929 if (&obj->base == NULL) {
3930 ret = -ENOENT;
3931 goto unlock;
3932 }
3933
3934 if (obj->madv != I915_MADV_WILLNEED) {
3935 DRM_DEBUG("Attempting to pin a purgeable buffer\n");
3936 ret = -EFAULT;
3937 goto out;
3938 }
3939
3940 if (obj->pin_filp != NULL && obj->pin_filp != file) {
3941 DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
3942 args->handle);
3943 ret = -EINVAL;
3944 goto out;
3945 }
3946
3947 if (obj->user_pin_count == ULONG_MAX) {
3948 ret = -EBUSY;
3949 goto out;
3950 }
3951
3952 if (obj->user_pin_count == 0) {
3953 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
3954 if (ret)
3955 goto out;
3956 }
3957
3958 obj->user_pin_count++;
3959 obj->pin_filp = file;
3960
3961 args->offset = i915_gem_obj_ggtt_offset(obj);
3962 out:
3963 drm_gem_object_unreference(&obj->base);
3964 unlock:
3965 mutex_unlock(&dev->struct_mutex);
3966 return ret;
3967 }
3968
3969 int
3970 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3971 struct drm_file *file)
3972 {
3973 struct drm_i915_gem_pin *args = data;
3974 struct drm_i915_gem_object *obj;
3975 int ret;
3976
3977 ret = i915_mutex_lock_interruptible(dev);
3978 if (ret)
3979 return ret;
3980
3981 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3982 if (&obj->base == NULL) {
3983 ret = -ENOENT;
3984 goto unlock;
3985 }
3986
3987 if (obj->pin_filp != file) {
3988 DRM_DEBUG("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3989 args->handle);
3990 ret = -EINVAL;
3991 goto out;
3992 }
3993 obj->user_pin_count--;
3994 if (obj->user_pin_count == 0) {
3995 obj->pin_filp = NULL;
3996 i915_gem_object_ggtt_unpin(obj);
3997 }
3998
3999 out:
4000 drm_gem_object_unreference(&obj->base);
4001 unlock:
4002 mutex_unlock(&dev->struct_mutex);
4003 return ret;
4004 }
4005
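/* Busy ioctl: flush any outstanding activity on the object and report
 * whether it is still busy. The low bits carry obj->active; if the object is
 * still in use by a ring, that ring's flag is packed into the upper 16 bits.
 */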
4006 int
4007 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4008 struct drm_file *file)
4009 {
4010 struct drm_i915_gem_busy *args = data;
4011 struct drm_i915_gem_object *obj;
4012 int ret;
4013
4014 ret = i915_mutex_lock_interruptible(dev);
4015 if (ret)
4016 return ret;
4017
4018 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4019 if (&obj->base == NULL) {
4020 ret = -ENOENT;
4021 goto unlock;
4022 }
4023
4024 /* Count all active objects as busy, even if they are not currently being
4025 * used by the GPU. Users of this interface expect objects to eventually
4026 * become non-busy without any further actions, therefore emit any
4027 * necessary flushes here.
4028 */
4029 ret = i915_gem_object_flush_active(obj);
4030
4031 args->busy = obj->active;
4032 if (obj->ring) {
4033 BUILD_BUG_ON(I915_NUM_RINGS > 16);
4034 args->busy |= intel_ring_flag(obj->ring) << 16;
4035 }
4036
4037 drm_gem_object_unreference(&obj->base);
4038 unlock:
4039 mutex_unlock(&dev->struct_mutex);
4040 return ret;
4041 }
4042
4043 int
4044 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4045 struct drm_file *file_priv)
4046 {
4047 return i915_gem_ring_throttle(dev, file_priv);
4048 }
4049
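/* Madvise ioctl: mark the object's backing storage as WILLNEED or DONTNEED.
 * Pinned objects are rejected, objects that have already been purged keep
 * their state, and a purgeable object with no pages attached is truncated
 * immediately. args->retained reports whether the backing storage survived.
 */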
4050 int
4051 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4052 struct drm_file *file_priv)
4053 {
4054 struct drm_i915_gem_madvise *args = data;
4055 struct drm_i915_gem_object *obj;
4056 int ret;
4057
4058 switch (args->madv) {
4059 case I915_MADV_DONTNEED:
4060 case I915_MADV_WILLNEED:
4061 break;
4062 default:
4063 return -EINVAL;
4064 }
4065
4066 ret = i915_mutex_lock_interruptible(dev);
4067 if (ret)
4068 return ret;
4069
4070 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
4071 if (&obj->base == NULL) {
4072 ret = -ENOENT;
4073 goto unlock;
4074 }
4075
4076 if (i915_gem_obj_is_pinned(obj)) {
4077 ret = -EINVAL;
4078 goto out;
4079 }
4080
4081 if (obj->madv != __I915_MADV_PURGED)
4082 obj->madv = args->madv;
4083
4084 /* if the object is no longer attached, discard its backing storage */
4085 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
4086 i915_gem_object_truncate(obj);
4087
4088 args->retained = obj->madv != __I915_MADV_PURGED;
4089
4090 out:
4091 drm_gem_object_unreference(&obj->base);
4092 unlock:
4093 mutex_unlock(&dev->struct_mutex);
4094 return ret;
4095 }
4096
4097 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4098 const struct drm_i915_gem_object_ops *ops)
4099 {
4100 INIT_LIST_HEAD(&obj->global_list);
4101 INIT_LIST_HEAD(&obj->ring_list);
4102 INIT_LIST_HEAD(&obj->obj_exec_link);
4103 INIT_LIST_HEAD(&obj->vma_list);
4104
4105 obj->ops = ops;
4106
4107 obj->fence_reg = I915_FENCE_REG_NONE;
4108 obj->madv = I915_MADV_WILLNEED;
4109 /* Avoid an unnecessary call to unbind on the first bind. */
4110 obj->map_and_fenceable = true;
4111
4112 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4113 }
4114
4115 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4116 .get_pages = i915_gem_object_get_pages_gtt,
4117 .put_pages = i915_gem_object_put_pages_gtt,
4118 };
4119
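/* Allocate a new shmemfs-backed GEM object of the given size, starting life
 * in the CPU domain. 965G/965GM get a DMA32 GFP mask as they cannot relocate
 * objects above 4GiB, and LLC platforms default to LLC caching.
 */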
4120 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4121 size_t size)
4122 {
4123 struct drm_i915_gem_object *obj;
4124 struct address_space *mapping;
4125 gfp_t mask;
4126
4127 obj = i915_gem_object_alloc(dev);
4128 if (obj == NULL)
4129 return NULL;
4130
4131 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4132 i915_gem_object_free(obj);
4133 return NULL;
4134 }
4135
4136 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4137 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4138 /* 965gm cannot relocate objects above 4GiB. */
4139 mask &= ~__GFP_HIGHMEM;
4140 mask |= __GFP_DMA32;
4141 }
4142
4143 mapping = file_inode(obj->base.filp)->i_mapping;
4144 mapping_set_gfp_mask(mapping, mask);
4145
4146 i915_gem_object_init(obj, &i915_gem_object_ops);
4147
4148 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4149 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4150
4151 if (HAS_LLC(dev)) {
4152 /* On some devices, we can have the GPU use the LLC (the CPU
4153 * cache) for about a 10% performance improvement
4154 * compared to uncached. Graphics requests other than
4155 * display scanout are coherent with the CPU in
4156 * accessing this cache. This means in this mode we
4157 * don't need to clflush on the CPU side, and on the
4158 * GPU side we only need to flush internal caches to
4159 * get data visible to the CPU.
4160 *
4161 * However, we maintain the display planes as UC, and so
4162 * need to rebind when first used as such.
4163 */
4164 obj->cache_level = I915_CACHE_LLC;
4165 } else
4166 obj->cache_level = I915_CACHE_NONE;
4167
4168 trace_i915_gem_object_create(obj);
4169
4170 return obj;
4171 }
4172
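/* Final teardown of an object: force-unbind any remaining vmas (dropping
 * their pin counts), release the backing pages, mmap offset and stolen
 * space, and remove the object from the driver's bookkeeping.
 */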
4173 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4174 {
4175 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4176 struct drm_device *dev = obj->base.dev;
4177 struct drm_i915_private *dev_priv = dev->dev_private;
4178 struct i915_vma *vma, *next;
4179
4180 intel_runtime_pm_get(dev_priv);
4181
4182 trace_i915_gem_object_destroy(obj);
4183
4184 if (obj->phys_obj)
4185 i915_gem_detach_phys_object(dev, obj);
4186
4187 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4188 int ret;
4189
4190 vma->pin_count = 0;
4191 ret = i915_vma_unbind(vma);
4192 if (WARN_ON(ret == -ERESTARTSYS)) {
4193 bool was_interruptible;
4194
4195 was_interruptible = dev_priv->mm.interruptible;
4196 dev_priv->mm.interruptible = false;
4197
4198 WARN_ON(i915_vma_unbind(vma));
4199
4200 dev_priv->mm.interruptible = was_interruptible;
4201 }
4202 }
4203
4204 /* Stolen objects don't hold a ref, but do hold a pages pin count. Fix that
4205 * up before progressing. */
4206 if (obj->stolen)
4207 i915_gem_object_unpin_pages(obj);
4208
4209 if (WARN_ON(obj->pages_pin_count))
4210 obj->pages_pin_count = 0;
4211 i915_gem_object_put_pages(obj);
4212 i915_gem_object_free_mmap_offset(obj);
4213 i915_gem_object_release_stolen(obj);
4214
4215 BUG_ON(obj->pages);
4216
4217 if (obj->base.import_attach)
4218 drm_prime_gem_destroy(&obj->base, NULL);
4219
4220 drm_gem_object_release(&obj->base);
4221 i915_gem_info_remove_obj(dev_priv, obj->base.size);
4222
4223 kfree(obj->bit_17);
4224 i915_gem_object_free(obj);
4225
4226 intel_runtime_pm_put(dev_priv);
4227 }
4228
4229 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4230 struct i915_address_space *vm)
4231 {
4232 struct i915_vma *vma;
4233 list_for_each_entry(vma, &obj->vma_list, vma_link)
4234 if (vma->vm == vm)
4235 return vma;
4236
4237 return NULL;
4238 }
4239
4240 void i915_gem_vma_destroy(struct i915_vma *vma)
4241 {
4242 WARN_ON(vma->node.allocated);
4243
4244 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4245 if (!list_empty(&vma->exec_list))
4246 return;
4247
4248 list_del(&vma->vma_link);
4249
4250 kfree(vma);
4251 }
4252
4253 static void
4254 i915_gem_stop_ringbuffers(struct drm_device *dev)
4255 {
4256 struct drm_i915_private *dev_priv = dev->dev_private;
4257 struct intel_ring_buffer *ring;
4258 int i;
4259
4260 for_each_ring(ring, dev_priv, i)
4261 intel_stop_ring_buffer(ring);
4262 }
4263
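/* Quiesce the GPU for suspend: wait for it to go idle, retire outstanding
 * requests, stop the rings and cancel the hangcheck timer and the
 * retire/idle workers. Under UMS we additionally evict everything and set
 * ums.mm_suspended.
 */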
4264 int
4265 i915_gem_suspend(struct drm_device *dev)
4266 {
4267 struct drm_i915_private *dev_priv = dev->dev_private;
4268 int ret = 0;
4269
4270 mutex_lock(&dev->struct_mutex);
4271 if (dev_priv->ums.mm_suspended)
4272 goto err;
4273
4274 ret = i915_gpu_idle(dev);
4275 if (ret)
4276 goto err;
4277
4278 i915_gem_retire_requests(dev);
4279
4280 /* Under UMS, be paranoid and evict. */
4281 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4282 i915_gem_evict_everything(dev);
4283
4284 i915_kernel_lost_context(dev);
4285 i915_gem_stop_ringbuffers(dev);
4286
4287 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4288 * We need to replace this with a semaphore, or something.
4289 * And not confound ums.mm_suspended!
4290 */
4291 dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4292 DRIVER_MODESET);
4293 mutex_unlock(&dev->struct_mutex);
4294
4295 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4296 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4297 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
4298
4299 return 0;
4300
4301 err:
4302 mutex_unlock(&dev->struct_mutex);
4303 return ret;
4304 }
4305
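/* Replay the saved L3 remapping information for one slice by emitting an
 * MI_LOAD_REGISTER_IMM per register in the GEN7_L3LOG range on the given
 * ring. A no-op if the device has no L3 DPF support or nothing was saved.
 */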
4306 int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
4307 {
4308 struct drm_device *dev = ring->dev;
4309 struct drm_i915_private *dev_priv = dev->dev_private;
4310 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4311 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4312 int i, ret;
4313
4314 if (!HAS_L3_DPF(dev) || !remap_info)
4315 return 0;
4316
4317 ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4318 if (ret)
4319 return ret;
4320
4321 /*
4322 * Note: We do not worry about the concurrent register cacheline hang
4323 * here because no other code should access these registers other than
4324 * at initialization time.
4325 */
4326 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4327 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4328 intel_ring_emit(ring, reg_base + i);
4329 intel_ring_emit(ring, remap_info[i/4]);
4330 }
4331
4332 intel_ring_advance(ring);
4333
4334 return ret;
4335 }
4336
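/* Enable bit-6 swizzling of tiled surfaces in the display and memory
 * arbiters. Skipped on gen < 5 and when no swizzling was detected.
 */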
4337 void i915_gem_init_swizzling(struct drm_device *dev)
4338 {
4339 struct drm_i915_private *dev_priv = dev->dev_private;
4340
4341 if (INTEL_INFO(dev)->gen < 5 ||
4342 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4343 return;
4344
4345 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4346 DISP_TILE_SURFACE_SWIZZLING);
4347
4348 if (IS_GEN5(dev))
4349 return;
4350
4351 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4352 if (IS_GEN6(dev))
4353 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4354 else if (IS_GEN7(dev))
4355 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4356 else if (IS_GEN8(dev))
4357 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4358 else
4359 BUG();
4360 }
4361
4362 static bool
4363 intel_enable_blt(struct drm_device *dev)
4364 {
4365 if (!HAS_BLT(dev))
4366 return false;
4367
4368 /* The blitter was dysfunctional on early prototypes */
4369 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4370 DRM_INFO("BLT not supported on this pre-production hardware;"
4371 " graphics performance will be degraded.\n");
4372 return false;
4373 }
4374
4375 return true;
4376 }
4377
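/* Bring up every ring the device has (render, then optionally BSD, BLT,
 * VEBOX and BSD2), unwinding the rings already initialised on failure, and
 * start the driver's seqno just below the 32-bit wrap point.
 */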
4378 static int i915_gem_init_rings(struct drm_device *dev)
4379 {
4380 struct drm_i915_private *dev_priv = dev->dev_private;
4381 int ret;
4382
4383 ret = intel_init_render_ring_buffer(dev);
4384 if (ret)
4385 return ret;
4386
4387 if (HAS_BSD(dev)) {
4388 ret = intel_init_bsd_ring_buffer(dev);
4389 if (ret)
4390 goto cleanup_render_ring;
4391 }
4392
4393 if (intel_enable_blt(dev)) {
4394 ret = intel_init_blt_ring_buffer(dev);
4395 if (ret)
4396 goto cleanup_bsd_ring;
4397 }
4398
4399 if (HAS_VEBOX(dev)) {
4400 ret = intel_init_vebox_ring_buffer(dev);
4401 if (ret)
4402 goto cleanup_blt_ring;
4403 }
4404
4405 if (HAS_BSD2(dev)) {
4406 ret = intel_init_bsd2_ring_buffer(dev);
4407 if (ret)
4408 goto cleanup_vebox_ring;
4409 }
4410
4411 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4412 if (ret)
4413 goto cleanup_bsd2_ring;
4414
4415 return 0;
4416
4417 cleanup_bsd2_ring:
4418 intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
4419 cleanup_vebox_ring:
4420 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4421 cleanup_blt_ring:
4422 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4423 cleanup_bsd_ring:
4424 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4425 cleanup_render_ring:
4426 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4427
4428 return ret;
4429 }
4430
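/* (Re)initialise GPU-side state: apply the eLLC and PCH-NOP workarounds, set
 * up swizzling, bring up the rings, replay L3 remapping for every slice and
 * enable the default contexts. A context failure other than -EIO tears the
 * rings back down.
 */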
4431 int
4432 i915_gem_init_hw(struct drm_device *dev)
4433 {
4434 struct drm_i915_private *dev_priv = dev->dev_private;
4435 int ret, i;
4436
4437 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4438 return -EIO;
4439
4440 if (dev_priv->ellc_size)
4441 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4442
4443 if (IS_HASWELL(dev))
4444 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4445 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4446
4447 if (HAS_PCH_NOP(dev)) {
4448 if (IS_IVYBRIDGE(dev)) {
4449 u32 temp = I915_READ(GEN7_MSG_CTL);
4450 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4451 I915_WRITE(GEN7_MSG_CTL, temp);
4452 } else if (INTEL_INFO(dev)->gen >= 7) {
4453 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4454 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4455 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4456 }
4457 }
4458
4459 i915_gem_init_swizzling(dev);
4460
4461 ret = i915_gem_init_rings(dev);
4462 if (ret)
4463 return ret;
4464
4465 for (i = 0; i < NUM_L3_SLICES(dev); i++)
4466 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4467
4468 /*
4469 * XXX: Contexts should only be initialized once. Doing a switch to the
4470 * default context, however, is something we'd like to do after
4471 * reset or thaw (the latter may not actually be necessary for HW, but
4472 * goes with our code better). Context switching requires rings (for
4473 * the do_switch), but before enabling PPGTT. So don't move this.
4474 */
4475 ret = i915_gem_context_enable(dev_priv);
4476 if (ret && ret != -EIO) {
4477 DRM_ERROR("Context enable failed %d\n", ret);
4478 i915_gem_cleanup_ringbuffer(dev);
4479 }
4480
4481 return ret;
4482 }
4483
4484 int i915_gem_init(struct drm_device *dev)
4485 {
4486 struct drm_i915_private *dev_priv = dev->dev_private;
4487 int ret;
4488
4489 mutex_lock(&dev->struct_mutex);
4490
4491 if (IS_VALLEYVIEW(dev)) {
4492 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4493 I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
4494 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
4495 VLV_GTLC_ALLOWWAKEACK), 10))
4496 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4497 }
4498
4499 i915_gem_init_global_gtt(dev);
4500
4501 ret = i915_gem_context_init(dev);
4502 if (ret) {
4503 mutex_unlock(&dev->struct_mutex);
4504 return ret;
4505 }
4506
4507 ret = i915_gem_init_hw(dev);
4508 if (ret == -EIO) {
4509 /* Allow ring initialisation to fail by marking the GPU as
4510 * wedged. But we only want to do this where the GPU is angry;
4511 * for all other failures, such as an allocation failure, we bail.
4512 */
4513 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4514 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
4515 ret = 0;
4516 }
4517 mutex_unlock(&dev->struct_mutex);
4518
4519 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4520 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4521 dev_priv->dri1.allow_batchbuffer = 1;
4522 return ret;
4523 }
4524
4525 void
4526 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4527 {
4528 struct drm_i915_private *dev_priv = dev->dev_private;
4529 struct intel_ring_buffer *ring;
4530 int i;
4531
4532 for_each_ring(ring, dev_priv, i)
4533 intel_cleanup_ring_buffer(ring);
4534 }
4535
4536 int
4537 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4538 struct drm_file *file_priv)
4539 {
4540 struct drm_i915_private *dev_priv = dev->dev_private;
4541 int ret;
4542
4543 if (drm_core_check_feature(dev, DRIVER_MODESET))
4544 return 0;
4545
4546 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
4547 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4548 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
4549 }
4550
4551 mutex_lock(&dev->struct_mutex);
4552 dev_priv->ums.mm_suspended = 0;
4553
4554 ret = i915_gem_init_hw(dev);
4555 if (ret != 0) {
4556 mutex_unlock(&dev->struct_mutex);
4557 return ret;
4558 }
4559
4560 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
4561
4562 ret = drm_irq_install(dev, dev->pdev->irq);
4563 if (ret)
4564 goto cleanup_ringbuffer;
4565 mutex_unlock(&dev->struct_mutex);
4566
4567 return 0;
4568
4569 cleanup_ringbuffer:
4570 i915_gem_cleanup_ringbuffer(dev);
4571 dev_priv->ums.mm_suspended = 1;
4572 mutex_unlock(&dev->struct_mutex);
4573
4574 return ret;
4575 }
4576
4577 int
4578 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4579 struct drm_file *file_priv)
4580 {
4581 if (drm_core_check_feature(dev, DRIVER_MODESET))
4582 return 0;
4583
4584 mutex_lock(&dev->struct_mutex);
4585 drm_irq_uninstall(dev);
4586 mutex_unlock(&dev->struct_mutex);
4587
4588 return i915_gem_suspend(dev);
4589 }
4590
4591 void
4592 i915_gem_lastclose(struct drm_device *dev)
4593 {
4594 int ret;
4595
4596 if (drm_core_check_feature(dev, DRIVER_MODESET))
4597 return;
4598
4599 ret = i915_gem_suspend(dev);
4600 if (ret)
4601 DRM_ERROR("failed to idle hardware: %d\n", ret);
4602 }
4603
4604 static void
4605 init_ring_lists(struct intel_ring_buffer *ring)
4606 {
4607 INIT_LIST_HEAD(&ring->active_list);
4608 INIT_LIST_HEAD(&ring->request_list);
4609 }
4610
4611 void i915_init_vm(struct drm_i915_private *dev_priv,
4612 struct i915_address_space *vm)
4613 {
4614 if (!i915_is_ggtt(vm))
4615 drm_mm_init(&vm->mm, vm->start, vm->total);
4616 vm->dev = dev_priv->dev;
4617 INIT_LIST_HEAD(&vm->active_list);
4618 INIT_LIST_HEAD(&vm->inactive_list);
4619 INIT_LIST_HEAD(&vm->global_link);
4620 list_add_tail(&vm->global_link, &dev_priv->vm_list);
4621 }
4622
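/* One-time GEM setup at driver load: create the object slab cache,
 * initialise the global VM, the object/request lists and the retire/idle
 * workers, pick the number of fence registers for this generation and
 * register the shrinker.
 */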
4623 void
4624 i915_gem_load(struct drm_device *dev)
4625 {
4626 struct drm_i915_private *dev_priv = dev->dev_private;
4627 int i;
4628
4629 dev_priv->slab =
4630 kmem_cache_create("i915_gem_object",
4631 sizeof(struct drm_i915_gem_object), 0,
4632 SLAB_HWCACHE_ALIGN,
4633 NULL);
4634
4635 INIT_LIST_HEAD(&dev_priv->vm_list);
4636 i915_init_vm(dev_priv, &dev_priv->gtt.base);
4637
4638 INIT_LIST_HEAD(&dev_priv->context_list);
4639 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4640 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4641 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4642 for (i = 0; i < I915_NUM_RINGS; i++)
4643 init_ring_lists(&dev_priv->ring[i]);
4644 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4645 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4646 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4647 i915_gem_retire_work_handler);
4648 INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
4649 i915_gem_idle_work_handler);
4650 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4651
4652 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4653 if (IS_GEN3(dev)) {
4654 I915_WRITE(MI_ARB_STATE,
4655 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4656 }
4657
4658 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4659
4660 /* Old X drivers will take 0-2 for front, back, depth buffers */
4661 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4662 dev_priv->fence_reg_start = 3;
4663
4664 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4665 dev_priv->num_fence_regs = 32;
4666 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4667 dev_priv->num_fence_regs = 16;
4668 else
4669 dev_priv->num_fence_regs = 8;
4670
4671 /* Initialize fence registers to zero */
4672 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4673 i915_gem_restore_fences(dev);
4674
4675 i915_gem_detect_bit_6_swizzle(dev);
4676 init_waitqueue_head(&dev_priv->pending_flip_queue);
4677
4678 dev_priv->mm.interruptible = true;
4679
4680 dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
4681 dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
4682 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4683 register_shrinker(&dev_priv->mm.inactive_shrinker);
4684 }
4685
4686 /*
4687 * Create a physically contiguous memory object for this object,
4688 * e.g. for cursor + overlay regs.
4689 */
4690 static int i915_gem_init_phys_object(struct drm_device *dev,
4691 int id, int size, int align)
4692 {
4693 struct drm_i915_private *dev_priv = dev->dev_private;
4694 struct drm_i915_gem_phys_object *phys_obj;
4695 int ret;
4696
4697 if (dev_priv->mm.phys_objs[id - 1] || !size)
4698 return 0;
4699
4700 phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
4701 if (!phys_obj)
4702 return -ENOMEM;
4703
4704 phys_obj->id = id;
4705
4706 phys_obj->handle = drm_pci_alloc(dev, size, align);
4707 if (!phys_obj->handle) {
4708 ret = -ENOMEM;
4709 goto kfree_obj;
4710 }
4711 #ifdef CONFIG_X86
4712 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4713 #endif
4714
4715 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4716
4717 return 0;
4718 kfree_obj:
4719 kfree(phys_obj);
4720 return ret;
4721 }
4722
4723 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4724 {
4725 struct drm_i915_private *dev_priv = dev->dev_private;
4726 struct drm_i915_gem_phys_object *phys_obj;
4727
4728 if (!dev_priv->mm.phys_objs[id - 1])
4729 return;
4730
4731 phys_obj = dev_priv->mm.phys_objs[id - 1];
4732 if (phys_obj->cur_obj) {
4733 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4734 }
4735
4736 #ifdef CONFIG_X86
4737 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4738 #endif
4739 drm_pci_free(dev, phys_obj->handle);
4740 kfree(phys_obj);
4741 dev_priv->mm.phys_objs[id - 1] = NULL;
4742 }
4743
4744 void i915_gem_free_all_phys_object(struct drm_device *dev)
4745 {
4746 int i;
4747
4748 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4749 i915_gem_free_phys_object(dev, i);
4750 }
4751
4752 void i915_gem_detach_phys_object(struct drm_device *dev,
4753 struct drm_i915_gem_object *obj)
4754 {
4755 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4756 char *vaddr;
4757 int i;
4758 int page_count;
4759
4760 if (!obj->phys_obj)
4761 return;
4762 vaddr = obj->phys_obj->handle->vaddr;
4763
4764 page_count = obj->base.size / PAGE_SIZE;
4765 for (i = 0; i < page_count; i++) {
4766 struct page *page = shmem_read_mapping_page(mapping, i);
4767 if (!IS_ERR(page)) {
4768 char *dst = kmap_atomic(page);
4769 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4770 kunmap_atomic(dst);
4771
4772 drm_clflush_pages(&page, 1);
4773
4774 set_page_dirty(page);
4775 mark_page_accessed(page);
4776 page_cache_release(page);
4777 }
4778 }
4779 i915_gem_chipset_flush(dev);
4780
4781 obj->phys_obj->cur_obj = NULL;
4782 obj->phys_obj = NULL;
4783 }
4784
4785 int
4786 i915_gem_attach_phys_object(struct drm_device *dev,
4787 struct drm_i915_gem_object *obj,
4788 int id,
4789 int align)
4790 {
4791 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4792 struct drm_i915_private *dev_priv = dev->dev_private;
4793 int ret = 0;
4794 int page_count;
4795 int i;
4796
4797 if (id > I915_MAX_PHYS_OBJECT)
4798 return -EINVAL;
4799
4800 if (obj->phys_obj) {
4801 if (obj->phys_obj->id == id)
4802 return 0;
4803 i915_gem_detach_phys_object(dev, obj);
4804 }
4805
4806 /* create a new object */
4807 if (!dev_priv->mm.phys_objs[id - 1]) {
4808 ret = i915_gem_init_phys_object(dev, id,
4809 obj->base.size, align);
4810 if (ret) {
4811 DRM_ERROR("failed to init phys object %d size: %zu\n",
4812 id, obj->base.size);
4813 return ret;
4814 }
4815 }
4816
4817 /* bind to the object */
4818 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4819 obj->phys_obj->cur_obj = obj;
4820
4821 page_count = obj->base.size / PAGE_SIZE;
4822
4823 for (i = 0; i < page_count; i++) {
4824 struct page *page;
4825 char *dst, *src;
4826
4827 page = shmem_read_mapping_page(mapping, i);
4828 if (IS_ERR(page))
4829 return PTR_ERR(page);
4830
4831 src = kmap_atomic(page);
4832 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4833 memcpy(dst, src, PAGE_SIZE);
4834 kunmap_atomic(src);
4835
4836 mark_page_accessed(page);
4837 page_cache_release(page);
4838 }
4839
4840 return 0;
4841 }
4842
4843 static int
4844 i915_gem_phys_pwrite(struct drm_device *dev,
4845 struct drm_i915_gem_object *obj,
4846 struct drm_i915_gem_pwrite *args,
4847 struct drm_file *file_priv)
4848 {
4849 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4850 char __user *user_data = to_user_ptr(args->data_ptr);
4851
4852 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4853 unsigned long unwritten;
4854
4855 /* The physical object, once assigned, is fixed for the lifetime
4856 * of the obj, so we can safely drop the lock and continue
4857 * to access vaddr.
4858 */
4859 mutex_unlock(&dev->struct_mutex);
4860 unwritten = copy_from_user(vaddr, user_data, args->size);
4861 mutex_lock(&dev->struct_mutex);
4862 if (unwritten)
4863 return -EFAULT;
4864 }
4865
4866 i915_gem_chipset_flush(dev);
4867 return 0;
4868 }
4869
4870 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4871 {
4872 struct drm_i915_file_private *file_priv = file->driver_priv;
4873
4874 cancel_delayed_work_sync(&file_priv->mm.idle_work);
4875
4876 /* Clean up our request list when the client is going away, so that
4877 * later retire_requests won't dereference our soon-to-be-gone
4878 * file_priv.
4879 */
4880 spin_lock(&file_priv->mm.lock);
4881 while (!list_empty(&file_priv->mm.request_list)) {
4882 struct drm_i915_gem_request *request;
4883
4884 request = list_first_entry(&file_priv->mm.request_list,
4885 struct drm_i915_gem_request,
4886 client_list);
4887 list_del(&request->client_list);
4888 request->file_priv = NULL;
4889 }
4890 spin_unlock(&file_priv->mm.lock);
4891 }
4892
4893 static void
4894 i915_gem_file_idle_work_handler(struct work_struct *work)
4895 {
4896 struct drm_i915_file_private *file_priv =
4897 container_of(work, typeof(*file_priv), mm.idle_work.work);
4898
4899 atomic_set(&file_priv->rps_wait_boost, false);
4900 }
4901
4902 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4903 {
4904 struct drm_i915_file_private *file_priv;
4905 int ret;
4906
4907 DRM_DEBUG_DRIVER("\n");
4908
4909 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4910 if (!file_priv)
4911 return -ENOMEM;
4912
4913 file->driver_priv = file_priv;
4914 file_priv->dev_priv = dev->dev_private;
4915 file_priv->file = file;
4916
4917 spin_lock_init(&file_priv->mm.lock);
4918 INIT_LIST_HEAD(&file_priv->mm.request_list);
4919 INIT_DELAYED_WORK(&file_priv->mm.idle_work,
4920 i915_gem_file_idle_work_handler);
4921
4922 ret = i915_gem_context_open(dev, file);
4923 if (ret)
4924 kfree(file_priv);
4925
4926 return ret;
4927 }
4928
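/* Best-effort check whether @task currently holds @mutex. Owner tracking is
 * only available with CONFIG_SMP or CONFIG_DEBUG_MUTEXES; elsewhere we have
 * to report false.
 */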
4929 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4930 {
4931 if (!mutex_is_locked(mutex))
4932 return false;
4933
4934 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4935 return mutex->owner == task;
4936 #else
4937 /* Since UP may be pre-empted, we cannot assume that we own the lock */
4938 return false;
4939 #endif
4940 }
4941
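/* Shrinker "count" callback: estimate how many pages could be released, i.e.
 * the unpinned pages of unbound objects plus those of bound objects that are
 * neither active nor pinned. Tolerates struct_mutex already being held by
 * the current task.
 */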
4942 static unsigned long
4943 i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
4944 {
4945 struct drm_i915_private *dev_priv =
4946 container_of(shrinker,
4947 struct drm_i915_private,
4948 mm.inactive_shrinker);
4949 struct drm_device *dev = dev_priv->dev;
4950 struct drm_i915_gem_object *obj;
4951 bool unlock = true;
4952 unsigned long count;
4953
4954 if (!mutex_trylock(&dev->struct_mutex)) {
4955 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4956 return 0;
4957
4958 if (dev_priv->mm.shrinker_no_lock_stealing)
4959 return 0;
4960
4961 unlock = false;
4962 }
4963
4964 count = 0;
4965 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
4966 if (obj->pages_pin_count == 0)
4967 count += obj->base.size >> PAGE_SHIFT;
4968
4969 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4970 if (obj->active)
4971 continue;
4972
4973 if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
4974 count += obj->base.size >> PAGE_SHIFT;
4975 }
4976
4977 if (unlock)
4978 mutex_unlock(&dev->struct_mutex);
4979
4980 return count;
4981 }
4982
4983 /* All the new VM stuff */
4984 unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
4985 struct i915_address_space *vm)
4986 {
4987 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4988 struct i915_vma *vma;
4989
4990 if (!dev_priv->mm.aliasing_ppgtt ||
4991 vm == &dev_priv->mm.aliasing_ppgtt->base)
4992 vm = &dev_priv->gtt.base;
4993
4994 BUG_ON(list_empty(&o->vma_list));
4995 list_for_each_entry(vma, &o->vma_list, vma_link) {
4996 if (vma->vm == vm)
4997 return vma->node.start;
4998
4999 }
5000 return -1;
5001 }
5002
5003 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
5004 struct i915_address_space *vm)
5005 {
5006 struct i915_vma *vma;
5007
5008 list_for_each_entry(vma, &o->vma_list, vma_link)
5009 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
5010 return true;
5011
5012 return false;
5013 }
5014
5015 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
5016 {
5017 struct i915_vma *vma;
5018
5019 list_for_each_entry(vma, &o->vma_list, vma_link)
5020 if (drm_mm_node_allocated(&vma->node))
5021 return true;
5022
5023 return false;
5024 }
5025
5026 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
5027 struct i915_address_space *vm)
5028 {
5029 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5030 struct i915_vma *vma;
5031
5032 if (!dev_priv->mm.aliasing_ppgtt ||
5033 vm == &dev_priv->mm.aliasing_ppgtt->base)
5034 vm = &dev_priv->gtt.base;
5035
5036 BUG_ON(list_empty(&o->vma_list));
5037
5038 list_for_each_entry(vma, &o->vma_list, vma_link)
5039 if (vma->vm == vm)
5040 return vma->node.size;
5041
5042 return 0;
5043 }
5044
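/* Shrinker "scan" callback: purge purgeable objects first, then shrink bound
 * objects, and finally fall back to shrinking everything if the target has
 * still not been met. Bails out with SHRINK_STOP when struct_mutex cannot be
 * taken safely.
 */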
5045 static unsigned long
5046 i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
5047 {
5048 struct drm_i915_private *dev_priv =
5049 container_of(shrinker,
5050 struct drm_i915_private,
5051 mm.inactive_shrinker);
5052 struct drm_device *dev = dev_priv->dev;
5053 unsigned long freed;
5054 bool unlock = true;
5055
5056 if (!mutex_trylock(&dev->struct_mutex)) {
5057 if (!mutex_is_locked_by(&dev->struct_mutex, current))
5058 return SHRINK_STOP;
5059
5060 if (dev_priv->mm.shrinker_no_lock_stealing)
5061 return SHRINK_STOP;
5062
5063 unlock = false;
5064 }
5065
5066 freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
5067 if (freed < sc->nr_to_scan)
5068 freed += __i915_gem_shrink(dev_priv,
5069 sc->nr_to_scan - freed,
5070 false);
5071 if (freed < sc->nr_to_scan)
5072 freed += i915_gem_shrink_all(dev_priv);
5073
5074 if (unlock)
5075 mutex_unlock(&dev->struct_mutex);
5076
5077 return freed;
5078 }
5079
5080 struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
5081 {
5082 struct i915_vma *vma;
5083
5084 if (WARN_ON(list_empty(&obj->vma_list)))
5085 return NULL;
5086
5087 vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
5088 if (vma->vm != obj_to_ggtt(obj))
5089 return NULL;
5090
5091 return vma;
5092 }