/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/intel-gtt.h>
static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
				       unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);
static void i915_gem_free_object_tail(struct drm_gem_object *obj);

static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);
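/* The helper below encodes what "inactive" means throughout this file: the
 * object is still bound into the GTT but is neither referenced by in-flight
 * rendering nor pinned by the kernel, so it sits on the inactive LRU and is
 * a candidate for eviction.
 */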
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
{
	return obj_priv->gtt_space &&
		!obj_priv->active &&
		obj_priv->pin_count == 0;
}
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev->gtt_total = (uint32_t) (end - start);

	return 0;
}
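/* A note on i915_gem_do_init() above: start and end carve out the chunk of
 * the aperture that GEM may manage, and both must be page aligned. The range
 * is handed to the drm_mm range allocator, from which the GTT bindings made
 * later in this file are allocated.
 */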
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}
/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);
	if (ret) {
		return ret;
	}

	args->handle = handle;
	return 0;
}
static inline int
fast_shmem_read(struct page **pages,
		loff_t page_base, int page_offset,
		char __user *data,
		int length)
{
	char __iomem *vaddr;
	int unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;

	return 0;
}
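/* Bit-17 swizzling background for the helper below: on chipsets with the
 * 9/10/17 swizzle mode, bit 17 of a page's physical address participates in
 * the tiled swizzle pattern. Shmem backing pages can change physical address
 * when swapped out and back in, so CPU accesses have to compensate per page,
 * which is why the pread/pwrite paths check this and take the slow path when
 * it applies.
 */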
static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj_priv->tiling_mode != I915_TILING_NONE;
}
static inline void
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap(dst_page);
	src_vaddr = kmap(src_page);

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap(src_page);
	kunmap(dst_page);
}
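/* A worked example for the swizzled copy below, assuming bit 17 of the
 * page's physical address is set: gpu_offset ^ 64 swaps the two halves of
 * each 128-byte pair, so a chunk starting at gpu_offset 0 is copied from
 * bytes 64..127 and a chunk starting at gpu_offset 64 from bytes 0..63.
 * The loop is chunked at 64-byte cacheline boundaries (cacheline_end) so
 * that each memcpy stays within a single swizzled cacheline.
 */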
static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap(gpu_page);
	cpu_vaddr = kmap(cpu_page);

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap(cpu_page);
	kunmap(gpu_page);
}
/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space.  On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_read(obj_priv->pages,
				      page_base, page_offset,
				      user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);

	/* If we've insufficient memory to map in the pages, attempt
	 * to make some space by throwing out some old buffers.
	 */
	if (ret == -ENOMEM) {
		struct drm_device *dev = obj->dev;

		ret = i915_gem_evict_something(dev, obj->size,
					       i915_gem_get_gtt_alignment(obj));
		if (ret)
			return ret;

		ret = i915_gem_object_get_pages(obj, 0);
	}

	return ret;
}
/**
 * This is the fallback shmem pread path, which allocates temporary storage
 * in kernel space to copy_to_user into outside of the struct_mutex, so we
 * can copy out of the object's backing pages while holding the struct mutex
 * and not take page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      1);
		} else {
			slow_shmem_copy(user_pages[data_page_index],
					data_page_offset,
					obj_priv->pages[shmem_page_index],
					shmem_page_offset,
					page_length);
		}

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}
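/* Note on the cleanup above: the pinned user pages were written to by the
 * kernel, so each one is marked dirty with SetPageDirty() before being
 * released; otherwise the copied data could be lost if a page were
 * reclaimed before being written back.
 */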
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;
	obj_priv = to_intel_bo(obj);

	/* Bounds check source. */
	if (args->offset > obj->size || args->size > obj->size - args->offset) {
		ret = -EINVAL;
		goto err;
	}

	if (!access_ok(VERIFY_WRITE,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size)) {
		ret = -EFAULT;
		goto err;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
		if (ret != 0)
			ret = i915_gem_shmem_pread_slow(dev, obj, args,
							file_priv);
	}

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
	if (unwritten)
		return -EFAULT;
	return 0;
}
/* Here's the write path which can sleep for
 * page faults
 */

static inline void
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char __iomem *dst_vaddr;
	char *src_vaddr;

	dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
	src_vaddr = kmap(user_page);

	memcpy_toio(dst_vaddr + gtt_offset,
		    src_vaddr + user_offset,
		    length);

	kunmap(user_page);
	io_mapping_unmap(dst_vaddr);
}
static inline int
fast_shmem_write(struct page **pages,
		 loff_t page_base, int page_offset,
		 char __user *data,
		 int length)
{
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;
	return 0;
}
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto fail;

	obj_priv = to_intel_bo(obj);
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				      page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto fail;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
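/* The fast path above pins the object and moves it to the GTT domain before
 * streaming data through the aperture: pinning keeps the GTT binding (and
 * thus the offset being written) stable across the copy loop, and the
 * domain change ensures any stale CPU cachelines are flushed before the
 * uncached aperture writes land.
 */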
/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret)
		goto out_unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto out_unpin_object;

	obj_priv = to_intel_bo(obj);
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		slow_kernel_write(dev_priv->mm.gtt_mapping,
				  gtt_page_base, gtt_page_offset,
				  user_pages[data_page_index],
				  data_page_offset,
				  page_length);

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_object:
	i915_gem_object_unpin(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}
/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_write(obj_priv->pages,
				       page_base, page_offset,
				       user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      0);
		} else {
			slow_shmem_copy(obj_priv->pages[shmem_page_index],
					shmem_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);
		}

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;
	obj_priv = to_intel_bo(obj);

	/* Bounds check destination. */
	if (args->offset > obj->size || args->size > obj->size - args->offset) {
		ret = -EINVAL;
		goto err;
	}

	if (!access_ok(VERIFY_READ,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size)) {
		ret = -EFAULT;
		goto err;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
		 dev->gtt_total != 0 &&
		 obj->write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
						       file_priv);
		}
	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
							 file_priv);
		}
	}

#if WATCH_PWRITE
	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);
#endif

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
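/* To summarize the path selection above: objects backed by a phys object
 * take i915_gem_phys_pwrite(); untiled objects whose current write domain
 * is not the CPU go through the GTT (fast path first, slow on -EFAULT);
 * objects needing bit-17 swizzle fixups always take the shmem slow path;
 * everything else tries the shmem fast path and falls back to the slow one
 * on -EFAULT.
 */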
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;
	obj_priv = to_intel_bo(obj);

	mutex_lock(&dev->struct_mutex);

	intel_mark_busy(dev, obj);

#if WATCH_BUF
	DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
		 obj, obj->size, read_domains, write_domain);
#endif
	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Update the LRU on the fence for the CPU access that's
		 * about to occur.
		 */
		if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
			struct drm_i915_fence_reg *reg =
				&dev_priv->fence_regs[obj_priv->fence_reg];
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
		}

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	/* Maintain LRU order of "inactive" objects */
	if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOENT;
	}

#if WATCH_BUF
	DRM_INFO("%s: sw_finish %d (%p %zd)\n",
		 __func__, args->handle, obj, obj->size);
#endif
	obj_priv = to_intel_bo(obj);

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);
	if (!obj_priv->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret)
			goto unlock;

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret)
			goto unlock;
	}

	/* Need a new fence register? */
	if (obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj);
		if (ret)
			goto unlock;
	}

	if (i915_gem_object_is_inactive(obj_priv))
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
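/* The switch above translates kernel error codes into VM fault codes:
 * success and -ERESTARTSYS (a signal arrived during the wait) become
 * VM_FAULT_NOPAGE so the fault is simply retried, allocation failures and
 * -EAGAIN become VM_FAULT_OOM, and anything else is reported to the
 * process as VM_FAULT_SIGBUS.
 */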
/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
		DRM_ERROR("failed to add to map hash\n");
		ret = -ENOMEM;
		goto out_free_mm;
	}

	/* By now we should be all set, any drm_mmap request on the offset
	 * below will get to our mmap & fault handler */
	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);
	list->map = NULL;

	return ret;
}
/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	if (dev->dev_mapping)
		unmap_mapping_range(dev->dev_mapping,
				    obj_priv->mmap_offset, obj->size, 1);
}
static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);

	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	if (list->map) {
		kfree(list->map);
		list->map = NULL;
	}

	obj_priv->mmap_offset = 0;
}
/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping if needed.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int start, i;

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	if (IS_I9XX(dev))
		start = 1024*1024;
	else
		start = 512*1024;

	for (i = start; i < obj->size; i <<= 1)
		;

	return i;
}
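/* Example of the rounding above: a 3MB tiled object on a 9xx chipset starts
 * at 1MB and doubles until the value covers the object, so the loop yields
 * a 4MB alignment. The same object untiled, or on 965 and later, needs only
 * the 4KB GTT page alignment.
 */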
/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file_priv: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap_gtt *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->struct_mutex);

	obj_priv = to_intel_bo(obj);

	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (!obj_priv->mmap_offset) {
		ret = i915_gem_create_mmap_offset(obj);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	args->offset = obj_priv->mmap_offset;

	/*
	 * Pull it into the GTT so that we have a page list (makes the
	 * initial fault faster and any subsequent flushing possible).
	 */
	if (!obj_priv->agp_mem) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
void
i915_gem_object_put_pages(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int page_count = obj->size / PAGE_SIZE;
	int i;

	BUG_ON(obj_priv->pages_refcount == 0);
	BUG_ON(obj_priv->madv == __I915_MADV_PURGED);

	if (--obj_priv->pages_refcount != 0)
		return;

	if (obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj_priv->madv == I915_MADV_DONTNEED)
		obj_priv->dirty = 0;

	for (i = 0; i < page_count; i++) {
		if (obj_priv->dirty)
			set_page_dirty(obj_priv->pages[i]);

		if (obj_priv->madv == I915_MADV_WILLNEED)
			mark_page_accessed(obj_priv->pages[i]);

		page_cache_release(obj_priv->pages[i]);
	}
	obj_priv->dirty = 0;

	drm_free_large(obj_priv->pages);
	obj_priv->pages = NULL;
}
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
			       struct intel_ring_buffer *ring)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	BUG_ON(ring == NULL);
	obj_priv->ring = ring;

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}
	/* Move from whatever list we were on to the tail of execution. */
	spin_lock(&dev_priv->mm.active_list_lock);
	list_move_tail(&obj_priv->list, &ring->active_list);
	spin_unlock(&dev_priv->mm.active_list_lock);
	obj_priv->last_rendering_seqno = seqno;
}
static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	BUG_ON(!obj_priv->active);
	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
	obj_priv->last_rendering_seqno = 0;
}
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct inode *inode;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*. Here we mirror the actions taken
	 * by shmem_delete_inode() to release the backing store.
	 */
	inode = obj->filp->f_path.dentry->d_inode;
	truncate_inode_pages(inode->i_mapping, 0);
	if (inode->i_op->truncate_range)
		inode->i_op->truncate_range(inode, 0, (loff_t)-1);

	obj_priv->madv = __I915_MADV_PURGED;
}
static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
{
	return obj_priv->madv == I915_MADV_DONTNEED;
}
static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->pin_count != 0)
		list_del_init(&obj_priv->list);
	else
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	BUG_ON(!list_empty(&obj_priv->gpu_write_list));

	obj_priv->last_rendering_seqno = 0;
	obj_priv->ring = NULL;
	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
static void
i915_gem_process_flushing_list(struct drm_device *dev,
			       uint32_t flush_domains, uint32_t seqno,
			       struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv, *next;

	list_for_each_entry_safe(obj_priv, next,
				 &dev_priv->mm.gpu_write_list,
				 gpu_write_list) {
		struct drm_gem_object *obj = &obj_priv->base;

		if ((obj->write_domain & flush_domains) ==
		    obj->write_domain &&
		    obj_priv->ring->ring_flag == ring->ring_flag) {
			uint32_t old_write_domain = obj->write_domain;

			obj->write_domain = 0;
			list_del_init(&obj_priv->gpu_write_list);
			i915_gem_object_move_to_active(obj, seqno, ring);

			/* update the fence lru list */
			if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
				struct drm_i915_fence_reg *reg =
					&dev_priv->fence_regs[obj_priv->fence_reg];
				list_move_tail(&reg->lru_list,
					       &dev_priv->mm.fence_list);
			}

			trace_i915_gem_object_change_domain(obj,
							    obj->read_domains,
							    old_write_domain);
		}
	}
}
uint32_t
i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
		 uint32_t flush_domains, struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *i915_file_priv = NULL;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
	int was_empty;

	if (file_priv != NULL)
		i915_file_priv = file_priv->driver_priv;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL)
		return 0;

	seqno = ring->add_request(dev, ring, file_priv, flush_domains);

	request->seqno = seqno;
	request->ring = ring;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&ring->request_list);
	list_add_tail(&request->list, &ring->request_list);

	if (i915_file_priv) {
		list_add_tail(&request->client_list,
			      &i915_file_priv->mm.request_list);
	} else {
		INIT_LIST_HEAD(&request->client_list);
	}

	/* Associate any objects on the flushing list matching the write
	 * domain we're flushing with our flush.
	 */
	if (flush_domains != 0)
		i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);

	if (!dev_priv->mm.suspended) {
		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
		if (was_empty)
			queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	}
	return seqno;
}
/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
{
	uint32_t flush_domains = 0;

	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;

	ring->flush(dev, ring,
			I915_GEM_DOMAIN_COMMAND, flush_domains);
	return flush_domains;
}
/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
			struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	trace_i915_gem_request_retire(dev, request->seqno);

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	spin_lock(&dev_priv->mm.active_list_lock);
	while (!list_empty(&request->ring->active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&request->ring->active_list,
					    struct drm_i915_gem_object,
					    list);
		obj = &obj_priv->base;

		/* If the seqno being retired doesn't match the oldest in the
		 * list, then the oldest in the list must still be newer than
		 * this seqno.
		 */
		if (obj_priv->last_rendering_seqno != request->seqno)
			goto out;

#if WATCH_LRU
		DRM_INFO("%s: retire %d moves to inactive list %p\n",
			 __func__, request->seqno, obj);
#endif

		if (obj->write_domain != 0)
			i915_gem_object_move_to_flushing(obj);
		else {
			/* Take a reference on the object so it won't be
			 * freed while the spinlock is held.  The list
			 * protection for this spinlock is safe when breaking
			 * the lock like this since the next thing we do
			 * is just get the head of the list again.
			 */
			drm_gem_object_reference(obj);
			i915_gem_object_move_to_inactive(obj);
			spin_unlock(&dev_priv->mm.active_list_lock);
			drm_gem_object_unreference(obj);
			spin_lock(&dev_priv->mm.active_list_lock);
		}
	}
out:
	spin_unlock(&dev_priv->mm.active_list_lock);
}
/**
 * Returns true if seq1 is later than seq2.
 */
int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
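/* The signed subtraction above handles 32-bit wraparound: for example, with
 * seq1 == 0x00000002 and seq2 == 0xfffffffd, seq1 - seq2 evaluates to 5 as
 * an int32_t, so seq1 still counts as "later" even though it is numerically
 * smaller. The comparison is only meaningful while the two values are
 * within 2^31 of each other.
 */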
uint32_t
i915_get_gem_seqno(struct drm_device *dev,
		   struct intel_ring_buffer *ring)
{
	return ring->get_gem_seqno(dev, ring);
}
/**
 * This function clears the request list as sequence numbers are passed.
 */
static void
i915_gem_retire_requests_ring(struct drm_device *dev,
			      struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	if (!ring->status_page.page_addr
			|| list_empty(&ring->request_list))
		return;

	seqno = i915_get_gem_seqno(dev, ring);

	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;
		uint32_t retiring_seqno;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);
		retiring_seqno = request->seqno;

		if (i915_seqno_passed(seqno, retiring_seqno) ||
		    atomic_read(&dev_priv->mm.wedged)) {
			i915_gem_retire_request(dev, request);

			list_del(&request->list);
			list_del(&request->client_list);
			kfree(request);
		} else
			break;
	}

	if (unlikely (dev_priv->trace_irq_seqno &&
		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {

		ring->user_irq_put(dev, ring);
		dev_priv->trace_irq_seqno = 0;
	}
}
void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!list_empty(&dev_priv->mm.deferred_free_list)) {
		struct drm_i915_gem_object *obj_priv, *tmp;

		/* We must be careful that during unbind() we do not
		 * accidentally infinitely recurse into retire requests.
		 * Currently:
		 *   retire -> free -> unbind -> wait -> retire_ring
		 */
		list_for_each_entry_safe(obj_priv, tmp,
					 &dev_priv->mm.deferred_free_list,
					 list)
			i915_gem_free_object_tail(&obj_priv->base);
	}

	i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
	if (HAS_BSD(dev))
		i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
}
void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);

	if (!dev_priv->mm.suspended &&
	    (!list_empty(&dev_priv->render_ring.request_list) ||
	     (HAS_BSD(dev) &&
	      !list_empty(&dev_priv->bsd_ring.request_list))))
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}
int
i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
		     int interruptible, struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 ier;
	int ret = 0;

	BUG_ON(seqno == 0);

	if (atomic_read(&dev_priv->mm.wedged))
		return -EIO;

	if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
		if (HAS_PCH_SPLIT(dev))
			ier = I915_READ(DEIER) | I915_READ(GTIER);
		else
			ier = I915_READ(IER);
		if (!ier) {
			DRM_ERROR("something (likely vbetool) disabled "
				  "interrupts, re-enabling\n");
			i915_driver_irq_preinstall(dev);
			i915_driver_irq_postinstall(dev);
		}

		trace_i915_gem_request_wait_begin(dev, seqno);

		ring->waiting_gem_seqno = seqno;
		ring->user_irq_get(dev, ring);
		if (interruptible)
			ret = wait_event_interruptible(ring->irq_queue,
				i915_seqno_passed(
					ring->get_gem_seqno(dev, ring), seqno)
				|| atomic_read(&dev_priv->mm.wedged));
		else
			wait_event(ring->irq_queue,
				i915_seqno_passed(
					ring->get_gem_seqno(dev, ring), seqno)
				|| atomic_read(&dev_priv->mm.wedged));

		ring->user_irq_put(dev, ring);
		ring->waiting_gem_seqno = 0;

		trace_i915_gem_request_wait_end(dev, seqno);
	}
	if (atomic_read(&dev_priv->mm.wedged))
		ret = -EIO;

	if (ret && ret != -ERESTARTSYS)
		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
			  __func__, ret, seqno, ring->get_gem_seqno(dev, ring));

	/* Directly dispatch request retiring.  While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests_ring(dev, ring);

	return ret;
}
/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno,
		  struct intel_ring_buffer *ring)
{
	return i915_do_wait_request(dev, seqno, 1, ring);
}
static void
i915_gem_flush(struct drm_device *dev,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);
	dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
			invalidate_domains,
			flush_domains);

	if (HAS_BSD(dev))
		dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
				invalidate_domains,
				flush_domains);
}
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret;

	/* This function only exists to support waiting for existing rendering,
	 * not for emitting required flushes.
	 */
	BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);

	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj_priv->active) {
#if WATCH_BUF
		DRM_INFO("%s: object %p wait for seqno %08x\n",
			 __func__, obj, obj_priv->last_rendering_seqno);
#endif
		ret = i915_wait_request(dev,
					obj_priv->last_rendering_seqno, obj_priv->ring);
		if (ret != 0)
			return ret;
	}

	return 0;
}
/**
 * Unbinds an object from the GTT aperture.
 */
int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret = 0;

#if WATCH_BUF
	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
#endif
	if (obj_priv->gtt_space == NULL)
		return 0;

	if (obj_priv->pin_count != 0) {
		DRM_ERROR("Attempting to unbind pinned buffer\n");
		return -EINVAL;
	}

	/* blow away mappings if mapped through GTT */
	i915_gem_release_mmap(obj);

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it. This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret == -ERESTARTSYS)
		return ret;
	/* Continue on if we fail due to EIO, the GPU is hung so we
	 * should be safe and we need to cleanup or else we might
	 * cause memory corruption through use-after-free.
	 */

	/* release the fence reg _after_ flushing */
	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
		i915_gem_clear_fence_reg(obj);

	if (obj_priv->agp_mem != NULL) {
		drm_unbind_agp(obj_priv->agp_mem);
		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
		obj_priv->agp_mem = NULL;
	}

	i915_gem_object_put_pages(obj);
	BUG_ON(obj_priv->pages_refcount);

	if (obj_priv->gtt_space) {
		atomic_dec(&dev->gtt_count);
		atomic_sub(obj->size, &dev->gtt_memory);

		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
	}

	/* Remove ourselves from the LRU list if present. */
	spin_lock(&dev_priv->mm.active_list_lock);
	if (!list_empty(&obj_priv->list))
		list_del_init(&obj_priv->list);
	spin_unlock(&dev_priv->mm.active_list_lock);

	if (i915_gem_object_is_purgeable(obj_priv))
		i915_gem_object_truncate(obj);

	trace_i915_gem_object_unbind(obj);

	return ret;
}
int
i915_gpu_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool lists_empty;
	uint32_t seqno1, seqno2;
	int ret;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->render_ring.active_list) &&
		       (!HAS_BSD(dev) ||
			list_empty(&dev_priv->bsd_ring.active_list)));
	spin_unlock(&dev_priv->mm.active_list_lock);

	if (lists_empty)
		return 0;

	/* Flush everything onto the inactive list. */
	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
			&dev_priv->render_ring);
	if (seqno1 == 0)
		return -ENOMEM;
	ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);

	if (HAS_BSD(dev)) {
		seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
				&dev_priv->bsd_ring);
		if (seqno2 == 0)
			return -ENOMEM;

		ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
		if (ret)
			return ret;
	}

	return ret;
}
int
i915_gem_object_get_pages(struct drm_gem_object *obj,
			  gfp_t gfpmask)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;

	BUG_ON(obj_priv->pages_refcount
			== DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);

	if (obj_priv->pages_refcount++ != 0)
		return 0;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->size / PAGE_SIZE;
	BUG_ON(obj_priv->pages != NULL);
	obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
	if (obj_priv->pages == NULL) {
		obj_priv->pages_refcount--;
		return -ENOMEM;
	}

	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	for (i = 0; i < page_count; i++) {
		page = read_cache_page_gfp(mapping, i,
					   GFP_HIGHUSER |
					   __GFP_COLD |
					   gfpmask);
		if (IS_ERR(page))
			goto err_pages;

		obj_priv->pages[i] = page;
	}

	if (obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_do_bit_17_swizzle(obj);

	return 0;

err_pages:
	while (i--)
		page_cache_release(obj_priv->pages[i]);

	drm_free_large(obj_priv->pages);
	obj_priv->pages = NULL;
	obj_priv->pages_refcount--;
	return PTR_ERR(page);
}
static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
		    0xfffff000) << 32;
	val |= obj_priv->gtt_offset & 0xfffff000;
	val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
		SANDYBRIDGE_FENCE_PITCH_SHIFT;

	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
}
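/* Reading the register assembly above: the upper dword of the 64-bit fence
 * holds the page address of the end of the fenced range, the lower dword
 * its start, with the pitch field (stride in 128-byte units, minus one),
 * the Y-tiling bit and the valid bit packed alongside. The i915/i830
 * variants further below pack a similar description into a single 32-bit
 * register instead.
 */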
static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
		    0xfffff000) << 32;
	val |= obj_priv->gtt_offset & 0xfffff000;
	val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
}
static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	int tile_width;
	uint32_t fence_reg, val;
	uint32_t pitch_val;

	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
		     __func__, obj_priv->gtt_offset, obj->size);
		return;
	}

	if (obj_priv->tiling_mode == I915_TILING_Y &&
	    HAS_128_BYTE_Y_TILING(dev))
		tile_width = 128;
	else
		tile_width = 512;

	/* Note: pitch better be a power of two tile widths */
	pitch_val = obj_priv->stride / tile_width;
	pitch_val = ffs(pitch_val) - 1;

	if (obj_priv->tiling_mode == I915_TILING_Y &&
	    HAS_128_BYTE_Y_TILING(dev))
		WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
	else
		WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	val |= I915_FENCE_SIZE_BITS(obj->size);
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	if (regnum < 8)
		fence_reg = FENCE_REG_830_0 + (regnum * 4);
	else
		fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
	I915_WRITE(fence_reg, val);
}
static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	uint32_t val;
	uint32_t pitch_val;
	uint32_t fence_size_bits;

	if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
		     __func__, obj_priv->gtt_offset);
		return;
	}

	pitch_val = obj_priv->stride / 128;
	pitch_val = ffs(pitch_val) - 1;
	WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
	WARN_ON(fence_size_bits & ~0x00000f00);
	val |= fence_size_bits;
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}
static int i915_find_fence_reg(struct drm_device *dev)
{
	struct drm_i915_fence_reg *reg = NULL;
	struct drm_i915_gem_object *obj_priv = NULL;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int i, avail, ret;

	/* First try to find a free reg */
	avail = 0;
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			return i;

		obj_priv = to_intel_bo(reg->obj);
		if (!obj_priv->pin_count)
			avail++;
	}

	if (avail == 0)
		return -ENOSPC;

	/* None available, try to steal one or wait for a user to finish */
	i = I915_FENCE_REG_NONE;
	list_for_each_entry(reg, &dev_priv->mm.fence_list,
			    lru_list) {
		obj = reg->obj;
		obj_priv = to_intel_bo(obj);

		if (obj_priv->pin_count)
			continue;

		/* found one! */
		i = obj_priv->fence_reg;
		break;
	}

	BUG_ON(i == I915_FENCE_REG_NONE);

	/* We only have a reference on obj from the active list. put_fence_reg
	 * might drop that one, causing a use-after-free in it. So hold a
	 * private reference to obj like the other callers of put_fence_reg
	 * (set_tiling ioctl) do. */
	drm_gem_object_reference(obj);
	ret = i915_gem_object_put_fence_reg(obj);
	drm_gem_object_unreference(obj);
	if (ret != 0)
		return ret;

	return i;
}
/**
 * i915_gem_object_get_fence_reg - set up a fence reg for an object
 * @obj: object to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 *
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 */
int
i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_i915_fence_reg *reg = NULL;
	int ret;

	/* Just update our place in the LRU if our fence is getting used. */
	if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
		reg = &dev_priv->fence_regs[obj_priv->fence_reg];
		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
		return 0;
	}

	switch (obj_priv->tiling_mode) {
	case I915_TILING_NONE:
		WARN(1, "allocating a fence for non-tiled object?\n");
		break;
	case I915_TILING_X:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (512 - 1)),
		     "object 0x%08x is X tiled but has non-512B pitch\n",
		     obj_priv->gtt_offset);
		break;
	case I915_TILING_Y:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (128 - 1)),
		     "object 0x%08x is Y tiled but has non-128B pitch\n",
		     obj_priv->gtt_offset);
		break;
	}

	ret = i915_find_fence_reg(dev);
	if (ret < 0)
		return ret;

	obj_priv->fence_reg = ret;
	reg = &dev_priv->fence_regs[obj_priv->fence_reg];
	list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);

	reg->obj = obj;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		sandybridge_write_fence_reg(reg);
		break;
	case 5:
	case 4:
		i965_write_fence_reg(reg);
		break;
	case 3:
		i915_write_fence_reg(reg);
		break;
	case 2:
		i830_write_fence_reg(reg);
		break;
	}

	trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
					obj_priv->tiling_mode);

	return 0;
}
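
/*
 * Typical caller pattern (see i915_gem_object_pin_and_relocate()
 * below): a tiled object that execbuffer flags with
 * EXEC_OBJECT_NEEDS_FENCE is pinned first and only then handed to
 * i915_gem_object_get_fence_reg(); on failure the caller unpins and
 * bails out:
 *
 *	ret = i915_gem_object_pin(obj, entry->alignment);
 *	...
 *	ret = i915_gem_object_get_fence_reg(obj);
 *	if (ret != 0) {
 *		i915_gem_object_unpin(obj);
 *		return ret;
 *	}
 */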
/**
 * i915_gem_clear_fence_reg - clear out fence register info
 * @obj: object to clear
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
static void
i915_gem_clear_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_i915_fence_reg *reg =
		&dev_priv->fence_regs[obj_priv->fence_reg];
	uint32_t fence_reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
			     (obj_priv->fence_reg * 8), 0);
		break;
	case 5:
	case 4:
		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
		break;
	case 3:
		if (obj_priv->fence_reg >= 8)
			fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
		else
	case 2:
			fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;

		I915_WRITE(fence_reg, 0);
		break;
	}

	reg->obj = NULL;
	obj_priv->fence_reg = I915_FENCE_REG_NONE;
	list_del_init(&reg->lru_list);
}
/**
 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
 * to the buffer to finish, and then resets the fence register.
 * @obj: tiled object holding a fence register.
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
int
i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
		return 0;

	/* If we've changed tiling, GTT-mappings of the object
	 * need to re-fault to ensure that the correct fence register
	 * setup is in place.
	 */
	i915_gem_release_mmap(obj);

	/* On the i915, GPU access to tiled buffers is via a fence,
	 * therefore we must wait for any outstanding access to complete
	 * before clearing the fence.
	 */
	if (!IS_I965G(dev)) {
		int ret;

		ret = i915_gem_object_flush_gpu_write_domain(obj);
		if (ret != 0)
			return ret;

		ret = i915_gem_object_wait_rendering(obj);
		if (ret != 0)
			return ret;
	}

	i915_gem_object_flush_gtt_write_domain(obj);
	i915_gem_clear_fence_reg(obj);

	return 0;
}
/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_mm_node *free_space;
	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
	int ret;

	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to bind a purgeable object\n");
		return -EINVAL;
	}

	if (alignment == 0)
		alignment = i915_gem_get_gtt_alignment(obj);
	if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

	/* If the object is bigger than the entire aperture, reject it early
	 * before evicting everything in a vain attempt to find space.
	 */
	if (obj->size > dev->gtt_total) {
		DRM_ERROR("Attempting to bind an object larger than the aperture\n");
		return -E2BIG;
	}

 search_free:
	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
					obj->size, alignment, 0);
	if (free_space != NULL) {
		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
						       alignment);
		if (obj_priv->gtt_space != NULL)
			obj_priv->gtt_offset = obj_priv->gtt_space->start;
	}
	if (obj_priv->gtt_space == NULL) {
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
#if WATCH_LRU
		DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
		ret = i915_gem_evict_something(dev, obj->size, alignment);
		if (ret)
			return ret;

		goto search_free;
	}

#if WATCH_BUF
	DRM_INFO("Binding object of size %zd at 0x%08x\n",
		 obj->size, obj_priv->gtt_offset);
#endif
	ret = i915_gem_object_get_pages(obj, gfpmask);
	if (ret) {
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;

		if (ret == -ENOMEM) {
			/* first try to clear up some space from the GTT */
			ret = i915_gem_evict_something(dev, obj->size,
						       alignment);
			if (ret) {
				/* now try to shrink everyone else */
				if (gfpmask) {
					gfpmask = 0;
					goto search_free;
				}

				return ret;
			}

			goto search_free;
		}

		return ret;
	}

	/* Create an AGP memory structure pointing at our pages, and bind it
	 * into the GTT.
	 */
	obj_priv->agp_mem = drm_agp_bind_pages(dev,
					       obj_priv->pages,
					       obj->size >> PAGE_SHIFT,
					       obj_priv->gtt_offset,
					       obj_priv->agp_type);
	if (obj_priv->agp_mem == NULL) {
		i915_gem_object_put_pages(obj);
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;

		ret = i915_gem_evict_something(dev, obj->size, alignment);
		if (ret)
			return ret;

		goto search_free;
	}
	atomic_inc(&dev->gtt_count);
	atomic_add(obj->size, &dev->gtt_memory);

	/* keep track of bounds object by adding it to the inactive list */
	list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

	trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);

	return 0;
}
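
/*
 * Note on the alignment check in i915_gem_object_bind_to_gtt(): the
 * minimum GTT alignment for an object is a power of two, so
 * "alignment & (minimum - 1)" catches any request that is not a
 * multiple of it. E.g. with a 4096-byte minimum, 8192 passes
 * (8192 & 4095 == 0) while 6144 is rejected (6144 & 4095 == 2048).
 */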
void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj_priv->pages == NULL)
		return;

	trace_i915_gem_object_clflush(obj);

	drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
}
/** Flushes any GPU write domain for the object if it's dirty. */
static int
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	uint32_t old_write_domain;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
		return 0;

	/* Queue the GPU write cache flushing we need. */
	old_write_domain = obj->write_domain;
	i915_gem_flush(dev, 0, obj->write_domain);
	if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
		return -ENOMEM;

	trace_i915_gem_object_change_domain(obj,
					    obj->read_domains,
					    old_write_domain);
	return 0;
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.  Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 */
	old_write_domain = obj->write_domain;
	obj->write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->read_domains,
					    old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	uint32_t old_write_domain;

	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	drm_agp_chipset_flush(dev);
	old_write_domain = obj->write_domain;
	obj->write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->read_domains,
					    old_write_domain);
}

int
i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
{
	int ret = 0;

	switch (obj->write_domain) {
	case I915_GEM_DOMAIN_GTT:
		i915_gem_object_flush_gtt_write_domain(obj);
		break;
	case I915_GEM_DOMAIN_CPU:
		i915_gem_object_flush_cpu_write_domain(obj);
		break;
	default:
		ret = i915_gem_object_flush_gpu_write_domain(obj);
		break;
	}

	return ret;
}
/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	uint32_t old_write_domain, old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj_priv->gtt_space == NULL)
		return -EINVAL;

	ret = i915_gem_object_flush_gpu_write_domain(obj);
	if (ret != 0)
		return ret;

	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	old_write_domain = obj->write_domain;
	old_read_domains = obj->read_domains;

	/* If we're writing through the GTT domain, then CPU and GPU caches
	 * will need to be invalidated at next use.
	 */
	if (write)
		obj->read_domains &= I915_GEM_DOMAIN_GTT;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->write_domain = I915_GEM_DOMAIN_GTT;
		obj_priv->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}
/*
 * Prepare buffer for display plane. Use uninterruptible for possible flush
 * wait, as in modesetting process we're not supposed to be interrupted.
 */
int
i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	uint32_t old_write_domain, old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj_priv->gtt_space == NULL)
		return -EINVAL;

	ret = i915_gem_object_flush_gpu_write_domain(obj);
	if (ret)
		return ret;

	/* Wait on any GPU rendering and flushing to occur. */
	if (obj_priv->active) {
#if WATCH_BUF
		DRM_INFO("%s: object %p wait for seqno %08x\n",
			 __func__, obj, obj_priv->last_rendering_seqno);
#endif
		ret = i915_do_wait_request(dev,
					   obj_priv->last_rendering_seqno,
					   0,
					   obj_priv->ring);
		if (ret != 0)
			return ret;
	}

	i915_gem_object_flush_cpu_write_domain(obj);

	old_write_domain = obj->write_domain;
	old_read_domains = obj->read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = I915_GEM_DOMAIN_GTT;
	obj_priv->dirty = 1;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}
/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	ret = i915_gem_object_flush_gpu_write_domain(obj);
	if (ret)
		return ret;

	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we have a partially-valid cache of the object in the CPU,
	 * finish invalidating it and free the per-page flags.
	 */
	i915_gem_object_set_to_full_cpu_read_domain(obj);

	old_write_domain = obj->write_domain;
	old_read_domains = obj->read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj);

		obj->read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->read_domains &= I915_GEM_DOMAIN_CPU;
		obj->write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}
/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invaliding though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped by GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *		yay, done
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
{
	struct drm_device		*dev = obj->dev;
	drm_i915_private_t		*dev_priv = dev->dev_private;
	struct drm_i915_gem_object	*obj_priv = to_intel_bo(obj);
	uint32_t			invalidate_domains = 0;
	uint32_t			flush_domains = 0;
	uint32_t			old_read_domains;

	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);

	intel_mark_busy(dev, obj);

#if WATCH_BUF
	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
		 __func__, obj,
		 obj->read_domains, obj->pending_read_domains,
		 obj->write_domain, obj->pending_write_domain);
#endif
	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (obj->pending_write_domain == 0)
		obj->pending_read_domains |= obj->read_domains;
	else
		obj_priv->dirty = 1;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->write_domain &&
	    obj->write_domain != obj->pending_read_domains) {
		flush_domains |= obj->write_domain;
		invalidate_domains |=
			obj->pending_read_domains & ~obj->write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
#if WATCH_BUF
		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
			 __func__, flush_domains, invalidate_domains);
#endif
		i915_gem_clflush_object(obj);
	}

	old_read_domains = obj->read_domains;

	/* The actual obj->write_domain will be updated with
	 * pending_write_domain after we emit the accumulated flush for all
	 * of our domain changes in execbuffers (which clears objects'
	 * write_domains).  So if we have a current write domain that we
	 * aren't changing, set pending_write_domain to that.
	 */
	if (flush_domains == 0 && obj->pending_write_domain == 0)
		obj->pending_write_domain = obj->write_domain;
	obj->read_domains = obj->pending_read_domains;

	if (flush_domains & I915_GEM_GPU_DOMAINS) {
		if (obj_priv->ring == &dev_priv->render_ring)
			dev_priv->flush_rings |= FLUSH_RENDER_RING;
		else if (obj_priv->ring == &dev_priv->bsd_ring)
			dev_priv->flush_rings |= FLUSH_BSD_RING;
	}

	dev->invalidate_domains |= invalidate_domains;
	dev->flush_domains |= flush_domains;
#if WATCH_BUF
	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
		 __func__,
		 obj->read_domains, obj->write_domain,
		 dev->invalidate_domains, dev->flush_domains);
#endif

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    obj->write_domain);
}
/**
 * Moves the object from a partially CPU read to a full one.
 *
 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 */
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	if (!obj_priv->page_cpu_valid)
		return;

	/* If we're partially in the CPU read domain, finish moving it in.
	 */
	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
		int i;

		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
			if (obj_priv->page_cpu_valid[i])
				continue;
			drm_clflush_pages(obj_priv->pages + i, 1);
		}
	}

	/* Free the page_cpu_valid mappings which are now stale, whether
	 * or not we've got I915_GEM_DOMAIN_CPU.
	 */
	kfree(obj_priv->page_cpu_valid);
	obj_priv->page_cpu_valid = NULL;
}
/**
 * Set the CPU read domain on a range of the object.
 *
 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 * not entirely valid.  The page_cpu_valid member of the object flags which
 * pages have been flushed, and will be respected by
 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
 * of the whole object.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
					  uint64_t offset, uint64_t size)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	uint32_t old_read_domains;
	int i, ret;

	if (offset == 0 && size == obj->size)
		return i915_gem_object_set_to_cpu_domain(obj, 0);

	ret = i915_gem_object_flush_gpu_write_domain(obj);
	if (ret)
		return ret;

	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;
	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're already fully in the CPU read domain, we're done. */
	if (obj_priv->page_cpu_valid == NULL &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
		return 0;

	/* Otherwise, create/clear the per-page CPU read domain flag if we're
	 * newly adding I915_GEM_DOMAIN_CPU
	 */
	if (obj_priv->page_cpu_valid == NULL) {
		obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
						   GFP_KERNEL);
		if (obj_priv->page_cpu_valid == NULL)
			return -ENOMEM;
	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);

	/* Flush the cache on any pages that are still invalid from the CPU's
	 * perspective.
	 */
	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
	     i++) {
		if (obj_priv->page_cpu_valid[i])
			continue;

		drm_clflush_pages(obj_priv->pages + i, 1);

		obj_priv->page_cpu_valid[i] = 1;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	old_read_domains = obj->read_domains;
	obj->read_domains |= I915_GEM_DOMAIN_CPU;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    obj->write_domain);

	return 0;
}
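
/*
 * Example of the page-range arithmetic above: a 4200-byte read at
 * offset 5000 spans bytes [5000, 9200), so the loop runs from page
 * 5000 / 4096 = 1 through (5000 + 4200 - 1) / 4096 = 2, flushing and
 * marking valid only those two pages rather than the whole object.
 */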
/**
 * Pin an object to the GTT and evaluate the relocations landing in it.
 */
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
				 struct drm_file *file_priv,
				 struct drm_i915_gem_exec_object2 *entry,
				 struct drm_i915_gem_relocation_entry *relocs)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int i, ret;
	void __iomem *reloc_page;
	bool need_fence;

	need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		     obj_priv->tiling_mode != I915_TILING_NONE;

	/* Check fence reg constraints and rebind if necessary */
	if (need_fence &&
	    !i915_gem_object_fence_offset_ok(obj,
					     obj_priv->tiling_mode)) {
		ret = i915_gem_object_unbind(obj);
		if (ret)
			return ret;
	}

	/* Choose the GTT offset for our buffer and put it there. */
	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
	if (ret)
		return ret;

	/*
	 * Pre-965 chips need a fence register set up in order to
	 * properly handle blits to/from tiled surfaces.
	 */
	if (need_fence) {
		ret = i915_gem_object_get_fence_reg(obj);
		if (ret != 0) {
			i915_gem_object_unpin(obj);
			return ret;
		}
	}

	entry->offset = obj_priv->gtt_offset;

	/* Apply the relocations, using the GTT aperture to avoid cache
	 * flushing requirements.
	 */
	for (i = 0; i < entry->relocation_count; i++) {
		struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
		struct drm_gem_object *target_obj;
		struct drm_i915_gem_object *target_obj_priv;
		uint32_t reloc_val, reloc_offset;
		uint32_t __iomem *reloc_entry;

		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
						   reloc->target_handle);
		if (target_obj == NULL) {
			i915_gem_object_unpin(obj);
			return -ENOENT;
		}
		target_obj_priv = to_intel_bo(target_obj);

#if WATCH_RELOC
		DRM_INFO("%s: obj %p offset %08x target %d "
			 "read %08x write %08x gtt %08x "
			 "presumed %08x delta %08x\n",
			 __func__,
			 obj,
			 (int) reloc->offset,
			 (int) reloc->target_handle,
			 (int) reloc->read_domains,
			 (int) reloc->write_domain,
			 (int) target_obj_priv->gtt_offset,
			 (int) reloc->presumed_offset,
			 reloc->delta);
#endif

		/* The target buffer should have appeared before us in the
		 * exec_object list, so it should have a GTT space bound by now.
		 */
		if (target_obj_priv->gtt_space == NULL) {
			DRM_ERROR("No GTT space found for object %d\n",
				  reloc->target_handle);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		/* Validate that the target is in a valid r/w GPU domain */
		if (reloc->write_domain & (reloc->write_domain - 1)) {
			DRM_ERROR("reloc with multiple write domains: "
				  "obj %p target %d offset %d "
				  "read %08x write %08x",
				  obj, reloc->target_handle,
				  (int) reloc->offset,
				  reloc->read_domains,
				  reloc->write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
		    reloc->read_domains & I915_GEM_DOMAIN_CPU) {
			DRM_ERROR("reloc with read/write CPU domains: "
				  "obj %p target %d offset %d "
				  "read %08x write %08x",
				  obj, reloc->target_handle,
				  (int) reloc->offset,
				  reloc->read_domains,
				  reloc->write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc->write_domain && target_obj->pending_write_domain &&
		    reloc->write_domain != target_obj->pending_write_domain) {
			DRM_ERROR("Write domain conflict: "
				  "obj %p target %d offset %d "
				  "new %08x old %08x\n",
				  obj, reloc->target_handle,
				  (int) reloc->offset,
				  reloc->write_domain,
				  target_obj->pending_write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		target_obj->pending_read_domains |= reloc->read_domains;
		target_obj->pending_write_domain |= reloc->write_domain;

		/* If the relocation already has the right value in it, no
		 * more work needs to be done.
		 */
		if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
			drm_gem_object_unreference(target_obj);
			continue;
		}

		/* Check that the relocation address is valid... */
		if (reloc->offset > obj->size - 4) {
			DRM_ERROR("Relocation beyond object bounds: "
				  "obj %p target %d offset %d size %d.\n",
				  obj, reloc->target_handle,
				  (int) reloc->offset, (int) obj->size);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc->offset & 3) {
			DRM_ERROR("Relocation not 4-byte aligned: "
				  "obj %p target %d offset %d.\n",
				  obj, reloc->target_handle,
				  (int) reloc->offset);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		/* and points to somewhere within the target object. */
		if (reloc->delta >= target_obj->size) {
			DRM_ERROR("Relocation beyond target object bounds: "
				  "obj %p target %d delta %d size %d.\n",
				  obj, reloc->target_handle,
				  (int) reloc->delta, (int) target_obj->size);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		/* Map the page containing the relocation we're going to
		 * perform.
		 */
		reloc_offset = obj_priv->gtt_offset + reloc->offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      (reloc_offset &
						       ~(PAGE_SIZE - 1)),
						      KM_USER0);
		reloc_entry = (uint32_t __iomem *)(reloc_page +
						   (reloc_offset & (PAGE_SIZE - 1)));
		reloc_val = target_obj_priv->gtt_offset + reloc->delta;

#if WATCH_BUF
		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
			 obj, (unsigned int) reloc->offset,
			 readl(reloc_entry), reloc_val);
#endif
		writel(reloc_val, reloc_entry);
		io_mapping_unmap_atomic(reloc_page, KM_USER0);

		/* The updated presumed offset for this entry will be
		 * copied back out to the user.
		 */
		reloc->presumed_offset = target_obj_priv->gtt_offset;

		drm_gem_object_unreference(target_obj);
	}

#if WATCH_BUF
	if (0)
		i915_gem_dump_object(obj, 128, __func__, ~0);
#endif
	return 0;
}
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	int ret = 0;
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);

	mutex_lock(&dev->struct_mutex);
	while (!list_empty(&i915_file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&i915_file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);

		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		ret = i915_wait_request(dev, request->seqno, request->ring);
		if (ret != 0)
			break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int
i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
			      uint32_t buffer_count,
			      struct drm_i915_gem_relocation_entry **relocs)
{
	uint32_t reloc_count = 0, reloc_index = 0, i;
	int ret;

	*relocs = NULL;
	for (i = 0; i < buffer_count; i++) {
		if (reloc_count + exec_list[i].relocation_count < reloc_count)
			return -EINVAL;
		reloc_count += exec_list[i].relocation_count;
	}

	*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
	if (*relocs == NULL) {
		DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
		return -ENOMEM;
	}

	for (i = 0; i < buffer_count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;

		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;

		ret = copy_from_user(&(*relocs)[reloc_index],
				     user_relocs,
				     exec_list[i].relocation_count *
				     sizeof(**relocs));
		if (ret != 0) {
			drm_free_large(*relocs);
			*relocs = NULL;
			return -EFAULT;
		}

		reloc_index += exec_list[i].relocation_count;
	}

	return 0;
}
static int
i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
			    uint32_t buffer_count,
			    struct drm_i915_gem_relocation_entry *relocs)
{
	uint32_t reloc_count = 0, i;
	int ret = 0;

	if (relocs == NULL)
		return 0;

	for (i = 0; i < buffer_count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		int unwritten;

		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;

		unwritten = copy_to_user(user_relocs,
					 &relocs[reloc_count],
					 exec_list[i].relocation_count *
					 sizeof(*relocs));

		if (unwritten) {
			ret = -EFAULT;
			goto err;
		}

		reloc_count += exec_list[i].relocation_count;
	}

err:
	drm_free_large(relocs);

	return ret;
}
static int
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
			  uint64_t exec_offset)
{
	uint32_t exec_start, exec_len;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	if ((exec_start | exec_len) & 0x7)
		return -EINVAL;

	if (!exec_start)
		return -EINVAL;

	return 0;
}
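
/*
 * Example: exec_start = exec_offset + batch_start_offset must leave
 * the low three bits clear, as must batch_len; a batch at 0x10040 of
 * length 0x80 passes ((0x10040 | 0x80) & 0x7 == 0) while one at
 * 0x10044 is rejected, since ring dispatch requires 8-byte aligned
 * batches.
 */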
static int
i915_gem_wait_for_pending_flip(struct drm_device *dev,
			       struct drm_gem_object **object_list,
			       int count)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	DEFINE_WAIT(wait);
	int i, ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->pending_flip_queue,
				&wait, TASK_INTERRUPTIBLE);
		for (i = 0; i < count; i++) {
			obj_priv = to_intel_bo(object_list[i]);
			if (atomic_read(&obj_priv->pending_flip) > 0)
				break;
		}
		if (i == count)
			break;

		if (!signal_pending(current)) {
			mutex_unlock(&dev->struct_mutex);
			schedule();
			mutex_lock(&dev->struct_mutex);
			continue;
		}
		ret = -ERESTARTSYS;
		break;
	}
	finish_wait(&dev_priv->pending_flip_queue, &wait);

	return ret;
}
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file_priv,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec_list)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object **object_list = NULL;
	struct drm_gem_object *batch_obj;
	struct drm_i915_gem_object *obj_priv;
	struct drm_clip_rect *cliprects = NULL;
	struct drm_i915_gem_relocation_entry *relocs = NULL;
	int ret = 0, ret2, i, pinned = 0;
	uint64_t exec_offset;
	uint32_t seqno, flush_domains, reloc_index;
	int pin_tries, flips;

	struct intel_ring_buffer *ring = NULL;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif
	if (args->flags & I915_EXEC_BSD) {
		if (!HAS_BSD(dev)) {
			DRM_ERROR("execbuf with wrong flag\n");
			return -EINVAL;
		}
		ring = &dev_priv->bsd_ring;
	} else {
		ring = &dev_priv->render_ring;
	}

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}
	object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
	if (object_list == NULL) {
		DRM_ERROR("Failed to allocate object list for %d buffers\n",
			  args->buffer_count);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	if (args->num_cliprects != 0) {
		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		ret = copy_from_user(cliprects,
				     (struct drm_clip_rect __user *)
				     (uintptr_t) args->cliprects_ptr,
				     sizeof(*cliprects) * args->num_cliprects);
		if (ret != 0) {
			DRM_ERROR("copy %d cliprects failed: %d\n",
				  args->num_cliprects, ret);
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
					    &relocs);
	if (ret != 0)
		goto pre_mutex_err;

	mutex_lock(&dev->struct_mutex);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (atomic_read(&dev_priv->mm.wedged)) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EIO;
		goto pre_mutex_err;
	}

	if (dev_priv->mm.suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	flips = 0;
	for (i = 0; i < args->buffer_count; i++) {
		object_list[i] = drm_gem_object_lookup(dev, file_priv,
						       exec_list[i].handle);
		if (object_list[i] == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				  exec_list[i].handle, i);
			/* prevent error path from reading uninitialized data */
			args->buffer_count = i + 1;
			ret = -ENOENT;
			goto err;
		}

		obj_priv = to_intel_bo(object_list[i]);
		if (obj_priv->in_execbuffer) {
			DRM_ERROR("Object %p appears more than once in object list\n",
				  object_list[i]);
			/* prevent error path from reading uninitialized data */
			args->buffer_count = i + 1;
			ret = -EINVAL;
			goto err;
		}
		obj_priv->in_execbuffer = true;
		flips += atomic_read(&obj_priv->pending_flip);
	}

	if (flips > 0) {
		ret = i915_gem_wait_for_pending_flip(dev, object_list,
						     args->buffer_count);
		if (ret)
			goto err;
	}

	/* Pin and relocate */
	for (pin_tries = 0; ; pin_tries++) {
		ret = 0;
		reloc_index = 0;

		for (i = 0; i < args->buffer_count; i++) {
			object_list[i]->pending_read_domains = 0;
			object_list[i]->pending_write_domain = 0;
			ret = i915_gem_object_pin_and_relocate(object_list[i],
							       file_priv,
							       &exec_list[i],
							       &relocs[reloc_index]);
			if (ret)
				break;
			pinned = i + 1;
			reloc_index += exec_list[i].relocation_count;
		}
		/* success */
		if (ret == 0)
			break;

		/* error other than GTT full, or we've already tried again */
		if (ret != -ENOSPC || pin_tries >= 1) {
			if (ret != -ERESTARTSYS) {
				unsigned long long total_size = 0;
				int num_fences = 0;
				for (i = 0; i < args->buffer_count; i++) {
					obj_priv = to_intel_bo(object_list[i]);

					total_size += object_list[i]->size;
					num_fences +=
						exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
						obj_priv->tiling_mode != I915_TILING_NONE;
				}
				DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
					  pinned+1, args->buffer_count,
					  total_size, num_fences,
					  ret);
				DRM_ERROR("%d objects [%d pinned], "
					  "%d object bytes [%d pinned], "
					  "%d/%d gtt bytes\n",
					  atomic_read(&dev->object_count),
					  atomic_read(&dev->pin_count),
					  atomic_read(&dev->object_memory),
					  atomic_read(&dev->pin_memory),
					  atomic_read(&dev->gtt_memory),
					  dev->gtt_total);
			}
			goto err;
		}

		/* unpin all of our buffers */
		for (i = 0; i < pinned; i++)
			i915_gem_object_unpin(object_list[i]);
		pinned = 0;

		/* evict everyone we can from the aperture */
		ret = i915_gem_evict_everything(dev);
		if (ret && ret != -ENOSPC)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	batch_obj = object_list[args->buffer_count-1];
	if (batch_obj->pending_write_domain) {
		DRM_ERROR("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* Sanity check the batch buffer, prior to moving objects */
	exec_offset = exec_list[args->buffer_count - 1].offset;
	ret = i915_gem_check_execbuffer(args, exec_offset);
	if (ret != 0) {
		DRM_ERROR("execbuf with invalid offset/length\n");
		goto err;
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Zero the global flush/invalidate flags. These
	 * will be modified as new domains are computed
	 * for each object
	 */
	dev->invalidate_domains = 0;
	dev->flush_domains = 0;
	dev_priv->flush_rings = 0;

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		/* Compute new gpu domains and update invalidate/flush */
		i915_gem_object_set_to_gpu_domain(obj);
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 dev->invalidate_domains,
			 dev->flush_domains);
#endif
		i915_gem_flush(dev,
			       dev->invalidate_domains,
			       dev->flush_domains);
		if (dev_priv->flush_rings & FLUSH_RENDER_RING)
			(void)i915_add_request(dev, file_priv,
					       dev->flush_domains,
					       &dev_priv->render_ring);
		if (dev_priv->flush_rings & FLUSH_BSD_RING)
			(void)i915_add_request(dev, file_priv,
					       dev->flush_domains,
					       &dev_priv->bsd_ring);
	}

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];
		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
		uint32_t old_write_domain = obj->write_domain;

		obj->write_domain = obj->pending_write_domain;
		if (obj->write_domain)
			list_move_tail(&obj_priv->gpu_write_list,
				       &dev_priv->mm.gpu_write_list);
		else
			list_del_init(&obj_priv->gpu_write_list);

		trace_i915_gem_object_change_domain(obj,
						    obj->read_domains,
						    old_write_domain);
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

#if WATCH_COHERENCY
	for (i = 0; i < args->buffer_count; i++) {
		i915_gem_object_check_coherency(object_list[i],
						exec_list[i].handle);
	}
#endif

#if WATCH_EXEC
	i915_gem_dump_object(batch_obj,
			     args->batch_len,
			     __func__,
			     ~0);
#endif

	/* Exec the batchbuffer */
	ret = ring->dispatch_gem_execbuffer(dev, ring, args,
					    cliprects, exec_offset);
	if (ret) {
		DRM_ERROR("dispatch failed %d\n", ret);
		goto err;
	}

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires
	 */
	flush_domains = i915_retire_commands(dev, ring);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/*
	 * Get a seqno representing the execution of the current buffer,
	 * which we can wait on.  We would like to mitigate these interrupts,
	 * likely by only creating seqnos occasionally (so that we have
	 * *some* interrupts representing completion of buffers that we can
	 * wait on when trying to clear up gtt space).
	 */
	seqno = i915_add_request(dev, file_priv, flush_domains, ring);

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];
		obj_priv = to_intel_bo(obj);

		i915_gem_object_move_to_active(obj, seqno, ring);
#if WATCH_LRU
		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
	}
#if WATCH_LRU
	i915_dump_lru(dev, __func__);
#endif

	i915_verify_inactive(dev, __FILE__, __LINE__);

err:
	for (i = 0; i < pinned; i++)
		i915_gem_object_unpin(object_list[i]);

	for (i = 0; i < args->buffer_count; i++) {
		if (object_list[i]) {
			obj_priv = to_intel_bo(object_list[i]);
			obj_priv->in_execbuffer = false;
		}
		drm_gem_object_unreference(object_list[i]);
	}

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	/* Copy the updated relocations out regardless of current error
	 * state.  Failure to update the relocs would mean that the next
	 * time userland calls execbuf, it would do so with presumed offset
	 * state that didn't match the actual object state.
	 */
	ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
					   relocs);
	if (ret2 != 0) {
		DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);

		if (ret == 0)
			ret = ret2;
	}

	drm_free_large(object_list);
	kfree(cliprects);

	return ret;
}
/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (!IS_I965G(dev))
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;

	ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec2_list == NULL) {
		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}
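
/*
 * Rough sketch of how userspace drives this ioctl (field names are
 * from the i915_drm.h uapi; the buffer-object setup is elided):
 *
 *	struct drm_i915_gem_execbuffer2 execbuf;
 *	memset(&execbuf, 0, sizeof(execbuf));
 *	execbuf.buffers_ptr = (uintptr_t)exec2_objects;
 *	execbuf.buffer_count = count;      // batch buffer is the last entry
 *	execbuf.batch_len = batch_bytes;   // must be 8-byte aligned
 *	execbuf.flags = I915_EXEC_RENDER;  // or I915_EXEC_BSD
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *
 * On return each exec object's offset field holds the GTT offset to
 * presume on the next submission.
 */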
int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret;

	BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (obj_priv->gtt_space != NULL) {
		if (alignment == 0)
			alignment = i915_gem_get_gtt_alignment(obj);
		if (obj_priv->gtt_offset & (alignment - 1)) {
			WARN(obj_priv->pin_count,
			     "bo is already pinned with incorrect alignment:"
			     " offset=%x, req.alignment=%x\n",
			     obj_priv->gtt_offset, alignment);
			ret = i915_gem_object_unbind(obj);
			if (ret)
				return ret;
		}
	}

	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment);
		if (ret)
			return ret;
	}

	obj_priv->pin_count++;

	/* If the object is not active and not pending a flush,
	 * remove it from the inactive list
	 */
	if (obj_priv->pin_count == 1) {
		atomic_inc(&dev->pin_count);
		atomic_add(obj->size, &dev->pin_memory);
		if (!obj_priv->active &&
		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
			list_del_init(&obj_priv->list);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);

	return 0;
}
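
/*
 * Pin counts nest: each i915_gem_object_pin() must be balanced by an
 * i915_gem_object_unpin(). Only the 0 -> 1 transition above updates
 * the global pin accounting and drops the object from the inactive
 * list, and only the 1 -> 0 transition in unpin below puts it back.
 */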
void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	i915_verify_inactive(dev, __FILE__, __LINE__);
	obj_priv->pin_count--;
	BUG_ON(obj_priv->pin_count < 0);
	BUG_ON(obj_priv->gtt_space == NULL);

	/* If the object is no longer pinned, and is
	 * neither active nor being flushed, then stick it on
	 * the inactive list
	 */
	if (obj_priv->pin_count == 0) {
		if (!obj_priv->active &&
		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.inactive_list);
		atomic_dec(&dev->pin_count);
		atomic_sub(obj->size, &dev->pin_memory);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -ENOENT;
	}
	obj_priv = to_intel_bo(obj);

	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to pin a purgeable buffer\n");
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	obj_priv->user_pin_count++;
	obj_priv->pin_filp = file_priv;
	if (obj_priv->user_pin_count == 1) {
		ret = i915_gem_object_pin(obj, args->alignment);
		if (ret != 0) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj_priv->gtt_offset;
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -ENOENT;
	}

	obj_priv = to_intel_bo(obj);
	if (obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}
	obj_priv->user_pin_count--;
	if (obj_priv->user_pin_count == 0) {
		obj_priv->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
			  args->handle);
		return -ENOENT;
	}

	mutex_lock(&dev->struct_mutex);

	/* Count all active objects as busy, even if they are currently not used
	 * by the gpu. Users of this interface expect objects to eventually
	 * become non-busy without any further actions, therefore emit any
	 * necessary flushes here.
	 */
	obj_priv = to_intel_bo(obj);
	args->busy = obj_priv->active;

	/* Unconditionally flush objects, even when the gpu still uses this
	 * object. Userspace calling this function indicates that it wants to
	 * use this buffer rather sooner than later, so issuing the required
	 * flush earlier is beneficial.
	 */
	if (obj->write_domain) {
		i915_gem_flush(dev, 0, obj->write_domain);
		(void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
	}

	/* Update the active list for the hardware's current position.
	 * Otherwise this only updates on a delayed timer or when irqs
	 * are actually unmasked, and our working set ends up being
	 * larger than required.
	 */
	i915_gem_retire_requests_ring(dev, obj_priv->ring);

	args->busy = obj_priv->active;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_gem_madvise *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
			  args->handle);
		return -ENOENT;
	}

	mutex_lock(&dev->struct_mutex);
	obj_priv = to_intel_bo(obj);

	if (obj_priv->pin_count) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);

		DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
		return -EINVAL;
	}

	if (obj_priv->madv != __I915_MADV_PURGED)
		obj_priv->madv = args->madv;

	/* if the object is no longer bound, discard its backing storage */
	if (i915_gem_object_is_purgeable(obj_priv) &&
	    obj_priv->gtt_space == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj_priv->madv != __I915_MADV_PURGED;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
struct drm_gem_object *i915_gem_alloc_object(struct drm_device *dev,
					     size_t size)
{
	struct drm_i915_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->agp_type = AGP_USER_MEMORY;
	obj->base.driver_private = NULL;
	obj->fence_reg = I915_FENCE_REG_NONE;
	INIT_LIST_HEAD(&obj->list);
	INIT_LIST_HEAD(&obj->gpu_write_list);
	obj->madv = I915_MADV_WILLNEED;

	trace_i915_gem_object_create(&obj->base);

	return &obj->base;
}

int i915_gem_init_object(struct drm_gem_object *obj)
{
	BUG();

	return 0;
}
static void i915_gem_free_object_tail(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret;

	ret = i915_gem_object_unbind(obj);
	if (ret == -ERESTARTSYS) {
		list_move(&obj_priv->list,
			  &dev_priv->mm.deferred_free_list);
		return;
	}

	if (obj_priv->mmap_offset)
		i915_gem_free_mmap_offset(obj);

	drm_gem_object_release(obj);

	kfree(obj_priv->page_cpu_valid);
	kfree(obj_priv->bit_17);
	kfree(obj_priv);
}
void i915_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	trace_i915_gem_object_destroy(obj);

	while (obj_priv->pin_count > 0)
		i915_gem_object_unpin(obj);

	if (obj_priv->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	i915_gem_free_object_tail(obj);
}
int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended ||
	    (dev_priv->render_ring.gem_object == NULL) ||
	    (HAS_BSD(dev) &&
	     dev_priv->bsd_ring.gem_object == NULL)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = i915_gpu_idle(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Under UMS, be paranoid and evict. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_gem_evict_inactive(dev);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 * And not confound mm.suspended!
	 */
	dev_priv->mm.suspended = 1;
	del_timer(&dev_priv->hangcheck_timer);

	i915_kernel_lost_context(dev);
	i915_gem_cleanup_ringbuffer(dev);

	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	return 0;
}
/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
static int
i915_gem_init_pipe_control(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj_priv = to_intel_bo(obj);
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret)
		goto err_unref;

	dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
	dev_priv->seqno_page = kmap(obj_priv->pages[0]);
	if (dev_priv->seqno_page == NULL)
		goto err_unpin;

	dev_priv->seqno_obj = obj;
	memset(dev_priv->seqno_page, 0, PAGE_SIZE);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
err:
	return ret;
}
static void
i915_gem_cleanup_pipe_control(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = dev_priv->seqno_obj;
	obj_priv = to_intel_bo(obj);
	kunmap(obj_priv->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	dev_priv->seqno_obj = NULL;

	dev_priv->seqno_page = NULL;
}
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	dev_priv->render_ring = render_ring;

	if (!I915_NEED_GFX_HWS(dev)) {
		dev_priv->render_ring.status_page.page_addr
			= dev_priv->status_page_dmah->vaddr;
		memset(dev_priv->render_ring.status_page.page_addr,
		       0, PAGE_SIZE);
	}

	if (HAS_PIPE_CONTROL(dev)) {
		ret = i915_gem_init_pipe_control(dev);
		if (ret)
			return ret;
	}

	ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
	if (ret)
		goto cleanup_pipe_control;

	if (HAS_BSD(dev)) {
		dev_priv->bsd_ring = bsd_ring;
		ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
		if (ret)
			goto cleanup_render_ring;
	}

	dev_priv->next_seqno = 1;

	return 0;

cleanup_render_ring:
	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
cleanup_pipe_control:
	if (HAS_PIPE_CONTROL(dev))
		i915_gem_cleanup_pipe_control(dev);
	return ret;
}
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
	if (HAS_BSD(dev))
		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
	if (HAS_PIPE_CONTROL(dev))
		i915_gem_cleanup_pipe_control(dev);
}
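/* Under UMS the X server owns the chip across VT switches: entervt
 * reinitializes the rings and re-enables interrupts, leavevt idles the
 * GPU and hands the hardware back.  Both are no-ops under KMS, where the
 * kernel never relinquishes control.
 */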
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->mm.wedged, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	spin_lock(&dev_priv->mm.active_list_lock);
	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
	spin_unlock(&dev_priv->mm.active_list_lock);

	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
	mutex_unlock(&dev->struct_mutex);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_ringbuffer;

	return 0;

cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->mm.suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	drm_irq_uninstall(dev);
	return i915_gem_idle(dev);
}
void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}
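/* One-time setup at driver load: initialize the memory-manager lists and
 * workers, hook this device into the global shrinker list, and put the
 * fence registers into a known (cleared) state.
 */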
void
i915_gem_load(struct drm_device *dev)
{
	int i;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock_init(&dev_priv->mm.active_list_lock);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
	INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
	if (HAS_BSD(dev)) {
		INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
		INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
	}
	for (i = 0; i < 16; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	spin_lock(&shrink_list_lock);
	list_add(&dev_priv->mm.shrink_list, &shrink_list);
	spin_unlock(&shrink_list_lock);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		u32 tmp = I915_READ(MI_ARB_STATE);
		if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
			/* arb state is a masked write, so set bit + bit in mask */
			tmp = MI_ARB_C3_LP_WRITE_ENABLE |
				(MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
			I915_WRITE(MI_ARB_STATE, tmp);
		}
	}

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	if (IS_I965G(dev)) {
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
	} else {
		for (i = 0; i < 8; i++)
			I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
	}
	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);
}
/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
int i915_gem_init_phys_object(struct drm_device *dev,
			      int id, int size, int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, align);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}
void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj) {
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
	}

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}
void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}
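/* Detaching copies the contents of the contiguous DMA buffer back into
 * the object's shmem pages (the reverse of attach) before dropping the
 * phys_obj <-> object binding.
 */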
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;
	int i;
	int ret;
	int page_count;

	obj_priv = to_intel_bo(obj);
	if (!obj_priv->phys_obj)
		return;

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret)
		goto out;

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst, KM_USER0);
	}
	drm_clflush_pages(obj_priv->pages, page_count);
	drm_agp_chipset_flush(dev);

	i915_gem_object_put_pages(obj);
out:
	obj_priv->phys_obj->cur_obj = NULL;
	obj_priv->phys_obj = NULL;
}
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_gem_object *obj,
			    int id,
			    int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	obj_priv = to_intel_bo(obj);

	if (obj_priv->phys_obj) {
		if (obj_priv->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
			goto out;
		}
	}

	/* bind to the object */
	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj_priv->phys_obj->cur_obj = obj;

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret) {
		DRM_ERROR("failed to get page list\n");
		goto out;
	}

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER0);
	}

	i915_gem_object_put_pages(obj);

	return 0;
out:
	return ret;
}
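/* Illustrative caller, sketched from the KMS cursor path of this era
 * (not code in this file): hardware that needs physically addressed
 * cursors attaches the buffer before programming the cursor base:
 *
 *	int align = IS_I830(dev) ? 16 * 1024 : 256;
 *	ret = i915_gem_attach_phys_object(dev, bo,
 *			(pipe == 0) ? I915_GEM_PHYS_CURSOR_0 :
 *				      I915_GEM_PHYS_CURSOR_1,
 *			align);
 *
 * Subsequent CPU writes then go through i915_gem_phys_pwrite() below.
 */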
static int
i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	void *obj_addr;
	int ret;
	char __user *user_data;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;

	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
	ret = copy_from_user(obj_addr, user_data, args->size);
	if (ret)
		return -EFAULT;

	drm_agp_chipset_flush(dev);
	return 0;
}
void i915_gem_release(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	mutex_lock(&dev->struct_mutex);
	while (!list_empty(&i915_file_priv->mm.request_list))
		list_del_init(i915_file_priv->mm.request_list.next);
	mutex_unlock(&dev->struct_mutex);
}
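/* Helper for the shrinker below: reports whether any ring still has
 * active or flushing work outstanding.
 */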
static int
i915_gpu_is_active(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int lists_empty;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
		      list_empty(&dev_priv->render_ring.active_list);
	if (HAS_BSD(dev))
		lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
	spin_unlock(&dev_priv->mm.active_list_lock);

	return !lists_empty;
}
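/* Memory-pressure callback.  With nr_to_scan == 0 the VM only asks for a
 * count of freeable objects; otherwise we make up to three passes per
 * device: unbind purgeable buffers, then any remaining inactive buffers,
 * and as a last resort idle the GPU and rescan.  Returning -1 tells the
 * VM we could not shrink without risking deadlock (a trylock failed).
 */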
static int
i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	drm_i915_private_t *dev_priv, *next_dev;
	struct drm_i915_gem_object *obj_priv, *next_obj;
	int cnt = 0;
	int would_deadlock = 1;

	/* "fast-path" to count number of available objects */
	if (nr_to_scan == 0) {
		spin_lock(&shrink_list_lock);
		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
			struct drm_device *dev = dev_priv->dev;

			if (mutex_trylock(&dev->struct_mutex)) {
				list_for_each_entry(obj_priv,
						    &dev_priv->mm.inactive_list,
						    list)
					cnt++;
				mutex_unlock(&dev->struct_mutex);
			}
		}
		spin_unlock(&shrink_list_lock);

		return (cnt / 100) * sysctl_vfs_cache_pressure;
	}

	spin_lock(&shrink_list_lock);

rescan:
	/* first scan for clean buffers */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);
		i915_gem_retire_requests(dev);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 list) {
			if (i915_gem_object_is_purgeable(obj_priv)) {
				i915_gem_object_unbind(&obj_priv->base);
				if (--nr_to_scan <= 0)
					break;
			}
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		would_deadlock = 0;

		if (nr_to_scan <= 0)
			break;
	}

	/* second pass, evict/count anything still on the inactive list */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 list) {
			if (nr_to_scan > 0) {
				i915_gem_object_unbind(&obj_priv->base);
				nr_to_scan--;
			} else
				cnt++;
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		would_deadlock = 0;
	}

	if (nr_to_scan) {
		int active = 0;

		/*
		 * We are desperate for pages, so as a last resort, wait
		 * for the GPU to finish and discard whatever we can.
		 * This has a dramatic impact to reduce the number of
		 * OOM-killer events whilst running the GPU aggressively.
		 */
		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
			struct drm_device *dev = dev_priv->dev;

			if (!mutex_trylock(&dev->struct_mutex))
				continue;

			spin_unlock(&shrink_list_lock);

			if (i915_gpu_is_active(dev)) {
				i915_gpu_idle(dev);
				active++;
			}

			spin_lock(&shrink_list_lock);
			mutex_unlock(&dev->struct_mutex);
		}

		if (active)
			goto rescan;
	}

	spin_unlock(&shrink_list_lock);

	if (would_deadlock)
		return -1;

	return (cnt / 100) * sysctl_vfs_cache_pressure;
}
static struct shrinker shrinker = {
	.shrink = i915_gem_shrink,
	.seeks = DEFAULT_SEEKS,
};

__init void
i915_gem_shrinker_init(void)
{
	register_shrinker(&shrinker);
}

__exit void
i915_gem_shrinker_exit(void)
{
	unregister_shrinker(&shrinker);
}