drm/i915: increase default latency constant (v2 w/comment)
drivers/gpu/drm/i915/i915_gem.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include <linux/swap.h>
#include <linux/pci.h>

#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))

static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
				       unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);

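/* Initialize the GTT range [start, end) used for GEM object allocation;
 * both bounds must be page aligned.
 */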
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev->gtt_total = (uint32_t) (end - start);

	return 0;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

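/* Report the total GTT aperture size and the portion not currently taken by
 * pinned objects, so userspace can size its working set.
 */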
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}


/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int handle, ret;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}

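/* Copy @length bytes from the object's shmem-backed pages to user space
 * using an atomic kmap. Returns -EFAULT if the user buffer would fault,
 * letting the caller fall back to the slow pread path.
 */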
static inline int
fast_shmem_read(struct page **pages,
		loff_t page_base, int page_offset,
		char __user *data,
		int length)
{
	char __iomem *vaddr;
	int unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;

	return 0;
}

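/* Tiled objects on machines whose swizzle pattern also depends on bit 17 of
 * the physical page address need the CPU copy paths to swizzle by hand.
 */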
static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj_priv->tiling_mode != I915_TILING_NONE;
}

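/* Copy between two already-pinned pages via kmap_atomic; used by the slow
 * pread/pwrite paths.
 */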
static inline int
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap_atomic(dst_page, KM_USER0);
	if (dst_vaddr == NULL)
		return -ENOMEM;

	src_vaddr = kmap_atomic(src_page, KM_USER1);
	if (src_vaddr == NULL) {
		kunmap_atomic(dst_vaddr, KM_USER0);
		return -ENOMEM;
	}

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap_atomic(src_vaddr, KM_USER1);
	kunmap_atomic(dst_vaddr, KM_USER0);

	return 0;
}

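/* As slow_shmem_copy(), but for objects that need bit-17 swizzling: when
 * bit 17 of the GPU page's physical address is set, the copy XORs bit 6
 * into the offset on the GPU side.
 */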
static inline int
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
	if (gpu_vaddr == NULL)
		return -ENOMEM;

	cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
	if (cpu_vaddr == NULL) {
		kunmap_atomic(gpu_vaddr, KM_USER0);
		return -ENOMEM;
	}

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap_atomic(cpu_vaddr, KM_USER1);
	kunmap_atomic(gpu_vaddr, KM_USER0);

	return 0;
}

/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space. On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page-aligned offset within the object
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_read(obj_priv->pages,
				      page_base, page_offset,
				      user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback shmem pread path, which uses get_user_pages to pin
 * the user pages and copies into them via kmap_atomic, so we can read from
 * the object's backing pages while holding struct_mutex without taking
 * page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
						    shmem_page_offset,
						    user_pages[data_page_index],
						    data_page_offset,
						    page_length,
						    1);
		} else {
			ret = slow_shmem_copy(user_pages[data_page_index],
					      data_page_offset,
					      obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      page_length);
		}
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
		if (ret != 0)
			ret = i915_gem_shmem_pread_slow(dev, obj, args,
							file_priv);
	}

	drm_gem_object_unreference(obj);

	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	if (unwritten)
		return -EFAULT;
	return 0;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline int
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char *src_vaddr, *dst_vaddr;
	unsigned long unwritten;

	dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
	src_vaddr = kmap_atomic(user_page, KM_USER1);
	unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
						      src_vaddr + user_offset,
						      length);
	kunmap_atomic(src_vaddr, KM_USER1);
	io_mapping_unmap_atomic(dst_vaddr);
	if (unwritten)
		return -EFAULT;
	return 0;
}

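/* Fast pwrite path into the object's shmem pages via an atomic kmap; returns
 * -EFAULT if the user source buffer would fault so the caller can fall back
 * to the slow path.
 */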
static inline int
fast_shmem_write(struct page **pages,
		 loff_t page_base, int page_offset,
		 char __user *data,
		 int length)
{
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;
	return 0;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;


	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto fail;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				      page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto fail;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret)
		goto out_unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto out_unpin_object;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
					gtt_page_base, gtt_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto out_unpin_object;

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_object:
	i915_gem_object_unpin(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page-aligned offset within the object
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_write(obj_priv->pages,
				       page_base, page_offset,
				       user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data. We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
						    shmem_page_offset,
						    user_pages[data_page_index],
						    data_page_offset,
						    page_length,
						    0);
		} else {
			ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length);
		}
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
		 dev->gtt_total != 0) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
						       file_priv);
		}
	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
							 file_priv);
		}
	}

#if WATCH_PWRITE
	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);
#endif

	drm_gem_object_unreference(obj);

	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);
#if WATCH_BUF
	DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
		 obj, obj->size, read_domains, write_domain);
#endif
	if (read_domains & I915_GEM_DOMAIN_GTT) {
		struct drm_i915_gem_object *obj_priv = obj->driver_private;

		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Update the LRU on the fence for the CPU access that's
		 * about to occur.
		 */
		if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
			list_move_tail(&obj_priv->fence_list,
				       &dev_priv->mm.fence_list);
		}

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

#if WATCH_BUF
	DRM_INFO("%s: sw_finish %d (%p %zd)\n",
		 __func__, args->handle, obj, obj->size);
#endif
	obj_priv = obj->driver_private;

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);
	if (!obj_priv->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}

		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
	}

	/* Need a new fence register? */
	if (obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}
	}

	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);

	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
	case -EFAULT:
	case -EINVAL:
		return VM_FAULT_SIGBUS;
	default:
		return VM_FAULT_NOPAGE;
	}
}

/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	/* By now we should be all set, any drm_mmap request on the offset
	 * below will get to our mmap & fault handler */
	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);

	return ret;
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (dev->dev_mapping)
		unmap_mapping_range(dev->dev_mapping,
				    obj_priv->mmap_offset, obj->size, 1);
}

static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);

	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	if (list->map) {
		kfree(list->map);
		list->map = NULL;
	}

	obj_priv->mmap_offset = 0;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping if needed.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int start, i;

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	if (IS_I9XX(dev))
		start = 1024*1024;
	else
		start = 512*1024;

	for (i = start; i < obj->size; i <<= 1)
		;

	return i;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file_priv: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap_gtt *args = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);

	obj_priv = obj->driver_private;

	if (!obj_priv->mmap_offset) {
		ret = i915_gem_create_mmap_offset(obj);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	args->offset = obj_priv->mmap_offset;

	obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);

	/* Make sure the alignment is correct for fence regs etc */
	if (obj_priv->agp_mem &&
	    (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/*
	 * Pull it into the GTT so that we have a page list (makes the
	 * initial fault faster and any subsequent flushing possible).
	 */
	if (!obj_priv->agp_mem) {
		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

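/* Drop a reference on the object's backing pages; on the last reference,
 * write back dirty and swizzled data and release the pages to the page cache.
 */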
void
i915_gem_object_put_pages(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count = obj->size / PAGE_SIZE;
	int i;

	BUG_ON(obj_priv->pages_refcount == 0);

	if (--obj_priv->pages_refcount != 0)
		return;

	if (obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_save_bit_17_swizzle(obj);

	for (i = 0; i < page_count; i++)
		if (obj_priv->pages[i] != NULL) {
			if (obj_priv->dirty)
				set_page_dirty(obj_priv->pages[i]);
			mark_page_accessed(obj_priv->pages[i]);
			page_cache_release(obj_priv->pages[i]);
		}
	obj_priv->dirty = 0;

	drm_free_large(obj_priv->pages);
	obj_priv->pages = NULL;
}

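/* Move the object onto the active list and record the seqno of the request
 * that last rendered to it, taking a reference the first time it goes active.
 */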
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}
	/* Move from whatever list we were on to the tail of execution. */
	spin_lock(&dev_priv->mm.active_list_lock);
	list_move_tail(&obj_priv->list,
		       &dev_priv->mm.active_list);
	spin_unlock(&dev_priv->mm.active_list_lock);
	obj_priv->last_rendering_seqno = seqno;
}

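/* The object still has a pending GPU write; park it on the flushing list
 * until that flush is retired.
 */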
static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	BUG_ON(!obj_priv->active);
	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
	obj_priv->last_rendering_seqno = 0;
}

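/* Retire the object to the inactive list (or off all lists if it is pinned)
 * and drop the reference taken when it became active.
 */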
static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->pin_count != 0)
		list_del_init(&obj_priv->list);
	else
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	obj_priv->last_rendering_seqno = 0;
	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_mutex held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static uint32_t
i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
		 uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *i915_file_priv = NULL;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
	int was_empty;
	RING_LOCALS;

	if (file_priv != NULL)
		i915_file_priv = file_priv->driver_priv;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL)
		return 0;

	/* Grab the seqno we're going to make this request be, and bump the
	 * next (skipping 0 so it can be the reserved no-seqno value).
	 */
	seqno = dev_priv->mm.next_gem_seqno;
	dev_priv->mm.next_gem_seqno++;
	if (dev_priv->mm.next_gem_seqno == 0)
		dev_priv->mm.next_gem_seqno++;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(seqno);

	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	DRM_DEBUG("%d\n", seqno);

	request->seqno = seqno;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&dev_priv->mm.request_list);
	list_add_tail(&request->list, &dev_priv->mm.request_list);
	if (i915_file_priv) {
		list_add_tail(&request->client_list,
			      &i915_file_priv->mm.request_list);
	} else {
		INIT_LIST_HEAD(&request->client_list);
	}

	/* Associate any objects on the flushing list matching the write
	 * domain we're flushing with our flush.
	 */
	if (flush_domains != 0) {
		struct drm_i915_gem_object *obj_priv, *next;

		list_for_each_entry_safe(obj_priv, next,
					 &dev_priv->mm.flushing_list, list) {
			struct drm_gem_object *obj = obj_priv->obj;

			if ((obj->write_domain & flush_domains) ==
			    obj->write_domain) {
				obj->write_domain = 0;
				i915_gem_object_move_to_active(obj, seqno);
			}
		}

	}

	if (was_empty && !dev_priv->mm.suspended)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	return seqno;
}

/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	uint32_t flush_domains = 0;
	RING_LOCALS;

	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
	BEGIN_LP_RING(2);
	OUT_RING(cmd);
	OUT_RING(0); /* noop */
	ADVANCE_LP_RING();
	return flush_domains;
}

/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
			struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	spin_lock(&dev_priv->mm.active_list_lock);
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		/* If the seqno being retired doesn't match the oldest in the
		 * list, then the oldest in the list must still be newer than
		 * this seqno.
		 */
		if (obj_priv->last_rendering_seqno != request->seqno)
			goto out;

#if WATCH_LRU
		DRM_INFO("%s: retire %d moves to inactive list %p\n",
			 __func__, request->seqno, obj);
#endif

		if (obj->write_domain != 0)
			i915_gem_object_move_to_flushing(obj);
		else {
			/* Take a reference on the object so it won't be
			 * freed while the spinlock is held. The list
			 * protection for this spinlock is safe when breaking
			 * the lock like this since the next thing we do
			 * is just get the head of the list again.
			 */
			drm_gem_object_reference(obj);
			i915_gem_object_move_to_inactive(obj);
			spin_unlock(&dev_priv->mm.active_list_lock);
			drm_gem_object_unreference(obj);
			spin_lock(&dev_priv->mm.active_list_lock);
		}
	}
out:
	spin_unlock(&dev_priv->mm.active_list_lock);
}

/**
 * Returns true if seq1 is later than seq2.
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

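/* Read the most recently completed GEM seqno from the hardware status page. */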
uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	if (!dev_priv->hw_status_page)
		return;

	seqno = i915_get_gem_seqno(dev);

	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;
		uint32_t retiring_seqno;

		request = list_first_entry(&dev_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   list);
		retiring_seqno = request->seqno;

		if (i915_seqno_passed(seqno, retiring_seqno) ||
		    dev_priv->mm.wedged) {
			i915_gem_retire_request(dev, request);

			list_del(&request->list);
			list_del(&request->client_list);
			kfree(request);
		} else
			break;
	}
}

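/* Delayed-work handler that retires completed requests roughly once a second
 * and re-arms itself while requests remain outstanding.
 */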
void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);
	if (!dev_priv->mm.suspended &&
	    !list_empty(&dev_priv->mm.request_list))
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 ier;
	int ret = 0;

	BUG_ON(seqno == 0);

	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
		if (IS_IGDNG(dev))
			ier = I915_READ(DEIER) | I915_READ(GTIER);
		else
			ier = I915_READ(IER);
		if (!ier) {
			DRM_ERROR("something (likely vbetool) disabled "
				  "interrupts, re-enabling\n");
			i915_driver_irq_preinstall(dev);
			i915_driver_irq_postinstall(dev);
		}

		dev_priv->mm.waiting_gem_seqno = seqno;
		i915_user_irq_get(dev);
		ret = wait_event_interruptible(dev_priv->irq_queue,
					       i915_seqno_passed(i915_get_gem_seqno(dev),
								 seqno) ||
					       dev_priv->mm.wedged);
		i915_user_irq_put(dev);
		dev_priv->mm.waiting_gem_seqno = 0;
	}
	if (dev_priv->mm.wedged)
		ret = -EIO;

	if (ret && ret != -ERESTARTSYS)
		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
			  __func__, ret, seqno, i915_get_gem_seqno(dev));

	/* Directly dispatch request retiring. While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests(dev);

	return ret;
}

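/* Emit the MI_FLUSH (and chipset flush, for the CPU domain) needed to
 * invalidate and/or flush the requested GPU and CPU domains.
 */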
1788static void
1789i915_gem_flush(struct drm_device *dev,
1790 uint32_t invalidate_domains,
1791 uint32_t flush_domains)
1792{
1793 drm_i915_private_t *dev_priv = dev->dev_private;
1794 uint32_t cmd;
1795 RING_LOCALS;
1796
1797#if WATCH_EXEC
1798 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
1799 invalidate_domains, flush_domains);
1800#endif
1801
1802 if (flush_domains & I915_GEM_DOMAIN_CPU)
1803 drm_agp_chipset_flush(dev);
1804
21d509e3 1805 if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
673a394b
EA
1806 /*
1807 * read/write caches:
1808 *
1809 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
1810 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
1811 * also flushed at 2d versus 3d pipeline switches.
1812 *
1813 * read-only caches:
1814 *
1815 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
1816 * MI_READ_FLUSH is set, and is always flushed on 965.
1817 *
1818 * I915_GEM_DOMAIN_COMMAND may not exist?
1819 *
1820 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
1821 * invalidated when MI_EXE_FLUSH is set.
1822 *
1823 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
1824 * invalidated with every MI_FLUSH.
1825 *
1826 * TLBs:
1827 *
1828 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
1829 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
1830 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
1831 * are flushed at any MI_FLUSH.
1832 */
1833
1834 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1835 if ((invalidate_domains|flush_domains) &
1836 I915_GEM_DOMAIN_RENDER)
1837 cmd &= ~MI_NO_WRITE_FLUSH;
1838 if (!IS_I965G(dev)) {
1839 /*
1840 * On the 965, the sampler cache always gets flushed
1841 * and this bit is reserved.
1842 */
1843 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
1844 cmd |= MI_READ_FLUSH;
1845 }
1846 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
1847 cmd |= MI_EXE_FLUSH;
1848
1849#if WATCH_EXEC
1850 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
1851#endif
1852 BEGIN_LP_RING(2);
1853 OUT_RING(cmd);
1854 OUT_RING(0); /* noop */
1855 ADVANCE_LP_RING();
1856 }
1857}
1858
1859/**
1860 * Ensures that all rendering to the object has completed and the object is
1861 * safe to unbind from the GTT or access from the CPU.
1862 */
1863static int
1864i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1865{
1866 struct drm_device *dev = obj->dev;
1867 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1868 int ret;
1869
e47c68e9
EA
1870 /* This function only exists to support waiting for existing rendering,
1871 * not for emitting required flushes.
673a394b 1872 */
e47c68e9 1873 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
673a394b
EA
1874
1875 /* If there is rendering queued on the buffer being evicted, wait for
1876 * it.
1877 */
1878 if (obj_priv->active) {
1879#if WATCH_BUF
1880 DRM_INFO("%s: object %p wait for seqno %08x\n",
1881 __func__, obj, obj_priv->last_rendering_seqno);
1882#endif
1883 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
1884 if (ret != 0)
1885 return ret;
1886 }
1887
1888 return 0;
1889}
1890
1891/**
1892 * Unbinds an object from the GTT aperture.
1893 */
0f973f27 1894int
673a394b
EA
1895i915_gem_object_unbind(struct drm_gem_object *obj)
1896{
1897 struct drm_device *dev = obj->dev;
1898 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1899 int ret = 0;
1900
1901#if WATCH_BUF
1902 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
1903 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
1904#endif
1905 if (obj_priv->gtt_space == NULL)
1906 return 0;
1907
1908 if (obj_priv->pin_count != 0) {
1909 DRM_ERROR("Attempting to unbind pinned buffer\n");
1910 return -EINVAL;
1911 }
1912
673a394b
EA
1913 /* Move the object to the CPU domain to ensure that
1914 * any possible CPU writes while it's not in the GTT
1915 * are flushed when we go to remap it. This will
1916 * also ensure that all pending GPU writes are finished
1917 * before we unbind.
1918 */
e47c68e9 1919 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
673a394b 1920 if (ret) {
e47c68e9
EA
1921 if (ret != -ERESTARTSYS)
1922 DRM_ERROR("set_domain failed: %d\n", ret);
673a394b
EA
1923 return ret;
1924 }
1925
1926 if (obj_priv->agp_mem != NULL) {
1927 drm_unbind_agp(obj_priv->agp_mem);
1928 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
1929 obj_priv->agp_mem = NULL;
1930 }
1931
1932 BUG_ON(obj_priv->active);
1933
de151cf6 1934 /* blow away mappings if mapped through GTT */
901782b2 1935 i915_gem_release_mmap(obj);
de151cf6
JB
1936
1937 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1938 i915_gem_clear_fence_reg(obj);
1939
856fa198 1940 i915_gem_object_put_pages(obj);
673a394b
EA
1941
1942 if (obj_priv->gtt_space) {
1943 atomic_dec(&dev->gtt_count);
1944 atomic_sub(obj->size, &dev->gtt_memory);
1945
1946 drm_mm_put_block(obj_priv->gtt_space);
1947 obj_priv->gtt_space = NULL;
1948 }
1949
1950 /* Remove ourselves from the LRU list if present. */
1951 if (!list_empty(&obj_priv->list))
1952 list_del_init(&obj_priv->list);
1953
1954 return 0;
1955}
1956
1957static int
1958i915_gem_evict_something(struct drm_device *dev)
1959{
1960 drm_i915_private_t *dev_priv = dev->dev_private;
1961 struct drm_gem_object *obj;
1962 struct drm_i915_gem_object *obj_priv;
1963 int ret = 0;
1964
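	/* Eviction strategy, in order of preference: grab an inactive buffer
	 * that can be unbound immediately; otherwise wait on the oldest
	 * outstanding request and retry; otherwise flush the flushing list,
	 * wait on that flush, and retry.  If none of those apply, the GTT is
	 * genuinely full.
	 */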
1965 for (;;) {
1966 /* If there's an inactive buffer available now, grab it
1967 * and be done.
1968 */
1969 if (!list_empty(&dev_priv->mm.inactive_list)) {
1970 obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
1971 struct drm_i915_gem_object,
1972 list);
1973 obj = obj_priv->obj;
1974 BUG_ON(obj_priv->pin_count != 0);
1975#if WATCH_LRU
1976 DRM_INFO("%s: evicting %p\n", __func__, obj);
1977#endif
1978 BUG_ON(obj_priv->active);
1979
1980 /* Wait on the rendering and unbind the buffer. */
1981 ret = i915_gem_object_unbind(obj);
1982 break;
1983 }
1984
1985 /* If we didn't get anything, but the ring is still processing
1986 * things, wait for one of those things to finish and hopefully
1987 * leave us a buffer to evict.
1988 */
1989 if (!list_empty(&dev_priv->mm.request_list)) {
1990 struct drm_i915_gem_request *request;
1991
1992 request = list_first_entry(&dev_priv->mm.request_list,
1993 struct drm_i915_gem_request,
1994 list);
1995
1996 ret = i915_wait_request(dev, request->seqno);
1997 if (ret)
1998 break;
1999
2000 /* if waiting caused an object to become inactive,
2001 * then loop around and wait for it. Otherwise, we
2002 * assume that waiting freed and unbound something,
2003 * so there should now be some space in the GTT
2004 */
2005 if (!list_empty(&dev_priv->mm.inactive_list))
2006 continue;
2007 break;
2008 }
2009
2010 /* If we didn't have anything on the request list but there
2011 * are buffers awaiting a flush, emit one and try again.
2012 * When we wait on it, those buffers waiting for that flush
2013 * will get moved to inactive.
2014 */
2015 if (!list_empty(&dev_priv->mm.flushing_list)) {
2016 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
2017 struct drm_i915_gem_object,
2018 list);
2019 obj = obj_priv->obj;
2020
2021 i915_gem_flush(dev,
2022 obj->write_domain,
2023 obj->write_domain);
b962442e 2024 i915_add_request(dev, NULL, obj->write_domain);
673a394b
EA
2025
2026 obj = NULL;
2027 continue;
2028 }
2029
2030 DRM_ERROR("inactive empty %d request empty %d "
2031 "flushing empty %d\n",
2032 list_empty(&dev_priv->mm.inactive_list),
2033 list_empty(&dev_priv->mm.request_list),
2034 list_empty(&dev_priv->mm.flushing_list));
2035 /* If we didn't do any of the above, there's nothing to be done
2036 * and we just can't fit it in.
2037 */
2939e1f5 2038 return -ENOSPC;
673a394b
EA
2039 }
2040 return ret;
2041}
2042
ac94a962
KP
2043static int
2044i915_gem_evict_everything(struct drm_device *dev)
2045{
2046 int ret;
2047
2048 for (;;) {
2049 ret = i915_gem_evict_something(dev);
2050 if (ret != 0)
2051 break;
2052 }
2939e1f5 2053 if (ret == -ENOSPC)
15c35334 2054 return 0;
ac94a962
KP
2055 return ret;
2056}
2057
6911a9b8 2058int
856fa198 2059i915_gem_object_get_pages(struct drm_gem_object *obj)
673a394b
EA
2060{
2061 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2062 int page_count, i;
2063 struct address_space *mapping;
2064 struct inode *inode;
2065 struct page *page;
2066 int ret;
2067
856fa198 2068 if (obj_priv->pages_refcount++ != 0)
673a394b
EA
2069 return 0;
2070
2071 /* Get the list of pages out of our struct file. They'll be pinned
2072 * at this point until we release them.
2073 */
2074 page_count = obj->size / PAGE_SIZE;
856fa198 2075 BUG_ON(obj_priv->pages != NULL);
8e7d2b2c 2076 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
856fa198 2077 if (obj_priv->pages == NULL) {
673a394b 2078 DRM_ERROR("Faled to allocate page list\n");
856fa198 2079 obj_priv->pages_refcount--;
673a394b
EA
2080 return -ENOMEM;
2081 }
2082
2083 inode = obj->filp->f_path.dentry->d_inode;
2084 mapping = inode->i_mapping;
2085 for (i = 0; i < page_count; i++) {
2086 page = read_mapping_page(mapping, i, NULL);
2087 if (IS_ERR(page)) {
2088 ret = PTR_ERR(page);
2089 DRM_ERROR("read_mapping_page failed: %d\n", ret);
856fa198 2090 i915_gem_object_put_pages(obj);
673a394b
EA
2091 return ret;
2092 }
856fa198 2093 obj_priv->pages[i] = page;
673a394b 2094 }
280b713b
EA
2095
2096 if (obj_priv->tiling_mode != I915_TILING_NONE)
2097 i915_gem_object_do_bit_17_swizzle(obj);
2098
673a394b
EA
2099 return 0;
2100}
2101
de151cf6
JB
2102static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2103{
2104 struct drm_gem_object *obj = reg->obj;
2105 struct drm_device *dev = obj->dev;
2106 drm_i915_private_t *dev_priv = dev->dev_private;
2107 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2108 int regnum = obj_priv->fence_reg;
2109 uint64_t val;
2110
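	/* The 965 fence is a 64-bit register: the upper dword holds the last
	 * 4K page of the fenced range, the lower dword holds the start
	 * address, the pitch in 128-byte units, the tiling mode and the
	 * valid bit.
	 */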
2111 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2112 0xfffff000) << 32;
2113 val |= obj_priv->gtt_offset & 0xfffff000;
2114 val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2115 if (obj_priv->tiling_mode == I915_TILING_Y)
2116 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2117 val |= I965_FENCE_REG_VALID;
2118
2119 I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2120}
2121
2122static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2123{
2124 struct drm_gem_object *obj = reg->obj;
2125 struct drm_device *dev = obj->dev;
2126 drm_i915_private_t *dev_priv = dev->dev_private;
2127 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2128 int regnum = obj_priv->fence_reg;
0f973f27 2129 int tile_width;
dc529a4f 2130 uint32_t fence_reg, val;
de151cf6
JB
2131 uint32_t pitch_val;
2132
2133 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2134 (obj_priv->gtt_offset & (obj->size - 1))) {
f06da264 2135 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
0f973f27 2136 __func__, obj_priv->gtt_offset, obj->size);
de151cf6
JB
2137 return;
2138 }
2139
0f973f27
JB
2140 if (obj_priv->tiling_mode == I915_TILING_Y &&
2141 HAS_128_BYTE_Y_TILING(dev))
2142 tile_width = 128;
de151cf6 2143 else
0f973f27
JB
2144 tile_width = 512;
2145
2146 /* Note: the pitch must be a power-of-two number of tile widths */
2147 pitch_val = obj_priv->stride / tile_width;
2148 pitch_val = ffs(pitch_val) - 1;
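	/* Illustrative example: an X-tiled surface with a 2048-byte stride
	 * uses 512-byte-wide tiles, so stride / tile_width = 4 and
	 * pitch_val = ffs(4) - 1 = 2; the register therefore stores log2 of
	 * the pitch measured in tile widths.
	 */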
de151cf6
JB
2149
2150 val = obj_priv->gtt_offset;
2151 if (obj_priv->tiling_mode == I915_TILING_Y)
2152 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2153 val |= I915_FENCE_SIZE_BITS(obj->size);
2154 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2155 val |= I830_FENCE_REG_VALID;
2156
dc529a4f
EA
2157 if (regnum < 8)
2158 fence_reg = FENCE_REG_830_0 + (regnum * 4);
2159 else
2160 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
2161 I915_WRITE(fence_reg, val);
de151cf6
JB
2162}
2163
2164static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2165{
2166 struct drm_gem_object *obj = reg->obj;
2167 struct drm_device *dev = obj->dev;
2168 drm_i915_private_t *dev_priv = dev->dev_private;
2169 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2170 int regnum = obj_priv->fence_reg;
2171 uint32_t val;
2172 uint32_t pitch_val;
8d7773a3 2173 uint32_t fence_size_bits;
de151cf6 2174
8d7773a3 2175 if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
de151cf6 2176 (obj_priv->gtt_offset & (obj->size - 1))) {
8d7773a3 2177 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
0f973f27 2178 __func__, obj_priv->gtt_offset);
de151cf6
JB
2179 return;
2180 }
2181
e76a16de
EA
2182 pitch_val = obj_priv->stride / 128;
2183 pitch_val = ffs(pitch_val) - 1;
2184 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2185
de151cf6
JB
2186 val = obj_priv->gtt_offset;
2187 if (obj_priv->tiling_mode == I915_TILING_Y)
2188 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
8d7773a3
DV
2189 fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
2190 WARN_ON(fence_size_bits & ~0x00000f00);
2191 val |= fence_size_bits;
de151cf6
JB
2192 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2193 val |= I830_FENCE_REG_VALID;
2194
2195 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
de151cf6
JB
2196}
2197
2198/**
2199 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2200 * @obj: object to map through a fence reg
2201 *
2202 * When mapping objects through the GTT, userspace wants to be able to write
2203 * to them without having to worry about swizzling if the object is tiled.
2204 *
2205 * This function walks the fence regs looking for a free one for @obj,
2206 * stealing one if it can't find any.
2207 *
2208 * It then sets up the reg based on the object's properties: address, pitch
2209 * and tiling format.
2210 */
8c4b8c3f
CW
2211int
2212i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
de151cf6
JB
2213{
2214 struct drm_device *dev = obj->dev;
79e53945 2215 struct drm_i915_private *dev_priv = dev->dev_private;
de151cf6
JB
2216 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2217 struct drm_i915_fence_reg *reg = NULL;
fc7170ba
CW
2218 struct drm_i915_gem_object *old_obj_priv = NULL;
2219 int i, ret, avail;
de151cf6 2220
a09ba7fa
EA
2221 /* Just update our place in the LRU if our fence is getting used. */
2222 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
2223 list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
2224 return 0;
2225 }
2226
de151cf6
JB
2227 switch (obj_priv->tiling_mode) {
2228 case I915_TILING_NONE:
2229 WARN(1, "allocating a fence for non-tiled object?\n");
2230 break;
2231 case I915_TILING_X:
0f973f27
JB
2232 if (!obj_priv->stride)
2233 return -EINVAL;
2234 WARN((obj_priv->stride & (512 - 1)),
2235 "object 0x%08x is X tiled but has non-512B pitch\n",
2236 obj_priv->gtt_offset);
de151cf6
JB
2237 break;
2238 case I915_TILING_Y:
0f973f27
JB
2239 if (!obj_priv->stride)
2240 return -EINVAL;
2241 WARN((obj_priv->stride & (128 - 1)),
2242 "object 0x%08x is Y tiled but has non-128B pitch\n",
2243 obj_priv->gtt_offset);
de151cf6
JB
2244 break;
2245 }
2246
2247 /* First try to find a free reg */
fc7170ba 2248 avail = 0;
de151cf6
JB
2249 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2250 reg = &dev_priv->fence_regs[i];
2251 if (!reg->obj)
2252 break;
fc7170ba
CW
2253
2254 old_obj_priv = reg->obj->driver_private;
2255 if (!old_obj_priv->pin_count)
2256 avail++;
de151cf6
JB
2257 }
2258
2259 /* None available, try to steal one or wait for a user to finish */
2260 if (i == dev_priv->num_fence_regs) {
a09ba7fa 2261 struct drm_gem_object *old_obj = NULL;
de151cf6 2262
fc7170ba 2263 if (avail == 0)
2939e1f5 2264 return -ENOSPC;
fc7170ba 2265
a09ba7fa
EA
2266 list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
2267 fence_list) {
2268 old_obj = old_obj_priv->obj;
d7619c4b 2269
a09ba7fa 2270 reg = &dev_priv->fence_regs[old_obj_priv->fence_reg];
d7619c4b
CW
2271
2272 if (old_obj_priv->pin_count)
2273 continue;
2274
a09ba7fa
EA
2275 /* Take a reference, as otherwise the wait_rendering
2276 * below may cause the object to get freed out from
2277 * under us.
2278 */
2279 drm_gem_object_reference(old_obj);
2280
d7619c4b
CW
2281 /* i915 uses fences for GPU access to tiled buffers */
2282 if (IS_I965G(dev) || !old_obj_priv->active)
de151cf6 2283 break;
d7619c4b 2284
a09ba7fa
EA
2285 /* This brings the object to the head of the LRU if it
2286 * had been written to. The only way this should
2287 * result in us waiting longer than the expected
2288 * optimal amount of time is if there was a
2289 * fence-using buffer later that was read-only.
2290 */
2291 i915_gem_object_flush_gpu_write_domain(old_obj);
2292 ret = i915_gem_object_wait_rendering(old_obj);
2293 if (ret != 0)
d7619c4b 2294 return ret;
a09ba7fa 2295 break;
de151cf6
JB
2296 }
2297
2298 /*
2299 * Zap this virtual mapping so we can set up a fence again
2300 * for this object next time we need it.
2301 */
901782b2 2302 i915_gem_release_mmap(reg->obj);
a09ba7fa 2303 i = old_obj_priv->fence_reg;
de151cf6 2304 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
a09ba7fa
EA
2305 list_del_init(&old_obj_priv->fence_list);
2306 drm_gem_object_unreference(old_obj);
de151cf6
JB
2307 }
2308
2309 obj_priv->fence_reg = i;
a09ba7fa
EA
2310 list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
2311
de151cf6
JB
2312 reg->obj = obj;
2313
2314 if (IS_I965G(dev))
2315 i965_write_fence_reg(reg);
2316 else if (IS_I9XX(dev))
2317 i915_write_fence_reg(reg);
2318 else
2319 i830_write_fence_reg(reg);
d9ddcb96
EA
2320
2321 return 0;
de151cf6
JB
2322}
2323
2324/**
2325 * i915_gem_clear_fence_reg - clear out fence register info
2326 * @obj: object to clear
2327 *
2328 * Zeroes out the fence register itself and clears out the associated
2329 * data structures in dev_priv and obj_priv.
2330 */
2331static void
2332i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2333{
2334 struct drm_device *dev = obj->dev;
79e53945 2335 drm_i915_private_t *dev_priv = dev->dev_private;
de151cf6
JB
2336 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2337
2338 if (IS_I965G(dev))
2339 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
dc529a4f
EA
2340 else {
2341 uint32_t fence_reg;
2342
2343 if (obj_priv->fence_reg < 8)
2344 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2345 else
2346 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
2347 8) * 4;
2348
2349 I915_WRITE(fence_reg, 0);
2350 }
de151cf6
JB
2351
2352 dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
2353 obj_priv->fence_reg = I915_FENCE_REG_NONE;
a09ba7fa 2354 list_del_init(&obj_priv->fence_list);
de151cf6
JB
2355}
2356
52dc7d32
CW
2357/**
2358 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2359 * to the buffer to finish, and then resets the fence register.
2360 * @obj: tiled object holding a fence register.
2361 *
2362 * Zeroes out the fence register itself and clears out the associated
2363 * data structures in dev_priv and obj_priv.
2364 */
2365int
2366i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2367{
2368 struct drm_device *dev = obj->dev;
2369 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2370
2371 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2372 return 0;
2373
2374 /* On the i915, GPU access to tiled buffers is via a fence,
2375 * therefore we must wait for any outstanding access to complete
2376 * before clearing the fence.
2377 */
2378 if (!IS_I965G(dev)) {
2379 int ret;
2380
2381 i915_gem_object_flush_gpu_write_domain(obj);
2382 i915_gem_object_flush_gtt_write_domain(obj);
2383 ret = i915_gem_object_wait_rendering(obj);
2384 if (ret != 0)
2385 return ret;
2386 }
2387
2388 i915_gem_clear_fence_reg(obj);
2389
2390 return 0;
2391}
2392
673a394b
EA
2393/**
2394 * Finds free space in the GTT aperture and binds the object there.
2395 */
2396static int
2397i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2398{
2399 struct drm_device *dev = obj->dev;
2400 drm_i915_private_t *dev_priv = dev->dev_private;
2401 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2402 struct drm_mm_node *free_space;
2403 int page_count, ret;
2404
9bb2d6f9
EA
2405 if (dev_priv->mm.suspended)
2406 return -EBUSY;
673a394b 2407 if (alignment == 0)
0f973f27 2408 alignment = i915_gem_get_gtt_alignment(obj);
8d7773a3 2409 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
673a394b
EA
2410 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2411 return -EINVAL;
2412 }
2413
2414 search_free:
2415 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2416 obj->size, alignment, 0);
2417 if (free_space != NULL) {
2418 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2419 alignment);
2420 if (obj_priv->gtt_space != NULL) {
2421 obj_priv->gtt_space->private = obj;
2422 obj_priv->gtt_offset = obj_priv->gtt_space->start;
2423 }
2424 }
2425 if (obj_priv->gtt_space == NULL) {
5e118f41
CW
2426 bool lists_empty;
2427
673a394b
EA
2428 /* If the gtt is empty and we're still having trouble
2429 * fitting our object in, we're out of memory.
2430 */
2431#if WATCH_LRU
2432 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2433#endif
5e118f41
CW
2434 spin_lock(&dev_priv->mm.active_list_lock);
2435 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2436 list_empty(&dev_priv->mm.flushing_list) &&
2437 list_empty(&dev_priv->mm.active_list));
2438 spin_unlock(&dev_priv->mm.active_list_lock);
2439 if (lists_empty) {
673a394b 2440 DRM_ERROR("GTT full, but LRU list empty\n");
2939e1f5 2441 return -ENOSPC;
673a394b
EA
2442 }
2443
2444 ret = i915_gem_evict_something(dev);
2445 if (ret != 0) {
ac94a962
KP
2446 if (ret != -ERESTARTSYS)
2447 DRM_ERROR("Failed to evict a buffer %d\n", ret);
673a394b
EA
2448 return ret;
2449 }
2450 goto search_free;
2451 }
2452
2453#if WATCH_BUF
cfd43c02 2454 DRM_INFO("Binding object of size %zd at 0x%08x\n",
673a394b
EA
2455 obj->size, obj_priv->gtt_offset);
2456#endif
856fa198 2457 ret = i915_gem_object_get_pages(obj);
673a394b
EA
2458 if (ret) {
2459 drm_mm_put_block(obj_priv->gtt_space);
2460 obj_priv->gtt_space = NULL;
2461 return ret;
2462 }
2463
2464 page_count = obj->size / PAGE_SIZE;
2465 /* Create an AGP memory structure pointing at our pages, and bind it
2466 * into the GTT.
2467 */
2468 obj_priv->agp_mem = drm_agp_bind_pages(dev,
856fa198 2469 obj_priv->pages,
673a394b 2470 page_count,
ba1eb1d8
KP
2471 obj_priv->gtt_offset,
2472 obj_priv->agp_type);
673a394b 2473 if (obj_priv->agp_mem == NULL) {
856fa198 2474 i915_gem_object_put_pages(obj);
673a394b
EA
2475 drm_mm_put_block(obj_priv->gtt_space);
2476 obj_priv->gtt_space = NULL;
2477 return -ENOMEM;
2478 }
2479 atomic_inc(&dev->gtt_count);
2480 atomic_add(obj->size, &dev->gtt_memory);
2481
2482 /* Assert that the object is not currently in any GPU domain. As it
2483 * wasn't in the GTT, there shouldn't be any way it could have been in
2484 * a GPU cache
2485 */
21d509e3
CW
2486 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2487 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
673a394b
EA
2488
2489 return 0;
2490}
2491
2492void
2493i915_gem_clflush_object(struct drm_gem_object *obj)
2494{
2495 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2496
2497 /* If we don't have a page list set up, then we're not pinned
2498 * to GPU, and we can ignore the cache flush because it'll happen
2499 * again at bind time.
2500 */
856fa198 2501 if (obj_priv->pages == NULL)
673a394b
EA
2502 return;
2503
cfa16a0d
EA
2504 /* XXX: The 865 in particular appears to be weird in how it handles
2505 * cache flushing. We haven't figured it out, but the
2506 * clflush+agp_chipset_flush doesn't appear to successfully get the
2507 * data visible to the GPU, while wbinvd + agp_chipset_flush does.
2508 */
2509 if (IS_I865G(obj->dev)) {
2510 wbinvd();
2511 return;
2512 }
2513
856fa198 2514 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
673a394b
EA
2515}
2516
e47c68e9
EA
2517/** Flushes any GPU write domain for the object if it's dirty. */
2518static void
2519i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2520{
2521 struct drm_device *dev = obj->dev;
2522 uint32_t seqno;
2523
2524 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2525 return;
2526
2527 /* Queue the GPU write cache flushing we need. */
2528 i915_gem_flush(dev, 0, obj->write_domain);
b962442e 2529 seqno = i915_add_request(dev, NULL, obj->write_domain);
e47c68e9
EA
2530 obj->write_domain = 0;
2531 i915_gem_object_move_to_active(obj, seqno);
2532}
2533
2534/** Flushes the GTT write domain for the object if it's dirty. */
2535static void
2536i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2537{
2538 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2539 return;
2540
2541 /* No actual flushing is required for the GTT write domain. Writes
2542 * to it immediately go to main memory as far as we know, so there's
2543 * no chipset flush. It also doesn't land in render cache.
2544 */
2545 obj->write_domain = 0;
2546}
2547
2548/** Flushes the CPU write domain for the object if it's dirty. */
2549static void
2550i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2551{
2552 struct drm_device *dev = obj->dev;
2553
2554 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2555 return;
2556
2557 i915_gem_clflush_object(obj);
2558 drm_agp_chipset_flush(dev);
2559 obj->write_domain = 0;
2560}
2561
2ef7eeaa
EA
2562/**
2563 * Moves a single object to the GTT read, and possibly write domain.
2564 *
2565 * This function returns when the move is complete, including waiting on
2566 * flushes to occur.
2567 */
79e53945 2568int
2ef7eeaa
EA
2569i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2570{
2ef7eeaa 2571 struct drm_i915_gem_object *obj_priv = obj->driver_private;
e47c68e9 2572 int ret;
2ef7eeaa 2573
02354392
EA
2574 /* Not valid to be called on unbound objects. */
2575 if (obj_priv->gtt_space == NULL)
2576 return -EINVAL;
2577
e47c68e9
EA
2578 i915_gem_object_flush_gpu_write_domain(obj);
2579 /* Wait on any GPU rendering and flushing to occur. */
2580 ret = i915_gem_object_wait_rendering(obj);
2581 if (ret != 0)
2582 return ret;
2583
2584 /* If we're writing through the GTT domain, then CPU and GPU caches
2585 * will need to be invalidated at next use.
2ef7eeaa 2586 */
e47c68e9
EA
2587 if (write)
2588 obj->read_domains &= I915_GEM_DOMAIN_GTT;
2ef7eeaa 2589
e47c68e9 2590 i915_gem_object_flush_cpu_write_domain(obj);
2ef7eeaa 2591
e47c68e9
EA
2592 /* It should now be out of any other write domains, and we can update
2593 * the domain values for our changes.
2594 */
2595 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2596 obj->read_domains |= I915_GEM_DOMAIN_GTT;
2597 if (write) {
2598 obj->write_domain = I915_GEM_DOMAIN_GTT;
2599 obj_priv->dirty = 1;
2ef7eeaa
EA
2600 }
2601
e47c68e9
EA
2602 return 0;
2603}
2604
2605/**
2606 * Moves a single object to the CPU read, and possibly write domain.
2607 *
2608 * This function returns when the move is complete, including waiting on
2609 * flushes to occur.
2610 */
2611static int
2612i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2613{
e47c68e9
EA
2614 int ret;
2615
2616 i915_gem_object_flush_gpu_write_domain(obj);
2ef7eeaa 2617 /* Wait on any GPU rendering and flushing to occur. */
e47c68e9
EA
2618 ret = i915_gem_object_wait_rendering(obj);
2619 if (ret != 0)
2620 return ret;
2ef7eeaa 2621
e47c68e9 2622 i915_gem_object_flush_gtt_write_domain(obj);
2ef7eeaa 2623
e47c68e9
EA
2624 /* If we have a partially-valid cache of the object in the CPU,
2625 * finish invalidating it and free the per-page flags.
2ef7eeaa 2626 */
e47c68e9 2627 i915_gem_object_set_to_full_cpu_read_domain(obj);
2ef7eeaa 2628
e47c68e9
EA
2629 /* Flush the CPU cache if it's still invalid. */
2630 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2ef7eeaa 2631 i915_gem_clflush_object(obj);
2ef7eeaa 2632
e47c68e9 2633 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2ef7eeaa
EA
2634 }
2635
2636 /* It should now be out of any other write domains, and we can update
2637 * the domain values for our changes.
2638 */
e47c68e9
EA
2639 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2640
2641 /* If we're writing through the CPU, then the GPU read domains will
2642 * need to be invalidated at next use.
2643 */
2644 if (write) {
2645 obj->read_domains &= I915_GEM_DOMAIN_CPU;
2646 obj->write_domain = I915_GEM_DOMAIN_CPU;
2647 }
2ef7eeaa
EA
2648
2649 return 0;
2650}
2651
673a394b
EA
2652/*
2653 * Set the next domain for the specified object. This
2654 * may not actually perform the necessary flushing/invalidating though,
2655 * as that may want to be batched with other set_domain operations
2656 *
2657 * This is (we hope) the only really tricky part of gem. The goal
2658 * is fairly simple -- track which caches hold bits of the object
2659 * and make sure they remain coherent. A few concrete examples may
2660 * help to explain how it works. For shorthand, we use the notation
2661 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2662 * a pair of read and write domain masks.
2663 *
2664 * Case 1: the batch buffer
2665 *
2666 * 1. Allocated
2667 * 2. Written by CPU
2668 * 3. Mapped to GTT
2669 * 4. Read by GPU
2670 * 5. Unmapped from GTT
2671 * 6. Freed
2672 *
2673 * Let's take these a step at a time
2674 *
2675 * 1. Allocated
2676 * Pages allocated from the kernel may still have
2677 * cache contents, so we set them to (CPU, CPU) always.
2678 * 2. Written by CPU (using pwrite)
2679 * The pwrite function calls set_domain (CPU, CPU) and
2680 * this function does nothing (as nothing changes)
2681 * 3. Mapped to GTT
2682 * This function asserts that the object is not
2683 * currently in any GPU-based read or write domains
2684 * 4. Read by GPU
2685 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
2686 * As write_domain is zero, this function adds in the
2687 * current read domains (CPU+COMMAND, 0).
2688 * flush_domains is set to CPU.
2689 * invalidate_domains is set to COMMAND
2690 * clflush is run to get data out of the CPU caches
2691 * then i915_dev_set_domain calls i915_gem_flush to
2692 * emit an MI_FLUSH and drm_agp_chipset_flush
2693 * 5. Unmapped from GTT
2694 * i915_gem_object_unbind calls set_domain (CPU, CPU)
2695 * flush_domains and invalidate_domains end up both zero
2696 * so no flushing/invalidating happens
2697 * 6. Freed
2698 * yay, done
2699 *
2700 * Case 2: The shared render buffer
2701 *
2702 * 1. Allocated
2703 * 2. Mapped to GTT
2704 * 3. Read/written by GPU
2705 * 4. set_domain to (CPU,CPU)
2706 * 5. Read/written by CPU
2707 * 6. Read/written by GPU
2708 *
2709 * 1. Allocated
2710 * Same as last example, (CPU, CPU)
2711 * 2. Mapped to GTT
2712 * Nothing changes (assertions find that it is not in the GPU)
2713 * 3. Read/written by GPU
2714 * execbuffer calls set_domain (RENDER, RENDER)
2715 * flush_domains gets CPU
2716 * invalidate_domains gets GPU
2717 * clflush (obj)
2718 * MI_FLUSH and drm_agp_chipset_flush
2719 * 4. set_domain (CPU, CPU)
2720 * flush_domains gets GPU
2721 * invalidate_domains gets CPU
2722 * wait_rendering (obj) to make sure all drawing is complete.
2723 * This will include an MI_FLUSH to get the data from GPU
2724 * to memory
2725 * clflush (obj) to invalidate the CPU cache
2726 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
2727 * 5. Read/written by CPU
2728 * cache lines are loaded and dirtied
2729 * 6. Read/written by GPU
2730 * Same as last GPU access
2731 *
2732 * Case 3: The constant buffer
2733 *
2734 * 1. Allocated
2735 * 2. Written by CPU
2736 * 3. Read by GPU
2737 * 4. Updated (written) by CPU again
2738 * 5. Read by GPU
2739 *
2740 * 1. Allocated
2741 * (CPU, CPU)
2742 * 2. Written by CPU
2743 * (CPU, CPU)
2744 * 3. Read by GPU
2745 * (CPU+RENDER, 0)
2746 * flush_domains = CPU
2747 * invalidate_domains = RENDER
2748 * clflush (obj)
2749 * MI_FLUSH
2750 * drm_agp_chipset_flush
2751 * 4. Updated (written) by CPU again
2752 * (CPU, CPU)
2753 * flush_domains = 0 (no previous write domain)
2754 * invalidate_domains = 0 (no new read domains)
2755 * 5. Read by GPU
2756 * (CPU+RENDER, 0)
2757 * flush_domains = CPU
2758 * invalidate_domains = RENDER
2759 * clflush (obj)
2760 * MI_FLUSH
2761 * drm_agp_chipset_flush
2762 */
c0d90829 2763static void
8b0e378a 2764i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
673a394b
EA
2765{
2766 struct drm_device *dev = obj->dev;
2767 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2768 uint32_t invalidate_domains = 0;
2769 uint32_t flush_domains = 0;
e47c68e9 2770
8b0e378a
EA
2771 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
2772 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
673a394b
EA
2773
2774#if WATCH_BUF
2775 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
2776 __func__, obj,
8b0e378a
EA
2777 obj->read_domains, obj->pending_read_domains,
2778 obj->write_domain, obj->pending_write_domain);
673a394b
EA
2779#endif
2780 /*
2781 * If the object isn't moving to a new write domain,
2782 * let the object stay in multiple read domains
2783 */
8b0e378a
EA
2784 if (obj->pending_write_domain == 0)
2785 obj->pending_read_domains |= obj->read_domains;
673a394b
EA
2786 else
2787 obj_priv->dirty = 1;
2788
2789 /*
2790 * Flush the current write domain if
2791 * the new read domains don't match. Invalidate
2792 * any read domains which differ from the old
2793 * write domain
2794 */
8b0e378a
EA
2795 if (obj->write_domain &&
2796 obj->write_domain != obj->pending_read_domains) {
673a394b 2797 flush_domains |= obj->write_domain;
8b0e378a
EA
2798 invalidate_domains |=
2799 obj->pending_read_domains & ~obj->write_domain;
673a394b
EA
2800 }
2801 /*
2802 * Invalidate any read caches which may have
2803 * stale data. That is, any new read domains.
2804 */
8b0e378a 2805 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
673a394b
EA
2806 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
2807#if WATCH_BUF
2808 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
2809 __func__, flush_domains, invalidate_domains);
2810#endif
673a394b
EA
2811 i915_gem_clflush_object(obj);
2812 }
2813
efbeed96
EA
2814 /* The actual obj->write_domain will be updated with
2815 * pending_write_domain after we emit the accumulated flush for all
2816 * of our domain changes in execbuffers (which clears objects'
2817 * write_domains). So if we have a current write domain that we
2818 * aren't changing, set pending_write_domain to that.
2819 */
2820 if (flush_domains == 0 && obj->pending_write_domain == 0)
2821 obj->pending_write_domain = obj->write_domain;
8b0e378a 2822 obj->read_domains = obj->pending_read_domains;
673a394b
EA
2823
2824 dev->invalidate_domains |= invalidate_domains;
2825 dev->flush_domains |= flush_domains;
2826#if WATCH_BUF
2827 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
2828 __func__,
2829 obj->read_domains, obj->write_domain,
2830 dev->invalidate_domains, dev->flush_domains);
2831#endif
673a394b
EA
2832}
2833
2834/**
e47c68e9 2835 * Moves the object from a partially-valid CPU read domain to a fully-valid one.
673a394b 2836 *
e47c68e9
EA
2837 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
2838 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
673a394b 2839 */
e47c68e9
EA
2840static void
2841i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
673a394b
EA
2842{
2843 struct drm_i915_gem_object *obj_priv = obj->driver_private;
673a394b 2844
e47c68e9
EA
2845 if (!obj_priv->page_cpu_valid)
2846 return;
2847
2848 /* If we're partially in the CPU read domain, finish moving it in.
2849 */
2850 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
2851 int i;
2852
2853 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
2854 if (obj_priv->page_cpu_valid[i])
2855 continue;
856fa198 2856 drm_clflush_pages(obj_priv->pages + i, 1);
e47c68e9 2857 }
e47c68e9
EA
2858 }
2859
2860 /* Free the page_cpu_valid mappings which are now stale, whether
2861 * or not we've got I915_GEM_DOMAIN_CPU.
2862 */
9a298b2a 2863 kfree(obj_priv->page_cpu_valid);
e47c68e9
EA
2864 obj_priv->page_cpu_valid = NULL;
2865}
2866
2867/**
2868 * Set the CPU read domain on a range of the object.
2869 *
2870 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
2871 * not entirely valid. The page_cpu_valid member of the object flags which
2872 * pages have been flushed, and will be respected by
2873 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
2874 * of the whole object.
2875 *
2876 * This function returns when the move is complete, including waiting on
2877 * flushes to occur.
2878 */
2879static int
2880i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2881 uint64_t offset, uint64_t size)
2882{
2883 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2884 int i, ret;
673a394b 2885
e47c68e9
EA
2886 if (offset == 0 && size == obj->size)
2887 return i915_gem_object_set_to_cpu_domain(obj, 0);
673a394b 2888
e47c68e9
EA
2889 i915_gem_object_flush_gpu_write_domain(obj);
2890 /* Wait on any GPU rendering and flushing to occur. */
6a47baa6 2891 ret = i915_gem_object_wait_rendering(obj);
e47c68e9 2892 if (ret != 0)
6a47baa6 2893 return ret;
e47c68e9
EA
2894 i915_gem_object_flush_gtt_write_domain(obj);
2895
2896 /* If we're already fully in the CPU read domain, we're done. */
2897 if (obj_priv->page_cpu_valid == NULL &&
2898 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
2899 return 0;
673a394b 2900
e47c68e9
EA
2901 /* Otherwise, create/clear the per-page CPU read domain flag if we're
2902 * newly adding I915_GEM_DOMAIN_CPU
2903 */
673a394b 2904 if (obj_priv->page_cpu_valid == NULL) {
9a298b2a
EA
2905 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
2906 GFP_KERNEL);
e47c68e9
EA
2907 if (obj_priv->page_cpu_valid == NULL)
2908 return -ENOMEM;
2909 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
2910 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
673a394b
EA
2911
2912 /* Flush the cache on any pages that are still invalid from the CPU's
2913 * perspective.
2914 */
e47c68e9
EA
2915 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
2916 i++) {
673a394b
EA
2917 if (obj_priv->page_cpu_valid[i])
2918 continue;
2919
856fa198 2920 drm_clflush_pages(obj_priv->pages + i, 1);
673a394b
EA
2921
2922 obj_priv->page_cpu_valid[i] = 1;
2923 }
2924
e47c68e9
EA
2925 /* It should now be out of any other write domains, and we can update
2926 * the domain values for our changes.
2927 */
2928 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2929
2930 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2931
673a394b
EA
2932 return 0;
2933}
2934
673a394b
EA
2935/**
2936 * Pin an object to the GTT and evaluate the relocations landing in it.
2937 */
2938static int
2939i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2940 struct drm_file *file_priv,
40a5f0de
EA
2941 struct drm_i915_gem_exec_object *entry,
2942 struct drm_i915_gem_relocation_entry *relocs)
673a394b
EA
2943{
2944 struct drm_device *dev = obj->dev;
0839ccb8 2945 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b
EA
2946 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2947 int i, ret;
0839ccb8 2948 void __iomem *reloc_page;
673a394b
EA
2949
2950 /* Choose the GTT offset for our buffer and put it there. */
2951 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
2952 if (ret)
2953 return ret;
2954
2955 entry->offset = obj_priv->gtt_offset;
2956
673a394b
EA
2957 /* Apply the relocations, using the GTT aperture to avoid cache
2958 * flushing requirements.
2959 */
2960 for (i = 0; i < entry->relocation_count; i++) {
40a5f0de 2961 struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
673a394b
EA
2962 struct drm_gem_object *target_obj;
2963 struct drm_i915_gem_object *target_obj_priv;
3043c60c
EA
2964 uint32_t reloc_val, reloc_offset;
2965 uint32_t __iomem *reloc_entry;
673a394b 2966
673a394b 2967 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
40a5f0de 2968 reloc->target_handle);
673a394b
EA
2969 if (target_obj == NULL) {
2970 i915_gem_object_unpin(obj);
2971 return -EBADF;
2972 }
2973 target_obj_priv = target_obj->driver_private;
2974
2975 /* The target buffer should have appeared before us in the
2976 * exec_object list, so it should have a GTT space bound by now.
2977 */
2978 if (target_obj_priv->gtt_space == NULL) {
2979 DRM_ERROR("No GTT space found for object %d\n",
40a5f0de 2980 reloc->target_handle);
673a394b
EA
2981 drm_gem_object_unreference(target_obj);
2982 i915_gem_object_unpin(obj);
2983 return -EINVAL;
2984 }
2985
40a5f0de 2986 if (reloc->offset > obj->size - 4) {
673a394b
EA
2987 DRM_ERROR("Relocation beyond object bounds: "
2988 "obj %p target %d offset %d size %d.\n",
40a5f0de
EA
2989 obj, reloc->target_handle,
2990 (int) reloc->offset, (int) obj->size);
673a394b
EA
2991 drm_gem_object_unreference(target_obj);
2992 i915_gem_object_unpin(obj);
2993 return -EINVAL;
2994 }
40a5f0de 2995 if (reloc->offset & 3) {
673a394b
EA
2996 DRM_ERROR("Relocation not 4-byte aligned: "
2997 "obj %p target %d offset %d.\n",
40a5f0de
EA
2998 obj, reloc->target_handle,
2999 (int) reloc->offset);
673a394b
EA
3000 drm_gem_object_unreference(target_obj);
3001 i915_gem_object_unpin(obj);
3002 return -EINVAL;
3003 }
3004
40a5f0de
EA
3005 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3006 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
e47c68e9
EA
3007 DRM_ERROR("reloc with read/write CPU domains: "
3008 "obj %p target %d offset %d "
3009 "read %08x write %08x",
40a5f0de
EA
3010 obj, reloc->target_handle,
3011 (int) reloc->offset,
3012 reloc->read_domains,
3013 reloc->write_domain);
491152b8
CW
3014 drm_gem_object_unreference(target_obj);
3015 i915_gem_object_unpin(obj);
e47c68e9
EA
3016 return -EINVAL;
3017 }
3018
40a5f0de
EA
3019 if (reloc->write_domain && target_obj->pending_write_domain &&
3020 reloc->write_domain != target_obj->pending_write_domain) {
673a394b
EA
3021 DRM_ERROR("Write domain conflict: "
3022 "obj %p target %d offset %d "
3023 "new %08x old %08x\n",
40a5f0de
EA
3024 obj, reloc->target_handle,
3025 (int) reloc->offset,
3026 reloc->write_domain,
673a394b
EA
3027 target_obj->pending_write_domain);
3028 drm_gem_object_unreference(target_obj);
3029 i915_gem_object_unpin(obj);
3030 return -EINVAL;
3031 }
3032
3033#if WATCH_RELOC
3034 DRM_INFO("%s: obj %p offset %08x target %d "
3035 "read %08x write %08x gtt %08x "
3036 "presumed %08x delta %08x\n",
3037 __func__,
3038 obj,
40a5f0de
EA
3039 (int) reloc->offset,
3040 (int) reloc->target_handle,
3041 (int) reloc->read_domains,
3042 (int) reloc->write_domain,
673a394b 3043 (int) target_obj_priv->gtt_offset,
40a5f0de
EA
3044 (int) reloc->presumed_offset,
3045 reloc->delta);
673a394b
EA
3046#endif
3047
40a5f0de
EA
3048 target_obj->pending_read_domains |= reloc->read_domains;
3049 target_obj->pending_write_domain |= reloc->write_domain;
673a394b
EA
3050
3051 /* If the relocation already has the right value in it, no
3052 * more work needs to be done.
3053 */
40a5f0de 3054 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
673a394b
EA
3055 drm_gem_object_unreference(target_obj);
3056 continue;
3057 }
3058
2ef7eeaa
EA
3059 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
3060 if (ret != 0) {
3061 drm_gem_object_unreference(target_obj);
3062 i915_gem_object_unpin(obj);
3063 return -EINVAL;
673a394b
EA
3064 }
3065
3066 /* Map the page containing the relocation we're going to
3067 * perform.
3068 */
40a5f0de 3069 reloc_offset = obj_priv->gtt_offset + reloc->offset;
0839ccb8
KP
3070 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3071 (reloc_offset &
3072 ~(PAGE_SIZE - 1)));
3043c60c 3073 reloc_entry = (uint32_t __iomem *)(reloc_page +
0839ccb8 3074 (reloc_offset & (PAGE_SIZE - 1)));
40a5f0de 3075 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
673a394b
EA
3076
3077#if WATCH_BUF
3078 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
40a5f0de 3079 obj, (unsigned int) reloc->offset,
673a394b
EA
3080 readl(reloc_entry), reloc_val);
3081#endif
3082 writel(reloc_val, reloc_entry);
0839ccb8 3083 io_mapping_unmap_atomic(reloc_page);
673a394b 3084
40a5f0de
EA
3085 /* The updated presumed offset for this entry will be
3086 * copied back out to the user.
673a394b 3087 */
40a5f0de 3088 reloc->presumed_offset = target_obj_priv->gtt_offset;
673a394b
EA
3089
3090 drm_gem_object_unreference(target_obj);
3091 }
3092
673a394b
EA
3093#if WATCH_BUF
3094 if (0)
3095 i915_gem_dump_object(obj, 128, __func__, ~0);
3096#endif
3097 return 0;
3098}
3099
3100/** Dispatch a batchbuffer to the ring
3101 */
3102static int
3103i915_dispatch_gem_execbuffer(struct drm_device *dev,
3104 struct drm_i915_gem_execbuffer *exec,
201361a5 3105 struct drm_clip_rect *cliprects,
673a394b
EA
3106 uint64_t exec_offset)
3107{
3108 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b
EA
3109 int nbox = exec->num_cliprects;
3110 int i = 0, count;
83d60795 3111 uint32_t exec_start, exec_len;
673a394b
EA
3112 RING_LOCALS;
3113
3114 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3115 exec_len = (uint32_t) exec->batch_len;
3116
673a394b
EA
3117 count = nbox ? nbox : 1;
3118
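	/* With no cliprects the batch is still dispatched exactly once;
	 * otherwise it is emitted once per clip rectangle, each pass preceded
	 * by the corresponding drawing-rectangle state from i915_emit_box().
	 */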
3119 for (i = 0; i < count; i++) {
3120 if (i < nbox) {
201361a5 3121 int ret = i915_emit_box(dev, cliprects, i,
673a394b
EA
3122 exec->DR1, exec->DR4);
3123 if (ret)
3124 return ret;
3125 }
3126
3127 if (IS_I830(dev) || IS_845G(dev)) {
3128 BEGIN_LP_RING(4);
3129 OUT_RING(MI_BATCH_BUFFER);
3130 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3131 OUT_RING(exec_start + exec_len - 4);
3132 OUT_RING(0);
3133 ADVANCE_LP_RING();
3134 } else {
3135 BEGIN_LP_RING(2);
3136 if (IS_I965G(dev)) {
3137 OUT_RING(MI_BATCH_BUFFER_START |
3138 (2 << 6) |
3139 MI_BATCH_NON_SECURE_I965);
3140 OUT_RING(exec_start);
3141 } else {
3142 OUT_RING(MI_BATCH_BUFFER_START |
3143 (2 << 6));
3144 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3145 }
3146 ADVANCE_LP_RING();
3147 }
3148 }
3149
3150 /* XXX breadcrumb */
3151 return 0;
3152}
3153
3154/* Throttle our rendering by waiting until the ring has completed our requests
3155 * emitted over 20 msec ago.
3156 *
b962442e
EA
3157 * Note that if we were to use the current jiffies each time around the loop,
3158 * we wouldn't escape the function with any frames outstanding if the time to
3159 * render a frame was over 20ms.
3160 *
673a394b
EA
3161 * This should get us reasonable parallelism between CPU and GPU but also
3162 * relatively low latency when blocking on a particular request to finish.
3163 */
3164static int
3165i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3166{
3167 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3168 int ret = 0;
b962442e 3169 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
673a394b
EA
3170
3171 mutex_lock(&dev->struct_mutex);
b962442e
EA
3172 while (!list_empty(&i915_file_priv->mm.request_list)) {
3173 struct drm_i915_gem_request *request;
3174
3175 request = list_first_entry(&i915_file_priv->mm.request_list,
3176 struct drm_i915_gem_request,
3177 client_list);
3178
3179 if (time_after_eq(request->emitted_jiffies, recent_enough))
3180 break;
3181
3182 ret = i915_wait_request(dev, request->seqno);
3183 if (ret != 0)
3184 break;
3185 }
673a394b 3186 mutex_unlock(&dev->struct_mutex);
b962442e 3187
673a394b
EA
3188 return ret;
3189}
3190
40a5f0de
EA
3191static int
3192i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3193 uint32_t buffer_count,
3194 struct drm_i915_gem_relocation_entry **relocs)
3195{
3196 uint32_t reloc_count = 0, reloc_index = 0, i;
3197 int ret;
3198
3199 *relocs = NULL;
3200 for (i = 0; i < buffer_count; i++) {
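		/* An unsigned wrap here means the summed relocation count
		 * overflowed 32 bits, so reject the request.
		 */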
3201 if (reloc_count + exec_list[i].relocation_count < reloc_count)
3202 return -EINVAL;
3203 reloc_count += exec_list[i].relocation_count;
3204 }
3205
8e7d2b2c 3206 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
40a5f0de
EA
3207 if (*relocs == NULL)
3208 return -ENOMEM;
3209
3210 for (i = 0; i < buffer_count; i++) {
3211 struct drm_i915_gem_relocation_entry __user *user_relocs;
3212
3213 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3214
3215 ret = copy_from_user(&(*relocs)[reloc_index],
3216 user_relocs,
3217 exec_list[i].relocation_count *
3218 sizeof(**relocs));
3219 if (ret != 0) {
8e7d2b2c 3220 drm_free_large(*relocs);
40a5f0de 3221 *relocs = NULL;
2bc43b5c 3222 return -EFAULT;
40a5f0de
EA
3223 }
3224
3225 reloc_index += exec_list[i].relocation_count;
3226 }
3227
2bc43b5c 3228 return 0;
40a5f0de
EA
3229}
3230
3231static int
3232i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
3233 uint32_t buffer_count,
3234 struct drm_i915_gem_relocation_entry *relocs)
3235{
3236 uint32_t reloc_count = 0, i;
2bc43b5c 3237 int ret = 0;
40a5f0de
EA
3238
3239 for (i = 0; i < buffer_count; i++) {
3240 struct drm_i915_gem_relocation_entry __user *user_relocs;
2bc43b5c 3241 int unwritten;
40a5f0de
EA
3242
3243 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3244
2bc43b5c
FM
3245 unwritten = copy_to_user(user_relocs,
3246 &relocs[reloc_count],
3247 exec_list[i].relocation_count *
3248 sizeof(*relocs));
3249
3250 if (unwritten) {
3251 ret = -EFAULT;
3252 goto err;
40a5f0de
EA
3253 }
3254
3255 reloc_count += exec_list[i].relocation_count;
3256 }
3257
2bc43b5c 3258err:
8e7d2b2c 3259 drm_free_large(relocs);
40a5f0de
EA
3260
3261 return ret;
3262}
3263
83d60795
CW
3264static int
3265i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
3266 uint64_t exec_offset)
3267{
3268 uint32_t exec_start, exec_len;
3269
3270 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3271 exec_len = (uint32_t) exec->batch_len;
3272
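	/* Both the batch start offset and its length must be 8-byte aligned,
	 * and the start must be non-zero.
	 */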
3273 if ((exec_start | exec_len) & 0x7)
3274 return -EINVAL;
3275
3276 if (!exec_start)
3277 return -EINVAL;
3278
3279 return 0;
3280}
3281
673a394b
EA
3282int
3283i915_gem_execbuffer(struct drm_device *dev, void *data,
3284 struct drm_file *file_priv)
3285{
3286 drm_i915_private_t *dev_priv = dev->dev_private;
673a394b
EA
3287 struct drm_i915_gem_execbuffer *args = data;
3288 struct drm_i915_gem_exec_object *exec_list = NULL;
3289 struct drm_gem_object **object_list = NULL;
3290 struct drm_gem_object *batch_obj;
b70d11da 3291 struct drm_i915_gem_object *obj_priv;
201361a5 3292 struct drm_clip_rect *cliprects = NULL;
40a5f0de
EA
3293 struct drm_i915_gem_relocation_entry *relocs;
3294 int ret, ret2, i, pinned = 0;
673a394b 3295 uint64_t exec_offset;
40a5f0de 3296 uint32_t seqno, flush_domains, reloc_index;
ac94a962 3297 int pin_tries;
673a394b
EA
3298
3299#if WATCH_EXEC
3300 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3301 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3302#endif
3303
4f481ed2
EA
3304 if (args->buffer_count < 1) {
3305 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3306 return -EINVAL;
3307 }
673a394b 3308 /* Copy in the exec list from userland */
8e7d2b2c
JB
3309 exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
3310 object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
673a394b
EA
3311 if (exec_list == NULL || object_list == NULL) {
3312 DRM_ERROR("Failed to allocate exec or object list "
3313 "for %d buffers\n",
3314 args->buffer_count);
3315 ret = -ENOMEM;
3316 goto pre_mutex_err;
3317 }
3318 ret = copy_from_user(exec_list,
3319 (struct drm_i915_relocation_entry __user *)
3320 (uintptr_t) args->buffers_ptr,
3321 sizeof(*exec_list) * args->buffer_count);
3322 if (ret != 0) {
3323 DRM_ERROR("copy %d exec entries failed %d\n",
3324 args->buffer_count, ret);
3325 goto pre_mutex_err;
3326 }
3327
201361a5 3328 if (args->num_cliprects != 0) {
9a298b2a
EA
3329 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3330 GFP_KERNEL);
201361a5
EA
3331 if (cliprects == NULL)
3332 goto pre_mutex_err;
3333
3334 ret = copy_from_user(cliprects,
3335 (struct drm_clip_rect __user *)
3336 (uintptr_t) args->cliprects_ptr,
3337 sizeof(*cliprects) * args->num_cliprects);
3338 if (ret != 0) {
3339 DRM_ERROR("copy %d cliprects failed: %d\n",
3340 args->num_cliprects, ret);
3341 goto pre_mutex_err;
3342 }
3343 }
3344
40a5f0de
EA
3345 ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3346 &relocs);
3347 if (ret != 0)
3348 goto pre_mutex_err;
3349
673a394b
EA
3350 mutex_lock(&dev->struct_mutex);
3351
3352 i915_verify_inactive(dev, __FILE__, __LINE__);
3353
3354 if (dev_priv->mm.wedged) {
3355 DRM_ERROR("Execbuf while wedged\n");
3356 mutex_unlock(&dev->struct_mutex);
a198bc80
CW
3357 ret = -EIO;
3358 goto pre_mutex_err;
673a394b
EA
3359 }
3360
3361 if (dev_priv->mm.suspended) {
3362 DRM_ERROR("Execbuf while VT-switched.\n");
3363 mutex_unlock(&dev->struct_mutex);
a198bc80
CW
3364 ret = -EBUSY;
3365 goto pre_mutex_err;
673a394b
EA
3366 }
3367
ac94a962 3368 /* Look up object handles */
673a394b
EA
3369 for (i = 0; i < args->buffer_count; i++) {
3370 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3371 exec_list[i].handle);
3372 if (object_list[i] == NULL) {
3373 DRM_ERROR("Invalid object handle %d at index %d\n",
3374 exec_list[i].handle, i);
3375 ret = -EBADF;
3376 goto err;
3377 }
b70d11da
KH
3378
3379 obj_priv = object_list[i]->driver_private;
3380 if (obj_priv->in_execbuffer) {
3381 DRM_ERROR("Object %p appears more than once in object list\n",
3382 object_list[i]);
3383 ret = -EBADF;
3384 goto err;
3385 }
3386 obj_priv->in_execbuffer = true;
ac94a962 3387 }
673a394b 3388
ac94a962
KP
3389 /* Pin and relocate */
3390 for (pin_tries = 0; ; pin_tries++) {
3391 ret = 0;
40a5f0de
EA
3392 reloc_index = 0;
3393
ac94a962
KP
3394 for (i = 0; i < args->buffer_count; i++) {
3395 object_list[i]->pending_read_domains = 0;
3396 object_list[i]->pending_write_domain = 0;
3397 ret = i915_gem_object_pin_and_relocate(object_list[i],
3398 file_priv,
40a5f0de
EA
3399 &exec_list[i],
3400 &relocs[reloc_index]);
ac94a962
KP
3401 if (ret)
3402 break;
3403 pinned = i + 1;
40a5f0de 3404 reloc_index += exec_list[i].relocation_count;
ac94a962
KP
3405 }
3406 /* success */
3407 if (ret == 0)
3408 break;
3409
3410 /* error other than GTT full, or we've already tried again */
2939e1f5 3411 if (ret != -ENOSPC || pin_tries >= 1) {
f1acec93
EA
3412 if (ret != -ERESTARTSYS)
3413 DRM_ERROR("Failed to pin buffers %d\n", ret);
673a394b
EA
3414 goto err;
3415 }
ac94a962
KP
3416
3417 /* unpin all of our buffers */
3418 for (i = 0; i < pinned; i++)
3419 i915_gem_object_unpin(object_list[i]);
b1177636 3420 pinned = 0;
ac94a962
KP
3421
3422 /* evict everyone we can from the aperture */
3423 ret = i915_gem_evict_everything(dev);
3424 if (ret)
3425 goto err;
673a394b
EA
3426 }
3427
3428 /* Set the pending read domains for the batch buffer to COMMAND */
3429 batch_obj = object_list[args->buffer_count-1];
5f26a2c7
CW
3430 if (batch_obj->pending_write_domain) {
3431 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3432 ret = -EINVAL;
3433 goto err;
3434 }
3435 batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
673a394b 3436
83d60795
CW
3437 /* Sanity check the batch buffer, prior to moving objects */
3438 exec_offset = exec_list[args->buffer_count - 1].offset;
3439 ret = i915_gem_check_execbuffer (args, exec_offset);
3440 if (ret != 0) {
3441 DRM_ERROR("execbuf with invalid offset/length\n");
3442 goto err;
3443 }
3444
673a394b
EA
3445 i915_verify_inactive(dev, __FILE__, __LINE__);
3446
646f0f6e
KP
3447 /* Zero the global flush/invalidate flags. These
3448 * will be modified as new domains are computed
3449 * for each object
3450 */
3451 dev->invalidate_domains = 0;
3452 dev->flush_domains = 0;
3453
673a394b
EA
3454 for (i = 0; i < args->buffer_count; i++) {
3455 struct drm_gem_object *obj = object_list[i];
673a394b 3456
646f0f6e 3457 /* Compute new gpu domains and update invalidate/flush */
8b0e378a 3458 i915_gem_object_set_to_gpu_domain(obj);
673a394b
EA
3459 }
3460
3461 i915_verify_inactive(dev, __FILE__, __LINE__);
3462
646f0f6e
KP
3463 if (dev->invalidate_domains | dev->flush_domains) {
3464#if WATCH_EXEC
3465 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3466 __func__,
3467 dev->invalidate_domains,
3468 dev->flush_domains);
3469#endif
3470 i915_gem_flush(dev,
3471 dev->invalidate_domains,
3472 dev->flush_domains);
3473 if (dev->flush_domains)
b962442e
EA
3474 (void)i915_add_request(dev, file_priv,
3475 dev->flush_domains);
646f0f6e 3476 }
673a394b 3477
efbeed96
EA
3478 for (i = 0; i < args->buffer_count; i++) {
3479 struct drm_gem_object *obj = object_list[i];
3480
3481 obj->write_domain = obj->pending_write_domain;
3482 }
3483
673a394b
EA
3484 i915_verify_inactive(dev, __FILE__, __LINE__);
3485
3486#if WATCH_COHERENCY
3487 for (i = 0; i < args->buffer_count; i++) {
3488 i915_gem_object_check_coherency(object_list[i],
3489 exec_list[i].handle);
3490 }
3491#endif
3492
673a394b 3493#if WATCH_EXEC
6911a9b8 3494 i915_gem_dump_object(batch_obj,
673a394b
EA
3495 args->batch_len,
3496 __func__,
3497 ~0);
3498#endif
3499
673a394b 3500 /* Exec the batchbuffer */
201361a5 3501 ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
673a394b
EA
3502 if (ret) {
3503 DRM_ERROR("dispatch failed %d\n", ret);
3504 goto err;
3505 }
3506
3507 /*
3508 * Ensure that the commands in the batch buffer are
3509 * finished before the interrupt fires
3510 */
3511 flush_domains = i915_retire_commands(dev);
3512
3513 i915_verify_inactive(dev, __FILE__, __LINE__);
3514
3515 /*
3516 * Get a seqno representing the execution of the current buffer,
3517 * which we can wait on. We would like to mitigate these interrupts,
3518 * likely by only creating seqnos occasionally (so that we have
3519 * *some* interrupts representing completion of buffers that we can
3520 * wait on when trying to clear up gtt space).
3521 */
b962442e 3522 seqno = i915_add_request(dev, file_priv, flush_domains);
673a394b 3523 BUG_ON(seqno == 0);
673a394b
EA
3524 for (i = 0; i < args->buffer_count; i++) {
3525 struct drm_gem_object *obj = object_list[i];
673a394b 3526
ce44b0ea 3527 i915_gem_object_move_to_active(obj, seqno);
673a394b
EA
3528#if WATCH_LRU
3529 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
3530#endif
3531 }
3532#if WATCH_LRU
3533 i915_dump_lru(dev, __func__);
3534#endif
3535
3536 i915_verify_inactive(dev, __FILE__, __LINE__);
3537
673a394b 3538err:
aad87dff
JL
3539 for (i = 0; i < pinned; i++)
3540 i915_gem_object_unpin(object_list[i]);
3541
b70d11da
KH
3542 for (i = 0; i < args->buffer_count; i++) {
3543 if (object_list[i]) {
3544 obj_priv = object_list[i]->driver_private;
3545 obj_priv->in_execbuffer = false;
3546 }
aad87dff 3547 drm_gem_object_unreference(object_list[i]);
b70d11da 3548 }
673a394b 3549
673a394b
EA
3550 mutex_unlock(&dev->struct_mutex);
3551
a35f2e2b
RD
3552 if (!ret) {
3553 /* Copy the new buffer offsets back to the user's exec list. */
3554 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
3555 (uintptr_t) args->buffers_ptr,
3556 exec_list,
3557 sizeof(*exec_list) * args->buffer_count);
2bc43b5c
FM
3558 if (ret) {
3559 ret = -EFAULT;
a35f2e2b
RD
3560 DRM_ERROR("failed to copy %d exec entries "
3561 "back to user (%d)\n",
3562 args->buffer_count, ret);
2bc43b5c 3563 }
a35f2e2b
RD
3564 }
3565
40a5f0de
EA
3566 /* Copy the updated relocations out regardless of current error
3567 * state. Failure to update the relocs would mean that the next
3568 * time userland calls execbuf, it would do so with presumed offset
3569 * state that didn't match the actual object state.
3570 */
3571 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3572 relocs);
3573 if (ret2 != 0) {
3574 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3575
3576 if (ret == 0)
3577 ret = ret2;
3578 }
3579
673a394b 3580pre_mutex_err:
8e7d2b2c
JB
3581 drm_free_large(object_list);
3582 drm_free_large(exec_list);
9a298b2a 3583 kfree(cliprects);
673a394b
EA
3584
3585 return ret;
3586}
3587
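For context, here is a minimal user-space sketch of the other side of this path: submitting a single batch through the legacy execbuffer ioctl and reading back the GTT offset that the kernel wrote into the exec list via the copy_to_user() above. It is an illustration only, not driver code; the fd and handle are assumed to come from earlier drmOpen/GEM_CREATE calls, i915_drm.h is assumed to be on the include path, and error handling is trimmed.

/* Illustrative sketch only -- not part of i915_gem.c. */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int submit_batch(int fd, uint32_t batch_handle, uint32_t batch_len,
			uint64_t *offset_out)
{
	struct drm_i915_gem_exec_object obj;
	struct drm_i915_gem_execbuffer execbuf;

	memset(&obj, 0, sizeof(obj));
	obj.handle = batch_handle;	/* batch buffer must be the last entry */

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)&obj;
	execbuf.buffer_count = 1;
	execbuf.batch_len = batch_len;

	if (ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf))
		return -errno;

	/* On success the kernel copied the final GTT offset back into
	 * obj.offset through buffers_ptr. */
	*offset_out = obj.offset;
	return 0;
}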
3588int
3589i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3590{
3591 struct drm_device *dev = obj->dev;
3592 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3593 int ret;
3594
3595 i915_verify_inactive(dev, __FILE__, __LINE__);
3596 if (obj_priv->gtt_space == NULL) {
3597 ret = i915_gem_object_bind_to_gtt(obj, alignment);
3598 if (ret != 0) {
9bb2d6f9 3599 if (ret != -EBUSY && ret != -ERESTARTSYS)
0fce81e3 3600 DRM_ERROR("Failure to bind: %d\n", ret);
673a394b
EA
3601 return ret;
3602 }
22c344e9
CW
3603 }
3604 /*
3605 * Pre-965 chips need a fence register set up in order to
3606 * properly handle tiled surfaces.
3607 */
a09ba7fa 3608 if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
8c4b8c3f 3609 ret = i915_gem_object_get_fence_reg(obj);
22c344e9
CW
3610 if (ret != 0) {
3611 if (ret != -EBUSY && ret != -ERESTARTSYS)
3612 DRM_ERROR("Failure to install fence: %d\n",
3613 ret);
3614 return ret;
3615 }
673a394b
EA
3616 }
3617 obj_priv->pin_count++;
3618
3619 /* If the object is not active and not pending a flush,
3620 * remove it from the inactive list
3621 */
3622 if (obj_priv->pin_count == 1) {
3623 atomic_inc(&dev->pin_count);
3624 atomic_add(obj->size, &dev->pin_memory);
3625 if (!obj_priv->active &&
21d509e3 3626 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
673a394b
EA
3627 !list_empty(&obj_priv->list))
3628 list_del_init(&obj_priv->list);
3629 }
3630 i915_verify_inactive(dev, __FILE__, __LINE__);
3631
3632 return 0;
3633}
3634
3635void
3636i915_gem_object_unpin(struct drm_gem_object *obj)
3637{
3638 struct drm_device *dev = obj->dev;
3639 drm_i915_private_t *dev_priv = dev->dev_private;
3640 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3641
3642 i915_verify_inactive(dev, __FILE__, __LINE__);
3643 obj_priv->pin_count--;
3644 BUG_ON(obj_priv->pin_count < 0);
3645 BUG_ON(obj_priv->gtt_space == NULL);
3646
3647 /* If the object is no longer pinned, and is
3648 * neither active nor being flushed, then stick it on
3649 * the inactive list
3650 */
3651 if (obj_priv->pin_count == 0) {
3652 if (!obj_priv->active &&
21d509e3 3653 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
673a394b
EA
3654 list_move_tail(&obj_priv->list,
3655 &dev_priv->mm.inactive_list);
3656 atomic_dec(&dev->pin_count);
3657 atomic_sub(obj->size, &dev->pin_memory);
3658 }
3659 i915_verify_inactive(dev, __FILE__, __LINE__);
3660}
3661
3662int
3663i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3664 struct drm_file *file_priv)
3665{
3666 struct drm_i915_gem_pin *args = data;
3667 struct drm_gem_object *obj;
3668 struct drm_i915_gem_object *obj_priv;
3669 int ret;
3670
3671 mutex_lock(&dev->struct_mutex);
3672
3673 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3674 if (obj == NULL) {
3675 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
3676 args->handle);
3677 mutex_unlock(&dev->struct_mutex);
3678 return -EBADF;
3679 }
3680 obj_priv = obj->driver_private;
3681
79e53945
JB
3682 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
3683 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3684 args->handle);
96dec61d 3685 drm_gem_object_unreference(obj);
673a394b 3686 mutex_unlock(&dev->struct_mutex);
79e53945
JB
3687 return -EINVAL;
3688 }
3689
3690 obj_priv->user_pin_count++;
3691 obj_priv->pin_filp = file_priv;
3692 if (obj_priv->user_pin_count == 1) {
3693 ret = i915_gem_object_pin(obj, args->alignment);
3694 if (ret != 0) {
3695 drm_gem_object_unreference(obj);
3696 mutex_unlock(&dev->struct_mutex);
3697 return ret;
3698 }
673a394b
EA
3699 }
3700
3701 /* XXX - flush the CPU caches for pinned objects
3702 * as the X server doesn't manage domains yet
3703 */
e47c68e9 3704 i915_gem_object_flush_cpu_write_domain(obj);
673a394b
EA
3705 args->offset = obj_priv->gtt_offset;
3706 drm_gem_object_unreference(obj);
3707 mutex_unlock(&dev->struct_mutex);
3708
3709 return 0;
3710}
3711
3712int
3713i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3714 struct drm_file *file_priv)
3715{
3716 struct drm_i915_gem_pin *args = data;
3717 struct drm_gem_object *obj;
79e53945 3718 struct drm_i915_gem_object *obj_priv;
673a394b
EA
3719
3720 mutex_lock(&dev->struct_mutex);
3721
3722 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3723 if (obj == NULL) {
3724 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
3725 args->handle);
3726 mutex_unlock(&dev->struct_mutex);
3727 return -EBADF;
3728 }
3729
79e53945
JB
3730 obj_priv = obj->driver_private;
3731 if (obj_priv->pin_filp != file_priv) {
3732 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3733 args->handle);
3734 drm_gem_object_unreference(obj);
3735 mutex_unlock(&dev->struct_mutex);
3736 return -EINVAL;
3737 }
3738 obj_priv->user_pin_count--;
3739 if (obj_priv->user_pin_count == 0) {
3740 obj_priv->pin_filp = NULL;
3741 i915_gem_object_unpin(obj);
3742 }
673a394b
EA
3743
3744 drm_gem_object_unreference(obj);
3745 mutex_unlock(&dev->struct_mutex);
3746 return 0;
3747}
3748
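A hedged user-space sketch of how a pre-KMS X server might drive the two ioctls above: pin a cursor or scanout buffer to obtain a stable GTT offset, then drop the pin when the buffer is retired. pin_for_scanout() and unpin_buffer() are hypothetical helpers; only the ioctls and uapi structs are taken as given.

/* Illustrative sketch only -- not part of i915_gem.c. */
static int pin_for_scanout(int fd, uint32_t handle, uint64_t *offset_out)
{
	struct drm_i915_gem_pin pin;

	memset(&pin, 0, sizeof(pin));
	pin.handle = handle;
	pin.alignment = 4096;		/* page alignment suffices for this sketch */

	if (ioctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin))
		return -errno;

	*offset_out = pin.offset;	/* stable GTT address while pinned */
	return 0;
}

static int unpin_buffer(int fd, uint32_t handle)
{
	struct drm_i915_gem_unpin unpin;

	memset(&unpin, 0, sizeof(unpin));
	unpin.handle = handle;
	return ioctl(fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin) ? -errno : 0;
}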
3749int
3750i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3751 struct drm_file *file_priv)
3752{
3753 struct drm_i915_gem_busy *args = data;
3754 struct drm_gem_object *obj;
3755 struct drm_i915_gem_object *obj_priv;
3756
673a394b
EA
3757 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3758 if (obj == NULL) {
3759 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
3760 args->handle);
673a394b
EA
3761 return -EBADF;
3762 }
3763
b1ce786c 3764 mutex_lock(&dev->struct_mutex);
f21289b3
EA
3765 /* Update the active list for the hardware's current position.
3766 * Otherwise this only updates on a delayed timer or when irqs are
3767 * actually unmasked, and our working set ends up being larger than
3768 * required.
3769 */
3770 i915_gem_retire_requests(dev);
3771
673a394b 3772 obj_priv = obj->driver_private;
c4de0a5d
EA
3773 /* Don't count being on the flushing list against the object being
3774 * done. Otherwise, a buffer left on the flushing list but not getting
3775 * flushed (because nobody's flushing that domain) won't ever return
3776 * unbusy and get reused by libdrm's bo cache. The other expected
3777 * consumer of this interface, OpenGL's occlusion queries, also specs
3778 * that the objects get unbusy "eventually" without any interference.
3779 */
3780 args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
673a394b
EA
3781
3782 drm_gem_object_unreference(obj);
3783 mutex_unlock(&dev->struct_mutex);
3784 return 0;
3785}
3786
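The busy ioctl is what libdrm's bo-cache and occlusion-query paths poll; a minimal sketch of such a poll loop follows, under the same assumptions as the previous examples (wait_until_idle() is a hypothetical helper, and a real caller would sleep or use set_domain rather than spin).

/* Illustrative sketch only -- not part of i915_gem.c. */
static int wait_until_idle(int fd, uint32_t handle)
{
	struct drm_i915_gem_busy busy;

	do {
		memset(&busy, 0, sizeof(busy));
		busy.handle = handle;
		if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
			return -errno;
	} while (busy.busy);

	return 0;
}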
3787int
3788i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3789 struct drm_file *file_priv)
3790{
3791 return i915_gem_ring_throttle(dev, file_priv);
3792}
3793
3794int i915_gem_init_object(struct drm_gem_object *obj)
3795{
3796 struct drm_i915_gem_object *obj_priv;
3797
9a298b2a 3798 obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
673a394b
EA
3799 if (obj_priv == NULL)
3800 return -ENOMEM;
3801
3802 /*
3803 * We've just allocated pages from the kernel,
3804 * so they've just been written by the CPU with
3805 * zeros. They'll need to be clflushed before we
3806 * use them with the GPU.
3807 */
3808 obj->write_domain = I915_GEM_DOMAIN_CPU;
3809 obj->read_domains = I915_GEM_DOMAIN_CPU;
3810
ba1eb1d8
KP
3811 obj_priv->agp_type = AGP_USER_MEMORY;
3812
673a394b
EA
3813 obj->driver_private = obj_priv;
3814 obj_priv->obj = obj;
de151cf6 3815 obj_priv->fence_reg = I915_FENCE_REG_NONE;
673a394b 3816 INIT_LIST_HEAD(&obj_priv->list);
a09ba7fa 3817 INIT_LIST_HEAD(&obj_priv->fence_list);
de151cf6 3818
673a394b
EA
3819 return 0;
3820}
3821
3822void i915_gem_free_object(struct drm_gem_object *obj)
3823{
de151cf6 3824 struct drm_device *dev = obj->dev;
673a394b
EA
3825 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3826
3827 while (obj_priv->pin_count > 0)
3828 i915_gem_object_unpin(obj);
3829
71acb5eb
DA
3830 if (obj_priv->phys_obj)
3831 i915_gem_detach_phys_object(dev, obj);
3832
673a394b
EA
3833 i915_gem_object_unbind(obj);
3834
ab00b3e5 3835 i915_gem_free_mmap_offset(obj);
de151cf6 3836
9a298b2a 3837 kfree(obj_priv->page_cpu_valid);
280b713b 3838 kfree(obj_priv->bit_17);
9a298b2a 3839 kfree(obj->driver_private);
673a394b
EA
3840}
3841
673a394b
EA
3842/** Unbinds all objects that are on the given buffer list. */
3843static int
3844i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
3845{
3846 struct drm_gem_object *obj;
3847 struct drm_i915_gem_object *obj_priv;
3848 int ret;
3849
3850 while (!list_empty(head)) {
3851 obj_priv = list_first_entry(head,
3852 struct drm_i915_gem_object,
3853 list);
3854 obj = obj_priv->obj;
3855
3856 if (obj_priv->pin_count != 0) {
3857 DRM_ERROR("Pinned object in unbind list\n");
3858 mutex_unlock(&dev->struct_mutex);
3859 return -EINVAL;
3860 }
3861
3862 ret = i915_gem_object_unbind(obj);
3863 if (ret != 0) {
3864 DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
3865 ret);
3866 mutex_unlock(&dev->struct_mutex);
3867 return ret;
3868 }
3869 }
3870
3871
3872 return 0;
3873}
3874
5669fcac 3875int
673a394b
EA
3876i915_gem_idle(struct drm_device *dev)
3877{
3878 drm_i915_private_t *dev_priv = dev->dev_private;
3879 uint32_t seqno, cur_seqno, last_seqno;
3880 int stuck, ret;
3881
6dbe2772
KP
3882 mutex_lock(&dev->struct_mutex);
3883
3884 if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
3885 mutex_unlock(&dev->struct_mutex);
673a394b 3886 return 0;
6dbe2772 3887 }
673a394b
EA
3888
3889 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3890 * We need to replace this with a semaphore, or something.
3891 */
3892 dev_priv->mm.suspended = 1;
3893
6dbe2772
KP
3894 /* Cancel the retire work handler, wait for it to finish if running
3895 */
3896 mutex_unlock(&dev->struct_mutex);
3897 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3898 mutex_lock(&dev->struct_mutex);
3899
673a394b
EA
3900 i915_kernel_lost_context(dev);
3901
3902 /* Flush the GPU along with all non-CPU write domains
3903 */
21d509e3
CW
3904 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
3905 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
673a394b
EA
3906
3907 if (seqno == 0) {
3908 mutex_unlock(&dev->struct_mutex);
3909 return -ENOMEM;
3910 }
3911
3912 dev_priv->mm.waiting_gem_seqno = seqno;
3913 last_seqno = 0;
3914 stuck = 0;
3915 for (;;) {
3916 cur_seqno = i915_get_gem_seqno(dev);
3917 if (i915_seqno_passed(cur_seqno, seqno))
3918 break;
3919 if (last_seqno == cur_seqno) {
3920 if (stuck++ > 100) {
3921 DRM_ERROR("hardware wedged\n");
3922 dev_priv->mm.wedged = 1;
3923 DRM_WAKEUP(&dev_priv->irq_queue);
3924 break;
3925 }
3926 }
3927 msleep(10);
3928 last_seqno = cur_seqno;
3929 }
3930 dev_priv->mm.waiting_gem_seqno = 0;
3931
3932 i915_gem_retire_requests(dev);
3933
5e118f41 3934 spin_lock(&dev_priv->mm.active_list_lock);
28dfe52a
EA
3935 if (!dev_priv->mm.wedged) {
3936 /* Active and flushing should now be empty as we've
3937 * waited for a sequence higher than any pending execbuffer
3938 */
3939 WARN_ON(!list_empty(&dev_priv->mm.active_list));
3940 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
3941 /* Request should now be empty as we've also waited
3942 * for the last request in the list
3943 */
3944 WARN_ON(!list_empty(&dev_priv->mm.request_list));
3945 }
673a394b 3946
28dfe52a
EA
3947 /* Empty the active and flushing lists to inactive. If there's
3948 * anything left at this point, it means that we're wedged and
3949 * nothing good's going to happen by leaving them there. So strip
3950 * the GPU domains and just stuff them onto inactive.
673a394b 3951 */
28dfe52a
EA
3952 while (!list_empty(&dev_priv->mm.active_list)) {
3953 struct drm_i915_gem_object *obj_priv;
673a394b 3954
28dfe52a
EA
3955 obj_priv = list_first_entry(&dev_priv->mm.active_list,
3956 struct drm_i915_gem_object,
3957 list);
3958 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3959 i915_gem_object_move_to_inactive(obj_priv->obj);
3960 }
5e118f41 3961 spin_unlock(&dev_priv->mm.active_list_lock);
28dfe52a
EA
3962
3963 while (!list_empty(&dev_priv->mm.flushing_list)) {
3964 struct drm_i915_gem_object *obj_priv;
3965
151903d5 3966 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
28dfe52a
EA
3967 struct drm_i915_gem_object,
3968 list);
3969 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3970 i915_gem_object_move_to_inactive(obj_priv->obj);
3971 }
3972
3973
3974 /* Move all inactive buffers out of the GTT. */
673a394b 3975 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
28dfe52a 3976 WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
6dbe2772
KP
3977 if (ret) {
3978 mutex_unlock(&dev->struct_mutex);
673a394b 3979 return ret;
6dbe2772 3980 }
673a394b 3981
6dbe2772
KP
3982 i915_gem_cleanup_ringbuffer(dev);
3983 mutex_unlock(&dev->struct_mutex);
3984
673a394b
EA
3985 return 0;
3986}
3987
3988static int
3989i915_gem_init_hws(struct drm_device *dev)
3990{
3991 drm_i915_private_t *dev_priv = dev->dev_private;
3992 struct drm_gem_object *obj;
3993 struct drm_i915_gem_object *obj_priv;
3994 int ret;
3995
3996 /* If we need a physical address for the status page, it's already
3997 * initialized at driver load time.
3998 */
3999 if (!I915_NEED_GFX_HWS(dev))
4000 return 0;
4001
4002 obj = drm_gem_object_alloc(dev, 4096);
4003 if (obj == NULL) {
4004 DRM_ERROR("Failed to allocate status page\n");
4005 return -ENOMEM;
4006 }
4007 obj_priv = obj->driver_private;
ba1eb1d8 4008 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
673a394b
EA
4009
4010 ret = i915_gem_object_pin(obj, 4096);
4011 if (ret != 0) {
4012 drm_gem_object_unreference(obj);
4013 return ret;
4014 }
4015
4016 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
673a394b 4017
856fa198 4018 dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
ba1eb1d8 4019 if (dev_priv->hw_status_page == NULL) {
673a394b
EA
4020 DRM_ERROR("Failed to map status page.\n");
4021 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
3eb2ee77 4022 i915_gem_object_unpin(obj);
673a394b
EA
4023 drm_gem_object_unreference(obj);
4024 return -EINVAL;
4025 }
4026 dev_priv->hws_obj = obj;
673a394b
EA
4027 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
4028 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
ba1eb1d8 4029 I915_READ(HWS_PGA); /* posting read */
673a394b
EA
4030 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
4031
4032 return 0;
4033}
4034
85a7bb98
CW
4035static void
4036i915_gem_cleanup_hws(struct drm_device *dev)
4037{
4038 drm_i915_private_t *dev_priv = dev->dev_private;
bab2d1f6
CW
4039 struct drm_gem_object *obj;
4040 struct drm_i915_gem_object *obj_priv;
85a7bb98
CW
4041
4042 if (dev_priv->hws_obj == NULL)
4043 return;
4044
bab2d1f6
CW
4045 obj = dev_priv->hws_obj;
4046 obj_priv = obj->driver_private;
4047
856fa198 4048 kunmap(obj_priv->pages[0]);
85a7bb98
CW
4049 i915_gem_object_unpin(obj);
4050 drm_gem_object_unreference(obj);
4051 dev_priv->hws_obj = NULL;
bab2d1f6 4052
85a7bb98
CW
4053 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4054 dev_priv->hw_status_page = NULL;
4055
4056 /* Write high address into HWS_PGA when disabling. */
4057 I915_WRITE(HWS_PGA, 0x1ffff000);
4058}
4059
79e53945 4060int
673a394b
EA
4061i915_gem_init_ringbuffer(struct drm_device *dev)
4062{
4063 drm_i915_private_t *dev_priv = dev->dev_private;
4064 struct drm_gem_object *obj;
4065 struct drm_i915_gem_object *obj_priv;
79e53945 4066 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
673a394b 4067 int ret;
50aa253d 4068 u32 head;
673a394b
EA
4069
4070 ret = i915_gem_init_hws(dev);
4071 if (ret != 0)
4072 return ret;
4073
4074 obj = drm_gem_object_alloc(dev, 128 * 1024);
4075 if (obj == NULL) {
4076 DRM_ERROR("Failed to allocate ringbuffer\n");
85a7bb98 4077 i915_gem_cleanup_hws(dev);
673a394b
EA
4078 return -ENOMEM;
4079 }
4080 obj_priv = obj->driver_private;
4081
4082 ret = i915_gem_object_pin(obj, 4096);
4083 if (ret != 0) {
4084 drm_gem_object_unreference(obj);
85a7bb98 4085 i915_gem_cleanup_hws(dev);
673a394b
EA
4086 return ret;
4087 }
4088
4089 /* Set up the kernel mapping for the ring. */
79e53945
JB
4090 ring->Size = obj->size;
4091 ring->tail_mask = obj->size - 1;
673a394b 4092
79e53945
JB
4093 ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
4094 ring->map.size = obj->size;
4095 ring->map.type = 0;
4096 ring->map.flags = 0;
4097 ring->map.mtrr = 0;
673a394b 4098
79e53945
JB
4099 drm_core_ioremap_wc(&ring->map, dev);
4100 if (ring->map.handle == NULL) {
673a394b
EA
4101 DRM_ERROR("Failed to map ringbuffer.\n");
4102 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
47ed185a 4103 i915_gem_object_unpin(obj);
673a394b 4104 drm_gem_object_unreference(obj);
85a7bb98 4105 i915_gem_cleanup_hws(dev);
673a394b
EA
4106 return -EINVAL;
4107 }
79e53945
JB
4108 ring->ring_obj = obj;
4109 ring->virtual_start = ring->map.handle;
673a394b
EA
4110
4111 /* Stop the ring if it's running. */
4112 I915_WRITE(PRB0_CTL, 0);
673a394b 4113 I915_WRITE(PRB0_TAIL, 0);
50aa253d 4114 I915_WRITE(PRB0_HEAD, 0);
673a394b
EA
4115
4116 /* Initialize the ring. */
4117 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
50aa253d
KP
4118 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4119
4120 /* G45 ring initialization fails to reset head to zero */
4121 if (head != 0) {
4122 DRM_ERROR("Ring head not reset to zero "
4123 "ctl %08x head %08x tail %08x start %08x\n",
4124 I915_READ(PRB0_CTL),
4125 I915_READ(PRB0_HEAD),
4126 I915_READ(PRB0_TAIL),
4127 I915_READ(PRB0_START));
4128 I915_WRITE(PRB0_HEAD, 0);
4129
4130 DRM_ERROR("Ring head forced to zero "
4131 "ctl %08x head %08x tail %08x start %08x\n",
4132 I915_READ(PRB0_CTL),
4133 I915_READ(PRB0_HEAD),
4134 I915_READ(PRB0_TAIL),
4135 I915_READ(PRB0_START));
4136 }
4137
673a394b
EA
4138 I915_WRITE(PRB0_CTL,
4139 ((obj->size - 4096) & RING_NR_PAGES) |
4140 RING_NO_REPORT |
4141 RING_VALID);
4142
50aa253d
KP
4143 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4144
4145 /* If the head is still not zero, the ring is dead */
4146 if (head != 0) {
4147 DRM_ERROR("Ring initialization failed "
4148 "ctl %08x head %08x tail %08x start %08x\n",
4149 I915_READ(PRB0_CTL),
4150 I915_READ(PRB0_HEAD),
4151 I915_READ(PRB0_TAIL),
4152 I915_READ(PRB0_START));
4153 return -EIO;
4154 }
4155
673a394b 4156 /* Update our cache of the ring state */
79e53945
JB
4157 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4158 i915_kernel_lost_context(dev);
4159 else {
4160 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4161 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
4162 ring->space = ring->head - (ring->tail + 8);
4163 if (ring->space < 0)
4164 ring->space += ring->Size;
4165 }
673a394b
EA
4166
4167 return 0;
4168}
4169
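The space bookkeeping at the end of i915_gem_init_ringbuffer() follows the usual circular-buffer rule; a stand-alone restatement of it is below (the function name is mine, not the driver's).

/* Free bytes between tail and head, keeping 8 bytes of slack so the
 * tail never fully catches up with the head, with wraparound handled
 * by adding the ring size back in.  Illustration only.
 */
static int ring_free_space(int head, int tail, int size)
{
	int space = head - (tail + 8);

	if (space < 0)
		space += size;
	return space;
}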
79e53945 4170void
673a394b
EA
4171i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4172{
4173 drm_i915_private_t *dev_priv = dev->dev_private;
4174
4175 if (dev_priv->ring.ring_obj == NULL)
4176 return;
4177
4178 drm_core_ioremapfree(&dev_priv->ring.map, dev);
4179
4180 i915_gem_object_unpin(dev_priv->ring.ring_obj);
4181 drm_gem_object_unreference(dev_priv->ring.ring_obj);
4182 dev_priv->ring.ring_obj = NULL;
4183 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4184
85a7bb98 4185 i915_gem_cleanup_hws(dev);
673a394b
EA
4186}
4187
4188int
4189i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4190 struct drm_file *file_priv)
4191{
4192 drm_i915_private_t *dev_priv = dev->dev_private;
4193 int ret;
4194
79e53945
JB
4195 if (drm_core_check_feature(dev, DRIVER_MODESET))
4196 return 0;
4197
673a394b
EA
4198 if (dev_priv->mm.wedged) {
4199 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4200 dev_priv->mm.wedged = 0;
4201 }
4202
673a394b 4203 mutex_lock(&dev->struct_mutex);
9bb2d6f9
EA
4204 dev_priv->mm.suspended = 0;
4205
4206 ret = i915_gem_init_ringbuffer(dev);
d816f6ac
WF
4207 if (ret != 0) {
4208 mutex_unlock(&dev->struct_mutex);
9bb2d6f9 4209 return ret;
d816f6ac 4210 }
9bb2d6f9 4211
5e118f41 4212 spin_lock(&dev_priv->mm.active_list_lock);
673a394b 4213 BUG_ON(!list_empty(&dev_priv->mm.active_list));
5e118f41
CW
4214 spin_unlock(&dev_priv->mm.active_list_lock);
4215
673a394b
EA
4216 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4217 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4218 BUG_ON(!list_empty(&dev_priv->mm.request_list));
673a394b 4219 mutex_unlock(&dev->struct_mutex);
dbb19d30
KH
4220
4221 drm_irq_install(dev);
4222
673a394b
EA
4223 return 0;
4224}
4225
4226int
4227i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4228 struct drm_file *file_priv)
4229{
4230 int ret;
4231
79e53945
JB
4232 if (drm_core_check_feature(dev, DRIVER_MODESET))
4233 return 0;
4234
673a394b 4235 ret = i915_gem_idle(dev);
dbb19d30
KH
4236 drm_irq_uninstall(dev);
4237
6dbe2772 4238 return ret;
673a394b
EA
4239}
4240
4241void
4242i915_gem_lastclose(struct drm_device *dev)
4243{
4244 int ret;
673a394b 4245
e806b495
EA
4246 if (drm_core_check_feature(dev, DRIVER_MODESET))
4247 return;
4248
6dbe2772
KP
4249 ret = i915_gem_idle(dev);
4250 if (ret)
4251 DRM_ERROR("failed to idle hardware: %d\n", ret);
673a394b
EA
4252}
4253
4254void
4255i915_gem_load(struct drm_device *dev)
4256{
b5aa8a0f 4257 int i;
673a394b
EA
4258 drm_i915_private_t *dev_priv = dev->dev_private;
4259
5e118f41 4260 spin_lock_init(&dev_priv->mm.active_list_lock);
673a394b
EA
4261 INIT_LIST_HEAD(&dev_priv->mm.active_list);
4262 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4263 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4264 INIT_LIST_HEAD(&dev_priv->mm.request_list);
a09ba7fa 4265 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
673a394b
EA
4266 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4267 i915_gem_retire_work_handler);
4268 dev_priv->mm.next_gem_seqno = 1;
4269
de151cf6
JB
4270 /* Old X drivers will take 0-2 for front, back, depth buffers */
4271 dev_priv->fence_reg_start = 3;
4272
0f973f27 4273 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
de151cf6
JB
4274 dev_priv->num_fence_regs = 16;
4275 else
4276 dev_priv->num_fence_regs = 8;
4277
b5aa8a0f
GH
4278 /* Initialize fence registers to zero */
4279 if (IS_I965G(dev)) {
4280 for (i = 0; i < 16; i++)
4281 I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
4282 } else {
4283 for (i = 0; i < 8; i++)
4284 I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
4285 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4286 for (i = 0; i < 8; i++)
4287 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
4288 }
4289
673a394b
EA
4290 i915_gem_detect_bit_6_swizzle(dev);
4291}
71acb5eb
DA
4292
4293/*
4294 * Create a physically contiguous memory object for this object
4295 * e.g. for cursor + overlay regs
4296 */
4297int i915_gem_init_phys_object(struct drm_device *dev,
4298 int id, int size)
4299{
4300 drm_i915_private_t *dev_priv = dev->dev_private;
4301 struct drm_i915_gem_phys_object *phys_obj;
4302 int ret;
4303
4304 if (dev_priv->mm.phys_objs[id - 1] || !size)
4305 return 0;
4306
9a298b2a 4307 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
71acb5eb
DA
4308 if (!phys_obj)
4309 return -ENOMEM;
4310
4311 phys_obj->id = id;
4312
4313 phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
4314 if (!phys_obj->handle) {
4315 ret = -ENOMEM;
4316 goto kfree_obj;
4317 }
4318#ifdef CONFIG_X86
4319 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4320#endif
4321
4322 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4323
4324 return 0;
4325kfree_obj:
9a298b2a 4326 kfree(phys_obj);
71acb5eb
DA
4327 return ret;
4328}
4329
4330void i915_gem_free_phys_object(struct drm_device *dev, int id)
4331{
4332 drm_i915_private_t *dev_priv = dev->dev_private;
4333 struct drm_i915_gem_phys_object *phys_obj;
4334
4335 if (!dev_priv->mm.phys_objs[id - 1])
4336 return;
4337
4338 phys_obj = dev_priv->mm.phys_objs[id - 1];
4339 if (phys_obj->cur_obj) {
4340 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4341 }
4342
4343#ifdef CONFIG_X86
4344 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4345#endif
4346 drm_pci_free(dev, phys_obj->handle);
4347 kfree(phys_obj);
4348 dev_priv->mm.phys_objs[id - 1] = NULL;
4349}
4350
4351void i915_gem_free_all_phys_object(struct drm_device *dev)
4352{
4353 int i;
4354
260883c8 4355 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
71acb5eb
DA
4356 i915_gem_free_phys_object(dev, i);
4357}
4358
4359void i915_gem_detach_phys_object(struct drm_device *dev,
4360 struct drm_gem_object *obj)
4361{
4362 struct drm_i915_gem_object *obj_priv;
4363 int i;
4364 int ret;
4365 int page_count;
4366
4367 obj_priv = obj->driver_private;
4368 if (!obj_priv->phys_obj)
4369 return;
4370
856fa198 4371 ret = i915_gem_object_get_pages(obj);
71acb5eb
DA
4372 if (ret)
4373 goto out;
4374
4375 page_count = obj->size / PAGE_SIZE;
4376
4377 for (i = 0; i < page_count; i++) {
856fa198 4378 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
71acb5eb
DA
4379 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4380
4381 memcpy(dst, src, PAGE_SIZE);
4382 kunmap_atomic(dst, KM_USER0);
4383 }
856fa198 4384 drm_clflush_pages(obj_priv->pages, page_count);
71acb5eb 4385 drm_agp_chipset_flush(dev);
d78b47b9
CW
4386
4387 i915_gem_object_put_pages(obj);
71acb5eb
DA
4388out:
4389 obj_priv->phys_obj->cur_obj = NULL;
4390 obj_priv->phys_obj = NULL;
4391}
4392
4393int
4394i915_gem_attach_phys_object(struct drm_device *dev,
4395 struct drm_gem_object *obj, int id)
4396{
4397 drm_i915_private_t *dev_priv = dev->dev_private;
4398 struct drm_i915_gem_object *obj_priv;
4399 int ret = 0;
4400 int page_count;
4401 int i;
4402
4403 if (id > I915_MAX_PHYS_OBJECT)
4404 return -EINVAL;
4405
4406 obj_priv = obj->driver_private;
4407
4408 if (obj_priv->phys_obj) {
4409 if (obj_priv->phys_obj->id == id)
4410 return 0;
4411 i915_gem_detach_phys_object(dev, obj);
4412 }
4413
4414
4415 /* create a new object */
4416 if (!dev_priv->mm.phys_objs[id - 1]) {
4417 ret = i915_gem_init_phys_object(dev, id,
4418 obj->size);
4419 if (ret) {
aeb565df 4420 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
71acb5eb
DA
4421 goto out;
4422 }
4423 }
4424
4425 /* bind to the object */
4426 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4427 obj_priv->phys_obj->cur_obj = obj;
4428
856fa198 4429 ret = i915_gem_object_get_pages(obj);
71acb5eb
DA
4430 if (ret) {
4431 DRM_ERROR("failed to get page list\n");
4432 goto out;
4433 }
4434
4435 page_count = obj->size / PAGE_SIZE;
4436
4437 for (i = 0; i < page_count; i++) {
856fa198 4438 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
71acb5eb
DA
4439 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4440
4441 memcpy(dst, src, PAGE_SIZE);
4442 kunmap_atomic(src, KM_USER0);
4443 }
4444
d78b47b9
CW
4445 i915_gem_object_put_pages(obj);
4446
71acb5eb
DA
4447 return 0;
4448out:
4449 return ret;
4450}
4451
4452static int
4453i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4454 struct drm_i915_gem_pwrite *args,
4455 struct drm_file *file_priv)
4456{
4457 struct drm_i915_gem_object *obj_priv = obj->driver_private;
4458 void *obj_addr;
4459 int ret;
4460 char __user *user_data;
4461
4462 user_data = (char __user *) (uintptr_t) args->data_ptr;
4463 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
4464
e08fb4f6 4465 DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
71acb5eb
DA
4466 ret = copy_from_user(obj_addr, user_data, args->size);
4467 if (ret)
4468 return -EFAULT;
4469
4470 drm_agp_chipset_flush(dev);
4471 return 0;
4472}
b962442e
EA
4473
4474void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
4475{
4476 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
4477
4478 /* Clean up our request list when the client is going away, so that
4479 * later retire_requests won't dereference our soon-to-be-gone
4480 * file_priv.
4481 */
4482 mutex_lock(&dev->struct_mutex);
4483 while (!list_empty(&i915_file_priv->mm.request_list))
4484 list_del_init(i915_file_priv->mm.request_list.next);
4485 mutex_unlock(&dev->struct_mutex);
4486}