drm/i915: Pin backing pages for pread
drivers/gpu/drm/i915/i915_gem_execbuffer.c
/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

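/* A small power-of-two hash table, sized to fit within a page, mapping
 * execbuffer handles to the GEM objects looked up for this call, so that
 * relocation processing can resolve target handles without going back to
 * the file's handle table for every entry.
 */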
struct eb_objects {
	int and;
	struct hlist_head buckets[0];
};

static struct eb_objects *
eb_create(int size)
{
	struct eb_objects *eb;
	int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
	while (count > size)
		count >>= 1;
	eb = kzalloc(count*sizeof(struct hlist_head) +
		     sizeof(struct eb_objects),
		     GFP_KERNEL);
	if (eb == NULL)
		return eb;

	eb->and = count - 1;
	return eb;
}

static void
eb_reset(struct eb_objects *eb)
{
	memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
{
	hlist_add_head(&obj->exec_node,
		       &eb->buckets[obj->exec_handle & eb->and]);
}

static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct drm_i915_gem_object *obj;

	head = &eb->buckets[handle & eb->and];
	hlist_for_each(node, head) {
		obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
		if (obj->exec_handle == handle)
			return obj;
	}

	return NULL;
}

static void
eb_destroy(struct eb_objects *eb)
{
	kfree(eb);
}

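/* Decide how a relocation is written into the object: via a CPU kmap of
 * the backing page when the object is already in the CPU write domain,
 * is not mappable through the GTT aperture, or is not uncached; otherwise
 * it is poked directly through an atomic GTT io-mapping below.
 */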
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_objects *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we already hold a reference to all valid objects */
	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
	if (unlikely(target_obj == NULL))
		return -ENOENT;

	target_i915_obj = to_intel_bo(target_obj);
	target_offset = target_i915_obj->gtt_offset;

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non-secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		i915_gem_gtt_bind_object(target_i915_obj,
					 target_i915_obj->cache_level);
	}

	/* The target buffer should have appeared before us in the
	 * exec_object list, so it should have a GTT space bound by now.
	 */
	if (unlikely(target_offset == 0)) {
		DRM_DEBUG("No GTT space found for object %d\n",
			  reloc->target_handle);
		return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
		     reloc->write_domain != target_obj->pending_write_domain)) {
		DRM_DEBUG("Write domain conflict: "
			  "obj %p target %d offset %d "
			  "new %08x old %08x\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->write_domain,
			  target_obj->pending_write_domain);
		return ret;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset > obj->base.size - 4)) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	reloc->delta += target_offset;
	if (use_cpu_reloc(obj)) {
		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
		char *vaddr;

		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
		if (ret)
			return ret;

		vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
		kunmap_atomic(vaddr);
	} else {
		struct drm_i915_private *dev_priv = dev->dev_private;
		uint32_t __iomem *reloc_entry;
		void __iomem *reloc_page;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			return ret;

		ret = i915_gem_object_put_fence(obj);
		if (ret)
			return ret;

		/* Map the page containing the relocation we're going to perform. */
		reloc->offset += obj->gtt_offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      reloc->offset & PAGE_MASK);
		reloc_entry = (uint32_t __iomem *)
			(reloc_page + (reloc->offset & ~PAGE_MASK));
		iowrite32(reloc->delta, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);
	}

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
				    struct eb_objects *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int remain, ret;

	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
					 struct eb_objects *eb,
					 struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
			     struct eb_objects *eb,
			     struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. For in such a case, the page
	 * fault handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_execbuffer_relocate_object(obj, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

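/* Transient bookkeeping bits stored in the otherwise unused top bits of
 * the exec entry flags: they record what reserve_object() pinned so that
 * unreserve_object() can release exactly that much on error or between
 * passes of the reservation loop.
 */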
#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)

static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
{
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	return entry->relocation_count && !use_cpu_reloc(obj);
}

static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence, need_mappable;
	int ret;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	need_mappable = need_fence || need_reloc_mappable(obj);

	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (has_fenced_gpu_access) {
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			ret = i915_gem_object_get_fence(obj);
			if (ret)
				return ret;

			if (i915_gem_object_pin_fence(obj))
				entry->flags |= __EXEC_OBJECT_HAS_FENCE;

			obj->pending_fenced_gpu_access = true;
		}
	}

	/* Ensure ppgtt mapping exists if needed */
	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
				       obj, obj->cache_level);

		obj->has_aliasing_ppgtt_mapping = 1;
	}

	entry->offset = obj->gtt_offset;
	return 0;
}

static void
i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
	struct drm_i915_gem_exec_object2 *entry;

	if (!obj->gtt_space)
		return;

	entry = obj->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		i915_gem_object_unpin(obj);

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct drm_file *file,
			    struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	struct list_head ordered_objects;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	INIT_LIST_HEAD(&ordered_objects);
	while (!list_empty(objects)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		entry = obj->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(obj);

		if (need_mappable)
			list_move(&obj->exec_list, &ordered_objects);
		else
			list_move_tail(&obj->exec_list, &ordered_objects);

		obj->base.pending_read_domains = 0;
		obj->base.pending_write_domain = 0;
		obj->pending_fenced_gpu_access = false;
	}
	list_splice(&ordered_objects, objects);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(obj, objects, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
			bool need_fence, need_mappable;

			if (!obj->gtt_space)
				continue;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable = need_fence || need_reloc_mappable(obj);

			if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_gem_object_unbind(obj);
			else
				ret = i915_gem_execbuffer_reserve_object(obj, ring);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(obj, objects, exec_list) {
			if (obj->gtt_space)
				continue;

			ret = i915_gem_execbuffer_reserve_object(obj, ring);
			if (ret)
				goto err;
		}

err:		/* Decrement pin count for bound objects */
		list_for_each_entry(obj, objects, exec_list)
			i915_gem_execbuffer_unreserve_object(obj);

		if (ret != -ENOSPC || retry++)
			return ret;

		ret = i915_gem_evict_everything(ring->dev);
		if (ret)
			return ret;
	} while (1);
}

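/* Relocation slow path: drop the struct mutex so that page faults are
 * allowed, copy all relocation entries from userspace into a kernel
 * buffer, then retake the lock, look the objects up again and apply the
 * relocations from that copy. This keeps the fast path free to run with
 * pagefaults disabled.
 */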
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct list_head *objects,
				  struct eb_objects *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  int count)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct drm_i915_gem_object *obj;
	int *reloc_offset;
	int i, total, ret;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(objects)) {
		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;

		user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	for (i = 0; i < count; i++) {
		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
							exec[i].handle));
		if (&obj->base == NULL) {
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		list_add_tail(&obj->exec_list, objects);
		obj->exec_handle = exec[i].handle;
		obj->exec_entry = &exec[i];
		eb_add_object(eb, obj);
	}

	ret = i915_gem_execbuffer_reserve(ring, file, objects);
	if (ret)
		goto err;

	list_for_each_entry(obj, objects, exec_list) {
		int offset = obj->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
							       reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as they are; this is the painfully slow
	 * path, and we want to avoid the complication of dropping the lock
	 * whilst having buffers reserved in the aperture and so causing
	 * spurious ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
	u32 plane, flip_mask;
	int ret;

	/* Check for any pending flips. As we only maintain a flip queue depth
	 * of 1, we can simply insert a WAIT for the next display flip prior
	 * to executing the batch and avoid stalling the CPU.
	 */

	for (plane = 0; flips >> plane; plane++) {
		if (((flips >> plane) & 1) == 0)
			continue;

		if (plane)
			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
		else
			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;

		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
	}

	return 0;
}

static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	uint32_t flush_domains = 0;
	uint32_t flips = 0;
	int ret;

	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			i915_gem_clflush_object(obj);

		if (obj->base.pending_write_domain)
			flips |= atomic_read(&obj->pending_flip);

		flush_domains |= obj->base.write_domain;
	}

	if (flips) {
		ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
		if (ret)
			return ret;
	}

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		intel_gtt_chipset_flush();

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

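/* Validate the user-supplied relocation lists up front: reject counts
 * that would overflow the later allocations and check that the arrays
 * are readable and writable, so the atomic copies in the relocation
 * fast path cannot trip over a malformed pointer.
 */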
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;

	for (i = 0; i < count; i++) {
		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
		int length; /* limited by fault_in_pages_readable() */

		/* First check for malicious input causing overflow */
		if (exec[i].relocation_count >
		    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
			return -EINVAL;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		if (!access_ok(VERIFY_READ, ptr, length))
			return -EFAULT;

		/* we may also need to update the presumed offsets */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (fault_in_multipages_readable(ptr, length))
			return -EFAULT;
	}

	return 0;
}

static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
				   struct intel_ring_buffer *ring,
				   u32 seqno)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, objects, exec_list) {
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.read_domains = obj->base.pending_read_domains;
		obj->base.write_domain = obj->base.pending_write_domain;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		i915_gem_object_move_to_active(obj, ring, seqno);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = seqno;
			if (obj->pin_count) /* check for potential scanout */
				intel_mark_fb_busy(obj);
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)i915_add_request(ring, file, NULL);
}

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
		return 0;

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

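/* Common implementation of both execbuffer ioctls: validate the request,
 * look up every object handle, reserve GTT space for the whole set, apply
 * relocations (falling back to the slow path on a fault), flush caches,
 * dispatch the batch on the selected ring and move the objects to the
 * active list so retirement can track them.
 */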
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head objects;
	struct eb_objects *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 exec_start, exec_len;
	u32 seqno;
	u32 mask;
	int ret, mode, i;

	if (!i915_gem_check_execbuffer(args)) {
		DRM_DEBUG("execbuf with invalid offset/length\n");
		return -EINVAL;
	}

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->ring[RCS];
		break;
	case I915_EXEC_BSD:
		ring = &dev_priv->ring[VCS];
		if (ctx_id != 0) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_BLT:
		ring = &dev_priv->ring[BCS];
		if (ctx_id != 0) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}
	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	mask = I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4)
				return -EINVAL;

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
				return -EINVAL;

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				   (struct drm_clip_rect __user *)(uintptr_t)
				   args->cliprects_ptr,
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->mm.suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	eb = eb_create(args->buffer_count);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	INIT_LIST_HEAD(&objects);
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_i915_gem_object *obj;

		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
							exec[i].handle));
		if (&obj->base == NULL) {
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			/* prevent error path from reading uninitialized data */
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->exec_list)) {
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				  obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		list_add_tail(&obj->exec_list, &objects);
		obj->exec_handle = exec[i].handle;
		obj->exec_entry = &exec[i];
		eb_add_object(eb, obj);
	}

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(objects.prev,
			       struct drm_i915_gem_object,
			       exec_list);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	ret = i915_gem_execbuffer_reserve(ring, file, &objects);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
								&objects, eb,
								exec,
								args->buffer_count);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
	if (ret)
		goto err;

	seqno = i915_gem_next_request_seqno(ring);
	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
		if (seqno < ring->sync_seqno[i]) {
			/* The GPU can not handle its semaphore value wrapping,
			 * so every billion or so execbuffers, we need to stall
			 * the GPU in order to reset the counters.
			 */
			ret = i915_gpu_idle(dev);
			if (ret)
				goto err;
			i915_gem_retire_requests(dev);

			BUG_ON(ring->sync_seqno[i]);
		}
	}

	ret = i915_switch_context(ring, file, ctx_id);
	if (ret)
		goto err;

	if (ring == &dev_priv->ring[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, mask << 16 | mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}

	trace_i915_gem_ring_dispatch(ring, seqno);

	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
		if (ret)
			goto err;
	}

	i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
	i915_gem_execbuffer_retire_commands(dev, file, ring);

err:
	eb_destroy(eb);
	while (!list_empty(&objects)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}