end - start);
dev_priv->mm.gtt_total = end - start;
+ dev_priv->mm.gtt_mappable_end = end;
return 0;
}
char *vaddr;
int ret;
- vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+ vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
- kunmap_atomic(vaddr, KM_USER0);
+ kunmap_atomic(vaddr);
return ret;
}
struct drm_device *dev = obj->dev;
ret = i915_gem_evict_something(dev, obj->size,
- i915_gem_get_gtt_alignment(obj));
+ i915_gem_get_gtt_alignment(obj),
+ false);
if (ret)
return ret;
char *vaddr_atomic;
unsigned long unwritten;
- vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
+ vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
user_data, length);
- io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
+ io_mapping_unmap_atomic(vaddr_atomic);
return unwritten;
}
char *vaddr;
int ret;
- vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+ vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
- kunmap_atomic(vaddr, KM_USER0);
+ kunmap_atomic(vaddr);
return ret;
}
/* Maintain LRU order of "inactive" objects */
if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
- list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
drm_gem_object_unreference(obj);
unlock:
}
if (i915_gem_object_is_inactive(obj_priv))
- list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
page_offset;
struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
}
/* Move from whatever list we were on to the tail of execution. */
- list_move_tail(&obj_priv->list, &ring->active_list);
+ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
+ list_move_tail(&obj_priv->ring_list, &ring->active_list);
obj_priv->last_rendering_seqno = seqno;
}
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
BUG_ON(!obj_priv->active);
- list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
+ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
+ list_del_init(&obj_priv->ring_list);
obj_priv->last_rendering_seqno = 0;
}
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (obj_priv->pin_count != 0)
- list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list);
+ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
else
- list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+ list_del_init(&obj_priv->ring_list);
BUG_ON(!list_empty(&obj_priv->gpu_write_list));
struct drm_i915_gem_object *obj_priv, *next;
list_for_each_entry_safe(obj_priv, next,
- &dev_priv->mm.gpu_write_list,
+ &ring->gpu_write_list,
gpu_write_list) {
struct drm_gem_object *obj = &obj_priv->base;
- if (obj->write_domain & flush_domains &&
- obj_priv->ring == ring) {
+ if (obj->write_domain & flush_domains) {
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = 0;
}
}
-uint32_t
+int
i915_add_request(struct drm_device *dev,
struct drm_file *file,
struct drm_i915_gem_request *request,
struct drm_i915_file_private *file_priv = NULL;
uint32_t seqno;
int was_empty;
+ int ret;
+
+ BUG_ON(request == NULL);
if (file != NULL)
file_priv = file->driver_priv;
- if (request == NULL) {
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return 0;
- }
- seqno = ring->add_request(dev, ring, 0);
+ ret = ring->add_request(ring, &seqno);
+ if (ret)
+ return ret;
ring->outstanding_lazy_request = false;
request->seqno = seqno;
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work, HZ);
}
- return seqno;
+ return 0;
}
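
With this change, i915_add_request() no longer allocates the request itself and reports failure through its return value, so callers are expected to pass in a pre-allocated request and handle the error. A minimal sketch of the new calling convention, mirroring the pattern used later in this patch (dev, ring and the surrounding error handling belong to the caller):

	struct drm_i915_gem_request *request;
	int ret;

	/* the caller now owns the request allocation */
	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL)
		return -ENOMEM;

	ret = i915_add_request(dev, NULL, request, ring);
	if (ret) {
		kfree(request);	/* nothing consumed the request on failure */
		return ret;
	}
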
/**
if (INTEL_INFO(dev)->gen >= 4)
flush_domains |= I915_GEM_DOMAIN_SAMPLER;
- ring->flush(dev, ring,
- I915_GEM_DOMAIN_COMMAND, flush_domains);
+ ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains);
}
static inline void
obj_priv = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
- list);
+ ring_list);
obj_priv->base.write_domain = 0;
list_del_init(&obj_priv->gpu_write_list);
int i;
i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
- if (HAS_BSD(dev))
- i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
+ i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
+ i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
/* Remove anything from the flushing lists. The GPU cache is likely
* to be lost on reset along with the data, so simply move the
while (!list_empty(&dev_priv->mm.flushing_list)) {
obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
struct drm_i915_gem_object,
- list);
+ mm_list);
obj_priv->base.write_domain = 0;
list_del_init(&obj_priv->gpu_write_list);
*/
list_for_each_entry(obj_priv,
&dev_priv->mm.inactive_list,
- list)
+ mm_list)
{
obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
}
WARN_ON(i915_verify_lists(dev));
- seqno = ring->get_seqno(dev, ring);
+ seqno = ring->get_seqno(ring);
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
obj_priv = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
- list);
+ ring_list);
if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
break;
if (unlikely (dev_priv->trace_irq_seqno &&
i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
- ring->user_irq_put(dev, ring);
+ ring->user_irq_put(ring);
dev_priv->trace_irq_seqno = 0;
}
*/
list_for_each_entry_safe(obj_priv, tmp,
&dev_priv->mm.deferred_free_list,
- list)
+ mm_list)
i915_gem_free_object_tail(&obj_priv->base);
}
i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
- if (HAS_BSD(dev))
- i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
+ i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
+ i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
}
static void
if (!dev_priv->mm.suspended &&
(!list_empty(&dev_priv->render_ring.request_list) ||
- (HAS_BSD(dev) &&
- !list_empty(&dev_priv->bsd_ring.request_list))))
+ !list_empty(&dev_priv->bsd_ring.request_list) ||
+ !list_empty(&dev_priv->blt_ring.request_list)))
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
mutex_unlock(&dev->struct_mutex);
}
return -EAGAIN;
if (ring->outstanding_lazy_request) {
- seqno = i915_add_request(dev, NULL, NULL, ring);
- if (seqno == 0)
+ struct drm_i915_gem_request *request;
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
return -ENOMEM;
+
+ ret = i915_add_request(dev, NULL, request, ring);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+
+ seqno = request->seqno;
}
BUG_ON(seqno == dev_priv->next_seqno);
- if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+ if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
if (HAS_PCH_SPLIT(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
trace_i915_gem_request_wait_begin(dev, seqno);
- ring->waiting_gem_seqno = seqno;
- ring->user_irq_get(dev, ring);
+ ring->waiting_seqno = seqno;
+ ring->user_irq_get(ring);
if (interruptible)
ret = wait_event_interruptible(ring->irq_queue,
- i915_seqno_passed(
- ring->get_seqno(dev, ring), seqno)
+ i915_seqno_passed(ring->get_seqno(ring), seqno)
|| atomic_read(&dev_priv->mm.wedged));
else
wait_event(ring->irq_queue,
- i915_seqno_passed(
- ring->get_seqno(dev, ring), seqno)
+ i915_seqno_passed(ring->get_seqno(ring), seqno)
|| atomic_read(&dev_priv->mm.wedged));
- ring->user_irq_put(dev, ring);
- ring->waiting_gem_seqno = 0;
+ ring->user_irq_put(ring);
+ ring->waiting_seqno = 0;
trace_i915_gem_request_wait_end(dev, seqno);
}
if (ret && ret != -ERESTARTSYS)
DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
- __func__, ret, seqno, ring->get_seqno(dev, ring),
+ __func__, ret, seqno, ring->get_seqno(ring),
dev_priv->next_seqno);
/* Directly dispatch request retiring. While we have the work queue
uint32_t invalidate_domains,
uint32_t flush_domains)
{
- ring->flush(dev, ring, invalidate_domains, flush_domains);
+ ring->flush(ring, invalidate_domains, flush_domains);
i915_gem_process_flushing_list(dev, flush_domains, ring);
}
i915_gem_flush_ring(dev, file_priv,
&dev_priv->bsd_ring,
invalidate_domains, flush_domains);
+ if (flush_rings & RING_BLT)
+ i915_gem_flush_ring(dev, file_priv,
+ &dev_priv->blt_ring,
+ invalidate_domains, flush_domains);
}
}
BUG_ON(obj_priv->pages_refcount);
i915_gem_info_remove_gtt(dev_priv, obj->size);
- list_del_init(&obj_priv->list);
+ list_del_init(&obj_priv->mm_list);
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
static int i915_ring_idle(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
+ if (list_empty(&ring->gpu_write_list))
+ return 0;
+
i915_gem_flush_ring(dev, NULL, ring,
I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
return i915_wait_request(dev,
lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->render_ring.active_list) &&
- (!HAS_BSD(dev) ||
- list_empty(&dev_priv->bsd_ring.active_list)));
+ list_empty(&dev_priv->bsd_ring.active_list) &&
+ list_empty(&dev_priv->blt_ring.active_list));
if (lists_empty)
return 0;
if (ret)
return ret;
- if (HAS_BSD(dev)) {
- ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
- if (ret)
- return ret;
- }
+ ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
+ if (ret)
+ return ret;
+
+ ret = i915_ring_idle(dev, &dev_priv->blt_ring);
+ if (ret)
+ return ret;
return 0;
}
/* If the gtt is empty and we're still having trouble
* fitting our object in, we're out of memory.
*/
- ret = i915_gem_evict_something(dev, obj->size, alignment);
+ ret = i915_gem_evict_something(dev, obj->size, alignment, true);
if (ret)
return ret;
if (ret == -ENOMEM) {
/* first try to clear up some space from the GTT */
ret = i915_gem_evict_something(dev, obj->size,
- alignment);
+ alignment, true);
if (ret) {
/* now try to shrink everyone else */
if (gfpmask) {
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
- ret = i915_gem_evict_something(dev, obj->size, alignment);
+ ret = i915_gem_evict_something(dev, obj->size, alignment, true);
if (ret)
return ret;
}
/* keep track of the bound object by adding it to the inactive list */
- list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
i915_gem_info_add_gtt(dev_priv, obj->size);
/* Assert that the object is not currently in any GPU domain. As it
* drm_agp_chipset_flush
*/
static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+ struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
- uint32_t old_read_domains;
-
- intel_mark_busy(dev, obj);
/*
* If the object isn't moving to a new write domain,
*/
if (obj->pending_write_domain == 0)
obj->pending_read_domains |= obj->read_domains;
- else
- obj_priv->dirty = 1;
/*
* Flush the current write domain if
if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);
- old_read_domains = obj->read_domains;
-
/* The actual obj->write_domain will be updated with
* pending_write_domain after we emit the accumulated flush for all
* of our domain changes in execbuffers (which clears objects'
*/
if (flush_domains == 0 && obj->pending_write_domain == 0)
obj->pending_write_domain = obj->write_domain;
- obj->read_domains = obj->pending_read_domains;
dev->invalidate_domains |= invalidate_domains;
dev->flush_domains |= flush_domains;
- if (obj_priv->ring)
+ if (flush_domains & I915_GEM_GPU_DOMAINS)
dev_priv->mm.flush_rings |= obj_priv->ring->id;
-
- trace_i915_gem_object_change_domain(obj,
- old_read_domains,
- obj->write_domain);
+ if (invalidate_domains & I915_GEM_GPU_DOMAINS)
+ dev_priv->mm.flush_rings |= ring->id;
}
/**
}
target_obj->pending_read_domains |= reloc.read_domains;
- target_obj->pending_write_domain = reloc.write_domain;
+ target_obj->pending_write_domain |= reloc.write_domain;
/* If the relocation already has the right value in it, no
* more work needs to be done.
uint32_t page_offset = reloc.offset & ~PAGE_MASK;
char *vaddr;
- vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT], KM_USER0);
+ vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
*(uint32_t *)(vaddr + page_offset) = reloc.delta;
- kunmap_atomic(vaddr, KM_USER0);
+ kunmap_atomic(vaddr);
} else {
uint32_t __iomem *reloc_entry;
void __iomem *reloc_page;
/* Map the page containing the relocation we're going to perform. */
reloc.offset += obj->gtt_offset;
reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- reloc.offset & PAGE_MASK,
- KM_USER0);
+ reloc.offset & PAGE_MASK);
reloc_entry = (uint32_t __iomem *)
(reloc_page + (reloc.offset & ~PAGE_MASK));
iowrite32(reloc.delta, reloc_entry);
- io_mapping_unmap_atomic(reloc_page, KM_USER0);
+ io_mapping_unmap_atomic(reloc_page);
+ }
+
+ /* and update the user's relocation entry */
+ reloc.presumed_offset = target_offset;
+ if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
+ &reloc.presumed_offset,
+ sizeof(reloc.presumed_offset))) {
+ ret = -EFAULT;
+ break;
}
}
return 0;
ret = 0;
- if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+ if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
/* And wait for the seqno passing without holding any locks and
* causing extra latency for others. This is safe as the irq
* generation is designed to be run atomically and so is
* lockless.
*/
- ring->user_irq_get(dev, ring);
+ ring->user_irq_get(ring);
ret = wait_event_interruptible(ring->irq_queue,
- i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
+ i915_seqno_passed(ring->get_seqno(ring), seqno)
|| atomic_read(&dev_priv->mm.wedged));
- ring->user_irq_put(dev, ring);
+ ring->user_irq_put(ring);
if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
ret = -EIO;
if (!access_ok(VERIFY_READ, ptr, length))
return -EFAULT;
+ /* we may also need to update the presumed offsets */
+ if (!access_ok(VERIFY_WRITE, ptr, length))
+ return -EFAULT;
+
if (fault_in_pages_readable(ptr, length))
return -EFAULT;
}
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object **object_list = NULL;
struct drm_gem_object *batch_obj;
- struct drm_i915_gem_object *obj_priv;
struct drm_clip_rect *cliprects = NULL;
struct drm_i915_gem_request *request = NULL;
int ret, i, flips;
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
(int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif
- if (args->flags & I915_EXEC_BSD) {
+ switch (args->flags & I915_EXEC_RING_MASK) {
+ case I915_EXEC_DEFAULT:
+ case I915_EXEC_RENDER:
+ ring = &dev_priv->render_ring;
+ break;
+ case I915_EXEC_BSD:
if (!HAS_BSD(dev)) {
- DRM_ERROR("execbuf with wrong flag\n");
+ DRM_ERROR("execbuf with invalid ring (BSD)\n");
return -EINVAL;
}
ring = &dev_priv->bsd_ring;
- } else {
- ring = &dev_priv->render_ring;
+ break;
+ case I915_EXEC_BLT:
+ if (!HAS_BLT(dev)) {
+ DRM_ERROR("execbuf with invalid ring (BLT)\n");
+ return -EINVAL;
+ }
+ ring = &dev_priv->blt_ring;
+ break;
+ default:
+ DRM_ERROR("execbuf with unknown ring: %d\n",
+ (int)(args->flags & I915_EXEC_RING_MASK));
+ return -EINVAL;
}
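
From userspace, the blitter ring is then selected through the execbuffer2 flags. A hypothetical usage sketch, assuming the standard libdrm drmIoctl() wrapper; fd, exec_objects, count and batch_len are placeholders and not part of this patch:

	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)exec_objects;
	execbuf.buffer_count = count;
	execbuf.batch_len = batch_len;
	execbuf.flags = I915_EXEC_BLT;

	/* the kernel rejects this with -EINVAL on hardware without a BLT ring */
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf))
		return -errno;
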
if (args->buffer_count < 1) {
/* Look up object handles */
for (i = 0; i < args->buffer_count; i++) {
+ struct drm_i915_gem_object *obj_priv;
+
object_list[i] = drm_gem_object_lookup(dev, file,
exec_list[i].handle);
if (object_list[i] == NULL) {
dev->invalidate_domains = 0;
dev->flush_domains = 0;
dev_priv->mm.flush_rings = 0;
-
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_gem_object *obj = object_list[i];
-
- /* Compute new gpu domains and update invalidate/flush */
- i915_gem_object_set_to_gpu_domain(obj);
- }
+ for (i = 0; i < args->buffer_count; i++)
+ i915_gem_object_set_to_gpu_domain(object_list[i], ring);
if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
dev_priv->mm.flush_rings);
}
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_gem_object *obj = object_list[i];
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- uint32_t old_write_domain = obj->write_domain;
-
- obj->write_domain = obj->pending_write_domain;
- if (obj->write_domain)
- list_move_tail(&obj_priv->gpu_write_list,
- &dev_priv->mm.gpu_write_list);
-
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
- }
-
#if WATCH_COHERENCY
for (i = 0; i < args->buffer_count; i++) {
i915_gem_object_check_coherency(object_list[i],
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring,
- MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(dev, ring, MI_NOOP);
- intel_ring_advance(dev, ring);
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ goto err;
+
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
}
}
/* Exec the batchbuffer */
- ret = ring->dispatch_gem_execbuffer(dev, ring, args,
- cliprects, exec_offset);
+ ret = ring->dispatch_execbuffer(ring, args, cliprects, exec_offset);
if (ret) {
DRM_ERROR("dispatch failed %d\n", ret);
goto err;
}
- /*
- * Ensure that the commands in the batch buffer are
- * finished before the interrupt fires
- */
- i915_retire_commands(dev, ring);
-
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
- obj_priv = to_intel_bo(obj);
+
+ obj->read_domains = obj->pending_read_domains;
+ obj->write_domain = obj->pending_write_domain;
i915_gem_object_move_to_active(obj, ring);
+ if (obj->write_domain) {
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ obj_priv->dirty = 1;
+ list_move_tail(&obj_priv->gpu_write_list,
+ &ring->gpu_write_list);
+ intel_mark_busy(dev, obj);
+ }
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ obj->write_domain);
}
- i915_add_request(dev, file, request, ring);
- request = NULL;
+ /*
+ * Ensure that the commands in the batch buffer are
+ * finished before the interrupt fires
+ */
+ i915_retire_commands(dev, ring);
+
+ if (i915_add_request(dev, file, request, ring))
+ ring->outstanding_lazy_request = true;
+ else
+ request = NULL;
err:
for (i = 0; i < args->buffer_count; i++) {
- if (object_list[i]) {
- obj_priv = to_intel_bo(object_list[i]);
- obj_priv->in_execbuffer = false;
- }
+ if (object_list[i] == NULL)
+ break;
+
+ to_intel_bo(object_list[i])->in_execbuffer = false;
drm_gem_object_unreference(object_list[i]);
}
if (obj_priv->pin_count == 1) {
i915_gem_info_add_pin(dev_priv, obj->size);
if (!obj_priv->active)
- list_move_tail(&obj_priv->list,
+ list_move_tail(&obj_priv->mm_list,
&dev_priv->mm.pinned_list);
}
*/
if (obj_priv->pin_count == 0) {
if (!obj_priv->active)
- list_move_tail(&obj_priv->list,
+ list_move_tail(&obj_priv->mm_list,
&dev_priv->mm.inactive_list);
i915_gem_info_remove_pin(dev_priv, obj->size);
}
obj->agp_type = AGP_USER_MEMORY;
obj->base.driver_private = NULL;
obj->fence_reg = I915_FENCE_REG_NONE;
- INIT_LIST_HEAD(&obj->list);
+ INIT_LIST_HEAD(&obj->mm_list);
+ INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->gpu_write_list);
obj->madv = I915_MADV_WILLNEED;
ret = i915_gem_object_unbind(obj);
if (ret == -ERESTARTSYS) {
- list_move(&obj_priv->list,
+ list_move(&obj_priv->mm_list,
&dev_priv->mm.deferred_free_list);
return;
}
mutex_lock(&dev->struct_mutex);
- if (dev_priv->mm.suspended ||
- (dev_priv->render_ring.gem_object == NULL) ||
- (HAS_BSD(dev) &&
- dev_priv->bsd_ring.gem_object == NULL)) {
+ if (dev_priv->mm.suspended) {
mutex_unlock(&dev->struct_mutex);
return 0;
}
goto cleanup_render_ring;
}
+ if (HAS_BLT(dev)) {
+ ret = intel_init_blt_ring_buffer(dev);
+ if (ret)
+ goto cleanup_bsd_ring;
+ }
+
dev_priv->next_seqno = 1;
return 0;
+cleanup_bsd_ring:
+ intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
cleanup_render_ring:
- intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+ intel_cleanup_ring_buffer(&dev_priv->render_ring);
cleanup_pipe_control:
if (HAS_PIPE_CONTROL(dev))
i915_gem_cleanup_pipe_control(dev);
{
drm_i915_private_t *dev_priv = dev->dev_private;
- intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
- if (HAS_BSD(dev))
- intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+ intel_cleanup_ring_buffer(&dev_priv->render_ring);
+ intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
+ intel_cleanup_ring_buffer(&dev_priv->blt_ring);
if (HAS_PIPE_CONTROL(dev))
i915_gem_cleanup_pipe_control(dev);
}
return ret;
}
+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
- BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
+ BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
+ BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
- BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
+ BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
+ BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
mutex_unlock(&dev->struct_mutex);
ret = drm_irq_install(dev);
DRM_ERROR("failed to idle hardware: %d\n", ret);
}
+static void
+init_ring_lists(struct intel_ring_buffer *ring)
+{
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ INIT_LIST_HEAD(&ring->gpu_write_list);
+}
+
void
i915_gem_load(struct drm_device *dev)
{
int i;
drm_i915_private_t *dev_priv = dev->dev_private;
+ INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
- INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
- INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
- INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
- if (HAS_BSD(dev)) {
- INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
- INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
- }
+ init_ring_lists(&dev_priv->render_ring);
+ init_ring_lists(&dev_priv->bsd_ring);
+ init_ring_lists(&dev_priv->blt_ring);
for (i = 0; i < 16; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
page_count = obj->size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
+ char *dst = kmap_atomic(obj_priv->pages[i]);
char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
memcpy(dst, src, PAGE_SIZE);
- kunmap_atomic(dst, KM_USER0);
+ kunmap_atomic(dst);
}
drm_clflush_pages(obj_priv->pages, page_count);
drm_agp_chipset_flush(dev);
page_count = obj->size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
+ char *src = kmap_atomic(obj_priv->pages[i]);
char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
memcpy(dst, src, PAGE_SIZE);
- kunmap_atomic(src, KM_USER0);
+ kunmap_atomic(src);
}
i915_gem_object_put_pages(obj);
int lists_empty;
lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list);
- if (HAS_BSD(dev))
- lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
+ list_empty(&dev_priv->render_ring.active_list) &&
+ list_empty(&dev_priv->bsd_ring.active_list) &&
+ list_empty(&dev_priv->blt_ring.active_list);
return !lists_empty;
}
if (mutex_trylock(&dev->struct_mutex)) {
list_for_each_entry(obj_priv,
&dev_priv->mm.inactive_list,
- list)
+ mm_list)
cnt++;
mutex_unlock(&dev->struct_mutex);
}
list_for_each_entry_safe(obj_priv, next_obj,
&dev_priv->mm.inactive_list,
- list) {
+ mm_list) {
if (i915_gem_object_is_purgeable(obj_priv)) {
i915_gem_object_unbind(&obj_priv->base);
if (--nr_to_scan <= 0)
list_for_each_entry_safe(obj_priv, next_obj,
&dev_priv->mm.inactive_list,
- list) {
+ mm_list) {
if (nr_to_scan > 0) {
i915_gem_object_unbind(&obj_priv->base);
nr_to_scan--;