drm/i915: Clear the pending_gpu_fenced_access flag at the start of execbuffer
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 108e4c2b5ffa8581c8bd51de2397e137536671ae..3659d47a9f6e5585cc0c2a6352fc2630430a4e39 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -37,7 +37,6 @@
 #include <linux/pci.h>
 #include <linux/dma-buf.h>
 
-static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
@@ -96,9 +95,18 @@ i915_gem_wait_for_error(struct drm_device *dev)
        if (!atomic_read(&dev_priv->mm.wedged))
                return 0;
 
-       ret = wait_for_completion_interruptible(x);
-       if (ret)
+       /*
+        * Only wait 10 seconds for the gpu reset to complete to avoid hanging
+        * userspace. If it takes that long something really bad is going on and
+        * we should simply try to bail out and fail as gracefully as possible.
+        */
+       ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
+       if (ret == 0) {
+               DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
+               return -EIO;
+       } else if (ret < 0) {
                return ret;
+       }
 
        if (atomic_read(&dev_priv->mm.wedged)) {
                /* GPU is hung, bump the completion count to account for
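The error handling above relies on the three-way return convention of wait_for_completion_interruptible_timeout(): zero means the timeout expired, a negative value means the sleep was interrupted by a signal, and a positive value is the number of jiffies that were left. A minimal kernel-style sketch of that mapping, using a hypothetical wait_for_reset() helper rather than anything in this file:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Sketch only: maps the three possible results of
 * wait_for_completion_interruptible_timeout() onto the error handling
 * used in i915_gem_wait_for_error() above.
 */
static int wait_for_reset(struct completion *x)
{
        long ret = wait_for_completion_interruptible_timeout(x, 10 * HZ);

        if (ret == 0)           /* timeout expired: the reset never finished */
                return -EIO;
        if (ret < 0)            /* interrupted by a signal (-ERESTARTSYS) */
                return ret;
        return 0;               /* completed, with 'ret' jiffies to spare */
}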
@@ -1132,6 +1140,11 @@ unlock:
 out:
        switch (ret) {
        case -EIO:
+               /* If this -EIO is due to a gpu hang, give the reset code a
+                * chance to clean up the mess. Otherwise return the proper
+                * SIGBUS. */
+               if (!atomic_read(&dev_priv->mm.wedged))
+                       return VM_FAULT_SIGBUS;
        case -EAGAIN:
                /* Give the error handler a chance to run and move the
                 * objects off the GPU active list. Next time we service the
@@ -1427,7 +1440,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
        list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
        list_move_tail(&obj->ring_list, &ring->active_list);
 
-       obj->last_rendering_seqno = seqno;
+       obj->last_read_seqno = seqno;
 
        if (obj->fenced_gpu_access) {
                obj->last_fenced_seqno = seqno;
@@ -1443,26 +1456,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
        }
 }
 
-static void
-i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
-{
-       list_del_init(&obj->ring_list);
-       obj->last_rendering_seqno = 0;
-       obj->last_fenced_seqno = 0;
-}
-
-static void
-i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
-{
-       struct drm_device *dev = obj->base.dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-
-       BUG_ON(!obj->active);
-       list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
-
-       i915_gem_object_move_off_active(obj);
-}
-
 static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
@@ -1471,15 +1464,20 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 
        list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-       BUG_ON(!list_empty(&obj->gpu_write_list));
+       BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
        BUG_ON(!obj->active);
+
+       list_del_init(&obj->ring_list);
        obj->ring = NULL;
 
-       i915_gem_object_move_off_active(obj);
+       obj->last_read_seqno = 0;
+       obj->last_write_seqno = 0;
+       obj->base.write_domain = 0;
+
+       obj->last_fenced_seqno = 0;
        obj->fenced_gpu_access = false;
 
        obj->active = 0;
-       obj->pending_gpu_write = false;
        drm_gem_object_unreference(&obj->base);
 
        WARN_ON(i915_verify_lists(dev));
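Taken together, the two hunks above replace last_rendering_seqno with a single last_read_seqno and fold all of the retirement bookkeeping into move_to_inactive now that the flushing list is gone. A toy user-space model of that bookkeeping, with a simplified stand-in struct rather than the real drm_i915_gem_object:

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for the fields touched above. */
struct toy_obj {
        uint32_t last_read_seqno;
        uint32_t last_write_seqno;
        uint32_t last_fenced_seqno;
        uint32_t write_domain;
        bool     fenced_gpu_access;
        bool     active;
};

/* Activation records the request seqno the object was last read by. */
static void toy_move_to_active(struct toy_obj *obj, uint32_t seqno)
{
        obj->active = true;
        obj->last_read_seqno = seqno;
        if (obj->fenced_gpu_access)
                obj->last_fenced_seqno = seqno;
}

/* Retirement clears every seqno, the GPU write domain and the fence flag
 * in one place, instead of splitting the work across a flushing list.
 */
static void toy_move_to_inactive(struct toy_obj *obj)
{
        obj->last_read_seqno = 0;
        obj->last_write_seqno = 0;
        obj->last_fenced_seqno = 0;
        obj->write_domain = 0;
        obj->fenced_gpu_access = false;
        obj->active = false;
}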
@@ -1511,30 +1509,6 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
        return obj->madv == I915_MADV_DONTNEED;
 }
 
-static void
-i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
-                              uint32_t flush_domains)
-{
-       struct drm_i915_gem_object *obj, *next;
-
-       list_for_each_entry_safe(obj, next,
-                                &ring->gpu_write_list,
-                                gpu_write_list) {
-               if (obj->base.write_domain & flush_domains) {
-                       uint32_t old_write_domain = obj->base.write_domain;
-
-                       obj->base.write_domain = 0;
-                       list_del_init(&obj->gpu_write_list);
-                       i915_gem_object_move_to_active(obj, ring,
-                                                      i915_gem_next_request_seqno(ring));
-
-                       trace_i915_gem_object_change_domain(obj,
-                                                           obj->base.read_domains,
-                                                           old_write_domain);
-               }
-       }
-}
-
 static u32
 i915_gem_get_seqno(struct drm_device *dev)
 {
@@ -1568,7 +1542,27 @@ i915_add_request(struct intel_ring_buffer *ring,
        int was_empty;
        int ret;
 
-       BUG_ON(request == NULL);
+       /*
+        * Emit any outstanding flushes - execbuf can fail to emit the flush
+        * after having emitted the batchbuffer command. Hence we need to fix
+        * things up similar to emitting the lazy request. The difference here
+        * is that the flush _must_ happen before the next request, no matter
+        * what.
+        */
+       if (ring->gpu_caches_dirty) {
+               ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
+               if (ret)
+                       return ret;
+
+               ring->gpu_caches_dirty = false;
+       }
+
+       if (request == NULL) {
+               request = kmalloc(sizeof(*request), GFP_KERNEL);
+               if (request == NULL)
+                       return -ENOMEM;
+       }
+
        seqno = i915_gem_next_request_seqno(ring);
 
        /* Record the position of the start of the request so that
@@ -1579,8 +1573,10 @@ i915_add_request(struct intel_ring_buffer *ring,
        request_ring_position = intel_ring_get_tail(ring);
 
        ret = ring->add_request(ring, &seqno);
-       if (ret)
-           return ret;
+       if (ret) {
+               kfree(request);
+               return ret;
+       }
 
        trace_i915_gem_request_add(ring, seqno);
 
@@ -1590,6 +1586,7 @@ i915_add_request(struct intel_ring_buffer *ring,
        request->emitted_jiffies = jiffies;
        was_empty = list_empty(&ring->request_list);
        list_add_tail(&request->list, &ring->request_list);
+       request->file_priv = NULL;
 
        if (file) {
                struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -1613,6 +1610,7 @@ i915_add_request(struct intel_ring_buffer *ring,
                        queue_delayed_work(dev_priv->wq,
                                           &dev_priv->mm.retire_work, HZ);
        }
+
        return 0;
 }
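The reworked i915_add_request() above now does two extra jobs: it emits any cache flush that execbuffer deferred (tracked via ring->gpu_caches_dirty) before the request lands, and it allocates the request itself when the caller passes NULL. A stripped-down user-space sketch of that control flow; emit_flush() and emit_request() are hypothetical stand-ins for i915_gem_flush_ring() and ring->add_request():

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct toy_ring    { bool gpu_caches_dirty; };
struct toy_request { uint32_t seqno; };

/* Trivial stubs standing in for the real flush/request emission. */
static int emit_flush(struct toy_ring *ring)   { (void)ring; return 0; }
static int emit_request(struct toy_ring *ring, uint32_t *seqno)
{
        (void)ring;
        *seqno = 1;
        return 0;
}

static int toy_add_request(struct toy_ring *ring, struct toy_request *request)
{
        int ret;

        /* A flush execbuffer skipped must land before the next request. */
        if (ring->gpu_caches_dirty) {
                ret = emit_flush(ring);
                if (ret)
                        return ret;
                ring->gpu_caches_dirty = false;
        }

        /* Callers may now pass NULL and have the request allocated here. */
        if (request == NULL) {
                request = malloc(sizeof(*request));
                if (request == NULL)
                        return -ENOMEM;
        }

        ret = emit_request(ring, &request->seqno);
        if (ret) {
                free(request);  /* mirrors the kfree() on failure above */
                return ret;
        }

        /* A real implementation would queue the request on the ring here. */
        return 0;
}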
 
@@ -1654,8 +1652,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
                                       struct drm_i915_gem_object,
                                       ring_list);
 
-               obj->base.write_domain = 0;
-               list_del_init(&obj->gpu_write_list);
                i915_gem_object_move_to_inactive(obj);
        }
 }
@@ -1691,20 +1687,6 @@ void i915_gem_reset(struct drm_device *dev)
        for_each_ring(ring, dev_priv, i)
                i915_gem_reset_ring_lists(dev_priv, ring);
 
-       /* Remove anything from the flushing lists. The GPU cache is likely
-        * to be lost on reset along with the data, so simply move the
-        * lost bo to the inactive list.
-        */
-       while (!list_empty(&dev_priv->mm.flushing_list)) {
-               obj = list_first_entry(&dev_priv->mm.flushing_list,
-                                     struct drm_i915_gem_object,
-                                     mm_list);
-
-               obj->base.write_domain = 0;
-               list_del_init(&obj->gpu_write_list);
-               i915_gem_object_move_to_inactive(obj);
-       }
-
        /* Move everything out of the GPU domains to ensure we do any
         * necessary invalidation upon reuse.
         */
@@ -1772,13 +1754,10 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
                                      struct drm_i915_gem_object,
                                      ring_list);
 
-               if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
+               if (!i915_seqno_passed(seqno, obj->last_read_seqno))
                        break;
 
-               if (obj->base.write_domain != 0)
-                       i915_gem_object_move_to_flushing(obj);
-               else
-                       i915_gem_object_move_to_inactive(obj);
+               i915_gem_object_move_to_inactive(obj);
        }
 
        if (unlikely(ring->trace_irq_seqno &&
@@ -1827,17 +1806,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
         */
        idle = true;
        for_each_ring(ring, dev_priv, i) {
-               if (!list_empty(&ring->gpu_write_list)) {
-                       struct drm_i915_gem_request *request;
-                       int ret;
-
-                       ret = i915_gem_flush_ring(ring,
-                                                 0, I915_GEM_GPU_DOMAINS);
-                       request = kzalloc(sizeof(*request), GFP_KERNEL);
-                       if (ret || request == NULL ||
-                           i915_add_request(ring, NULL, request))
-                           kfree(request);
-               }
+               if (ring->gpu_caches_dirty)
+                       i915_add_request(ring, NULL, NULL);
 
                idle &= list_empty(&ring->request_list);
        }
@@ -1848,11 +1818,10 @@ i915_gem_retire_work_handler(struct work_struct *work)
        mutex_unlock(&dev->struct_mutex);
 }
 
-static int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv)
+int
+i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+                    bool interruptible)
 {
-       BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
-
        if (atomic_read(&dev_priv->mm.wedged)) {
                struct completion *x = &dev_priv->error_completion;
                bool recovery_complete;
@@ -1863,7 +1832,16 @@ i915_gem_check_wedge(struct drm_i915_private *dev_priv)
                recovery_complete = x->done > 0;
                spin_unlock_irqrestore(&x->wait.lock, flags);
 
-               return recovery_complete ? -EIO : -EAGAIN;
+               /* Non-interruptible callers can't handle -EAGAIN, hence return
+                * -EIO unconditionally for these. */
+               if (!interruptible)
+                       return -EIO;
+
+               /* Recovery complete, but still wedged means reset failure. */
+               if (recovery_complete)
+                       return -EIO;
+
+               return -EAGAIN;
        }
 
        return 0;
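The wedge check above is short, but the precedence of its three outcomes matters; a pure-logic restatement, using an illustrative toy_check_wedge() rather than the driver function:

#include <errno.h>
#include <stdbool.h>

static int toy_check_wedge(bool wedged, bool recovery_complete,
                           bool interruptible)
{
        if (!wedged)
                return 0;
        if (!interruptible)
                return -EIO;    /* caller cannot restart the call */
        if (recovery_complete)
                return -EIO;    /* reset already ran and the GPU is still dead */
        return -EAGAIN;         /* reset still in flight: back off and retry */
}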
@@ -1876,25 +1854,13 @@ i915_gem_check_wedge(struct drm_i915_private *dev_priv)
 static int
 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
 {
-       int ret = 0;
+       int ret;
 
        BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
 
-       if (seqno == ring->outstanding_lazy_request) {
-               struct drm_i915_gem_request *request;
-
-               request = kzalloc(sizeof(*request), GFP_KERNEL);
-               if (request == NULL)
-                       return -ENOMEM;
-
-               ret = i915_add_request(ring, NULL, request);
-               if (ret) {
-                       kfree(request);
-                       return ret;
-               }
-
-               BUG_ON(seqno != request->seqno);
-       }
+       ret = 0;
+       if (seqno == ring->outstanding_lazy_request)
+               ret = i915_add_request(ring, NULL, NULL);
 
        return ret;
 }
@@ -1917,6 +1883,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
        unsigned long timeout_jiffies;
        long end;
        bool wait_forever = true;
+       int ret;
 
        if (i915_seqno_passed(ring->get_seqno(ring), seqno))
                return 0;
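The early return above depends on i915_seqno_passed(), which is defined elsewhere (in i915_drv.h, not in this file). Assuming its usual wrap-safe form, a standalone illustration of why the comparison stays correct when the 32-bit seqno counter wraps:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "has seq1 reached seq2?" via signed subtraction. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
        printf("%d\n", seqno_passed(5, 3));             /* 1: 5 is at or past 3 */
        printf("%d\n", seqno_passed(3, 5));             /* 0: 3 has not reached 5 */
        printf("%d\n", seqno_passed(2, 0xfffffffeu));   /* 1: passed across the wrap */
        return 0;
}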
@@ -1948,8 +1915,9 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                        end = wait_event_timeout(ring->irq_queue, EXIT_COND,
                                                 timeout_jiffies);
 
-               if (atomic_read(&dev_priv->mm.wedged))
-                       end = -EAGAIN;
+               ret = i915_gem_check_wedge(dev_priv, interruptible);
+               if (ret)
+                       end = ret;
        } while (end == 0 && wait_forever);
 
        getrawmonotonic(&now);
@@ -1964,6 +1932,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
        }
 
        switch (end) {
+       case -EIO:
        case -EAGAIN: /* Wedged */
        case -ERESTARTSYS: /* Signal */
                return (int)end;
@@ -1989,7 +1958,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
 
        BUG_ON(seqno == 0);
 
-       ret = i915_gem_check_wedge(dev_priv);
+       ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
        if (ret)
                return ret;
 
@@ -2006,26 +1975,37 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
  */
-int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
+static __must_check int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+                              bool readonly)
 {
+       u32 seqno;
        int ret;
 
-       /* This function only exists to support waiting for existing rendering,
-        * not for emitting required flushes.
-        */
-       BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
-
        /* If there is rendering queued on the buffer being evicted, wait for
         * it.
         */
-       if (obj->active) {
-               ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
-               if (ret)
-                       return ret;
-               i915_gem_retire_requests_ring(obj->ring);
+       if (readonly)
+               seqno = obj->last_write_seqno;
+       else
+               seqno = obj->last_read_seqno;
+       if (seqno == 0)
+               return 0;
+
+       ret = i915_wait_seqno(obj->ring, seqno);
+       if (ret)
+               return ret;
+
+       /* Manually manage the write flush as we may have not yet retired
+        * the buffer.
+        */
+       if (obj->last_write_seqno &&
+           i915_seqno_passed(seqno, obj->last_write_seqno)) {
+               obj->last_write_seqno = 0;
+               obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
        }
 
+       i915_gem_retire_requests_ring(obj->ring);
        return 0;
 }
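The readonly parameter added to i915_gem_object_wait_rendering() above boils down to choosing which seqno to wait for: a read-only access only needs the last GPU write to finish, while a write must also wait for the last read. A tiny illustrative helper, with a simplified stand-in struct rather than the real object:

#include <stdbool.h>
#include <stdint.h>

struct toy_bo {
        uint32_t last_read_seqno;   /* most recent request that read the object */
        uint32_t last_write_seqno;  /* most recent request that wrote it */
};

/* Returns 0 when there is nothing to wait for, matching the
 * "seqno == 0" early return above. */
static uint32_t seqno_to_wait_for(const struct toy_bo *bo, bool readonly)
{
        return readonly ? bo->last_write_seqno : bo->last_read_seqno;
}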
 
@@ -2040,14 +2020,10 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
        int ret;
 
        if (obj->active) {
-               ret = i915_gem_object_flush_gpu_write_domain(obj);
+               ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
                if (ret)
                        return ret;
 
-               ret = i915_gem_check_olr(obj->ring,
-                                        obj->last_rendering_seqno);
-               if (ret)
-                       return ret;
                i915_gem_retire_requests_ring(obj->ring);
        }
 
@@ -2107,7 +2083,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                goto out;
 
        if (obj->active) {
-               seqno = obj->last_rendering_seqno;
+               seqno = obj->last_read_seqno;
                ring = obj->ring;
        }
 
@@ -2162,11 +2138,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
                return 0;
 
        if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
-               return i915_gem_object_wait_rendering(obj);
+               return i915_gem_object_wait_rendering(obj, false);
 
        idx = intel_ring_sync_index(from, to);
 
-       seqno = obj->last_rendering_seqno;
+       seqno = obj->last_read_seqno;
        if (seqno <= from->sync_seqno[idx])
                return 0;
 
@@ -2294,26 +2270,14 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
        if (ret)
                return ret;
 
-       if (flush_domains & I915_GEM_GPU_DOMAINS)
-               i915_gem_process_flushing_list(ring, flush_domains);
-
        return 0;
 }
 
 static int i915_ring_idle(struct intel_ring_buffer *ring)
 {
-       int ret;
-
-       if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
+       if (list_empty(&ring->active_list))
                return 0;
 
-       if (!list_empty(&ring->gpu_write_list)) {
-               ret = i915_gem_flush_ring(ring,
-                                   I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-               if (ret)
-                       return ret;
-       }
-
        return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
 }
 
@@ -2329,9 +2293,9 @@ int i915_gpu_idle(struct drm_device *dev)
                if (ret)
                        return ret;
 
-               /* Is the device fubar? */
-               if (WARN_ON(!list_empty(&ring->gpu_write_list)))
-                       return -EBUSY;
+               ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
+               if (ret)
+                       return ret;
        }
 
        return 0;
@@ -2504,21 +2468,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 static int
 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
 {
-       int ret;
-
-       if (obj->fenced_gpu_access) {
-               if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-                       ret = i915_gem_flush_ring(obj->ring,
-                                                 0, obj->base.write_domain);
-                       if (ret)
-                               return ret;
-               }
-
-               obj->fenced_gpu_access = false;
-       }
-
        if (obj->last_fenced_seqno) {
-               ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
+               int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
                if (ret)
                        return ret;
 
@@ -2531,6 +2482,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
        if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
                mb();
 
+       obj->fenced_gpu_access = false;
        return 0;
 }
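With the flushing list gone, the fence release path above reduces to waiting for the last fenced seqno, if any, and then forgetting it. A compact user-space sketch of that shape; toy_wait_seqno() is a stub standing in for i915_wait_seqno():

#include <stdbool.h>
#include <stdint.h>

struct toy_fence_obj {
        uint32_t last_fenced_seqno;
        bool     fenced_gpu_access;
};

/* Stub: the real code blocks until the seqno has been retired. */
static int toy_wait_seqno(uint32_t seqno) { (void)seqno; return 0; }

static int toy_flush_fence(struct toy_fence_obj *obj)
{
        if (obj->last_fenced_seqno) {
                int ret = toy_wait_seqno(obj->last_fenced_seqno);
                if (ret)
                        return ret;
                obj->last_fenced_seqno = 0;
        }

        obj->fenced_gpu_access = false;
        return 0;
}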
 
@@ -2705,8 +2657,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
        if (map_and_fenceable)
                free_space =
                        drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
-                                                   size, alignment, 0,
-                                                   dev_priv->mm.gtt_mappable_end,
+                                                   size, alignment,
+                                                   0, dev_priv->mm.gtt_mappable_end,
                                                    0);
        else
                free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
@@ -2717,7 +2669,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                        obj->gtt_space =
                                drm_mm_get_block_range_generic(free_space,
                                                               size, alignment, 0,
-                                                              dev_priv->mm.gtt_mappable_end,
+                                                              0, dev_priv->mm.gtt_mappable_end,
                                                               0);
                else
                        obj->gtt_space =
@@ -2825,17 +2777,6 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
        drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
 }
 
-/** Flushes any GPU write domain for the object if it's dirty. */
-static int
-i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
-{
-       if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
-               return 0;
-
-       /* Queue the GPU write cache flushing we need. */
-       return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
-}
-
 /** Flushes the GTT write domain for the object if it's dirty. */
 static void
 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
@@ -2902,16 +2843,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
        if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
                return 0;
 
-       ret = i915_gem_object_flush_gpu_write_domain(obj);
+       ret = i915_gem_object_wait_rendering(obj, !write);
        if (ret)
                return ret;
 
-       if (obj->pending_gpu_write || write) {
-               ret = i915_gem_object_wait_rendering(obj);
-               if (ret)
-                       return ret;
-       }
-
        i915_gem_object_flush_cpu_write_domain(obj);
 
        old_write_domain = obj->base.write_domain;
@@ -3018,10 +2953,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
        u32 old_read_domains, old_write_domain;
        int ret;
 
-       ret = i915_gem_object_flush_gpu_write_domain(obj);
-       if (ret)
-               return ret;
-
        if (pipelined != obj->ring) {
                ret = i915_gem_object_sync(obj, pipelined);
                if (ret)
@@ -3057,7 +2988,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
-       BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+       obj->base.write_domain = 0;
        obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
 
        trace_i915_gem_object_change_domain(obj,
@@ -3075,13 +3006,7 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
        if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
                return 0;
 
-       if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-               ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
-               if (ret)
-                       return ret;
-       }
-
-       ret = i915_gem_object_wait_rendering(obj);
+       ret = i915_gem_object_wait_rendering(obj, false);
        if (ret)
                return ret;
 
@@ -3105,16 +3030,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
        if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
                return 0;
 
-       ret = i915_gem_object_flush_gpu_write_domain(obj);
+       ret = i915_gem_object_wait_rendering(obj, !write);
        if (ret)
                return ret;
 
-       if (write || obj->pending_gpu_write) {
-               ret = i915_gem_object_wait_rendering(obj);
-               if (ret)
-                       return ret;
-       }
-
        i915_gem_object_flush_gtt_write_domain(obj);
 
        old_write_domain = obj->base.write_domain;
@@ -3356,6 +3275,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        ret = i915_gem_object_flush_active(obj);
 
        args->busy = obj->active;
+       if (obj->ring) {
+               BUILD_BUG_ON(I915_NUM_RINGS > 16);
+               args->busy |= intel_ring_flag(obj->ring) << 16;
+       }
 
        drm_gem_object_unreference(&obj->base);
 unlock:
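The busy ioctl hunk above starts reporting which ring an object is busy on by packing a ring flag into the upper 16 bits of args->busy, which is why the BUILD_BUG_ON caps I915_NUM_RINGS at 16. Assuming intel_ring_flag() is the usual one-hot 1 << ring->id encoding and the ring ids of this era (RCS = 0, VCS = 1, BCS = 2), a standalone illustration of the packing:

#include <stdint.h>
#include <stdio.h>

enum { RCS = 0, VCS = 1, BCS = 2 };     /* render, video, blitter (assumed ids) */

/* Low bit: object is active; upper 16 bits: one-hot flag of its ring. */
static uint32_t encode_busy(int active, int ring_id)
{
        uint32_t busy = active ? 1 : 0;

        if (active)
                busy |= (1u << ring_id) << 16;
        return busy;
}

int main(void)
{
        printf("0x%08x\n", encode_busy(1, BCS));        /* 0x00040001 */
        printf("0x%08x\n", encode_busy(0, RCS));        /* 0x00000000 */
        return 0;
}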
@@ -3473,7 +3396,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
        INIT_LIST_HEAD(&obj->gtt_list);
        INIT_LIST_HEAD(&obj->ring_list);
        INIT_LIST_HEAD(&obj->exec_list);
-       INIT_LIST_HEAD(&obj->gpu_write_list);
        obj->madv = I915_MADV_WILLNEED;
        /* Avoid an unnecessary call to unbind on the first bind. */
        obj->map_and_fenceable = true;
@@ -3683,12 +3605,31 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
        }
 }
 
+static bool
+intel_enable_blt(struct drm_device *dev)
+{
+       if (!HAS_BLT(dev))
+               return false;
+
+       /* The blitter was dysfunctional on early prototypes */
+       if (IS_GEN6(dev) && dev->pdev->revision < 8) {
+               DRM_INFO("BLT not supported on this pre-production hardware;"
+                        " graphics performance will be degraded.\n");
+               return false;
+       }
+
+       return true;
+}
+
 int
 i915_gem_init_hw(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
 
+       if (!intel_enable_gtt())
+               return -EIO;
+
        i915_gem_l3_remap(dev);
 
        i915_gem_init_swizzling(dev);
@@ -3703,7 +3644,7 @@ i915_gem_init_hw(struct drm_device *dev)
                        goto cleanup_render_ring;
        }
 
-       if (HAS_BLT(dev)) {
+       if (intel_enable_blt(dev)) {
                ret = intel_init_blt_ring_buffer(dev);
                if (ret)
                        goto cleanup_bsd_ring;
@@ -3711,6 +3652,11 @@ i915_gem_init_hw(struct drm_device *dev)
 
        dev_priv->next_seqno = 1;
 
+       /*
+        * XXX: There was some w/a described somewhere suggesting loading
+        * contexts before PPGTT.
+        */
+       i915_gem_context_init(dev);
        i915_gem_init_ppgtt(dev);
 
        return 0;
@@ -3823,7 +3769,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
        }
 
        BUG_ON(!list_empty(&dev_priv->mm.active_list));
-       BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
        BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
        mutex_unlock(&dev->struct_mutex);
 
@@ -3871,7 +3816,6 @@ init_ring_lists(struct intel_ring_buffer *ring)
 {
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
-       INIT_LIST_HEAD(&ring->gpu_write_list);
 }
 
 void
@@ -3881,7 +3825,6 @@ i915_gem_load(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
 
        INIT_LIST_HEAD(&dev_priv->mm.active_list);
-       INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
@@ -4132,12 +4075,7 @@ static int
 i915_gpu_is_active(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       int lists_empty;
-
-       lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-                     list_empty(&dev_priv->mm.active_list);
-
-       return !lists_empty;
+       return !list_empty(&dev_priv->mm.active_list);
 }
 
 static int