drm/i915: vlv: handle only enabled pipestat interrupt events
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e3431e748ee2dec630398caaf93bc161a242f8e5..b0a244a5effa87fd0162839e1096950829a606d2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -476,7 +476,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
                mutex_unlock(&dev->struct_mutex);
 
-               if (likely(!i915_prefault_disable) && !prefaulted) {
+               if (likely(!i915.prefault_disable) && !prefaulted) {
                        ret = fault_in_multipages_writeable(user_data, remain);
                        /* Userspace is tricking us, but we've already clobbered
                         * its pages with the prefault and promised to write the
@@ -868,7 +868,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                       args->size))
                return -EFAULT;
 
-       if (likely(!i915_prefault_disable)) {
+       if (likely(!i915.prefault_disable)) {
                ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
                                                   args->size);
                if (ret)
@@ -1014,10 +1014,13 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                        struct timespec *timeout,
                        struct drm_i915_file_private *file_priv)
 {
-       drm_i915_private_t *dev_priv = ring->dev->dev_private;
+       struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       const bool irq_test_in_progress =
+               ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
        struct timespec before, now;
        DEFINE_WAIT(wait);
-       long timeout_jiffies;
+       unsigned long timeout_expire;
        int ret;
 
        WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
@@ -1025,9 +1028,9 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
        if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
                return 0;
 
-       timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
+       timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
 
-       if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
+       if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
                gen6_rps_boost(dev_priv);
                if (file_priv)
                        mod_delayed_work(dev_priv->wq,
@@ -1035,8 +1038,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                                         msecs_to_jiffies(100));
        }
 
-       if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
-           WARN_ON(!ring->irq_get(ring)))
+       if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
                return -ENODEV;
 
        /* Record current time in case interrupted by signal, or wedged */
@@ -1044,7 +1046,6 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
        getrawmonotonic(&before);
        for (;;) {
                struct timer_list timer;
-               unsigned long expire;
 
                prepare_to_wait(&ring->irq_queue, &wait,
                                interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
@@ -1070,23 +1071,22 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                        break;
                }
 
-               if (timeout_jiffies <= 0) {
+               if (timeout && time_after_eq(jiffies, timeout_expire)) {
                        ret = -ETIME;
                        break;
                }
 
                timer.function = NULL;
                if (timeout || missed_irq(dev_priv, ring)) {
+                       unsigned long expire;
+
                        setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
-                       expire = jiffies + (missed_irq(dev_priv, ring) ? 1: timeout_jiffies);
+                       expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
                        mod_timer(&timer, expire);
                }
 
                io_schedule();
 
-               if (timeout)
-                       timeout_jiffies = expire - jiffies;
-
                if (timer.function) {
                        del_singleshot_timer_sync(&timer);
                        destroy_timer_on_stack(&timer);
@@ -1095,7 +1095,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
        getrawmonotonic(&now);
        trace_i915_gem_request_wait_end(ring, seqno);
 
-       ring->irq_put(ring);
+       if (!irq_test_in_progress)
+               ring->irq_put(ring);
 
        finish_wait(&ring->irq_queue, &wait);
 
@@ -1380,6 +1381,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        int ret = 0;
        bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
 
+       intel_runtime_pm_get(dev_priv);
+
        /* We don't use vmf->pgoff since that has the fake offset */
        page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
                PAGE_SHIFT;
@@ -1427,8 +1430,10 @@ out:
                /* If this -EIO is due to a gpu hang, give the reset code a
                 * chance to clean up the mess. Otherwise return the proper
                 * SIGBUS. */
-               if (i915_terminally_wedged(&dev_priv->gpu_error))
-                       return VM_FAULT_SIGBUS;
+               if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+                       ret = VM_FAULT_SIGBUS;
+                       break;
+               }
        case -EAGAIN:
                /*
                 * EAGAIN means the gpu is hung and we'll wait for the error
@@ -1443,15 +1448,39 @@ out:
                 * EBUSY is ok: this just means that another thread
                 * already did the job.
                 */
-               return VM_FAULT_NOPAGE;
+               ret = VM_FAULT_NOPAGE;
+               break;
        case -ENOMEM:
-               return VM_FAULT_OOM;
+               ret = VM_FAULT_OOM;
+               break;
        case -ENOSPC:
-               return VM_FAULT_SIGBUS;
+       case -EFAULT:
+               ret = VM_FAULT_SIGBUS;
+               break;
        default:
                WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
-               return VM_FAULT_SIGBUS;
+               ret = VM_FAULT_SIGBUS;
+               break;
        }
+
+       intel_runtime_pm_put(dev_priv);
+       return ret;
+}
+
+void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+       struct i915_vma *vma;
+
+       /*
+        * Only the global gtt is relevant for gtt memory mappings, so restrict
+        * list traversal to objects bound into the global address space. Note
+        * that the active list should be empty, but better safe than sorry.
+        */
+       WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
+       list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
+               i915_gem_release_mmap(vma->obj);
+       list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
+               i915_gem_release_mmap(vma->obj);
 }
 
 /**
@@ -1590,8 +1619,8 @@ i915_gem_mmap_gtt(struct drm_file *file,
        }
 
        if (obj->madv != I915_MADV_WILLNEED) {
-               DRM_ERROR("Attempting to mmap a purgeable buffer\n");
-               ret = -EINVAL;
+               DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
+               ret = -EFAULT;
                goto out;
        }
 
@@ -1944,8 +1973,8 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
                return 0;
 
        if (obj->madv != I915_MADV_WILLNEED) {
-               DRM_ERROR("Attempting to obtain a purgeable object\n");
-               return -EINVAL;
+               DRM_DEBUG("Attempting to obtain a purgeable object\n");
+               return -EFAULT;
        }
 
        BUG_ON(obj->pages_pin_count);
@@ -2214,148 +2243,116 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
        spin_unlock(&file_priv->mm.lock);
 }
 
-static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
-                                   struct i915_address_space *vm)
+static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
+                                  const struct i915_hw_context *ctx)
 {
-       if (acthd >= i915_gem_obj_offset(obj, vm) &&
-           acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
-               return true;
+       unsigned long elapsed;
 
-       return false;
-}
+       elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
 
-static bool i915_head_inside_request(const u32 acthd_unmasked,
-                                    const u32 request_start,
-                                    const u32 request_end)
-{
-       const u32 acthd = acthd_unmasked & HEAD_ADDR;
+       if (ctx->hang_stats.banned)
+               return true;
 
-       if (request_start < request_end) {
-               if (acthd >= request_start && acthd < request_end)
-                       return true;
-       } else if (request_start > request_end) {
-               if (acthd >= request_start || acthd < request_end)
-                       return true;
+       if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
+               if (dev_priv->gpu_error.stop_rings == 0 &&
+                   i915_gem_context_is_default(ctx)) {
+                       DRM_ERROR("gpu hanging too fast, banning!\n");
+               } else {
+                       DRM_DEBUG("context hanging too fast, banning!\n");
+               }
+
+               return true;
        }
 
        return false;
 }
 
-static struct i915_address_space *
-request_to_vm(struct drm_i915_gem_request *request)
+static void i915_set_reset_status(struct drm_i915_private *dev_priv,
+                                 struct i915_hw_context *ctx,
+                                 const bool guilty)
 {
-       struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
-       struct i915_address_space *vm;
+       struct i915_ctx_hang_stats *hs;
+
+       if (WARN_ON(!ctx))
+               return;
 
-       vm = &dev_priv->gtt.base;
+       hs = &ctx->hang_stats;
 
-       return vm;
+       if (guilty) {
+               hs->banned = i915_context_is_banned(dev_priv, ctx);
+               hs->batch_active++;
+               hs->guilty_ts = get_seconds();
+       } else {
+               hs->batch_pending++;
+       }
 }
 
-static bool i915_request_guilty(struct drm_i915_gem_request *request,
-                               const u32 acthd, bool *inside)
+static void i915_gem_free_request(struct drm_i915_gem_request *request)
 {
-       /* There is a possibility that unmasked head address
-        * pointing inside the ring, matches the batch_obj address range.
-        * However this is extremely unlikely.
-        */
-       if (request->batch_obj) {
-               if (i915_head_inside_object(acthd, request->batch_obj,
-                                           request_to_vm(request))) {
-                       *inside = true;
-                       return true;
-               }
-       }
+       list_del(&request->list);
+       i915_gem_request_remove_from_client(request);
 
-       if (i915_head_inside_request(acthd, request->head, request->tail)) {
-               *inside = false;
-               return true;
-       }
+       if (request->ctx)
+               i915_gem_context_unreference(request->ctx);
 
-       return false;
+       kfree(request);
 }
 
-static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
+static struct drm_i915_gem_request *
+i915_gem_find_first_non_complete(struct intel_ring_buffer *ring)
 {
-       const unsigned long elapsed = get_seconds() - hs->guilty_ts;
+       struct drm_i915_gem_request *request;
+       const u32 completed_seqno = ring->get_seqno(ring, false);
 
-       if (hs->banned)
-               return true;
+       list_for_each_entry(request, &ring->request_list, list) {
+               if (i915_seqno_passed(completed_seqno, request->seqno))
+                       continue;
 
-       if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
-               DRM_ERROR("context hanging too fast, declaring banned!\n");
-               return true;
+               return request;
        }
 
-       return false;
+       return NULL;
 }
 
-static void i915_set_reset_status(struct intel_ring_buffer *ring,
-                                 struct drm_i915_gem_request *request,
-                                 u32 acthd)
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
+                                      struct intel_ring_buffer *ring)
 {
-       struct i915_ctx_hang_stats *hs = NULL;
-       bool inside, guilty;
-       unsigned long offset = 0;
+       struct drm_i915_gem_request *request;
+       bool ring_hung;
 
-       /* Innocent until proven guilty */
-       guilty = false;
+       request = i915_gem_find_first_non_complete(ring);
 
-       if (request->batch_obj)
-               offset = i915_gem_obj_offset(request->batch_obj,
-                                            request_to_vm(request));
+       if (request == NULL)
+               return;
 
-       if (ring->hangcheck.action != HANGCHECK_WAIT &&
-           i915_request_guilty(request, acthd, &inside)) {
-               DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
-                         ring->name,
-                         inside ? "inside" : "flushing",
-                         offset,
-                         request->ctx ? request->ctx->id : 0,
-                         acthd);
+       ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
 
-               guilty = true;
-       }
+       i915_set_reset_status(dev_priv, request->ctx, ring_hung);
 
-       /* If contexts are disabled or this is the default context, use
-        * file_priv->reset_state
-        */
-       if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
-               hs = &request->ctx->hang_stats;
-       else if (request->file_priv)
-               hs = &request->file_priv->hang_stats;
-
-       if (hs) {
-               if (guilty) {
-                       hs->banned = i915_context_is_banned(hs);
-                       hs->batch_active++;
-                       hs->guilty_ts = get_seconds();
-               } else {
-                       hs->batch_pending++;
-               }
-       }
+       list_for_each_entry_continue(request, &ring->request_list, list)
+               i915_set_reset_status(dev_priv, request->ctx, false);
 }
 
-static void i915_gem_free_request(struct drm_i915_gem_request *request)
+static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
+                                       struct intel_ring_buffer *ring)
 {
-       list_del(&request->list);
-       i915_gem_request_remove_from_client(request);
-
-       if (request->ctx)
-               i915_gem_context_unreference(request->ctx);
-
-       kfree(request);
-}
+       while (!list_empty(&ring->active_list)) {
+               struct drm_i915_gem_object *obj;
 
-static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
-                                     struct intel_ring_buffer *ring)
-{
-       u32 completed_seqno;
-       u32 acthd;
+               obj = list_first_entry(&ring->active_list,
+                                      struct drm_i915_gem_object,
+                                      ring_list);
 
-       acthd = intel_ring_get_active_head(ring);
-       completed_seqno = ring->get_seqno(ring, false);
+               i915_gem_object_move_to_inactive(obj);
+       }
 
+       /*
+        * We must free the requests after all the corresponding objects have
+        * been moved off active lists. Which is the same order as the normal
+        * retire_requests function does. This is important if object hold
+        * implicit references on things like e.g. ppgtt address spaces through
+        * the request.
+        */
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
 
@@ -2363,21 +2360,8 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
                                           struct drm_i915_gem_request,
                                           list);
 
-               if (request->seqno > completed_seqno)
-                       i915_set_reset_status(ring, request, acthd);
-
                i915_gem_free_request(request);
        }
-
-       while (!list_empty(&ring->active_list)) {
-               struct drm_i915_gem_object *obj;
-
-               obj = list_first_entry(&ring->active_list,
-                                      struct drm_i915_gem_object,
-                                      ring_list);
-
-               i915_gem_object_move_to_inactive(obj);
-       }
 }
 
 void i915_gem_restore_fences(struct drm_device *dev)
@@ -2407,8 +2391,16 @@ void i915_gem_reset(struct drm_device *dev)
        struct intel_ring_buffer *ring;
        int i;
 
+       /*
+        * Before we free the objects from the requests, we need to inspect
+        * them for finding the guilty party. As the requests only borrow
+        * their reference to the objects, the inspection must be done first.
+        */
+       for_each_ring(ring, dev_priv, i)
+               i915_gem_reset_ring_status(dev_priv, ring);
+
        for_each_ring(ring, dev_priv, i)
-               i915_gem_reset_ring_lists(dev_priv, ring);
+               i915_gem_reset_ring_cleanup(dev_priv, ring);
 
        i915_gem_cleanup_ringbuffer(dev);
 
@@ -2432,6 +2424,24 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 
        seqno = ring->get_seqno(ring, true);
 
+       /* Move any buffers on the active list that are no longer referenced
+        * by the ringbuffer to the flushing/inactive lists as appropriate,
+        * before we free the context associated with the requests.
+        */
+       while (!list_empty(&ring->active_list)) {
+               struct drm_i915_gem_object *obj;
+
+               obj = list_first_entry(&ring->active_list,
+                                     struct drm_i915_gem_object,
+                                     ring_list);
+
+               if (!i915_seqno_passed(seqno, obj->last_read_seqno))
+                       break;
+
+               i915_gem_object_move_to_inactive(obj);
+       }
+
+
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
 
@@ -2453,22 +2463,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
                i915_gem_free_request(request);
        }
 
-       /* Move any buffers on the active list that are no longer referenced
-        * by the ringbuffer to the flushing/inactive lists as appropriate.
-        */
-       while (!list_empty(&ring->active_list)) {
-               struct drm_i915_gem_object *obj;
-
-               obj = list_first_entry(&ring->active_list,
-                                     struct drm_i915_gem_object,
-                                     ring_list);
-
-               if (!i915_seqno_passed(seqno, obj->last_read_seqno))
-                       break;
-
-               i915_gem_object_move_to_inactive(obj);
-       }
-
        if (unlikely(ring->trace_irq_seqno &&
                     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
                ring->irq_put(ring);
@@ -2711,9 +2705,6 @@ int i915_vma_unbind(struct i915_vma *vma)
        drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
        int ret;
 
-       /* For now we only ever use 1 vma per object */
-       WARN_ON(!list_is_singular(&obj->vma_list));
-
        if (list_empty(&vma->vma_link))
                return 0;
 
@@ -2799,7 +2790,7 @@ int i915_gpu_idle(struct drm_device *dev)
 
        /* Flush everything onto the inactive list. */
        for_each_ring(ring, dev_priv, i) {
-               ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
+               ret = i915_switch_context(ring, NULL, ring->default_context);
                if (ret)
                        return ret;
 
@@ -3060,7 +3051,7 @@ i915_find_fence_reg(struct drm_device *dev)
        }
 
        if (avail == NULL)
-               return NULL;
+               goto deadlock;
 
        /* None available, try to steal one or wait for a user to finish */
        list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
@@ -3070,7 +3061,12 @@ i915_find_fence_reg(struct drm_device *dev)
                return reg;
        }
 
-       return NULL;
+deadlock:
+       /* Wait for completion of pending flips which consume fences */
+       if (intel_has_pending_fb_unpin(dev))
+               return ERR_PTR(-EAGAIN);
+
+       return ERR_PTR(-EDEADLK);
 }
 
 /**
@@ -3115,8 +3111,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
                }
        } else if (enable) {
                reg = i915_find_fence_reg(dev);
-               if (reg == NULL)
-                       return -EDEADLK;
+               if (IS_ERR(reg))
+                       return PTR_ERR(reg);
 
                if (reg->obj) {
                        struct drm_i915_gem_object *old = reg->obj;
@@ -3238,7 +3234,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
                alignment = map_and_fenceable ? fence_alignment :
                                                unfenced_alignment;
        if (map_and_fenceable && alignment & (fence_alignment - 1)) {
-               DRM_ERROR("Invalid object alignment requested %u\n", alignment);
+               DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
                return -EINVAL;
        }
 
@@ -3248,7 +3244,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
         * before evicting everything in a vain attempt to find space.
         */
        if (obj->base.size > gtt_max) {
-               DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+               DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
                          obj->base.size,
                          map_and_fenceable ? "mappable" : "total",
                          gtt_max);
@@ -3261,17 +3257,12 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 
        i915_gem_object_pin_pages(obj);
 
-       BUG_ON(!i915_is_ggtt(vm));
-
        vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_unpin;
        }
 
-       /* For now we only ever use 1 vma per object */
-       WARN_ON(!list_is_singular(&obj->vma_list));
-
 search_free:
        ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
                                                  size, alignment,
@@ -3912,6 +3903,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_object *obj;
        int ret;
 
+       if (INTEL_INFO(dev)->gen >= 6)
+               return -ENODEV;
+
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
@@ -3923,13 +3917,13 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        }
 
        if (obj->madv != I915_MADV_WILLNEED) {
-               DRM_ERROR("Attempting to pin a purgeable buffer\n");
-               ret = -EINVAL;
+               DRM_DEBUG("Attempting to pin a purgeable buffer\n");
+               ret = -EFAULT;
                goto out;
        }
 
        if (obj->pin_filp != NULL && obj->pin_filp != file) {
-               DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
+               DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
                ret = -EINVAL;
                goto out;
@@ -3976,7 +3970,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
        }
 
        if (obj->pin_filp != file) {
-               DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
+               DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
                ret = -EINVAL;
                goto out;
@@ -4168,14 +4162,13 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct i915_vma *vma, *next;
 
+       intel_runtime_pm_get(dev_priv);
+
        trace_i915_gem_object_destroy(obj);
 
        if (obj->phys_obj)
                i915_gem_detach_phys_object(dev, obj);
 
-       /* NB: 0 or 1 elements */
-       WARN_ON(!list_empty(&obj->vma_list) &&
-               !list_is_singular(&obj->vma_list));
        list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
                int ret;
 
@@ -4214,6 +4207,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
        kfree(obj->bit_17);
        i915_gem_object_free(obj);
+
+       intel_runtime_pm_put(dev_priv);
 }
 
 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
@@ -4412,15 +4407,20 @@ i915_gem_init_hw(struct drm_device *dev)
        if (dev_priv->ellc_size)
                I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
-       if (IS_HSW_GT3(dev))
-               I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
-       else
-               I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
+       if (IS_HASWELL(dev))
+               I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
+                          LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
        if (HAS_PCH_NOP(dev)) {
-               u32 temp = I915_READ(GEN7_MSG_CTL);
-               temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
-               I915_WRITE(GEN7_MSG_CTL, temp);
+               if (IS_IVYBRIDGE(dev)) {
+                       u32 temp = I915_READ(GEN7_MSG_CTL);
+                       temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
+                       I915_WRITE(GEN7_MSG_CTL, temp);
+               } else if (INTEL_INFO(dev)->gen >= 7) {
+                       u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
+                       temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
+                       I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
+               }
        }
 
        i915_gem_init_swizzling(dev);
@@ -4445,15 +4445,6 @@ i915_gem_init_hw(struct drm_device *dev)
                goto err_out;
        }
 
-       if (dev_priv->mm.aliasing_ppgtt) {
-               ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
-               if (ret) {
-                       i915_gem_cleanup_aliasing_ppgtt(dev);
-                       DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
-                       ret = 0;
-               }
-       }
-
        return 0;
 
 err_out:
@@ -4478,14 +4469,16 @@ int i915_gem_init(struct drm_device *dev)
        i915_gem_init_global_gtt(dev);
 
        ret = i915_gem_context_init(dev);
-       if (ret)
+       if (ret) {
+               mutex_unlock(&dev->struct_mutex);
                return ret;
+       }
 
        ret = i915_gem_init_hw(dev);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
+               WARN_ON(dev_priv->mm.aliasing_ppgtt);
                i915_gem_context_fini(dev);
-               i915_gem_cleanup_aliasing_ppgtt(dev);
                drm_mm_takedown(&dev_priv->gtt.base.mm);
                return ret;
        }
@@ -4581,14 +4574,16 @@ init_ring_lists(struct intel_ring_buffer *ring)
        INIT_LIST_HEAD(&ring->request_list);
 }
 
-static void i915_init_vm(struct drm_i915_private *dev_priv,
-                        struct i915_address_space *vm)
+void i915_init_vm(struct drm_i915_private *dev_priv,
+                 struct i915_address_space *vm)
 {
+       if (!i915_is_ggtt(vm))
+               drm_mm_init(&vm->mm, vm->start, vm->total);
        vm->dev = dev_priv->dev;
        INIT_LIST_HEAD(&vm->active_list);
        INIT_LIST_HEAD(&vm->inactive_list);
        INIT_LIST_HEAD(&vm->global_link);
-       list_add(&vm->global_link, &dev_priv->vm_list);
+       list_add_tail(&vm->global_link, &dev_priv->vm_list);
 }
 
 void