drm/i915: vlv: handle only enabled pipestat interrupt events
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 76d3d1ab73c6965063eba62527594dce82dc41d4..b0a244a5effa87fd0162839e1096950829a606d2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -204,7 +204,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
        pinned = 0;
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
-               if (obj->pin_count)
+               if (i915_gem_obj_is_pinned(obj))
                        pinned += i915_gem_obj_ggtt_size(obj);
        mutex_unlock(&dev->struct_mutex);
 
@@ -476,7 +476,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
                mutex_unlock(&dev->struct_mutex);
 
-               if (likely(!i915_prefault_disable) && !prefaulted) {
+               if (likely(!i915.prefault_disable) && !prefaulted) {
                        ret = fault_in_multipages_writeable(user_data, remain);
                        /* Userspace is tricking us, but we've already clobbered
                         * its pages with the prefault and promised to write the
@@ -651,7 +651,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
        }
 
 out_unpin:
-       i915_gem_object_unpin(obj);
+       i915_gem_object_ggtt_unpin(obj);
 out:
        return ret;
 }
@@ -868,7 +868,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                       args->size))
                return -EFAULT;
 
-       if (likely(!i915_prefault_disable)) {
+       if (likely(!i915.prefault_disable)) {
                ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
                                                   args->size);
                if (ret)
@@ -1014,10 +1014,13 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                        struct timespec *timeout,
                        struct drm_i915_file_private *file_priv)
 {
-       drm_i915_private_t *dev_priv = ring->dev->dev_private;
+       struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       const bool irq_test_in_progress =
+               ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
        struct timespec before, now;
        DEFINE_WAIT(wait);
-       long timeout_jiffies;
+       unsigned long timeout_expire;
        int ret;
 
        WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
@@ -1025,9 +1028,9 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
        if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
                return 0;
 
-       timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
+       timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
 
-       if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
+       if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
                gen6_rps_boost(dev_priv);
                if (file_priv)
                        mod_delayed_work(dev_priv->wq,
@@ -1035,8 +1038,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                                         msecs_to_jiffies(100));
        }
 
-       if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
-           WARN_ON(!ring->irq_get(ring)))
+       if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
                return -ENODEV;
 
        /* Record current time in case interrupted by signal, or wedged */
@@ -1044,7 +1046,6 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
        getrawmonotonic(&before);
        for (;;) {
                struct timer_list timer;
-               unsigned long expire;
 
                prepare_to_wait(&ring->irq_queue, &wait,
                                interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
@@ -1070,23 +1071,22 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                        break;
                }
 
-               if (timeout_jiffies <= 0) {
+               if (timeout && time_after_eq(jiffies, timeout_expire)) {
                        ret = -ETIME;
                        break;
                }
 
                timer.function = NULL;
                if (timeout || missed_irq(dev_priv, ring)) {
+                       unsigned long expire;
+
                        setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
-                       expire = jiffies + (missed_irq(dev_priv, ring) ? 1: timeout_jiffies);
+                       expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
                        mod_timer(&timer, expire);
                }
 
                io_schedule();
 
-               if (timeout)
-                       timeout_jiffies = expire - jiffies;
-
                if (timer.function) {
                        del_singleshot_timer_sync(&timer);
                        destroy_timer_on_stack(&timer);
@@ -1095,7 +1095,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
        getrawmonotonic(&now);
        trace_i915_gem_request_wait_end(ring, seqno);
 
-       ring->irq_put(ring);
+       if (!irq_test_in_progress)
+               ring->irq_put(ring);
 
        finish_wait(&ring->irq_queue, &wait);
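
The wait loop above switches from tracking remaining jiffies to computing one
absolute deadline up front and comparing against it with time_after_eq(); it
also samples gpu_error.test_irq_rings once (via ACCESS_ONCE) so that irq_get()
and irq_put() always pair up even if the debug mask changes mid-wait. A minimal
userspace sketch of the same absolute-deadline pattern (the names and the
polling loop are illustrative, not from the driver):

    #include <stdbool.h>
    #include <stdint.h>
    #include <time.h>

    /* Monotonic timestamp in nanoseconds. */
    static uint64_t now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    /* Wait for *done with a deadline computed once at entry, the same
     * shape as timeout_expire = jiffies + ... above. Re-checking a fixed
     * deadline avoids the drift of recomputing "time left" after every
     * spurious wakeup. */
    static bool wait_until_done(const volatile bool *done, uint64_t timeout_ns)
    {
        const uint64_t deadline = now_ns() + timeout_ns;

        while (!*done) {
            if (now_ns() >= deadline)
                return false;    /* the kernel loop returns -ETIME here */
        }
        return true;
    }
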
 
@@ -1380,6 +1381,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        int ret = 0;
        bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
 
+       intel_runtime_pm_get(dev_priv);
+
        /* We don't use vmf->pgoff since that has the fake offset */
        page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
                PAGE_SHIFT;
@@ -1418,7 +1421,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        /* Finally, remap it using the new GTT offset */
        ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
 unpin:
-       i915_gem_object_unpin(obj);
+       i915_gem_object_ggtt_unpin(obj);
 unlock:
        mutex_unlock(&dev->struct_mutex);
 out:
@@ -1427,8 +1430,10 @@ out:
                /* If this -EIO is due to a gpu hang, give the reset code a
                 * chance to clean up the mess. Otherwise return the proper
                 * SIGBUS. */
-               if (i915_terminally_wedged(&dev_priv->gpu_error))
-                       return VM_FAULT_SIGBUS;
+               if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+                       ret = VM_FAULT_SIGBUS;
+                       break;
+               }
        case -EAGAIN:
                /*
                 * EAGAIN means the gpu is hung and we'll wait for the error
@@ -1443,15 +1448,39 @@ out:
                 * EBUSY is ok: this just means that another thread
                 * already did the job.
                 */
-               return VM_FAULT_NOPAGE;
+               ret = VM_FAULT_NOPAGE;
+               break;
        case -ENOMEM:
-               return VM_FAULT_OOM;
+               ret = VM_FAULT_OOM;
+               break;
        case -ENOSPC:
-               return VM_FAULT_SIGBUS;
+       case -EFAULT:
+               ret = VM_FAULT_SIGBUS;
+               break;
        default:
                WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
-               return VM_FAULT_SIGBUS;
+               ret = VM_FAULT_SIGBUS;
+               break;
        }
+
+       intel_runtime_pm_put(dev_priv);
+       return ret;
+}
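
With runtime PM in the picture, the fault handler takes a PM reference at
entry and funnels every errno through one switch to a single exit, so the
matching put can never be skipped by an early return. A sketch of that
acquire/translate/release shape; pm_get()/pm_put() are stand-in stubs, and
the VM_FAULT_* values mirror the kernel's definitions of this era:

    #include <errno.h>

    #define VM_FAULT_OOM    0x0001
    #define VM_FAULT_SIGBUS 0x0002
    #define VM_FAULT_NOPAGE 0x0100

    static void pm_get(void) { /* stand-in for intel_runtime_pm_get() */ }
    static void pm_put(void) { /* stand-in for intel_runtime_pm_put() */ }

    /* Translate the errno from the handler body into a VM_FAULT_* code.
     * Every case breaks to the common tail, so the PM reference taken at
     * entry is always dropped; that is what the diff above achieves by
     * replacing the old early returns. */
    static int handle_fault(int err)
    {
        int ret;

        pm_get();
        switch (err) {
        case 0:
        case -EINTR:              /* signal: let the fault be retried */
            ret = VM_FAULT_NOPAGE;
            break;
        case -ENOMEM:
            ret = VM_FAULT_OOM;
            break;
        case -ENOSPC:
        case -EFAULT:
        default:
            ret = VM_FAULT_SIGBUS;
            break;
        }
        pm_put();
        return ret;
    }
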
+
+void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+       struct i915_vma *vma;
+
+       /*
+        * Only the global gtt is relevant for gtt memory mappings, so restrict
+        * list traversal to objects bound into the global address space. Note
+        * that the active list should be empty, but better safe than sorry.
+        */
+       WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
+       list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
+               i915_gem_release_mmap(vma->obj);
+       list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
+               i915_gem_release_mmap(vma->obj);
 }
 
 /**
@@ -1590,8 +1619,8 @@ i915_gem_mmap_gtt(struct drm_file *file,
        }
 
        if (obj->madv != I915_MADV_WILLNEED) {
-               DRM_ERROR("Attempting to mmap a purgeable buffer\n");
-               ret = -EINVAL;
+               DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
+               ret = -EFAULT;
                goto out;
        }
 
@@ -1944,8 +1973,8 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
                return 0;
 
        if (obj->madv != I915_MADV_WILLNEED) {
-               DRM_ERROR("Attempting to obtain a purgeable object\n");
-               return -EINVAL;
+               DRM_DEBUG("Attempting to obtain a purgeable object\n");
+               return -EFAULT;
        }
 
        BUG_ON(obj->pages_pin_count);
@@ -2008,13 +2037,17 @@ static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-       struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
-       struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
+       struct i915_address_space *vm;
+       struct i915_vma *vma;
 
        BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
        BUG_ON(!obj->active);
 
-       list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
+       list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+               vma = i915_gem_obj_to_vma(obj, vm);
+               if (vma && !list_empty(&vma->mm_list))
+                       list_move_tail(&vma->mm_list, &vm->inactive_list);
+       }
 
        list_del_init(&obj->ring_list);
        obj->ring = NULL;
@@ -2210,125 +2243,47 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
        spin_unlock(&file_priv->mm.lock);
 }
 
-static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
-                                   struct i915_address_space *vm)
+static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
+                                  const struct i915_hw_context *ctx)
 {
-       if (acthd >= i915_gem_obj_offset(obj, vm) &&
-           acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
-               return true;
-
-       return false;
-}
-
-static bool i915_head_inside_request(const u32 acthd_unmasked,
-                                    const u32 request_start,
-                                    const u32 request_end)
-{
-       const u32 acthd = acthd_unmasked & HEAD_ADDR;
-
-       if (request_start < request_end) {
-               if (acthd >= request_start && acthd < request_end)
-                       return true;
-       } else if (request_start > request_end) {
-               if (acthd >= request_start || acthd < request_end)
-                       return true;
-       }
-
-       return false;
-}
-
-static struct i915_address_space *
-request_to_vm(struct drm_i915_gem_request *request)
-{
-       struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
-       struct i915_address_space *vm;
-
-       vm = &dev_priv->gtt.base;
-
-       return vm;
-}
-
-static bool i915_request_guilty(struct drm_i915_gem_request *request,
-                               const u32 acthd, bool *inside)
-{
-       /* There is a possibility that unmasked head address
-        * pointing inside the ring, matches the batch_obj address range.
-        * However this is extremely unlikely.
-        */
-       if (request->batch_obj) {
-               if (i915_head_inside_object(acthd, request->batch_obj,
-                                           request_to_vm(request))) {
-                       *inside = true;
-                       return true;
-               }
-       }
+       unsigned long elapsed;
 
-       if (i915_head_inside_request(acthd, request->head, request->tail)) {
-               *inside = false;
-               return true;
-       }
+       elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
 
-       return false;
-}
-
-static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
-{
-       const unsigned long elapsed = get_seconds() - hs->guilty_ts;
-
-       if (hs->banned)
+       if (ctx->hang_stats.banned)
                return true;
 
        if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
-               DRM_ERROR("context hanging too fast, declaring banned!\n");
+               if (dev_priv->gpu_error.stop_rings == 0 &&
+                   i915_gem_context_is_default(ctx)) {
+                       DRM_ERROR("gpu hanging too fast, banning!\n");
+               } else {
+                       DRM_DEBUG("context hanging too fast, banning!\n");
+               }
+
                return true;
        }
 
        return false;
 }
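
i915_context_is_banned() now rate-limits offenders: a context caught hanging
the GPU again within DRM_I915_CTX_BAN_PERIOD seconds of its previous guilty
hang is banned outright. A compact userspace sketch of that check (the
60-second period is illustrative; consult the uapi header for the real value):

    #include <stdbool.h>
    #include <time.h>

    #define BAN_PERIOD_SECONDS 60   /* illustrative, cf. DRM_I915_CTX_BAN_PERIOD */

    struct hang_stats {
        bool   banned;
        time_t guilty_ts;   /* second of the last guilty hang */
    };

    /* Ban a context that hangs again too soon after its previous hang. */
    static bool is_banned(const struct hang_stats *hs)
    {
        if (hs->banned)
            return true;
        return (time(NULL) - hs->guilty_ts) <= BAN_PERIOD_SECONDS;
    }
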
 
-static void i915_set_reset_status(struct intel_ring_buffer *ring,
-                                 struct drm_i915_gem_request *request,
-                                 u32 acthd)
+static void i915_set_reset_status(struct drm_i915_private *dev_priv,
+                                 struct i915_hw_context *ctx,
+                                 const bool guilty)
 {
-       struct i915_ctx_hang_stats *hs = NULL;
-       bool inside, guilty;
-       unsigned long offset = 0;
+       struct i915_ctx_hang_stats *hs;
 
-       /* Innocent until proven guilty */
-       guilty = false;
-
-       if (request->batch_obj)
-               offset = i915_gem_obj_offset(request->batch_obj,
-                                            request_to_vm(request));
-
-       if (ring->hangcheck.action != HANGCHECK_WAIT &&
-           i915_request_guilty(request, acthd, &inside)) {
-               DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
-                         ring->name,
-                         inside ? "inside" : "flushing",
-                         offset,
-                         request->ctx ? request->ctx->id : 0,
-                         acthd);
+       if (WARN_ON(!ctx))
+               return;
 
-               guilty = true;
-       }
+       hs = &ctx->hang_stats;
 
-       /* If contexts are disabled or this is the default context, use
-        * file_priv->reset_state
-        */
-       if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
-               hs = &request->ctx->hang_stats;
-       else if (request->file_priv)
-               hs = &request->file_priv->hang_stats;
-
-       if (hs) {
-               if (guilty) {
-                       hs->banned = i915_context_is_banned(hs);
-                       hs->batch_active++;
-                       hs->guilty_ts = get_seconds();
-               } else {
-                       hs->batch_pending++;
-               }
+       if (guilty) {
+               hs->banned = i915_context_is_banned(dev_priv, ctx);
+               hs->batch_active++;
+               hs->guilty_ts = get_seconds();
+       } else {
+               hs->batch_pending++;
        }
 }
 
@@ -2343,34 +2298,44 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
        kfree(request);
 }
 
-static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
-                                      struct intel_ring_buffer *ring)
+static struct drm_i915_gem_request *
+i915_gem_find_first_non_complete(struct intel_ring_buffer *ring)
 {
-       u32 completed_seqno = ring->get_seqno(ring, false);
-       u32 acthd = intel_ring_get_active_head(ring);
        struct drm_i915_gem_request *request;
+       const u32 completed_seqno = ring->get_seqno(ring, false);
 
        list_for_each_entry(request, &ring->request_list, list) {
                if (i915_seqno_passed(completed_seqno, request->seqno))
                        continue;
 
-               i915_set_reset_status(ring, request, acthd);
+               return request;
        }
+
+       return NULL;
 }
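
The helper walks the request list in submission order and returns the first
request the hardware has not completed, i.e. the one that was executing when
the hang was detected; everything after it was merely pending. The seqno test
must be wrap-safe, which i915_seqno_passed() gets from signed arithmetic. A
sketch of both pieces, with an array standing in for the request list:

    #include <stdbool.h>
    #include <stdint.h>

    /* Wrap-safe "has the hardware reached seqno?", as i915_seqno_passed()
     * does it: the signed difference is correct across 32-bit wraparound
     * as long as the two values are within 2^31 of each other. */
    static bool seqno_passed(uint32_t hw_seqno, uint32_t seqno)
    {
        return (int32_t)(hw_seqno - seqno) >= 0;
    }

    /* Index of the first request not yet completed, or -1 if all done. */
    static int first_pending(const uint32_t *seqnos, int n, uint32_t hw_seqno)
    {
        for (int i = 0; i < n; i++)
            if (!seqno_passed(hw_seqno, seqnos[i]))
                return i;
        return -1;
    }
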
 
-static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
-                                       struct intel_ring_buffer *ring)
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
+                                      struct intel_ring_buffer *ring)
 {
-       while (!list_empty(&ring->request_list)) {
-               struct drm_i915_gem_request *request;
+       struct drm_i915_gem_request *request;
+       bool ring_hung;
 
-               request = list_first_entry(&ring->request_list,
-                                          struct drm_i915_gem_request,
-                                          list);
+       request = i915_gem_find_first_non_complete(ring);
 
-               i915_gem_free_request(request);
-       }
+       if (request == NULL)
+               return;
 
+       ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+
+       i915_set_reset_status(dev_priv, request->ctx, ring_hung);
+
+       list_for_each_entry_continue(request, &ring->request_list, list)
+               i915_set_reset_status(dev_priv, request->ctx, false);
+}
+
+static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
+                                       struct intel_ring_buffer *ring)
+{
        while (!list_empty(&ring->active_list)) {
                struct drm_i915_gem_object *obj;
 
@@ -2380,6 +2345,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 
                i915_gem_object_move_to_inactive(obj);
        }
+
+       /*
+        * We must free the requests after all the corresponding objects have
+        * been moved off the active lists, which is the same order the normal
+        * retire_requests path uses. This matters if objects hold implicit
+        * references on things such as ppgtt address spaces through the
+        * request.
+        */
+       while (!list_empty(&ring->request_list)) {
+               struct drm_i915_gem_request *request;
+
+               request = list_first_entry(&ring->request_list,
+                                          struct drm_i915_gem_request,
+                                          list);
+
+               i915_gem_free_request(request);
+       }
 }
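
The cleanup path now mirrors normal retirement: objects come off the active
lists first, requests are freed second, because a request may hold the last
reference to state (such as a ppgtt address space) that those objects still
point into. A schematic of the two-phase teardown with stub types:

    struct obj { struct obj *next; };
    struct req { struct req *next; };

    static void detach_object(struct obj *o) { (void)o; /* move to inactive */ }
    static void free_request(struct req *r)  { (void)r; /* drop references */ }

    /* Phase order matters: freeing a request first could destroy an
     * address space that an object on the active list still references. */
    static void reset_cleanup(struct obj *active, struct req *requests)
    {
        struct req *r, *next;

        for (struct obj *o = active; o; o = o->next)
            detach_object(o);

        for (r = requests; r; r = next) {
            next = r->next;
            free_request(r);
        }
    }
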
 
 void i915_gem_restore_fences(struct drm_device *dev)
@@ -2422,6 +2404,8 @@ void i915_gem_reset(struct drm_device *dev)
 
        i915_gem_cleanup_ringbuffer(dev);
 
+       i915_gem_context_reset(dev);
+
        i915_gem_restore_fences(dev);
 }
 
@@ -2440,6 +2424,24 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 
        seqno = ring->get_seqno(ring, true);
 
+       /* Move any buffers on the active list that are no longer referenced
+        * by the ringbuffer to the flushing/inactive lists as appropriate,
+        * before we free the context associated with the requests.
+        */
+       while (!list_empty(&ring->active_list)) {
+               struct drm_i915_gem_object *obj;
+
+               obj = list_first_entry(&ring->active_list,
+                                     struct drm_i915_gem_object,
+                                     ring_list);
+
+               if (!i915_seqno_passed(seqno, obj->last_read_seqno))
+                       break;
+
+               i915_gem_object_move_to_inactive(obj);
+       }
+
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
 
@@ -2461,22 +2463,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
                i915_gem_free_request(request);
        }
 
-       /* Move any buffers on the active list that are no longer referenced
-        * by the ringbuffer to the flushing/inactive lists as appropriate.
-        */
-       while (!list_empty(&ring->active_list)) {
-               struct drm_i915_gem_object *obj;
-
-               obj = list_first_entry(&ring->active_list,
-                                     struct drm_i915_gem_object,
-                                     ring_list);
-
-               if (!i915_seqno_passed(seqno, obj->last_read_seqno))
-                       break;
-
-               i915_gem_object_move_to_inactive(obj);
-       }
-
        if (unlikely(ring->trace_irq_seqno &&
                     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
                ring->irq_put(ring);
@@ -2719,9 +2705,6 @@ int i915_vma_unbind(struct i915_vma *vma)
        drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
        int ret;
 
-       /* For now we only ever use 1 vma per object */
-       WARN_ON(!list_is_singular(&obj->vma_list));
-
        if (list_empty(&vma->vma_link))
                return 0;
 
@@ -2731,7 +2714,7 @@ int i915_vma_unbind(struct i915_vma *vma)
                return 0;
        }
 
-       if (obj->pin_count)
+       if (vma->pin_count)
                return -EBUSY;
 
        BUG_ON(obj->pages == NULL);
@@ -2753,14 +2736,9 @@ int i915_vma_unbind(struct i915_vma *vma)
 
        trace_i915_vma_unbind(vma);
 
-       if (obj->has_global_gtt_mapping)
-               i915_gem_gtt_unbind_object(obj);
-       if (obj->has_aliasing_ppgtt_mapping) {
-               i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
-               obj->has_aliasing_ppgtt_mapping = 0;
-       }
+       vma->unbind_vma(vma);
+
        i915_gem_gtt_finish_object(obj);
-       i915_gem_object_unpin_pages(obj);
 
        list_del(&vma->mm_list);
        /* Avoid an unnecessary call to unbind on rebind. */
@@ -2768,7 +2746,6 @@ int i915_vma_unbind(struct i915_vma *vma)
                obj->map_and_fenceable = true;
 
        drm_mm_remove_node(&vma->node);
-
        i915_gem_vma_destroy(vma);
 
        /* Since the unbound list is global, only move to that list if
@@ -2776,6 +2753,12 @@ int i915_vma_unbind(struct i915_vma *vma)
        if (list_empty(&obj->vma_list))
                list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 
+       /* And finally now the object is completely decoupled from this vma,
+        * we can drop its hold on the backing storage and allow it to be
+        * reaped by the shrinker.
+        */
+       i915_gem_object_unpin_pages(obj);
+
        return 0;
 }
 
@@ -2791,7 +2774,7 @@ i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
        if (!i915_gem_obj_ggtt_bound(obj))
                return 0;
 
-       if (obj->pin_count)
+       if (i915_gem_obj_to_ggtt(obj)->pin_count)
                return -EBUSY;
 
        BUG_ON(obj->pages == NULL);
@@ -2807,7 +2790,7 @@ int i915_gpu_idle(struct drm_device *dev)
 
        /* Flush everything onto the inactive list. */
        for_each_ring(ring, dev_priv, i) {
-               ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
+               ret = i915_switch_context(ring, NULL, ring->default_context);
                if (ret)
                        return ret;
 
@@ -3068,7 +3051,7 @@ i915_find_fence_reg(struct drm_device *dev)
        }
 
        if (avail == NULL)
-               return NULL;
+               goto deadlock;
 
        /* None available, try to steal one or wait for a user to finish */
        list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
@@ -3078,7 +3061,12 @@ i915_find_fence_reg(struct drm_device *dev)
                return reg;
        }
 
-       return NULL;
+deadlock:
+       /* Wait for completion of pending flips which consume fences */
+       if (intel_has_pending_fb_unpin(dev))
+               return ERR_PTR(-EAGAIN);
+
+       return ERR_PTR(-EDEADLK);
 }
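
Returning ERR_PTR() instead of NULL lets i915_find_fence_reg() tell its
caller why it failed: -EAGAIN when a pending page flip still holds a fence
(so waiting will help), -EDEADLK when nothing can free one. A userspace
re-creation of the kernel's ERR_PTR/IS_ERR/PTR_ERR idiom:

    #include <errno.h>
    #include <stdio.h>

    /* The top 4095 addresses of the pointer space encode negative errnos,
     * so one pointer return carries either a valid object or a reason. */
    #define MAX_ERRNO 4095
    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* Stand-in for the lookup: nothing free, so report whether waiting
     * can help (-EAGAIN) or the caller is truly stuck (-EDEADLK). */
    static void *find_reg(int flip_pending)
    {
        return ERR_PTR(flip_pending ? -EAGAIN : -EDEADLK);
    }

    int main(void)
    {
        void *reg = find_reg(1);
        if (IS_ERR(reg))
            printf("no fence: %ld\n", PTR_ERR(reg));   /* prints -11 */
        return 0;
    }
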
 
 /**
@@ -3123,8 +3111,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
                }
        } else if (enable) {
                reg = i915_find_fence_reg(dev);
-               if (reg == NULL)
-                       return -EDEADLK;
+               if (IS_ERR(reg))
+                       return PTR_ERR(reg);
 
                if (reg->obj) {
                        struct drm_i915_gem_object *old = reg->obj;
@@ -3246,7 +3234,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
                alignment = map_and_fenceable ? fence_alignment :
                                                unfenced_alignment;
        if (map_and_fenceable && alignment & (fence_alignment - 1)) {
-               DRM_ERROR("Invalid object alignment requested %u\n", alignment);
+               DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
                return -EINVAL;
        }
 
@@ -3256,7 +3244,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
         * before evicting everything in a vain attempt to find space.
         */
        if (obj->base.size > gtt_max) {
-               DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+               DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
                          obj->base.size,
                          map_and_fenceable ? "mappable" : "total",
                          gtt_max);
@@ -3269,17 +3257,12 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 
        i915_gem_object_pin_pages(obj);
 
-       BUG_ON(!i915_is_ggtt(vm));
-
        vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_unpin;
        }
 
-       /* For now we only ever use 1 vma per object */
-       WARN_ON(!list_is_singular(&obj->vma_list));
-
 search_free:
        ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
                                                  size, alignment,
@@ -3485,14 +3468,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                    enum i915_cache_level cache_level)
 {
        struct drm_device *dev = obj->base.dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
        struct i915_vma *vma;
        int ret;
 
        if (obj->cache_level == cache_level)
                return 0;
 
-       if (obj->pin_count) {
+       if (i915_gem_obj_is_pinned(obj)) {
                DRM_DEBUG("can not change the cache level of pinned objects\n");
                return -EBUSY;
        }
@@ -3524,11 +3506,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                return ret;
                }
 
-               if (obj->has_global_gtt_mapping)
-                       i915_gem_gtt_bind_object(obj, cache_level);
-               if (obj->has_aliasing_ppgtt_mapping)
-                       i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-                                              obj, cache_level);
+               list_for_each_entry(vma, &obj->vma_list, vma_link)
+                       vma->bind_vma(vma, cache_level, 0);
        }
 
        list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -3652,7 +3631,7 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
         * subtracting the potential reference by the user, any pin_count
         * remains, it must be due to another use by the display engine.
         */
-       return obj->pin_count - !!obj->user_pin_count;
+       return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
 }
 
 /*
@@ -3726,7 +3705,7 @@ err_unpin_display:
 void
 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
 {
-       i915_gem_object_unpin(obj);
+       i915_gem_object_ggtt_unpin(obj);
        obj->pin_display = is_pin_display(obj);
 }
 
@@ -3856,21 +3835,22 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
                    bool map_and_fenceable,
                    bool nonblocking)
 {
+       const u32 flags = map_and_fenceable ? GLOBAL_BIND : 0;
        struct i915_vma *vma;
        int ret;
 
-       if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
-               return -EBUSY;
-
        WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
 
        vma = i915_gem_obj_to_vma(obj, vm);
 
        if (vma) {
+               if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
+                       return -EBUSY;
+
                if ((alignment &&
                     vma->node.start & (alignment - 1)) ||
                    (map_and_fenceable && !obj->map_and_fenceable)) {
-                       WARN(obj->pin_count,
+                       WARN(vma->pin_count,
                             "bo is already pinned with incorrect alignment:"
                             " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
                             " obj->map_and_fenceable=%d\n",
@@ -3884,34 +3864,34 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
        }
 
        if (!i915_gem_obj_bound(obj, vm)) {
-               struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
                ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
                                                 map_and_fenceable,
                                                 nonblocking);
                if (ret)
                        return ret;
 
-               if (!dev_priv->mm.aliasing_ppgtt)
-                       i915_gem_gtt_bind_object(obj, obj->cache_level);
        }
 
-       if (!obj->has_global_gtt_mapping && map_and_fenceable)
-               i915_gem_gtt_bind_object(obj, obj->cache_level);
+       vma = i915_gem_obj_to_vma(obj, vm);
+
+       vma->bind_vma(vma, obj->cache_level, flags);
 
-       obj->pin_count++;
+       i915_gem_obj_to_vma(obj, vm)->pin_count++;
        obj->pin_mappable |= map_and_fenceable;
 
        return 0;
 }
 
 void
-i915_gem_object_unpin(struct drm_i915_gem_object *obj)
+i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
 {
-       BUG_ON(obj->pin_count == 0);
-       BUG_ON(!i915_gem_obj_bound_any(obj));
+       struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
+
+       BUG_ON(!vma);
+       BUG_ON(vma->pin_count == 0);
+       BUG_ON(!i915_gem_obj_ggtt_bound(obj));
 
-       if (--obj->pin_count == 0)
+       if (--vma->pin_count == 0)
                obj->pin_mappable = false;
 }
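
Pinning is now accounted per VMA rather than per object, so one object can be
pinned independently in the global GTT and in a PPGTT, and the pin/unpin pair
is a plain bounded refcount: overflow is rejected, underflow is a bug. A
sketch with a hypothetical stub type (the real cap is
DRM_I915_GEM_OBJECT_MAX_PIN_COUNT):

    #include <assert.h>

    #define MAX_PIN_COUNT 15   /* illustrative cap for a small bitfield */

    struct vma_stub { unsigned int pin_count; };

    /* Pin: refuse to overflow the counter, as the WARN_ON above does. */
    static int vma_pin(struct vma_stub *vma)
    {
        if (vma->pin_count == MAX_PIN_COUNT)
            return -1;          /* -EBUSY in the driver */
        vma->pin_count++;
        return 0;
    }

    /* Unpin: underflow is a programming error, cf. the BUG_ON above. */
    static void vma_unpin(struct vma_stub *vma)
    {
        assert(vma->pin_count > 0);
        vma->pin_count--;
    }
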
 
@@ -3923,6 +3903,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_object *obj;
        int ret;
 
+       if (INTEL_INFO(dev)->gen >= 6)
+               return -ENODEV;
+
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
@@ -3934,13 +3917,13 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        }
 
        if (obj->madv != I915_MADV_WILLNEED) {
-               DRM_ERROR("Attempting to pin a purgeable buffer\n");
-               ret = -EINVAL;
+               DRM_DEBUG("Attempting to pin a purgeable buffer\n");
+               ret = -EFAULT;
                goto out;
        }
 
        if (obj->pin_filp != NULL && obj->pin_filp != file) {
-               DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
+               DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
                ret = -EINVAL;
                goto out;
@@ -3987,7 +3970,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
        }
 
        if (obj->pin_filp != file) {
-               DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
+               DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
                ret = -EINVAL;
                goto out;
@@ -3995,7 +3978,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
        obj->user_pin_count--;
        if (obj->user_pin_count == 0) {
                obj->pin_filp = NULL;
-               i915_gem_object_unpin(obj);
+               i915_gem_object_ggtt_unpin(obj);
        }
 
 out:
@@ -4075,7 +4058,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                goto unlock;
        }
 
-       if (obj->pin_count) {
+       if (i915_gem_obj_is_pinned(obj)) {
                ret = -EINVAL;
                goto out;
        }
@@ -4179,17 +4162,18 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct i915_vma *vma, *next;
 
+       intel_runtime_pm_get(dev_priv);
+
        trace_i915_gem_object_destroy(obj);
 
        if (obj->phys_obj)
                i915_gem_detach_phys_object(dev, obj);
 
-       obj->pin_count = 0;
-       /* NB: 0 or 1 elements */
-       WARN_ON(!list_empty(&obj->vma_list) &&
-               !list_is_singular(&obj->vma_list));
        list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
-               int ret = i915_vma_unbind(vma);
+               int ret;
+
+               vma->pin_count = 0;
+               ret = i915_vma_unbind(vma);
                if (WARN_ON(ret == -ERESTARTSYS)) {
                        bool was_interruptible;
 
@@ -4223,6 +4207,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
        kfree(obj->bit_17);
        i915_gem_object_free(obj);
+
+       intel_runtime_pm_put(dev_priv);
 }
 
 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
@@ -4236,41 +4222,6 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
        return NULL;
 }
 
-static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
-                                             struct i915_address_space *vm)
-{
-       struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
-       if (vma == NULL)
-               return ERR_PTR(-ENOMEM);
-
-       INIT_LIST_HEAD(&vma->vma_link);
-       INIT_LIST_HEAD(&vma->mm_list);
-       INIT_LIST_HEAD(&vma->exec_list);
-       vma->vm = vm;
-       vma->obj = obj;
-
-       /* Keep GGTT vmas first to make debug easier */
-       if (i915_is_ggtt(vm))
-               list_add(&vma->vma_link, &obj->vma_list);
-       else
-               list_add_tail(&vma->vma_link, &obj->vma_list);
-
-       return vma;
-}
-
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-                                 struct i915_address_space *vm)
-{
-       struct i915_vma *vma;
-
-       vma = i915_gem_obj_to_vma(obj, vm);
-       if (!vma)
-               vma = __i915_gem_vma_create(obj, vm);
-
-       return vma;
-}
-
 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
        WARN_ON(vma->node.allocated);
@@ -4461,9 +4412,15 @@ i915_gem_init_hw(struct drm_device *dev)
                           LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
        if (HAS_PCH_NOP(dev)) {
-               u32 temp = I915_READ(GEN7_MSG_CTL);
-               temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
-               I915_WRITE(GEN7_MSG_CTL, temp);
+               if (IS_IVYBRIDGE(dev)) {
+                       u32 temp = I915_READ(GEN7_MSG_CTL);
+                       temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
+                       I915_WRITE(GEN7_MSG_CTL, temp);
+               } else if (INTEL_INFO(dev)->gen >= 7) {
+                       u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
+                       temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
+                       I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
+               }
        }
 
        i915_gem_init_swizzling(dev);
@@ -4476,19 +4433,23 @@ i915_gem_init_hw(struct drm_device *dev)
                i915_gem_l3_remap(&dev_priv->ring[RCS], i);
 
        /*
-        * XXX: There was some w/a described somewhere suggesting loading
-        * contexts before PPGTT.
+        * XXX: Contexts should only be initialized once. Doing a switch to the
+        * default context, however, is something we'd like to do after a reset
+        * or thaw (the latter may not actually be necessary for HW, but it
+        * fits our code better). Context switching requires rings (for
+        * do_switch), but must happen before enabling PPGTT. So don't move
+        * this.
         */
-       i915_gem_context_init(dev);
-       if (dev_priv->mm.aliasing_ppgtt) {
-               ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
-               if (ret) {
-                       i915_gem_cleanup_aliasing_ppgtt(dev);
-                       DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
-               }
+       ret = i915_gem_context_enable(dev_priv);
+       if (ret) {
+               DRM_ERROR("Context enable failed %d\n", ret);
+               goto err_out;
        }
 
        return 0;
+
+err_out:
+       i915_gem_cleanup_ringbuffer(dev);
+       return ret;
 }
 
 int i915_gem_init(struct drm_device *dev)
@@ -4507,10 +4468,18 @@ int i915_gem_init(struct drm_device *dev)
 
        i915_gem_init_global_gtt(dev);
 
+       ret = i915_gem_context_init(dev);
+       if (ret) {
+               mutex_unlock(&dev->struct_mutex);
+               return ret;
+       }
+
        ret = i915_gem_init_hw(dev);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
-               i915_gem_cleanup_aliasing_ppgtt(dev);
+               WARN_ON(dev_priv->mm.aliasing_ppgtt);
+               i915_gem_context_fini(dev);
+               drm_mm_takedown(&dev_priv->gtt.base.mm);
                return ret;
        }
 
@@ -4605,14 +4574,16 @@ init_ring_lists(struct intel_ring_buffer *ring)
        INIT_LIST_HEAD(&ring->request_list);
 }
 
-static void i915_init_vm(struct drm_i915_private *dev_priv,
-                        struct i915_address_space *vm)
+void i915_init_vm(struct drm_i915_private *dev_priv,
+                 struct i915_address_space *vm)
 {
+       if (!i915_is_ggtt(vm))
+               drm_mm_init(&vm->mm, vm->start, vm->total);
        vm->dev = dev_priv->dev;
        INIT_LIST_HEAD(&vm->active_list);
        INIT_LIST_HEAD(&vm->inactive_list);
        INIT_LIST_HEAD(&vm->global_link);
-       list_add(&vm->global_link, &dev_priv->vm_list);
+       list_add_tail(&vm->global_link, &dev_priv->vm_list);
 }
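
Two details here: a non-global address space now initializes its own drm_mm
allocator (the global GTT's is set up earlier, around preallocated ranges),
and registration switches to list_add_tail() so the global GTT, registered
first, stays at the head of dev_priv->vm_list. A tiny runnable illustration
of why appending preserves that ordering:

    #include <stdio.h>

    struct node { const char *name; struct node *next; };

    /* list_add() prepends; list_add_tail() appends. With append, the
     * first-registered entry (the global GTT) stays at the head. */
    static void add_tail(struct node **head, struct node *n)
    {
        while (*head)
            head = &(*head)->next;
        n->next = NULL;
        *head = n;
    }

    int main(void)
    {
        struct node ggtt = { "ggtt", NULL }, ppgtt = { "ppgtt", NULL };
        struct node *head = NULL;

        add_tail(&head, &ggtt);      /* registered first, stays first */
        add_tail(&head, &ppgtt);

        for (struct node *n = head; n; n = n->next)
            printf("%s\n", n->name); /* prints: ggtt, ppgtt */
        return 0;
    }
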
 
 void
@@ -4897,6 +4868,7 @@ i915_gem_file_idle_work_handler(struct work_struct *work)
 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv;
+       int ret;
 
        DRM_DEBUG_DRIVER("\n");
 
@@ -4912,9 +4884,11 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
        INIT_DELAYED_WORK(&file_priv->mm.idle_work,
                          i915_gem_file_idle_work_handler);
 
-       idr_init(&file_priv->context_idr);
+       ret = i915_gem_context_open(dev, file);
+       if (ret)
+               kfree(file_priv);
 
-       return 0;
+       return ret;
 }
 
 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
@@ -4961,7 +4935,7 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
                if (obj->active)
                        continue;
 
-               if (obj->pin_count == 0 && obj->pages_pin_count == 0)
+               if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
                        count += obj->base.size >> PAGE_SHIFT;
        }
 
@@ -4978,7 +4952,8 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
        struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;
 
-       if (vm == &dev_priv->mm.aliasing_ppgtt->base)
+       if (!dev_priv->mm.aliasing_ppgtt ||
+           vm == &dev_priv->mm.aliasing_ppgtt->base)
                vm = &dev_priv->gtt.base;
 
        BUG_ON(list_empty(&o->vma_list));
@@ -5019,7 +4994,8 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
        struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;
 
-       if (vm == &dev_priv->mm.aliasing_ppgtt->base)
+       if (!dev_priv->mm.aliasing_ppgtt ||
+           vm == &dev_priv->mm.aliasing_ppgtt->base)
                vm = &dev_priv->gtt.base;
 
        BUG_ON(list_empty(&o->vma_list));
@@ -5074,7 +5050,7 @@ struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
                return NULL;
 
        vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
-       if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
+       if (vma->vm != obj_to_ggtt(obj))
                return NULL;
 
        return vma;
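
Dropping the WARN_ON turns a mixed GGTT/PPGTT vma_list into a normal case:
the lookup relies only on the invariant that a GGTT VMA, when one exists, is
kept first in obj->vma_list (the vma-creation helper removed from this file
above inserts GGTT VMAs at the head). A stub-typed sketch of that head-check
lookup:

    #include <stdbool.h>
    #include <stddef.h>

    struct vm  { bool is_ggtt; };
    struct vma { struct vm *vm; struct vma *next; };

    /* Because GGTT VMAs are always inserted at the head of the object's
     * vma list, inspecting the first entry suffices: a non-GGTT head
     * means no GGTT binding exists at all. */
    static struct vma *to_ggtt(struct vma *vma_list)
    {
        if (vma_list == NULL || !vma_list->vm->is_ggtt)
            return NULL;
        return vma_list;
    }
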