drm/i915: Adjust size of PIPE_CONTROL used for gen8 render seqno write
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6a978ce8024436251009979cf1b1b47fa14ff549..5e08ea5aa6d14a523085bd74f2cd865523f82e3d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
  * preemption, but just sampling the new tail pointer).
  *
  */
+#include <linux/interrupt.h>
 
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
@@ -228,9 +229,6 @@ enum {
 
 static int intel_lr_context_pin(struct intel_context *ctx,
                                struct intel_engine_cs *engine);
-static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
-               struct drm_i915_gem_object *default_ctx_obj);
-
 
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -266,20 +264,23 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
 }
 
 static void
-logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
+logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
+
+       if (IS_GEN8(dev) || IS_GEN9(dev))
+               engine->idle_lite_restore_wa = ~0;
 
-       ring->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+       engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
                                        IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
-                                       (ring->id == VCS || ring->id == VCS2);
+                                       (engine->id == VCS || engine->id == VCS2);
 
-       ring->ctx_desc_template = GEN8_CTX_VALID;
-       ring->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
+       engine->ctx_desc_template = GEN8_CTX_VALID;
+       engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
                                   GEN8_CTX_ADDRESSING_MODE_SHIFT;
        if (IS_GEN8(dev))
-               ring->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
-       ring->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
+               engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
+       engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
 
        /* TODO: WaDisableLiteRestore when we start using semaphore
         * signalling between Command Streamers */
@@ -287,8 +288,8 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
 
        /* WaEnableForceRestoreInCtxtDescForVCS:skl */
        /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
-       if (ring->disable_lite_restore_wa)
-               ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
+       if (engine->disable_lite_restore_wa)
+               engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
 }
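
The new idle_lite_restore_wa field turns the old per-submission IS_GEN8/IS_GEN9 check into a branch-free mask test: all ones on gen8/9, zero elsewhere, so ANDing it with a request's elsp_submitted count is non-zero only when WaIdleLiteRestore applies to a request that has already been on the ELSP once. A minimal sketch of the idiom (pad_request_tail is a hypothetical stand-in for the tail adjustment done in execlists_context_unqueue() below):

	/* All ones on affected platforms, zero otherwise. */
	engine->idle_lite_restore_wa = (IS_GEN8(dev) || IS_GEN9(dev)) ? ~0 : 0;

	/* Submission path: true only for a resubmitted request on gen8/9. */
	if (req0->elsp_submitted & engine->idle_lite_restore_wa)
		pad_request_tail(req0);		/* hypothetical helper */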
 
 /**
@@ -311,24 +312,24 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
  */
 static void
 intel_lr_context_descriptor_update(struct intel_context *ctx,
-                                  struct intel_engine_cs *ring)
+                                  struct intel_engine_cs *engine)
 {
        uint64_t lrca, desc;
 
-       lrca = ctx->engine[ring->id].lrc_vma->node.start +
+       lrca = ctx->engine[engine->id].lrc_vma->node.start +
               LRC_PPHWSP_PN * PAGE_SIZE;
 
-       desc = ring->ctx_desc_template;                    /* bits  0-11 */
+       desc = engine->ctx_desc_template;                          /* bits  0-11 */
        desc |= lrca;                                      /* bits 12-31 */
        desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
 
-       ctx->engine[ring->id].lrc_desc = desc;
+       ctx->engine[engine->id].lrc_desc = desc;
 }
 
 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
-                                    struct intel_engine_cs *ring)
+                                    struct intel_engine_cs *engine)
 {
-       return ctx->engine[ring->id].lrc_desc;
+       return ctx->engine[engine->id].lrc_desc;
 }
 
 /**
@@ -348,98 +349,103 @@ uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
  * Return: 20-bits globally unique context ID.
  */
 u32 intel_execlists_ctx_id(struct intel_context *ctx,
-                          struct intel_engine_cs *ring)
+                          struct intel_engine_cs *engine)
 {
-       return intel_lr_context_descriptor(ctx, ring) >> GEN8_CTX_ID_SHIFT;
+       return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT;
 }
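
As the bit comments in intel_lr_context_descriptor_update() above spell out, the descriptor packs the template flags, the LRCA and the context ID into a single 64-bit value, so the 20-bit ID falls out of the descriptor with one shift. A sketch of the round trip, with vma_start standing in for the context object's GGTT node start:

	u64 lrca = vma_start + LRC_PPHWSP_PN * PAGE_SIZE;	/* page aligned */
	u64 desc = engine->ctx_desc_template			/* bits  0-11 */
		 | lrca						/* bits 12-31 */
		 | (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT;	/* bits 32-51 */

	u32 ctx_id = desc >> GEN8_CTX_ID_SHIFT;	/* == lrca >> PAGE_SHIFT */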
 
 static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
                                 struct drm_i915_gem_request *rq1)
 {
 
-       struct intel_engine_cs *ring = rq0->ring;
-       struct drm_device *dev = ring->dev;
+       struct intel_engine_cs *engine = rq0->engine;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint64_t desc[2];
 
        if (rq1) {
-               desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->ring);
+               desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
                rq1->elsp_submitted++;
        } else {
                desc[1] = 0;
        }
 
-       desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->ring);
+       desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
        rq0->elsp_submitted++;
 
        /* You must always write both descriptors in the order below. */
-       spin_lock(&dev_priv->uncore.lock);
-       intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
-       I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1]));
-       I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1]));
+       I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
+       I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));
 
-       I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0]));
+       I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
        /* The context is automatically loaded after the following */
-       I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
+       I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));
 
        /* ELSP is a wo register, use another nearby reg for posting */
-       POSTING_READ_FW(RING_EXECLIST_STATUS_LO(ring));
-       intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
-       spin_unlock(&dev_priv->uncore.lock);
+       POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
 }
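
The _FW accessors used above are the raw mmio variants: they take no forcewake reference and no uncore.lock of their own, which is why the bracketing moves out of this function and is paid once per submission instead of once per register write. The caller-side pattern, as adopted by execlists_submit_requests() below:

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, engine->fw_domains);

	execlists_elsp_write(rq0, rq1);		/* four I915_WRITE_FW()s */

	intel_uncore_forcewake_put__locked(dev_priv, engine->fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);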
 
-static int execlists_update_context(struct drm_i915_gem_request *rq)
+static void
+execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
 {
-       struct intel_engine_cs *ring = rq->ring;
+       ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
+       ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
+       ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
+       ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
+}
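
For reference, ASSIGN_CTX_PDP() is a macro defined elsewhere in intel_lrc.c and untouched by this patch; it roughly expands to splitting the 64-bit page-directory address for entry n across the UDW/LDW slots of the context image (a sketch — the real macro token-pastes n into the CTX_PDPn_UDW/LDW names used below):

	const u64 addr = i915_page_dir_dma_addr(ppgtt, n);

	reg_state[CTX_PDPn_UDW + 1] = upper_32_bits(addr);
	reg_state[CTX_PDPn_LDW + 1] = lower_32_bits(addr);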
+
+static void execlists_update_context(struct drm_i915_gem_request *rq)
+{
+       struct intel_engine_cs *engine = rq->engine;
        struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
-       uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;
+       uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
 
        reg_state[CTX_RING_TAIL+1] = rq->tail;
 
-       if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
-               /* True 32b PPGTT with dynamic page allocation: update PDP
-                * registers and point the unallocated PDPs to scratch page.
-                * PML4 is allocated during ppgtt init, so this is not needed
-                * in 48-bit mode.
-                */
-               ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
-               ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
-               ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
-               ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
-       }
-
-       return 0;
+       /* True 32b PPGTT with dynamic page allocation: update PDP
+        * registers and point the unallocated PDPs to scratch page.
+        * PML4 is allocated during ppgtt init, so this is not needed
+        * in 48-bit mode.
+        */
+       if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
+               execlists_update_context_pdps(ppgtt, reg_state);
 }
 
 static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
                                      struct drm_i915_gem_request *rq1)
 {
+       struct drm_i915_private *dev_priv = rq0->i915;
+       unsigned int fw_domains = rq0->engine->fw_domains;
+
        execlists_update_context(rq0);
 
        if (rq1)
                execlists_update_context(rq1);
 
+       spin_lock_irq(&dev_priv->uncore.lock);
+       intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+
        execlists_elsp_write(rq0, rq1);
+
+       intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
+       spin_unlock_irq(&dev_priv->uncore.lock);
 }
 
-static void execlists_context_unqueue(struct intel_engine_cs *ring)
+static void execlists_context_unqueue(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
-       struct drm_i915_gem_request *cursor = NULL, *tmp = NULL;
+       struct drm_i915_gem_request *cursor, *tmp;
 
-       assert_spin_locked(&ring->execlist_lock);
+       assert_spin_locked(&engine->execlist_lock);
 
        /*
         * If irqs are not active generate a warning as batches that finish
         * without the irqs may get lost and a GPU Hang may occur.
         */
-       WARN_ON(!intel_irqs_enabled(ring->dev->dev_private));
-
-       if (list_empty(&ring->execlist_queue))
-               return;
+       WARN_ON(!intel_irqs_enabled(engine->dev->dev_private));
 
        /* Try to read in pairs */
-       list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue,
+       list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
                                 execlist_link) {
                if (!req0) {
                        req0 = cursor;
@@ -448,172 +454,179 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
                         * will update tail past first request's workload */
                        cursor->elsp_submitted = req0->elsp_submitted;
                        list_move_tail(&req0->execlist_link,
-                                      &ring->execlist_retired_req_list);
+                                      &engine->execlist_retired_req_list);
                        req0 = cursor;
                } else {
                        req1 = cursor;
+                       WARN_ON(req1->elsp_submitted);
                        break;
                }
        }
 
-       if (IS_GEN8(ring->dev) || IS_GEN9(ring->dev)) {
+       if (unlikely(!req0))
+               return;
+
+       if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
                /*
-                * WaIdleLiteRestore: make sure we never cause a lite
-                * restore with HEAD==TAIL
+                * WaIdleLiteRestore: make sure we never cause a lite restore
+                * with HEAD==TAIL.
+                *
+                * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL as we
+                * resubmit the request. See gen8_emit_request() for where we
+                * prepare the padding after the end of the request.
                 */
-               if (req0->elsp_submitted) {
-                       /*
-                        * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL
-                        * as we resubmit the request. See gen8_emit_request()
-                        * for where we prepare the padding after the end of the
-                        * request.
-                        */
-                       struct intel_ringbuffer *ringbuf;
+               struct intel_ringbuffer *ringbuf;
 
-                       ringbuf = req0->ctx->engine[ring->id].ringbuf;
-                       req0->tail += 8;
-                       req0->tail &= ringbuf->size - 1;
-               }
+               ringbuf = req0->ctx->engine[engine->id].ringbuf;
+               req0->tail += 8;
+               req0->tail &= ringbuf->size - 1;
        }
 
-       WARN_ON(req1 && req1->elsp_submitted);
-
        execlists_submit_requests(req0, req1);
 }
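
The 8 bytes added to req0->tail match two dwords of MI_NOOP padding that every request emits after its real tail (visible in intel_logical_ring_advance_and_submit() further down); advancing TAIL over that padding guarantees a lite restore never sees HEAD == TAIL. Sketched side by side:

	/* Emit side, end of every request: reserve room to move TAIL. */
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance(ringbuf);

	/* Resubmit side, above: skip the padding, modulo the ring size. */
	req0->tail += 8;			/* 2 dwords * 4 bytes */
	req0->tail &= ringbuf->size - 1;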
 
-static bool execlists_check_remove_request(struct intel_engine_cs *ring,
-                                          u32 request_id)
+static unsigned int
+execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
 {
        struct drm_i915_gem_request *head_req;
 
-       assert_spin_locked(&ring->execlist_lock);
+       assert_spin_locked(&engine->execlist_lock);
 
-       head_req = list_first_entry_or_null(&ring->execlist_queue,
+       head_req = list_first_entry_or_null(&engine->execlist_queue,
                                            struct drm_i915_gem_request,
                                            execlist_link);
 
-       if (head_req != NULL) {
-               if (intel_execlists_ctx_id(head_req->ctx, ring) == request_id) {
-                       WARN(head_req->elsp_submitted == 0,
-                            "Never submitted head request\n");
+       if (!head_req)
+               return 0;
 
-                       if (--head_req->elsp_submitted <= 0) {
-                               list_move_tail(&head_req->execlist_link,
-                                              &ring->execlist_retired_req_list);
-                               return true;
-                       }
-               }
-       }
+       if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id))
+               return 0;
+
+       WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
 
-       return false;
+       if (--head_req->elsp_submitted > 0)
+               return 0;
+
+       list_move_tail(&head_req->execlist_link,
+                      &engine->execlist_retired_req_list);
+
+       return 1;
 }
 
-static void get_context_status(struct intel_engine_cs *ring,
-                              u8 read_pointer,
-                              u32 *status, u32 *context_id)
+static u32
+get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
+                  u32 *context_id)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       u32 status;
 
-       if (WARN_ON(read_pointer >= GEN8_CSB_ENTRIES))
-               return;
+       read_pointer %= GEN8_CSB_ENTRIES;
+
+       status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));
+
+       if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
+               return 0;
+
+       *context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
+                                                             read_pointer));
 
-       *status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer));
-       *context_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer));
+       return status;
 }
 
 /**
  * intel_lrc_irq_handler() - handle Context Switch interrupts
- * @ring: Engine Command Streamer to handle.
+ * @engine: Engine Command Streamer to handle.
  *
  * Check the unread Context Status Buffers and manage the submission of new
  * contexts to the ELSP accordingly.
  */
-void intel_lrc_irq_handler(struct intel_engine_cs *ring)
+static void intel_lrc_irq_handler(unsigned long data)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
        u32 status_pointer;
-       u8 read_pointer;
-       u8 write_pointer;
-       u32 status = 0;
-       u32 status_id;
-       u32 submit_contexts = 0;
+       unsigned int read_pointer, write_pointer;
+       u32 csb[GEN8_CSB_ENTRIES][2];
+       unsigned int csb_read = 0, i;
+       unsigned int submit_contexts = 0;
 
-       status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+       intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
 
-       read_pointer = ring->next_context_status_buffer;
+       status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
+
+       read_pointer = engine->next_context_status_buffer;
        write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
        if (read_pointer > write_pointer)
                write_pointer += GEN8_CSB_ENTRIES;
 
-       spin_lock(&ring->execlist_lock);
-
        while (read_pointer < write_pointer) {
+               if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
+                       break;
+               csb[csb_read][0] = get_context_status(engine, ++read_pointer,
+                                                     &csb[csb_read][1]);
+               csb_read++;
+       }
 
-               get_context_status(ring, ++read_pointer % GEN8_CSB_ENTRIES,
-                                  &status, &status_id);
+       engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
 
-               if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
-                       continue;
+       /* Update the read pointer to the old write pointer. Manual ringbuffer
+        * management ftw </sarcasm> */
+       I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
+                     _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
+                                   engine->next_context_status_buffer << 8));
+
+       intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
+
+       spin_lock(&engine->execlist_lock);
 
-               if (status & GEN8_CTX_STATUS_PREEMPTED) {
-                       if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
-                               if (execlists_check_remove_request(ring, status_id))
+       for (i = 0; i < csb_read; i++) {
+               if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
+                       if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
+                               if (execlists_check_remove_request(engine, csb[i][1]))
                                        WARN(1, "Lite Restored request removed from queue\n");
                        } else
                                WARN(1, "Preemption without Lite Restore\n");
                }
 
-               if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
-                   (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
-                       if (execlists_check_remove_request(ring, status_id))
-                               submit_contexts++;
-               }
+               if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
+                   GEN8_CTX_STATUS_ELEMENT_SWITCH))
+                       submit_contexts +=
+                               execlists_check_remove_request(engine, csb[i][1]);
        }
 
-       if (ring->disable_lite_restore_wa) {
-               /* Prevent a ctx to preempt itself */
-               if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
-                   (submit_contexts != 0))
-                       execlists_context_unqueue(ring);
-       } else if (submit_contexts != 0) {
-               execlists_context_unqueue(ring);
+       if (submit_contexts) {
+               if (!engine->disable_lite_restore_wa ||
+                   (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
+                       execlists_context_unqueue(engine);
        }
 
-       spin_unlock(&ring->execlist_lock);
+       spin_unlock(&engine->execlist_lock);
 
        if (unlikely(submit_contexts > 2))
                DRM_ERROR("More than two context complete events?\n");
-
-       ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
-
-       /* Update the read pointer to the old write pointer. Manual ringbuffer
-        * management ftw </sarcasm> */
-       I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
-                  _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
-                                ring->next_context_status_buffer << 8));
 }
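
intel_lrc_irq_handler() is now a tasklet callback (hence the unsigned long data signature and the new <linux/interrupt.h> include), so the hard interrupt path only schedules it. A minimal sketch of the wiring, assuming the irq_tasklet member this series adds to struct intel_engine_cs; the actual setup lives in logical_ring_init() and the scheduling in i915_irq.c, both outside this excerpt:

	/* Engine init: bind the tasklet to this engine. */
	tasklet_init(&engine->irq_tasklet,
		     intel_lrc_irq_handler, (unsigned long)engine);

	/* Hard irq: defer the CSB processing to softirq context. */
	tasklet_hi_schedule(&engine->irq_tasklet);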
 
-static int execlists_context_queue(struct drm_i915_gem_request *request)
+static void execlists_context_queue(struct drm_i915_gem_request *request)
 {
-       struct intel_engine_cs *ring = request->ring;
+       struct intel_engine_cs *engine = request->engine;
        struct drm_i915_gem_request *cursor;
        int num_elements = 0;
 
        if (request->ctx != request->i915->kernel_context)
-               intel_lr_context_pin(request->ctx, ring);
+               intel_lr_context_pin(request->ctx, engine);
 
        i915_gem_request_reference(request);
 
-       spin_lock_irq(&ring->execlist_lock);
+       spin_lock_bh(&engine->execlist_lock);
 
-       list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
+       list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
                if (++num_elements > 2)
                        break;
 
        if (num_elements > 2) {
                struct drm_i915_gem_request *tail_req;
 
-               tail_req = list_last_entry(&ring->execlist_queue,
+               tail_req = list_last_entry(&engine->execlist_queue,
                                           struct drm_i915_gem_request,
                                           execlist_link);
 
@@ -621,41 +634,39 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
                        WARN(tail_req->elsp_submitted != 0,
                                "More than 2 already-submitted reqs queued\n");
                        list_move_tail(&tail_req->execlist_link,
-                                      &ring->execlist_retired_req_list);
+                                      &engine->execlist_retired_req_list);
                }
        }
 
-       list_add_tail(&request->execlist_link, &ring->execlist_queue);
+       list_add_tail(&request->execlist_link, &engine->execlist_queue);
        if (num_elements == 0)
-               execlists_context_unqueue(ring);
+               execlists_context_unqueue(engine);
 
-       spin_unlock_irq(&ring->execlist_lock);
-
-       return 0;
+       spin_unlock_bh(&engine->execlist_lock);
 }
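
Because the CSB handler now runs in a tasklet, execlist_lock is contended between process context and softirq context rather than hardirq context; the _irq lock variants therefore become _bh here and in intel_execlists_retire_requests(), while the tasklet itself can take the plain lock. The discipline, sketched with hypothetical stand-ins for the two bodies:

	/* Tasklet (softirq) context: the plain spinlock suffices. */
	spin_lock(&engine->execlist_lock);
	process_csb(engine);			/* hypothetical stand-in */
	spin_unlock(&engine->execlist_lock);

	/* Process context: _bh also keeps the local tasklet at bay. */
	spin_lock_bh(&engine->execlist_lock);
	queue_request(engine);			/* hypothetical stand-in */
	spin_unlock_bh(&engine->execlist_lock);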
 
 static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        uint32_t flush_domains;
        int ret;
 
        flush_domains = 0;
-       if (ring->gpu_caches_dirty)
+       if (engine->gpu_caches_dirty)
                flush_domains = I915_GEM_GPU_DOMAINS;
 
-       ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
+       ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
        if (ret)
                return ret;
 
-       ring->gpu_caches_dirty = false;
+       engine->gpu_caches_dirty = false;
        return 0;
 }
 
 static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
                                 struct list_head *vmas)
 {
-       const unsigned other_rings = ~intel_ring_flag(req->ring);
+       const unsigned other_rings = ~intel_engine_flag(req->engine);
        struct i915_vma *vma;
        uint32_t flush_domains = 0;
        bool flush_chipset = false;
@@ -665,7 +676,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
                struct drm_i915_gem_object *obj = vma->obj;
 
                if (obj->active & other_rings) {
-                       ret = i915_gem_object_sync(obj, req->ring, &req);
+                       ret = i915_gem_object_sync(obj, req->engine, &req);
                        if (ret)
                                return ret;
                }
@@ -689,7 +700,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 {
        int ret = 0;
 
-       request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
+       request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;
 
        if (i915.enable_guc_submission) {
                /*
@@ -705,7 +716,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
        }
 
        if (request->ctx != request->i915->kernel_context)
-               ret = intel_lr_context_pin(request->ctx, request->ring);
+               ret = intel_lr_context_pin(request->ctx, request->engine);
 
        return ret;
 }
@@ -714,7 +725,7 @@ static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
                                       int bytes)
 {
        struct intel_ringbuffer *ringbuf = req->ringbuf;
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        struct drm_i915_gem_request *target;
        unsigned space;
        int ret;
@@ -725,7 +736,7 @@ static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
        /* The whole point of reserving space is to not wait! */
        WARN_ON(ringbuf->reserved_in_use);
 
-       list_for_each_entry(target, &ring->request_list, list) {
+       list_for_each_entry(target, &engine->request_list, list) {
                /*
                 * The request queue is per-engine, so can contain requests
                 * from multiple ringbuffers. Here, we must ignore any that
@@ -741,7 +752,7 @@ static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
                        break;
        }
 
-       if (WARN_ON(&target->list == &ring->request_list))
+       if (WARN_ON(&target->list == &engine->request_list))
                return -ENOSPC;
 
        ret = i915_wait_request(target);
@@ -766,7 +777,7 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
        struct intel_ringbuffer *ringbuf = request->ringbuf;
        struct drm_i915_private *dev_priv = request->i915;
-       struct intel_engine_cs *engine = request->ring;
+       struct intel_engine_cs *engine = request->engine;
 
        intel_logical_ring_advance(ringbuf);
        request->tail = ringbuf->tail;
@@ -781,7 +792,7 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
        intel_logical_ring_emit(ringbuf, MI_NOOP);
        intel_logical_ring_advance(ringbuf);
 
-       if (intel_ring_stopped(engine))
+       if (intel_engine_stopped(engine))
                return 0;
 
        if (engine->last_context != request->ctx) {
@@ -841,11 +852,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
                if (unlikely(total_bytes > remain_usable)) {
                        /*
                         * The base request will fit but the reserved space
-                        * falls off the end. So only need to to wait for the
-                        * reserved size after flushing out the remainder.
+                        * falls off the end. So don't need an immediate wrap
+                        * and only need to effectively wait for the reserved
+                        * size space from the start of ringbuffer.
                         */
                        wait_bytes = remain_actual + ringbuf->reserved_size;
-                       need_wrap = true;
                } else if (total_bytes > ringbuf->space) {
                        /* No wrapping required, just waiting. */
                        wait_bytes = total_bytes;
@@ -883,7 +894,7 @@ int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
        int ret;
 
        WARN_ON(req == NULL);
-       dev_priv = req->ring->dev->dev_private;
+       dev_priv = req->i915;
 
        ret = i915_gem_check_wedge(&dev_priv->gpu_error,
                                   dev_priv->mm.interruptible);
@@ -935,9 +946,9 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
                               struct list_head *vmas)
 {
        struct drm_device       *dev = params->dev;
-       struct intel_engine_cs  *ring = params->ring;
+       struct intel_engine_cs *engine = params->engine;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf;
+       struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
        u64 exec_start;
        int instp_mode;
        u32 instp_mask;
@@ -949,7 +960,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
        case I915_EXEC_CONSTANTS_REL_GENERAL:
        case I915_EXEC_CONSTANTS_ABSOLUTE:
        case I915_EXEC_CONSTANTS_REL_SURFACE:
-               if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+               if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
                        DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
                        return -EINVAL;
                }
@@ -978,7 +989,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
        if (ret)
                return ret;
 
-       if (ring == &dev_priv->ring[RCS] &&
+       if (engine == &dev_priv->engine[RCS] &&
            instp_mode != dev_priv->relative_constants_mode) {
                ret = intel_logical_ring_begin(params->request, 4);
                if (ret)
@@ -996,7 +1007,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
        exec_start = params->batch_obj_vm_offset +
                     args->batch_start_offset;
 
-       ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
+       ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
        if (ret)
                return ret;
 
@@ -1008,104 +1019,105 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
        return 0;
 }
 
-void intel_execlists_retire_requests(struct intel_engine_cs *ring)
+void intel_execlists_retire_requests(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *req, *tmp;
        struct list_head retired_list;
 
-       WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-       if (list_empty(&ring->execlist_retired_req_list))
+       WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
+       if (list_empty(&engine->execlist_retired_req_list))
                return;
 
        INIT_LIST_HEAD(&retired_list);
-       spin_lock_irq(&ring->execlist_lock);
-       list_replace_init(&ring->execlist_retired_req_list, &retired_list);
-       spin_unlock_irq(&ring->execlist_lock);
+       spin_lock_bh(&engine->execlist_lock);
+       list_replace_init(&engine->execlist_retired_req_list, &retired_list);
+       spin_unlock_bh(&engine->execlist_lock);
 
        list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
                struct intel_context *ctx = req->ctx;
                struct drm_i915_gem_object *ctx_obj =
-                               ctx->engine[ring->id].state;
+                               ctx->engine[engine->id].state;
 
                if (ctx_obj && (ctx != req->i915->kernel_context))
-                       intel_lr_context_unpin(ctx, ring);
+                       intel_lr_context_unpin(ctx, engine);
 
                list_del(&req->execlist_link);
                i915_gem_request_unreference(req);
        }
 }
 
-void intel_logical_ring_stop(struct intel_engine_cs *ring)
+void intel_logical_ring_stop(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
        int ret;
 
-       if (!intel_ring_initialized(ring))
+       if (!intel_engine_initialized(engine))
                return;
 
-       ret = intel_ring_idle(ring);
-       if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+       ret = intel_engine_idle(engine);
+       if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
-                         ring->name, ret);
+                         engine->name, ret);
 
        /* TODO: Is this correct with Execlists enabled? */
-       I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
-       if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
-               DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
+       I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
+       if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
+               DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
                return;
        }
-       I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+       I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
 int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        int ret;
 
-       if (!ring->gpu_caches_dirty)
+       if (!engine->gpu_caches_dirty)
                return 0;
 
-       ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
+       ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;
 
-       ring->gpu_caches_dirty = false;
+       engine->gpu_caches_dirty = false;
        return 0;
 }
 
 static int intel_lr_context_do_pin(struct intel_context *ctx,
-                                  struct intel_engine_cs *ring)
+                                  struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
-       struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
-       struct page *lrc_state_page;
-       uint32_t *lrc_reg_state;
+       struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
+       struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
+       void *vaddr;
+       u32 *lrc_reg_state;
        int ret;
 
-       WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+       WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
 
        ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
                        PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
        if (ret)
                return ret;
 
-       lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-       if (WARN_ON(!lrc_state_page)) {
-               ret = -ENODEV;
+       vaddr = i915_gem_object_pin_map(ctx_obj);
+       if (IS_ERR(vaddr)) {
+               ret = PTR_ERR(vaddr);
                goto unpin_ctx_obj;
        }
 
-       ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
+       lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+
+       ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
        if (ret)
-               goto unpin_ctx_obj;
+               goto unpin_map;
 
-       ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
-       intel_lr_context_descriptor_update(ctx, ring);
-       lrc_reg_state = kmap(lrc_state_page);
+       ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
+       intel_lr_context_descriptor_update(ctx, engine);
        lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
-       ctx->engine[ring->id].lrc_reg_state = lrc_reg_state;
+       ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
        ctx_obj->dirty = true;
 
        /* Invalidate GuC TLB. */
@@ -1114,6 +1126,8 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
 
        return ret;
 
+unpin_map:
+       i915_gem_object_unpin_map(ctx_obj);
 unpin_ctx_obj:
        i915_gem_object_ggtt_unpin(ctx_obj);
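
i915_gem_object_pin_map(), introduced earlier in this series, returns a kernel mapping of the whole object that stays valid for as long as the pin is held, so the register-state page is reached by a fixed offset instead of a kmap()/kunmap() pair per access; i915_gem_object_unpin_map() in the error path above and in intel_lr_context_unpin() below balances it. The lifetime, sketched:

	void *vaddr = i915_gem_object_pin_map(ctx_obj);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* Page LRC_STATE_PN of the context image holds the register state. */
	u32 *lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;

	/* ... use lrc_reg_state while the context stays pinned ... */

	i915_gem_object_unpin_map(ctx_obj);	/* on unpin (or error) */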
 
@@ -1146,7 +1160,7 @@ void intel_lr_context_unpin(struct intel_context *ctx,
 
        WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
        if (--ctx->engine[engine->id].pin_count == 0) {
-               kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
+               i915_gem_object_unpin_map(ctx_obj);
                intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
                i915_gem_object_ggtt_unpin(ctx_obj);
                ctx->engine[engine->id].lrc_vma = NULL;
@@ -1160,16 +1174,16 @@ void intel_lr_context_unpin(struct intel_context *ctx,
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
        int ret, i;
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        struct intel_ringbuffer *ringbuf = req->ringbuf;
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_workarounds *w = &dev_priv->workarounds;
 
        if (w->count == 0)
                return 0;
 
-       ring->gpu_caches_dirty = true;
+       engine->gpu_caches_dirty = true;
        ret = logical_ring_flush_all_caches(req);
        if (ret)
                return ret;
@@ -1187,7 +1201,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
        intel_logical_ring_advance(ringbuf);
 
-       ring->gpu_caches_dirty = true;
+       engine->gpu_caches_dirty = true;
        ret = logical_ring_flush_all_caches(req);
        if (ret)
                return ret;
@@ -1223,7 +1237,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
  * This WA is also required for Gen9 so extracting as a function avoids
  * code duplication.
  */
-static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
+static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
                                                uint32_t *const batch,
                                                uint32_t index)
 {
@@ -1235,13 +1249,13 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
         * this batch updates GEN8_L3SQCREG4 with default value we need to
         * set this bit here to retain the WA during flush.
         */
-       if (IS_SKL_REVID(ring->dev, 0, SKL_REVID_E0))
+       if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0))
                l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
 
        wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
                                   MI_SRM_LRM_GLOBAL_GTT));
        wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
-       wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+       wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
        wa_ctx_emit(batch, index, 0);
 
        wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
@@ -1259,7 +1273,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
        wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
                                   MI_SRM_LRM_GLOBAL_GTT));
        wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
-       wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+       wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
        wa_ctx_emit(batch, index, 0);
 
        return index;
@@ -1312,7 +1326,7 @@ static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
  * Return: non-zero if we exceed the PAGE_SIZE limit.
  */
 
-static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
+static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
                                    struct i915_wa_ctx_bb *wa_ctx,
                                    uint32_t *const batch,
                                    uint32_t *offset)
@@ -1324,8 +1338,8 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
        wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
        /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
-       if (IS_BROADWELL(ring->dev)) {
-               int rc = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+       if (IS_BROADWELL(engine->dev)) {
+               int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
                if (rc < 0)
                        return rc;
                index = rc;
@@ -1333,7 +1347,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
 
        /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
        /* Actual scratch location is at 128 bytes offset */
-       scratch_addr = ring->scratch.gtt_offset + 2*CACHELINE_BYTES;
+       scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
 
        wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
        wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
@@ -1375,7 +1389,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
  *  This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
  *  to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
  */
-static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
+static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
                               struct i915_wa_ctx_bb *wa_ctx,
                               uint32_t *const batch,
                               uint32_t *offset)
@@ -1390,13 +1404,13 @@ static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
        return wa_ctx_end(wa_ctx, *offset = index, 1);
 }
 
-static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
+static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
                                    struct i915_wa_ctx_bb *wa_ctx,
                                    uint32_t *const batch,
                                    uint32_t *offset)
 {
        int ret;
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
        /* WaDisableCtxRestoreArbitration:skl,bxt */
@@ -1405,7 +1419,7 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
                wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
        /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
-       ret = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+       ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
        if (ret < 0)
                return ret;
        index = ret;
@@ -1417,12 +1431,12 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
        return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
 }
 
-static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
+static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
                               struct i915_wa_ctx_bb *wa_ctx,
                               uint32_t *const batch,
                               uint32_t *offset)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
        /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
@@ -1435,6 +1449,25 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
                wa_ctx_emit(batch, index, MI_NOOP);
        }
 
+       /* WaClearTdlStateAckDirtyBits:bxt */
+       if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+               wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
+
+               wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
+               wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
+
+               wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1);
+               wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
+
+               wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2);
+               wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
+
+               wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2);
+               /* dummy write to CS, mask bits are 0 to ensure the register is not modified */
+               wa_ctx_emit(batch, index, 0x0);
+               wa_ctx_emit(batch, index, MI_NOOP);
+       }
+
        /* WaDisableCtxRestoreArbitration:skl,bxt */
        if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
            IS_BXT_REVID(dev, 0, BXT_REVID_A1))
@@ -1445,60 +1478,61 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
        return wa_ctx_end(wa_ctx, *offset = index, 1);
 }
 
-static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size)
+static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
 {
        int ret;
 
-       ring->wa_ctx.obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size));
-       if (!ring->wa_ctx.obj) {
+       engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev,
+                                                  PAGE_ALIGN(size));
+       if (!engine->wa_ctx.obj) {
                DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
                return -ENOMEM;
        }
 
-       ret = i915_gem_obj_ggtt_pin(ring->wa_ctx.obj, PAGE_SIZE, 0);
+       ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
        if (ret) {
                DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
                                 ret);
-               drm_gem_object_unreference(&ring->wa_ctx.obj->base);
+               drm_gem_object_unreference(&engine->wa_ctx.obj->base);
                return ret;
        }
 
        return 0;
 }
 
-static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *ring)
+static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
 {
-       if (ring->wa_ctx.obj) {
-               i915_gem_object_ggtt_unpin(ring->wa_ctx.obj);
-               drm_gem_object_unreference(&ring->wa_ctx.obj->base);
-               ring->wa_ctx.obj = NULL;
+       if (engine->wa_ctx.obj) {
+               i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
+               drm_gem_object_unreference(&engine->wa_ctx.obj->base);
+               engine->wa_ctx.obj = NULL;
        }
 }
 
-static int intel_init_workaround_bb(struct intel_engine_cs *ring)
+static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 {
        int ret;
        uint32_t *batch;
        uint32_t offset;
        struct page *page;
-       struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+       struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
 
-       WARN_ON(ring->id != RCS);
+       WARN_ON(engine->id != RCS);
 
        /* update this when WA for higher Gen are added */
-       if (INTEL_INFO(ring->dev)->gen > 9) {
+       if (INTEL_INFO(engine->dev)->gen > 9) {
                DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
-                         INTEL_INFO(ring->dev)->gen);
+                         INTEL_INFO(engine->dev)->gen);
                return 0;
        }
 
        /* some WA perform writes to scratch page, ensure it is valid */
-       if (ring->scratch.obj == NULL) {
-               DRM_ERROR("scratch page not allocated for %s\n", ring->name);
+       if (engine->scratch.obj == NULL) {
+               DRM_ERROR("scratch page not allocated for %s\n", engine->name);
                return -EINVAL;
        }
 
-       ret = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE);
+       ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
        if (ret) {
                DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
                return ret;
@@ -1508,29 +1542,29 @@ static int intel_init_workaround_bb(struct intel_engine_cs *ring)
        batch = kmap_atomic(page);
        offset = 0;
 
-       if (INTEL_INFO(ring->dev)->gen == 8) {
-               ret = gen8_init_indirectctx_bb(ring,
+       if (INTEL_INFO(engine->dev)->gen == 8) {
+               ret = gen8_init_indirectctx_bb(engine,
                                               &wa_ctx->indirect_ctx,
                                               batch,
                                               &offset);
                if (ret)
                        goto out;
 
-               ret = gen8_init_perctx_bb(ring,
+               ret = gen8_init_perctx_bb(engine,
                                          &wa_ctx->per_ctx,
                                          batch,
                                          &offset);
                if (ret)
                        goto out;
-       } else if (INTEL_INFO(ring->dev)->gen == 9) {
-               ret = gen9_init_indirectctx_bb(ring,
+       } else if (INTEL_INFO(engine->dev)->gen == 9) {
+               ret = gen9_init_indirectctx_bb(engine,
                                               &wa_ctx->indirect_ctx,
                                               batch,
                                               &offset);
                if (ret)
                        goto out;
 
-               ret = gen9_init_perctx_bb(ring,
+               ret = gen9_init_perctx_bb(engine,
                                          &wa_ctx->per_ctx,
                                          batch,
                                          &offset);
@@ -1541,27 +1575,36 @@ static int intel_init_workaround_bb(struct intel_engine_cs *ring)
 out:
        kunmap_atomic(batch);
        if (ret)
-               lrc_destroy_wa_ctx_obj(ring);
+               lrc_destroy_wa_ctx_obj(engine);
 
        return ret;
 }
 
-static int gen8_init_common_ring(struct intel_engine_cs *ring)
+static void lrc_init_hws(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+
+       I915_WRITE(RING_HWS_PGA(engine->mmio_base),
+                  (u32)engine->status_page.gfx_addr);
+       POSTING_READ(RING_HWS_PGA(engine->mmio_base));
+}
+
+static int gen8_init_common_ring(struct intel_engine_cs *engine)
+{
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u8 next_context_status_buffer_hw;
+       unsigned int next_context_status_buffer_hw;
 
-       lrc_setup_hardware_status_page(ring,
-                               dev_priv->kernel_context->engine[ring->id].state);
+       lrc_init_hws(engine);
 
-       I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
-       I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
+       I915_WRITE_IMR(engine,
+                      ~(engine->irq_enable_mask | engine->irq_keep_mask));
+       I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
 
-       I915_WRITE(RING_MODE_GEN7(ring),
+       I915_WRITE(RING_MODE_GEN7(engine),
                   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
                   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
-       POSTING_READ(RING_MODE_GEN7(ring));
+       POSTING_READ(RING_MODE_GEN7(engine));
 
        /*
         * Instead of resetting the Context Status Buffer (CSB) read pointer to
@@ -1576,7 +1619,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
         * BXT  |         ?                |         ?            |
         */
        next_context_status_buffer_hw =
-               GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(ring)));
+               GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
 
        /*
         * When the CSB registers are reset (also after power-up / gpu reset),
@@ -1586,21 +1629,21 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
        if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
                next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
 
-       ring->next_context_status_buffer = next_context_status_buffer_hw;
-       DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
+       engine->next_context_status_buffer = next_context_status_buffer_hw;
+       DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
 
-       memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
+       intel_engine_init_hangcheck(engine);
 
        return 0;
 }
 
-static int gen8_init_render_ring(struct intel_engine_cs *ring)
+static int gen8_init_render_ring(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       ret = gen8_init_common_ring(ring);
+       ret = gen8_init_common_ring(engine);
        if (ret)
                return ret;
 
@@ -1614,24 +1657,24 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
 
        I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-       return init_workarounds_ring(ring);
+       return init_workarounds_ring(engine);
 }
 
-static int gen9_init_render_ring(struct intel_engine_cs *ring)
+static int gen9_init_render_ring(struct intel_engine_cs *engine)
 {
        int ret;
 
-       ret = gen8_init_common_ring(ring);
+       ret = gen8_init_common_ring(engine);
        if (ret)
                return ret;
 
-       return init_workarounds_ring(ring);
+       return init_workarounds_ring(engine);
 }
 
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
        struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        struct intel_ringbuffer *ringbuf = req->ringbuf;
        const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
        int i, ret;
@@ -1644,9 +1687,11 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
        for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
                const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-               intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(ring, i));
+               intel_logical_ring_emit_reg(ringbuf,
+                                           GEN8_RING_PDP_UDW(engine, i));
                intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
-               intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(ring, i));
+               intel_logical_ring_emit_reg(ringbuf,
+                                           GEN8_RING_PDP_LDW(engine, i));
                intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
        }
 
@@ -1670,7 +1715,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
         * not idle). PML4 is allocated during ppgtt init so this is
         * not needed in 48-bit.*/
        if (req->ctx->ppgtt &&
-           (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
+           (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
                if (!USES_FULL_48BIT_PPGTT(req->i915) &&
                    !intel_vgpu_active(req->i915->dev)) {
                        ret = intel_logical_ring_emit_pdps(req);
@@ -1678,7 +1723,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
                                return ret;
                }
 
-               req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
+               req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
        }
 
        ret = intel_logical_ring_begin(req, 4);
@@ -1698,9 +1743,9 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
        return 0;
 }
 
-static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
+static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
@@ -1708,25 +1753,26 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount++ == 0) {
-               I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
-               POSTING_READ(RING_IMR(ring->mmio_base));
+       if (engine->irq_refcount++ == 0) {
+               I915_WRITE_IMR(engine,
+                              ~(engine->irq_enable_mask | engine->irq_keep_mask));
+               POSTING_READ(RING_IMR(engine->mmio_base));
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
        return true;
 }
 
-static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
+static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount == 0) {
-               I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
-               POSTING_READ(RING_IMR(ring->mmio_base));
+       if (--engine->irq_refcount == 0) {
+               I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+               POSTING_READ(RING_IMR(engine->mmio_base));
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
@@ -1736,8 +1782,8 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
                           u32 unused)
 {
        struct intel_ringbuffer *ringbuf = request->ringbuf;
-       struct intel_engine_cs *ring = ringbuf->ring;
-       struct drm_device *dev = ring->dev;
+       struct intel_engine_cs *engine = ringbuf->engine;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t cmd;
        int ret;
@@ -1757,7 +1803,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 
        if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
                cmd |= MI_INVALIDATE_TLB;
-               if (ring == &dev_priv->ring[VCS])
+               if (engine == &dev_priv->engine[VCS])
                        cmd |= MI_INVALIDATE_BSD;
        }
 
@@ -1777,8 +1823,8 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
                                  u32 flush_domains)
 {
        struct intel_ringbuffer *ringbuf = request->ringbuf;
-       struct intel_engine_cs *ring = ringbuf->ring;
-       u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+       struct intel_engine_cs *engine = ringbuf->engine;
+       u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        bool vf_flush_wa = false;
        u32 flags = 0;
        int ret;
@@ -1806,7 +1852,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
                 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
                 * pipe control.
                 */
-               if (IS_GEN9(ring->dev))
+               if (IS_GEN9(engine->dev))
                        vf_flush_wa = true;
        }
 
@@ -1834,19 +1880,18 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
        return 0;
 }
 
-static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+static u32 gen8_get_seqno(struct intel_engine_cs *engine)
 {
-       return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+       return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
 }
 
-static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-       intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
 }
 
-static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
 {
-
        /*
         * On BXT A steppings there is a HW coherency issue whereby the
         * MI_STORE_DATA_IMM storing the completed request's seqno
@@ -1857,19 +1902,15 @@ static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
         * bxt_a_set_seqno(), where we also do a clflush after the write. So
         * this clflush in practice becomes an invalidate operation.
         */
-
-       if (!lazy_coherency)
-               intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
-
-       return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+       intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
 }
 
-static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-       intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
 
        /* See bxt_a_seqno_barrier() explaining the reason for the clflush. */
-       intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
+       intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
 }
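
The new hook only matters on the waiter side: on BXT-A the CPU can observe a stale seqno in the status page, so a waiter has to invalidate the cacheline (the clflush above) before re-reading. A hedged sketch of how an irq_seqno_barrier slots into a wait loop; the names are illustrative and the clflush is modeled as an acquire fence:

/* Waiter-side sketch: invalidate (barrier), then re-read the seqno.
 * Real code sleeps on engine->irq_queue instead of spinning.
 */
#include <stdatomic.h>
#include <stdint.h>

struct fake_engine {
	_Atomic uint32_t *hws_seqno;	/* status-page seqno slot */
	void (*irq_seqno_barrier)(struct fake_engine *e);
};

static void bxt_a_style_barrier(struct fake_engine *e)
{
	(void)e;
	/* Real code: intel_flush_status_page() -> clflush, which in
	 * practice acts as an invalidate. Modeled as an acquire fence. */
	atomic_thread_fence(memory_order_acquire);
}

static int seqno_passed(uint32_t seqno, uint32_t target)
{
	return (int32_t)(seqno - target) >= 0;	/* wrap-safe compare */
}

static void wait_for_seqno(struct fake_engine *e, uint32_t target)
{
	for (;;) {
		if (e->irq_seqno_barrier)	/* drop stale data first */
			e->irq_seqno_barrier(e);
		if (seqno_passed(atomic_load(e->hws_seqno), target))
			return;
	}
}

int main(void)
{
	_Atomic uint32_t hws = 42;
	struct fake_engine e = { &hws, bxt_a_style_barrier };

	wait_for_seqno(&e, 42);	/* already signalled, returns at once */
	return 0;
}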
 
 /*
@@ -1899,7 +1940,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
        intel_logical_ring_emit(ringbuf,
                                (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
        intel_logical_ring_emit(ringbuf,
-                               hws_seqno_address(request->ring) |
+                               hws_seqno_address(request->engine) |
                                MI_FLUSH_DW_USE_GTT);
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
@@ -1913,23 +1954,29 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
        struct intel_ringbuffer *ringbuf = request->ringbuf;
        int ret;
 
-       ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+       ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
        if (ret)
                return ret;
 
+       /* We're using a qword write; the seqno must be aligned to 8 bytes. */
+       BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
+
        /* w/a for post sync ops following a GPGPU operation: we
         * need a prior CS_STALL, which is emitted by the flush
         * following the batch.
         */
-       intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
+       intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
        intel_logical_ring_emit(ringbuf,
                                (PIPE_CONTROL_GLOBAL_GTT_IVB |
                                 PIPE_CONTROL_CS_STALL |
                                 PIPE_CONTROL_QW_WRITE));
-       intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
+       intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine));
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+       /* We're trashing one extra dword of the HWS with the qword write. */
+       intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+       intel_logical_ring_emit(ringbuf, MI_NOOP);
        return intel_logical_ring_advance_and_submit(request);
 }
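
This is the size adjustment from the patch subject: a 6-dword PIPE_CONTROL with PIPE_CONTROL_QW_WRITE performs a 64-bit post-sync write, so the seqno slot must be 8-byte aligned (the BUILD_BUG_ON above), the dword next to the seqno gets overwritten with zero, and a trailing MI_NOOP keeps the emission an even number of dwords. A standalone layout sketch of the eight dwords now emitted; the command encodings are placeholders, not real GEN opcodes:

/* Layout of the request emission above: 6-dword PIPE_CONTROL doing a
 * qword post-sync write, then the user interrupt, then a pad NOOP.
 */
#include <assert.h>
#include <stdint.h>

#define FAKE_PIPE_CONTROL(n)	(0x7a000000u | ((n) - 2))	/* invented */
#define FAKE_QW_WRITE		(1u << 14)
#define FAKE_CS_STALL		(1u << 20)
#define FAKE_USER_INTERRUPT	0x01000000u
#define FAKE_NOOP		0x00000000u

static int emit_request_render(uint32_t *cs, uint32_t hws_addr, uint32_t seqno)
{
	uint32_t *p = cs;

	assert((hws_addr & 7) == 0);	/* qword write: 8-byte aligned */
	*p++ = FAKE_PIPE_CONTROL(6);
	*p++ = FAKE_QW_WRITE | FAKE_CS_STALL;
	*p++ = hws_addr;		/* post-sync address, low dword */
	*p++ = 0;			/* address, high dword */
	*p++ = seqno;			/* data, low dword */
	*p++ = 0;			/* data, high dword: this is the
					 * extra HWS dword being trashed */
	*p++ = FAKE_USER_INTERRUPT;
	*p++ = FAKE_NOOP;		/* keep emission qword-aligned */
	return (int)(p - cs);		/* 8 dwords, matching begin(8 + WA) */
}

int main(void)
{
	uint32_t cs[8];

	return emit_request_render(cs, 0x1000, 1) == 8 ? 0 : 1;
}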
 
@@ -1938,19 +1985,19 @@ static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
        struct render_state so;
        int ret;
 
-       ret = i915_gem_render_state_prepare(req->ring, &so);
+       ret = i915_gem_render_state_prepare(req->engine, &so);
        if (ret)
                return ret;
 
        if (so.rodata == NULL)
                return 0;
 
-       ret = req->ring->emit_bb_start(req, so.ggtt_offset,
+       ret = req->engine->emit_bb_start(req, so.ggtt_offset,
                                       I915_DISPATCH_SECURE);
        if (ret)
                goto out;
 
-       ret = req->ring->emit_bb_start(req,
+       ret = req->engine->emit_bb_start(req,
                                       (so.ggtt_offset + so.aux_batch_offset),
                                       I915_DISPATCH_SECURE);
        if (ret)
@@ -1988,146 +2035,197 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
  * @engine: Engine Command Streamer.
  *
  */
-void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
+void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv;
 
-       if (!intel_ring_initialized(ring))
+       if (!intel_engine_initialized(engine))
                return;
 
-       dev_priv = ring->dev->dev_private;
+       /*
+        * Tasklet cannot be active at this point due to intel_mark_active/idle,
+        * so this is just for documentation.
+        */
+       if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
+               tasklet_kill(&engine->irq_tasklet);
+
+       dev_priv = engine->dev->dev_private;
 
-       if (ring->buffer) {
-               intel_logical_ring_stop(ring);
-               WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+       if (engine->buffer) {
+               intel_logical_ring_stop(engine);
+               WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
        }
 
-       if (ring->cleanup)
-               ring->cleanup(ring);
+       if (engine->cleanup)
+               engine->cleanup(engine);
 
-       i915_cmd_parser_fini_ring(ring);
-       i915_gem_batch_pool_fini(&ring->batch_pool);
+       i915_cmd_parser_fini_ring(engine);
+       i915_gem_batch_pool_fini(&engine->batch_pool);
 
-       if (ring->status_page.obj) {
-               kunmap(sg_page(ring->status_page.obj->pages->sgl));
-               ring->status_page.obj = NULL;
+       if (engine->status_page.obj) {
+               i915_gem_object_unpin_map(engine->status_page.obj);
+               engine->status_page.obj = NULL;
        }
 
-       ring->disable_lite_restore_wa = false;
-       ring->ctx_desc_template = 0;
+       engine->idle_lite_restore_wa = 0;
+       engine->disable_lite_restore_wa = false;
+       engine->ctx_desc_template = 0;
 
-       lrc_destroy_wa_ctx_obj(ring);
-       ring->dev = NULL;
+       lrc_destroy_wa_ctx_obj(engine);
+       engine->dev = NULL;
 }
 
 static void
 logical_ring_default_vfuncs(struct drm_device *dev,
-                           struct intel_engine_cs *ring)
+                           struct intel_engine_cs *engine)
 {
        /* Default vfuncs which can be overriden by each engine. */
-       ring->init_hw = gen8_init_common_ring;
-       ring->emit_request = gen8_emit_request;
-       ring->emit_flush = gen8_emit_flush;
-       ring->irq_get = gen8_logical_ring_get_irq;
-       ring->irq_put = gen8_logical_ring_put_irq;
-       ring->emit_bb_start = gen8_emit_bb_start;
+       engine->init_hw = gen8_init_common_ring;
+       engine->emit_request = gen8_emit_request;
+       engine->emit_flush = gen8_emit_flush;
+       engine->irq_get = gen8_logical_ring_get_irq;
+       engine->irq_put = gen8_logical_ring_put_irq;
+       engine->emit_bb_start = gen8_emit_bb_start;
+       engine->get_seqno = gen8_get_seqno;
+       engine->set_seqno = gen8_set_seqno;
        if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-               ring->get_seqno = bxt_a_get_seqno;
-               ring->set_seqno = bxt_a_set_seqno;
-       } else {
-               ring->get_seqno = gen8_get_seqno;
-               ring->set_seqno = gen8_set_seqno;
+               engine->irq_seqno_barrier = bxt_a_seqno_barrier;
+               engine->set_seqno = bxt_a_set_seqno;
        }
 }
 
 static inline void
-logical_ring_default_irqs(struct intel_engine_cs *ring, unsigned shift)
+logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
+{
+       engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
+       engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+}
+
+static int
+lrc_setup_hws(struct intel_engine_cs *engine,
+             struct drm_i915_gem_object *dctx_obj)
 {
-       ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
-       ring->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+       void *hws;
+
+       /* The HWSP is part of the default context object in LRC mode. */
+       engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
+                                      LRC_PPHWSP_PN * PAGE_SIZE;
+       hws = i915_gem_object_pin_map(dctx_obj);
+       if (IS_ERR(hws))
+               return PTR_ERR(hws);
+       engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
+       engine->status_page.obj = dctx_obj;
+
+       return 0;
 }
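
The address arithmetic in lrc_setup_hws() is worth spelling out: both the GGTT address programmed into the hardware and the CPU pointer returned by pin_map() are offset by the same LRC_PPHWSP_PN pages into the default context object. A toy sketch of that math, assuming one GuC shared page precedes the PPHWSP (LRC_PPHWSP_PN == 1 is an assumption for illustration):

/* HWSP address math sketch: the same page offset is applied to the
 * GGTT address and to the CPU mapping of the default context object.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_PAGE_SIZE		4096u
#define FAKE_LRC_PPHWSP_PN	1u	/* assumed: GuC page comes first */

int main(void)
{
	uint64_t dctx_ggtt = 0x100000;		/* pretend GGTT offset */
	uint8_t cpu_map[2 * FAKE_PAGE_SIZE];	/* pretend pin_map() result */

	uint64_t hws_gfx_addr = dctx_ggtt +
				FAKE_LRC_PPHWSP_PN * FAKE_PAGE_SIZE;
	uint8_t *hws_cpu_addr = cpu_map +
				FAKE_LRC_PPHWSP_PN * FAKE_PAGE_SIZE;

	printf("HWSP gfx addr 0x%llx, cpu %p\n",
	       (unsigned long long)hws_gfx_addr, (void *)hws_cpu_addr);
	return 0;
}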
 
 static int
-logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
+logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
 {
-       struct intel_context *dctx = to_i915(dev)->kernel_context;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_context *dctx = dev_priv->kernel_context;
+       enum forcewake_domains fw_domains;
        int ret;
 
        /* Intentionally left blank. */
-       ring->buffer = NULL;
+       engine->buffer = NULL;
+
+       engine->dev = dev;
+       INIT_LIST_HEAD(&engine->active_list);
+       INIT_LIST_HEAD(&engine->request_list);
+       i915_gem_batch_pool_init(dev, &engine->batch_pool);
+       init_waitqueue_head(&engine->irq_queue);
+
+       INIT_LIST_HEAD(&engine->buffers);
+       INIT_LIST_HEAD(&engine->execlist_queue);
+       INIT_LIST_HEAD(&engine->execlist_retired_req_list);
+       spin_lock_init(&engine->execlist_lock);
+
+       tasklet_init(&engine->irq_tasklet,
+                    intel_lrc_irq_handler, (unsigned long)engine);
 
-       ring->dev = dev;
-       INIT_LIST_HEAD(&ring->active_list);
-       INIT_LIST_HEAD(&ring->request_list);
-       i915_gem_batch_pool_init(dev, &ring->batch_pool);
-       init_waitqueue_head(&ring->irq_queue);
+       logical_ring_init_platform_invariants(engine);
 
-       INIT_LIST_HEAD(&ring->buffers);
-       INIT_LIST_HEAD(&ring->execlist_queue);
-       INIT_LIST_HEAD(&ring->execlist_retired_req_list);
-       spin_lock_init(&ring->execlist_lock);
+       fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
+                                                   RING_ELSP(engine),
+                                                   FW_REG_WRITE);
 
-       logical_ring_init_platform_invariants(ring);
+       fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+                                                    RING_CONTEXT_STATUS_PTR(engine),
+                                                    FW_REG_READ | FW_REG_WRITE);
 
-       ret = i915_cmd_parser_init_ring(ring);
+       fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+                                                    RING_CONTEXT_STATUS_BUF_BASE(engine),
+                                                    FW_REG_READ);
+
+       engine->fw_domains = fw_domains;
+
+       ret = i915_cmd_parser_init_ring(engine);
        if (ret)
                goto error;
 
-       ret = intel_lr_context_deferred_alloc(dctx, ring);
+       ret = intel_lr_context_deferred_alloc(dctx, engine);
        if (ret)
                goto error;
 
        /* As this is the default context, always pin it */
-       ret = intel_lr_context_do_pin(dctx, ring);
+       ret = intel_lr_context_do_pin(dctx, engine);
        if (ret) {
                DRM_ERROR(
                        "Failed to pin and map ringbuffer %s: %d\n",
-                       ring->name, ret);
+                       engine->name, ret);
+               goto error;
+       }
+
+       /* And setup the hardware status page. */
+       ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
+       if (ret) {
+               DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
                goto error;
        }
 
        return 0;
 
 error:
-       intel_logical_ring_cleanup(ring);
+       intel_logical_ring_cleanup(engine);
        return ret;
 }
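
The forcewake block added above precomputes, once at init, the union of power wells needed to touch ELSP and the context-status registers, so the submission hot path can take them with a single forcewake get over engine->fw_domains instead of per-register lookups. A sketch of the accumulate-a-bitmask idea, with an invented register-to-domain lookup standing in for intel_uncore_forcewake_for_reg():

/* Forcewake precomputation sketch: OR together the domains each
 * register needs; the hot path then grabs the union in one go.
 */
#include <stdint.h>
#include <stdio.h>

enum fake_fw_domains {
	FW_RENDER  = 1 << 0,
	FW_BLITTER = 1 << 1,
};

static unsigned fw_for_reg(uint32_t reg)
{
	/* Pretend decode: low registers need the render well, the rest
	 * the blitter well. Purely illustrative. */
	return reg < 0x8000 ? FW_RENDER : FW_BLITTER;
}

int main(void)
{
	uint32_t elsp = 0x2230, csb_ptr = 0x23a0, csb_buf = 0x8370;
	unsigned fw_domains = 0;

	fw_domains |= fw_for_reg(elsp);		/* write side */
	fw_domains |= fw_for_reg(csb_ptr);	/* read + write side */
	fw_domains |= fw_for_reg(csb_buf);	/* read side */

	printf("engine->fw_domains = 0x%x\n", fw_domains);
	return 0;
}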
 
 static int logical_render_ring_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+       struct intel_engine_cs *engine = &dev_priv->engine[RCS];
        int ret;
 
-       ring->name = "render ring";
-       ring->id = RCS;
-       ring->exec_id = I915_EXEC_RENDER;
-       ring->guc_id = GUC_RENDER_ENGINE;
-       ring->mmio_base = RENDER_RING_BASE;
+       engine->name = "render ring";
+       engine->id = RCS;
+       engine->exec_id = I915_EXEC_RENDER;
+       engine->guc_id = GUC_RENDER_ENGINE;
+       engine->mmio_base = RENDER_RING_BASE;
 
-       logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT);
+       logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT);
        if (HAS_L3_DPF(dev))
-               ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+               engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
-       logical_ring_default_vfuncs(dev, ring);
+       logical_ring_default_vfuncs(dev, engine);
 
        /* Override some for render ring. */
        if (INTEL_INFO(dev)->gen >= 9)
-               ring->init_hw = gen9_init_render_ring;
+               engine->init_hw = gen9_init_render_ring;
        else
-               ring->init_hw = gen8_init_render_ring;
-       ring->init_context = gen8_init_rcs_context;
-       ring->cleanup = intel_fini_pipe_control;
-       ring->emit_flush = gen8_emit_flush_render;
-       ring->emit_request = gen8_emit_request_render;
+               engine->init_hw = gen8_init_render_ring;
+       engine->init_context = gen8_init_rcs_context;
+       engine->cleanup = intel_fini_pipe_control;
+       engine->emit_flush = gen8_emit_flush_render;
+       engine->emit_request = gen8_emit_request_render;
 
-       ring->dev = dev;
+       engine->dev = dev;
 
-       ret = intel_init_pipe_control(ring);
+       ret = intel_init_pipe_control(engine);
        if (ret)
                return ret;
 
-       ret = intel_init_workaround_bb(ring);
+       ret = intel_init_workaround_bb(engine);
        if (ret) {
                /*
                 * We continue even if we fail to initialize WA batch
@@ -2138,9 +2236,9 @@ static int logical_render_ring_init(struct drm_device *dev)
                          ret);
        }
 
-       ret = logical_ring_init(dev, ring);
+       ret = logical_ring_init(dev, engine);
        if (ret) {
-               lrc_destroy_wa_ctx_obj(ring);
+               lrc_destroy_wa_ctx_obj(engine);
        }
 
        return ret;
@@ -2149,69 +2247,69 @@ static int logical_render_ring_init(struct drm_device *dev)
 static int logical_bsd_ring_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring = &dev_priv->ring[VCS];
+       struct intel_engine_cs *engine = &dev_priv->engine[VCS];
 
-       ring->name = "bsd ring";
-       ring->id = VCS;
-       ring->exec_id = I915_EXEC_BSD;
-       ring->guc_id = GUC_VIDEO_ENGINE;
-       ring->mmio_base = GEN6_BSD_RING_BASE;
+       engine->name = "bsd ring";
+       engine->id = VCS;
+       engine->exec_id = I915_EXEC_BSD;
+       engine->guc_id = GUC_VIDEO_ENGINE;
+       engine->mmio_base = GEN6_BSD_RING_BASE;
 
-       logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT);
-       logical_ring_default_vfuncs(dev, ring);
+       logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT);
+       logical_ring_default_vfuncs(dev, engine);
 
-       return logical_ring_init(dev, ring);
+       return logical_ring_init(dev, engine);
 }
 
 static int logical_bsd2_ring_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
+       struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
 
-       ring->name = "bsd2 ring";
-       ring->id = VCS2;
-       ring->exec_id = I915_EXEC_BSD;
-       ring->guc_id = GUC_VIDEO_ENGINE2;
-       ring->mmio_base = GEN8_BSD2_RING_BASE;
+       engine->name = "bsd2 ring";
+       engine->id = VCS2;
+       engine->exec_id = I915_EXEC_BSD;
+       engine->guc_id = GUC_VIDEO_ENGINE2;
+       engine->mmio_base = GEN8_BSD2_RING_BASE;
 
-       logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT);
-       logical_ring_default_vfuncs(dev, ring);
+       logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT);
+       logical_ring_default_vfuncs(dev, engine);
 
-       return logical_ring_init(dev, ring);
+       return logical_ring_init(dev, engine);
 }
 
 static int logical_blt_ring_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring = &dev_priv->ring[BCS];
+       struct intel_engine_cs *engine = &dev_priv->engine[BCS];
 
-       ring->name = "blitter ring";
-       ring->id = BCS;
-       ring->exec_id = I915_EXEC_BLT;
-       ring->guc_id = GUC_BLITTER_ENGINE;
-       ring->mmio_base = BLT_RING_BASE;
+       engine->name = "blitter ring";
+       engine->id = BCS;
+       engine->exec_id = I915_EXEC_BLT;
+       engine->guc_id = GUC_BLITTER_ENGINE;
+       engine->mmio_base = BLT_RING_BASE;
 
-       logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT);
-       logical_ring_default_vfuncs(dev, ring);
+       logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT);
+       logical_ring_default_vfuncs(dev, engine);
 
-       return logical_ring_init(dev, ring);
+       return logical_ring_init(dev, engine);
 }
 
 static int logical_vebox_ring_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring = &dev_priv->ring[VECS];
+       struct intel_engine_cs *engine = &dev_priv->engine[VECS];
 
-       ring->name = "video enhancement ring";
-       ring->id = VECS;
-       ring->exec_id = I915_EXEC_VEBOX;
-       ring->guc_id = GUC_VIDEOENHANCE_ENGINE;
-       ring->mmio_base = VEBOX_RING_BASE;
+       engine->name = "video enhancement ring";
+       engine->id = VECS;
+       engine->exec_id = I915_EXEC_VEBOX;
+       engine->guc_id = GUC_VIDEOENHANCE_ENGINE;
+       engine->mmio_base = VEBOX_RING_BASE;
 
-       logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT);
-       logical_ring_default_vfuncs(dev, ring);
+       logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT);
+       logical_ring_default_vfuncs(dev, engine);
 
-       return logical_ring_init(dev, ring);
+       return logical_ring_init(dev, engine);
 }
 
 /**
@@ -2219,7 +2317,7 @@ static int logical_vebox_ring_init(struct drm_device *dev)
  * @dev: DRM device.
  *
  * This function inits the engines for an Execlists submission style (the equivalent in the
- * legacy ringbuffer submission world would be i915_gem_init_rings). It does it only for
+ * legacy ringbuffer submission world would be i915_gem_init_engines). It does this only for
  * those engines that are present in the hardware.
  *
  * Return: non-zero if the initialization failed.
@@ -2260,13 +2358,13 @@ int intel_logical_rings_init(struct drm_device *dev)
        return 0;
 
 cleanup_vebox_ring:
-       intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
+       intel_logical_ring_cleanup(&dev_priv->engine[VECS]);
 cleanup_blt_ring:
-       intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
+       intel_logical_ring_cleanup(&dev_priv->engine[BCS]);
 cleanup_bsd_ring:
-       intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
+       intel_logical_ring_cleanup(&dev_priv->engine[VCS]);
 cleanup_render_ring:
-       intel_logical_ring_cleanup(&dev_priv->ring[RCS]);
+       intel_logical_ring_cleanup(&dev_priv->engine[RCS]);
 
        return ret;
 }
@@ -2314,13 +2412,13 @@ make_rpcs(struct drm_device *dev)
        return rpcs;
 }
 
-static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring)
+static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
 {
        u32 indirect_ctx_offset;
 
-       switch (INTEL_INFO(ring->dev)->gen) {
+       switch (INTEL_INFO(engine->dev)->gen) {
        default:
-               MISSING_CASE(INTEL_INFO(ring->dev)->gen);
+               MISSING_CASE(INTEL_INFO(engine->dev)->gen);
                /* fall through */
        case 9:
                indirect_ctx_offset =
@@ -2336,14 +2434,16 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring)
 }
 
 static int
-populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
-                   struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
+populate_lr_context(struct intel_context *ctx,
+                   struct drm_i915_gem_object *ctx_obj,
+                   struct intel_engine_cs *engine,
+                   struct intel_ringbuffer *ringbuf)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
-       struct page *page;
-       uint32_t *reg_state;
+       void *vaddr;
+       u32 *reg_state;
        int ret;
 
        if (!ppgtt)
@@ -2355,18 +2455,17 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
                return ret;
        }
 
-       ret = i915_gem_object_get_pages(ctx_obj);
-       if (ret) {
-               DRM_DEBUG_DRIVER("Could not get object pages\n");
+       vaddr = i915_gem_object_pin_map(ctx_obj);
+       if (IS_ERR(vaddr)) {
+               ret = PTR_ERR(vaddr);
+               DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
                return ret;
        }
-
-       i915_gem_object_pin_pages(ctx_obj);
+       ctx_obj->dirty = true;
 
        /* The second page of the context object contains some fields which must
         * be set up prior to the first execution. */
-       page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-       reg_state = kmap_atomic(page);
+       reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 
        /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
         * commands followed by (reg, value) pairs. The values we are setting here are
@@ -2374,33 +2473,47 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
         * recreate this batchbuffer with new values (including all the missing
         * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
        reg_state[CTX_LRI_HEADER_0] =
-               MI_LOAD_REGISTER_IMM(ring->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
-       ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring),
+               MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
+       ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
+                      RING_CONTEXT_CONTROL(engine),
                       _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
                                          CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
                                          (HAS_RESOURCE_STREAMER(dev) ?
                                            CTX_CTRL_RS_CTX_ENABLE : 0)));
-       ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0);
+       ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
+                      0);
+       ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
+                      0);
        /* Ring buffer start address is not known until the buffer is pinned.
         * It is written to the context image in execlists_update_context()
         */
-       ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START, RING_START(ring->mmio_base), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, RING_CTL(ring->mmio_base),
+       ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
+                      RING_START(engine->mmio_base), 0);
+       ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
+                      RING_CTL(engine->mmio_base),
                       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
-       ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, RING_BBADDR_UDW(ring->mmio_base), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, RING_BBADDR(ring->mmio_base), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_BB_STATE, RING_BBSTATE(ring->mmio_base),
+       ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
+                      RING_BBADDR_UDW(engine->mmio_base), 0);
+       ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
+                      RING_BBADDR(engine->mmio_base), 0);
+       ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
+                      RING_BBSTATE(engine->mmio_base),
                       RING_BB_PPGTT);
-       ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(ring->mmio_base), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(ring->mmio_base), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE, RING_SBBSTATE(ring->mmio_base), 0);
-       if (ring->id == RCS) {
-               ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(ring->mmio_base), 0);
-               ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(ring->mmio_base), 0);
-               ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, RING_INDIRECT_CTX_OFFSET(ring->mmio_base), 0);
-               if (ring->wa_ctx.obj) {
-                       struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+       ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
+                      RING_SBBADDR_UDW(engine->mmio_base), 0);
+       ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
+                      RING_SBBADDR(engine->mmio_base), 0);
+       ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
+                      RING_SBBSTATE(engine->mmio_base), 0);
+       if (engine->id == RCS) {
+               ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
+                              RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
+               ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
+                              RING_INDIRECT_CTX(engine->mmio_base), 0);
+               ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
+                              RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
+               if (engine->wa_ctx.obj) {
+                       struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
                        uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
 
                        reg_state[CTX_RCS_INDIRECT_CTX+1] =
@@ -2408,7 +2521,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
                                (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
 
                        reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
-                               intel_lr_indirect_ctx_offset(ring) << 6;
+                               intel_lr_indirect_ctx_offset(engine) << 6;
 
                        reg_state[CTX_BB_PER_CTX_PTR+1] =
                                (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
@@ -2416,16 +2529,25 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
                }
        }
        reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
-       ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(ring->mmio_base), 0);
+       ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
+                      RING_CTX_TIMESTAMP(engine->mmio_base), 0);
        /* PDP values will be assigned later if needed */
-       ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(ring, 3), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(ring, 3), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(ring, 2), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(ring, 2), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(ring, 1), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(ring, 1), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(ring, 0), 0);
-       ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(ring, 0), 0);
+       ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
+                      0);
+       ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
+                      0);
+       ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
+                      0);
+       ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
+                      0);
+       ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
+                      0);
+       ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
+                      0);
+       ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
+                      0);
+       ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
+                      0);
 
        if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
                /* 64b PPGTT (48bit canonical)
@@ -2439,20 +2561,16 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
                 * With dynamic page allocation, PDPs may not be allocated at
                 * this point. Point the unallocated PDPs to the scratch page
                 */
-               ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
-               ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
-               ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
-               ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
+               execlists_update_context_pdps(ppgtt, reg_state);
        }
 
-       if (ring->id == RCS) {
+       if (engine->id == RCS) {
                reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
                ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
                               make_rpcs(dev));
        }
 
-       kunmap_atomic(reg_state);
-       i915_gem_object_unpin_pages(ctx_obj);
+       i915_gem_object_unpin_map(ctx_obj);
 
        return 0;
 }
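
As the comment in populate_lr_context() notes, the context image is essentially a batch of MI_LOAD_REGISTER_IMM commands followed by (register, value) dword pairs; ASSIGN_CTX_REG fills in one such pair at a fixed dword offset. A toy sketch of that layout; the LRI encoding and register offsets below are invented, not the real CTX_* indices:

/* Context-image layout sketch: LRI header at a known index, then
 * (register offset, value) pairs written two dwords at a time.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_MI_LRI(n)	(0x22000000u | (2u * (n) - 1))	/* invented */

static void assign_ctx_reg(uint32_t *state, int pos, uint32_t reg,
			   uint32_t val)
{
	state[pos] = reg;	/* register offset dword */
	state[pos + 1] = val;	/* value dword */
}

int main(void)
{
	uint32_t reg_state[16] = { 0 };
	int i;

	reg_state[0] = FAKE_MI_LRI(2);			/* header */
	assign_ctx_reg(reg_state, 1, 0x2034, 0);	/* e.g. ring head */
	assign_ctx_reg(reg_state, 3, 0x2030, 0);	/* e.g. ring tail */

	for (i = 0; i < 5; i++)
		printf("reg_state[%d] = 0x%08x\n", i, (unsigned)reg_state[i]);
	return 0;
}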
@@ -2469,7 +2587,7 @@ void intel_lr_context_free(struct intel_context *ctx)
 {
        int i;
 
-       for (i = I915_NUM_RINGS; --i >= 0; ) {
+       for (i = I915_NUM_ENGINES; --i >= 0; ) {
                struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
                struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
 
@@ -2479,6 +2597,7 @@ void intel_lr_context_free(struct intel_context *ctx)
                if (ctx == ctx->i915->kernel_context) {
                        intel_unpin_ringbuffer_obj(ringbuf);
                        i915_gem_object_ggtt_unpin(ctx_obj);
+                       i915_gem_object_unpin_map(ctx_obj);
                }
 
                WARN_ON(ctx->engine[i].pin_count);
@@ -2501,15 +2620,15 @@ void intel_lr_context_free(struct intel_context *ctx)
  * in LRC mode, but does not include the "shared data page" used with
  * GuC submission. The caller should account for this if using the GuC.
  */
-uint32_t intel_lr_context_size(struct intel_engine_cs *ring)
+uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
 {
        int ret = 0;
 
-       WARN_ON(INTEL_INFO(ring->dev)->gen < 8);
+       WARN_ON(INTEL_INFO(engine->dev)->gen < 8);
 
-       switch (ring->id) {
+       switch (engine->id) {
        case RCS:
-               if (INTEL_INFO(ring->dev)->gen >= 9)
+               if (INTEL_INFO(engine->dev)->gen >= 9)
                        ret = GEN9_LR_CONTEXT_RENDER_SIZE;
                else
                        ret = GEN8_LR_CONTEXT_RENDER_SIZE;
@@ -2525,24 +2644,6 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *ring)
        return ret;
 }
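
On the allocation side, intel_lr_context_deferred_alloc() (further down) rounds this size up to a page and then adds the pages that sit in front of the register state. A small sizing sketch, under the assumption that LRC_PPHWSP_PN pages precede the state:

/* Context allocation sizing sketch: page-round the engine's context
 * size, then prepend the GuC/PPHWSP pages.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_PAGE_SIZE		4096u
#define FAKE_LRC_PPHWSP_PN	1u	/* assumed page count up front */

static uint32_t round_up_page(uint32_t x)
{
	return (x + FAKE_PAGE_SIZE - 1) & ~(FAKE_PAGE_SIZE - 1);
}

int main(void)
{
	uint32_t engine_ctx_size = 20 * FAKE_PAGE_SIZE + 123;	/* pretend */
	uint32_t total = round_up_page(engine_ctx_size) +
			 FAKE_PAGE_SIZE * FAKE_LRC_PPHWSP_PN;

	printf("allocate %u bytes\n", (unsigned)total);
	return 0;
}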
 
-static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
-               struct drm_i915_gem_object *default_ctx_obj)
-{
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       struct page *page;
-
-       /* The HWSP is part of the default context object in LRC mode. */
-       ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
-                       + LRC_PPHWSP_PN * PAGE_SIZE;
-       page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
-       ring->status_page.page_addr = kmap(page);
-       ring->status_page.obj = default_ctx_obj;
-
-       I915_WRITE(RING_HWS_PGA(ring->mmio_base),
-                       (u32)ring->status_page.gfx_addr);
-       POSTING_READ(RING_HWS_PGA(ring->mmio_base));
-}
-
 /**
  * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
  * @ctx: LR context to create.
@@ -2558,18 +2659,18 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
  */
 
 int intel_lr_context_deferred_alloc(struct intel_context *ctx,
-                                   struct intel_engine_cs *ring)
+                                   struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = ring->dev;
+       struct drm_device *dev = engine->dev;
        struct drm_i915_gem_object *ctx_obj;
        uint32_t context_size;
        struct intel_ringbuffer *ringbuf;
        int ret;
 
        WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
-       WARN_ON(ctx->engine[ring->id].state);
+       WARN_ON(ctx->engine[engine->id].state);
 
-       context_size = round_up(intel_lr_context_size(ring), 4096);
+       context_size = round_up(intel_lr_context_size(engine), 4096);
 
        /* One extra page as shared data between the driver and GuC */
        context_size += PAGE_SIZE * LRC_PPHWSP_PN;
@@ -2580,32 +2681,32 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
                return -ENOMEM;
        }
 
-       ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE);
+       ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
        if (IS_ERR(ringbuf)) {
                ret = PTR_ERR(ringbuf);
                goto error_deref_obj;
        }
 
-       ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
+       ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
        if (ret) {
                DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
                goto error_ringbuf;
        }
 
-       ctx->engine[ring->id].ringbuf = ringbuf;
-       ctx->engine[ring->id].state = ctx_obj;
+       ctx->engine[engine->id].ringbuf = ringbuf;
+       ctx->engine[engine->id].state = ctx_obj;
 
-       if (ctx != ctx->i915->kernel_context && ring->init_context) {
+       if (ctx != ctx->i915->kernel_context && engine->init_context) {
                struct drm_i915_gem_request *req;
 
-               req = i915_gem_request_alloc(ring, ctx);
+               req = i915_gem_request_alloc(engine, ctx);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        DRM_ERROR("ring create req: %d\n", ret);
                        goto error_ringbuf;
                }
 
-               ret = ring->init_context(req);
+               ret = engine->init_context(req);
                if (ret) {
                        DRM_ERROR("ring init context: %d\n",
                                ret);
@@ -2620,40 +2721,38 @@ error_ringbuf:
        intel_ringbuffer_free(ringbuf);
 error_deref_obj:
        drm_gem_object_unreference(&ctx_obj->base);
-       ctx->engine[ring->id].ringbuf = NULL;
-       ctx->engine[ring->id].state = NULL;
+       ctx->engine[engine->id].ringbuf = NULL;
+       ctx->engine[engine->id].state = NULL;
        return ret;
 }
 
-void intel_lr_context_reset(struct drm_device *dev,
-                       struct intel_context *ctx)
+void intel_lr_context_reset(struct drm_i915_private *dev_priv,
+                           struct intel_context *ctx)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *ring;
-       int i;
+       struct intel_engine_cs *engine;
 
-       for_each_ring(ring, dev_priv, i) {
+       for_each_engine(engine, dev_priv) {
                struct drm_i915_gem_object *ctx_obj =
-                               ctx->engine[ring->id].state;
+                               ctx->engine[engine->id].state;
                struct intel_ringbuffer *ringbuf =
-                               ctx->engine[ring->id].ringbuf;
+                               ctx->engine[engine->id].ringbuf;
+               void *vaddr;
                uint32_t *reg_state;
-               struct page *page;
 
                if (!ctx_obj)
                        continue;
 
-               if (i915_gem_object_get_pages(ctx_obj)) {
-                       WARN(1, "Failed get_pages for context obj\n");
+               vaddr = i915_gem_object_pin_map(ctx_obj);
+               if (WARN_ON(IS_ERR(vaddr)))
                        continue;
-               }
-               page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-               reg_state = kmap_atomic(page);
+
+               reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+               ctx_obj->dirty = true;
 
                reg_state[CTX_RING_HEAD+1] = 0;
                reg_state[CTX_RING_TAIL+1] = 0;
 
-               kunmap_atomic(reg_state);
+               i915_gem_object_unpin_map(ctx_obj);
 
                ringbuf->head = 0;
                ringbuf->tail = 0;