drm/i915: Compute the ELSP register location once
drivers/gpu/drm/i915/intel_lrc.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/**
 * DOC: Logical Rings, Logical Ring Contexts and Execlists
 *
 * Motivation:
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * One of the main differences with the legacy HW contexts is that logical
 * ring contexts incorporate many more things to the context's state, like
 * PDPs or ringbuffer control registers:
 *
 * The reason why PDPs are included in the context is straightforward: as
 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 * contained there means you don't need to do a ppgtt->switch_mm yourself;
 * instead, the GPU will do it for you on the context switch.
 *
 * But, what about the ringbuffer control registers (head, tail, etc..)?
 * shouldn't we just need a set of those per engine command streamer? This is
 * where the name "Logical Rings" starts to make sense: by virtualizing the
 * rings, the engine cs shifts to a new "ring buffer" with every context
 * switch. When you want to submit a workload to the GPU you: A) choose your
 * context, B) find its appropriate virtualized ring, C) write commands to it
 * and then, finally, D) tell the GPU to switch to that context.
 *
 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 * to a context is via a context execution list, ergo "Execlists".
 *
 * LRC implementation:
 * Regarding the creation of contexts, we have:
 *
 * - One global default context.
 * - One local default context for each opened fd.
 * - One local extra context for each context create ioctl call.
 *
 * Now that ringbuffers belong per-context (and not per-engine, like before)
 * and that contexts are uniquely tied to a given engine (and not reusable,
 * like before) we need:
 *
 * - One ringbuffer per-engine inside each context.
 * - One backing object per-engine inside each context.
 *
 * The global default context starts its life with these new objects fully
 * allocated and populated. The local default context for each opened fd is
 * more complex, because we don't know at creation time which engine is going
 * to use them. To handle this, we have implemented a deferred creation of LR
 * contexts:
 *
 * The local context starts its life as a hollow or blank holder, that only
 * gets populated for a given engine once we receive an execbuffer. If later
 * on we receive another execbuffer ioctl for the same context but a different
 * engine, we allocate/populate a new ringbuffer and context backing object and
 * so on.
 *
 * Finally, regarding local contexts created using the ioctl call: as they are
 * only allowed with the render ring, we can allocate & populate them right
 * away (no need to defer anything, at least for now).
 *
 * Execlists implementation:
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 * This method works as follows:
 *
 * When a request is committed, its commands (the BB start and any leading or
 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
 * for the appropriate context. The tail pointer in the hardware context is not
 * updated at this time, but instead, kept by the driver in the ringbuffer
 * structure. A structure representing this request is added to a request queue
 * for the appropriate engine: this structure contains a copy of the context's
 * tail after the request was written to the ring buffer and a pointer to the
 * context itself.
 *
 * If the engine's request queue was empty before the request was added, the
 * queue is processed immediately. Otherwise the queue will be processed during
 * a context switch interrupt. In any case, elements on the queue will get sent
 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 * globally unique 20-bit submission ID.
 *
 * When execution of a request completes, the GPU updates the context status
 * buffer with a context complete event and generates a context switch interrupt.
 * During the interrupt handling, the driver examines the events in the buffer:
 * for each context complete event, if the announced ID matches that on the head
 * of the request queue, then that request is retired and removed from the queue.
 *
 * After processing, if any requests were retired and the queue is not empty
 * then a new execution list can be submitted. The two requests at the front of
 * the queue are next to be submitted but since a context may not occur twice in
 * an execution list, if subsequent requests have the same ID as the first then
 * the two requests must be combined. This is done simply by discarding requests
 * at the head of the queue until either only one request is left (in which case
 * we use a NULL second context) or the first two requests have unique IDs.
 * (A worked example of this merging follows this comment block.)
 *
 * By always executing the first two requests in the queue the driver ensures
 * that the GPU is kept as busy as possible. In the case where a single context
 * completes but a second context is still executing, the request for this second
 * context will be at the head of the queue when we remove the first one. This
 * request will then be resubmitted along with a new request for a different context,
 * which will cause the hardware to continue executing the second request and queue
 * the new request (the GPU detects the condition of a context getting preempted
 * with the same context and optimizes the context switch flow by not doing
 * preemption, but just sampling the new tail pointer).
 *
 */
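
/*
 * Illustrative sketch (editor's addition, not from the original file): how
 * the pairing/merging described above plays out for a hypothetical queue.
 * Suppose the execlist queue holds requests [A1, A2, B1], where the letters
 * name contexts and the digits give submission order:
 *
 *	- A1 and A2 share a context, so A1 is dropped from the queue (A2's
 *	  tail already covers A1's workload) and A2 becomes the first element;
 *	- B1 has a different context, so it becomes the second element;
 *	- the pair (A2, B1) is then written to the ELSP.
 *
 * With a queue of just [A1], the second element is NULL and a lone
 * descriptor is submitted. See execlists_unqueue() below for the real logic.
 */
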
#include <linux/interrupt.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_mocs.h"

#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define RING_EXECLIST_QFULL		(1 << 0x2)
#define RING_EXECLIST1_VALID		(1 << 0x3)
#define RING_EXECLIST0_VALID		(1 << 0x4)
#define RING_EXECLIST_ACTIVE_STATUS	(3 << 0xE)
#define RING_EXECLIST1_ACTIVE		(1 << 0x11)
#define RING_EXECLIST0_ACTIVE		(1 << 0x12)

#define GEN8_CTX_STATUS_IDLE_ACTIVE	(1 << 0)
#define GEN8_CTX_STATUS_PREEMPTED	(1 << 1)
#define GEN8_CTX_STATUS_ELEMENT_SWITCH	(1 << 2)
#define GEN8_CTX_STATUS_ACTIVE_IDLE	(1 << 3)
#define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
#define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)

#define CTX_LRI_HEADER_0		0x01
#define CTX_CONTEXT_CONTROL		0x02
#define CTX_RING_HEAD			0x04
#define CTX_RING_TAIL			0x06
#define CTX_RING_BUFFER_START		0x08
#define CTX_RING_BUFFER_CONTROL		0x0a
#define CTX_BB_HEAD_U			0x0c
#define CTX_BB_HEAD_L			0x0e
#define CTX_BB_STATE			0x10
#define CTX_SECOND_BB_HEAD_U		0x12
#define CTX_SECOND_BB_HEAD_L		0x14
#define CTX_SECOND_BB_STATE		0x16
#define CTX_BB_PER_CTX_PTR		0x18
#define CTX_RCS_INDIRECT_CTX		0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
#define CTX_LRI_HEADER_1		0x21
#define CTX_CTX_TIMESTAMP		0x22
#define CTX_PDP3_UDW			0x24
#define CTX_PDP3_LDW			0x26
#define CTX_PDP2_UDW			0x28
#define CTX_PDP2_LDW			0x2a
#define CTX_PDP1_UDW			0x2c
#define CTX_PDP1_LDW			0x2e
#define CTX_PDP0_UDW			0x30
#define CTX_PDP0_LDW			0x32
#define CTX_LRI_HEADER_2		0x41
#define CTX_R_PWR_CLK_STATE		0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44

#define GEN8_CTX_VALID (1<<0)
#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
#define GEN8_CTX_FORCE_RESTORE (1<<2)
#define GEN8_CTX_L3LLC_COHERENT (1<<5)
#define GEN8_CTX_PRIVILEGE (1<<8)

#define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
	(reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
	(reg_state)[(pos)+1] = (val); \
} while (0)

#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
	const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
	reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
	reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
} while (0)

#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
	reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
	reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
} while (0)
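
/*
 * Editor's note (illustration only): ASSIGN_CTX_PDP(ppgtt, reg_state, 3)
 * above expands, after token pasting, to roughly:
 *
 *	const u64 _addr = i915_page_dir_dma_addr((ppgtt), (3));
 *	reg_state[CTX_PDP3_UDW+1] = upper_32_bits(_addr);
 *	reg_state[CTX_PDP3_LDW+1] = lower_32_bits(_addr);
 *
 * i.e. it fills only the value slot (offset +1) of the PDP3 upper/lower
 * register pair in the context image; the register offsets themselves are
 * presumably written when the context image is first populated (not shown
 * in this excerpt).
 */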

enum {
	FAULT_AND_HANG = 0,
	FAULT_AND_HALT, /* Debug only */
	FAULT_AND_STREAM,
	FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
#define GEN8_CTX_ID_WIDTH 21
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x17
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x26

/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */

static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
					    struct intel_engine_cs *engine);
static int intel_lr_context_pin(struct i915_gem_context *ctx,
				struct intel_engine_cs *engine);

/**
 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
 * @dev_priv: i915 device private
 * @enable_execlists: value of i915.enable_execlists module parameter.
 *
 * Only certain platforms support Execlists (the prerequisites being
 * support for Logical Ring Contexts and Aliasing PPGTT or better).
 *
 * Return: 1 if Execlists is supported and has to be enabled.
 */
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
{
	/* On platforms with execlist available, vGPU will only
	 * support execlist mode, no ring buffer mode.
	 */
	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
		return 1;

	if (INTEL_GEN(dev_priv) >= 9)
		return 1;

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
	    USES_PPGTT(dev_priv) &&
	    i915.use_mmio_flip >= 0)
		return 1;

	return 0;
}

static void
logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
		engine->idle_lite_restore_wa = ~0;

	engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
					IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
					(engine->id == VCS || engine->id == VCS2);

	engine->ctx_desc_template = GEN8_CTX_VALID;
	if (IS_GEN8(dev_priv))
		engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
	engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;

	/* TODO: WaDisableLiteRestore when we start using semaphore
	 * signalling between Command Streamers */
	/* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */

	/* WaEnableForceRestoreInCtxtDescForVCS:skl */
	/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
	if (engine->disable_lite_restore_wa)
		engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
}

/**
 * intel_lr_context_descriptor_update() - calculate & cache the descriptor
 *					  for a pinned context
 * @ctx: Context to work on
 * @engine: Engine the descriptor will be used with
 *
 * The context descriptor encodes various attributes of a context,
 * including its GTT address and some flags. Because it's fairly
 * expensive to calculate, we'll just do it once and cache the result,
 * which remains valid until the context is unpinned.
 *
 * This is what a descriptor looks like, from LSB to MSB::
 *
 *	bits  0-11:	flags, GEN8_CTX_* (cached in ctx_desc_template)
 *	bits 12-31:	LRCA, GTT address of (the HWSP of) this context
 *	bits 32-52:	ctx ID, a globally unique tag
 *	bits 53-54:	mbz, reserved for use by hardware
 *	bits 55-63:	group ID, currently unused and set to 0
 */
static void
intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
				   struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	u64 desc;

	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));

	desc = ctx->desc_template;			/* bits 3-4 */
	desc |= engine->ctx_desc_template;		/* bits 0-11 */
	desc |= i915_ggtt_offset(ce->state) + LRC_PPHWSP_PN * PAGE_SIZE;
							/* bits 12-31 */
	desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;	/* bits 32-52 */

	ce->lrc_desc = desc;
}

uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
				     struct intel_engine_cs *engine)
{
	return ctx->engine[engine->id].lrc_desc;
}
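
/*
 * Editor's sketch (hypothetical values, not from the original file): with
 * ctx->hw_id == 5 and a context image whose PPHWSP sits at GGTT offset
 * 0x10000, the cached descriptor above assembles as:
 *
 *	desc = ctx->desc_template | engine->ctx_desc_template
 *	     | 0x10000				(LRCA, bits 12-31)
 *	     | (u64)5 << GEN8_CTX_ID_SHIFT	(ctx ID, bits 32-52)
 *
 * For a GEN8 render engine the template flags would typically be
 * GEN8_CTX_VALID | GEN8_CTX_L3LLC_COHERENT | GEN8_CTX_PRIVILEGE, per
 * logical_ring_init_platform_invariants() above.
 */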

static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
				 struct drm_i915_gem_request *rq1)
{
	struct intel_engine_cs *engine = rq0->engine;
	struct drm_i915_private *dev_priv = rq0->i915;
	u32 __iomem *elsp =
		dev_priv->regs + i915_mmio_reg_offset(RING_ELSP(engine));
	u64 desc[2];

	if (rq1) {
		desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
		rq1->elsp_submitted++;
	} else {
		desc[1] = 0;
	}

	desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
	rq0->elsp_submitted++;

	/* You must always write both descriptors in the order below. */
	writel(upper_32_bits(desc[1]), elsp);
	writel(lower_32_bits(desc[1]), elsp);

	writel(upper_32_bits(desc[0]), elsp);
	/* The context is automatically loaded after the following */
	writel(lower_32_bits(desc[0]), elsp);
}
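
/*
 * Editor's note: the hardware-visible effect of the function above is four
 * 32-bit MMIO writes to the one ELSP register location (computed once into
 * 'elsp'), in this strict order:
 *
 *	writel #1: upper_32_bits(desc[1])	(element 1, high dword)
 *	writel #2: lower_32_bits(desc[1])	(element 1, low dword)
 *	writel #3: upper_32_bits(desc[0])	(element 0, high dword)
 *	writel #4: lower_32_bits(desc[0])	(element 0, low dword)
 *
 * Per the in-line comment, the final write is what triggers loading of the
 * submitted contexts.
 */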

static void
execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
{
	ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
}

static void execlists_update_context(struct drm_i915_gem_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;

	reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);

	/* True 32b PPGTT with dynamic page allocation: update PDP
	 * registers and point the unallocated PDPs to scratch page.
	 * PML4 is allocated during ppgtt init, so this is not needed
	 * in 48-bit mode.
	 */
	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
		execlists_update_context_pdps(ppgtt, reg_state);
}

static void execlists_elsp_submit_contexts(struct drm_i915_gem_request *rq0,
					   struct drm_i915_gem_request *rq1)
{
	struct drm_i915_private *dev_priv = rq0->i915;
	unsigned int fw_domains = rq0->engine->fw_domains;

	execlists_update_context(rq0);

	if (rq1)
		execlists_update_context(rq1);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	execlists_elsp_write(rq0, rq1);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);
}

static inline void execlists_context_status_change(
	struct drm_i915_gem_request *rq,
	unsigned long status)
{
	/*
	 * Only used when GVT-g is enabled now. When GVT-g is disabled,
	 * the compiler should eliminate this function as dead-code.
	 */
	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return;

	atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
}

static void execlists_unqueue(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
	struct drm_i915_gem_request *cursor, *tmp;

	assert_spin_locked(&engine->execlist_lock);

	/*
	 * If irqs are not active generate a warning as batches that finish
	 * without the irqs may get lost and a GPU Hang may occur.
	 */
	WARN_ON(!intel_irqs_enabled(engine->i915));

	/* Try to read in pairs */
	list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
				 execlist_link) {
		if (!req0) {
			req0 = cursor;
		} else if (req0->ctx == cursor->ctx) {
			/* Same ctx: ignore first request, as second request
			 * will update tail past first request's workload */
			cursor->elsp_submitted = req0->elsp_submitted;
			list_del(&req0->execlist_link);
			i915_gem_request_put(req0);
			req0 = cursor;
		} else {
			if (IS_ENABLED(CONFIG_DRM_I915_GVT)) {
				/*
				 * req0 (after merged) ctx requires single
				 * submission, stop picking
				 */
				if (req0->ctx->execlists_force_single_submission)
					break;
				/*
				 * req0 ctx doesn't require single submission,
				 * but next req ctx requires, stop picking
				 */
				if (cursor->ctx->execlists_force_single_submission)
					break;
			}
			req1 = cursor;
			WARN_ON(req1->elsp_submitted);
			break;
		}
	}

	if (unlikely(!req0))
		return;

	execlists_context_status_change(req0, INTEL_CONTEXT_SCHEDULE_IN);

	if (req1)
		execlists_context_status_change(req1,
						INTEL_CONTEXT_SCHEDULE_IN);

	if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
		/*
		 * WaIdleLiteRestore: make sure we never cause a lite restore
		 * with HEAD==TAIL.
		 *
		 * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL as we
		 * resubmit the request. See gen8_emit_request() for where we
		 * prepare the padding after the end of the request.
		 */
		req0->tail = req0->wa_tail;
	}

	execlists_elsp_submit_contexts(req0, req1);
}

static unsigned int
execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
{
	struct drm_i915_gem_request *head_req;

	assert_spin_locked(&engine->execlist_lock);

	head_req = list_first_entry_or_null(&engine->execlist_queue,
					    struct drm_i915_gem_request,
					    execlist_link);

	if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
		return 0;

	WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");

	if (--head_req->elsp_submitted > 0)
		return 0;

	execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT);

	list_del(&head_req->execlist_link);
	i915_gem_request_put(head_req);

	return 1;
}

static u32
get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
		   u32 *context_id)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 status;

	read_pointer %= GEN8_CSB_ENTRIES;

	status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));

	if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
		return 0;

	*context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
							      read_pointer));

	return status;
}

/*
 * Check the unread Context Status Buffers and manage the submission of new
 * contexts to the ELSP accordingly.
 */
static void intel_lrc_irq_handler(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct drm_i915_private *dev_priv = engine->i915;
	u32 status_pointer;
	unsigned int read_pointer, write_pointer;
	u32 csb[GEN8_CSB_ENTRIES][2];
	unsigned int csb_read = 0, i;
	unsigned int submit_contexts = 0;

	intel_uncore_forcewake_get(dev_priv, engine->fw_domains);

	status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));

	read_pointer = engine->next_context_status_buffer;
	write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
	if (read_pointer > write_pointer)
		write_pointer += GEN8_CSB_ENTRIES;

	while (read_pointer < write_pointer) {
		if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
			break;
		csb[csb_read][0] = get_context_status(engine, ++read_pointer,
						      &csb[csb_read][1]);
		csb_read++;
	}

	engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;

	/* Update the read pointer to the old write pointer. Manual ringbuffer
	 * management ftw </sarcasm> */
	I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
		      _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
				    engine->next_context_status_buffer << 8));

	intel_uncore_forcewake_put(dev_priv, engine->fw_domains);

	spin_lock(&engine->execlist_lock);

	for (i = 0; i < csb_read; i++) {
		if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
			if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
				if (execlists_check_remove_request(engine, csb[i][1]))
					WARN(1, "Lite Restored request removed from queue\n");
			} else
				WARN(1, "Preemption without Lite Restore\n");
		}

		if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
		    GEN8_CTX_STATUS_ELEMENT_SWITCH))
			submit_contexts +=
				execlists_check_remove_request(engine, csb[i][1]);
	}

	if (submit_contexts) {
		if (!engine->disable_lite_restore_wa ||
		    (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
			execlists_unqueue(engine);
	}

	spin_unlock(&engine->execlist_lock);

	if (unlikely(submit_contexts > 2))
		DRM_ERROR("More than two context complete events?\n");
}

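/*
 * Editor's sketch (hypothetical values): the read/write pointer arithmetic
 * above, with GEN8_CSB_ENTRIES == 6. Say next_context_status_buffer is 4 and
 * the hardware write pointer reads back as 1:
 *
 *	read_pointer  = 4
 *	write_pointer = 1 -> 1 + 6 = 7	(unwrapped past the ring end)
 *
 * The loop then pre-increments through 5, 6 and 7, which
 * get_context_status() folds back modulo 6 to CSB slots 5, 0 and 1, and
 * next_context_status_buffer becomes 7 % 6 = 1, i.e. the old write pointer.
 */
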
static void execlists_submit_request(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;

	spin_lock_bh(&engine->execlist_lock);

	i915_gem_request_get(request);
	request->ctx_hw_id = request->ctx->hw_id;

	if (list_empty(&engine->execlist_queue))
		tasklet_hi_schedule(&engine->irq_tasklet);
	list_add_tail(&request->execlist_link, &engine->execlist_queue);

	spin_unlock_bh(&engine->execlist_lock);
}

int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_context *ce = &request->ctx->engine[engine->id];
	int ret;

	/* Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += EXECLISTS_REQUEST_SIZE;

	if (!ce->state) {
		ret = execlists_context_deferred_alloc(request->ctx, engine);
		if (ret)
			return ret;
	}

	request->ring = ce->ring;

	if (i915.enable_guc_submission) {
		/*
		 * Check that the GuC has space for the request before
		 * going any further, as the i915_add_request() call
		 * later on mustn't fail ...
		 */
		ret = i915_guc_wq_check_space(request);
		if (ret)
			return ret;
	}

	ret = intel_lr_context_pin(request->ctx, engine);
	if (ret)
		return ret;

	ret = intel_ring_begin(request, 0);
	if (ret)
		goto err_unpin;

	if (!ce->initialised) {
		ret = engine->init_context(request);
		if (ret)
			goto err_unpin;

		ce->initialised = true;
	}

	/* Note that after this point, we have committed to using
	 * this request as it is being used to both track the
	 * state of engine initialisation and liveness of the
	 * golden renderstate above. Think twice before you try
	 * to cancel/unwind this request now.
	 */

	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
	return 0;

err_unpin:
	intel_lr_context_unpin(request->ctx, engine);
	return ret;
}

/*
 * intel_logical_ring_advance() - advance the tail and prepare for submission
 * @request: Request to advance the logical ringbuffer of.
 *
 * The tail is updated in our logical ringbuffer struct, not in the actual context. What
 * really happens during submission is that the context and current tail will be placed
 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
 * point, the tail *inside* the context is updated and the ELSP written to.
 */
static int
intel_logical_ring_advance(struct drm_i915_gem_request *request)
{
	struct intel_ring *ring = request->ring;
	struct intel_engine_cs *engine = request->engine;

	intel_ring_advance(ring);
	request->tail = ring->tail;

	/*
	 * Here we add two extra NOOPs as padding to avoid
	 * lite restore of a context with HEAD==TAIL.
	 *
	 * Caller must reserve WA_TAIL_DWORDS for us!
	 */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	request->wa_tail = ring->tail;

	/* We keep the previous context alive until we retire the following
	 * request. This ensures that the context object is still pinned
	 * for any residual writes the HW makes into it on the context switch
	 * into the next object following the breadcrumb. Otherwise, we may
	 * retire the context too early.
	 */
	request->previous_context = engine->last_context;
	engine->last_context = request->ctx;
	return 0;
}

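/*
 * Editor's sketch: ring layout produced by the function above, with T the
 * tail used for normal submission and W the tail used for WaIdleLiteRestore
 * resubmission:
 *
 *	... request commands ... | MI_NOOP | MI_NOOP |
 *	                         ^T                  ^W
 *	                  (request->tail)    (request->wa_tail)
 *
 * Resubmitting with W instead of T (see execlists_unqueue() above) keeps
 * HEAD != TAIL when the hardware samples the context again after the
 * request's commands have already been consumed.
 */
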
void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *req, *tmp;
	LIST_HEAD(cancel_list);

	WARN_ON(!mutex_is_locked(&engine->i915->drm.struct_mutex));

	spin_lock_bh(&engine->execlist_lock);
	list_replace_init(&engine->execlist_queue, &cancel_list);
	spin_unlock_bh(&engine->execlist_lock);

	list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
		list_del(&req->execlist_link);
		i915_gem_request_put(req);
	}
}

static int intel_lr_context_pin(struct i915_gem_context *ctx,
				struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	void *vaddr;
	u32 *lrc_reg_state;
	int ret;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);

	if (ce->pin_count++)
		return 0;

	ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN,
			   PIN_OFFSET_BIAS | GUC_WOPCM_TOP | PIN_GLOBAL);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto unpin_vma;
	}

	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;

	ret = intel_ring_pin(ce->ring);
	if (ret)
		goto unpin_map;

	intel_lr_context_descriptor_update(ctx, engine);

	lrc_reg_state[CTX_RING_BUFFER_START+1] =
		i915_ggtt_offset(ce->ring->vma);
	ce->lrc_reg_state = lrc_reg_state;
	ce->state->obj->dirty = true;

	/* Invalidate GuC TLB. */
	if (i915.enable_guc_submission) {
		struct drm_i915_private *dev_priv = ctx->i915;
		I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
	}

	i915_gem_context_get(ctx);
	return 0;

unpin_map:
	i915_gem_object_unpin_map(ce->state->obj);
unpin_vma:
	__i915_vma_unpin(ce->state);
err:
	ce->pin_count = 0;
	return ret;
}

void intel_lr_context_unpin(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	GEM_BUG_ON(ce->pin_count == 0);

	if (--ce->pin_count)
		return;

	intel_ring_unpin(ce->ring);

	i915_gem_object_unpin_map(ce->state->obj);
	i915_vma_unpin(ce->state);

	i915_gem_context_put(ctx);
}

static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	int ret, i;
	struct intel_ring *ring = req->ring;
	struct i915_workarounds *w = &req->i915->workarounds;

	if (w->count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, w->count * 2 + 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
	for (i = 0; i < w->count; i++) {
		intel_ring_emit_reg(ring, w->reg[i].addr);
		intel_ring_emit(ring, w->reg[i].value);
	}
	intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}

#define wa_ctx_emit(batch, index, cmd) \
	do { \
		int __index = (index)++; \
		if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
			return -ENOSPC; \
		} \
		batch[__index] = (cmd); \
	} while (0)

#define wa_ctx_emit_reg(batch, index, reg) \
	wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))

/*
 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
 * PIPE_CONTROL instruction. This is required for the flush to happen correctly
 * but there is a slight complication as this is applied in WA batch where the
 * values are only initialized once so we cannot take register value at the
 * beginning and reuse it further; hence we save its value to memory, upload a
 * constant value with bit21 set and then we restore it back with the saved value.
 * To simplify the WA, a constant value is formed by using the default value
 * of this register. This shouldn't be a problem because we are only modifying
 * it for a short period and this batch is non-preemptible. We can of course
 * use additional instructions that read the actual value of the register
 * at that time and set our bit of interest but it makes the WA complicated.
 *
 * This WA is also required for Gen9 so extracting as a function avoids
 * code duplication.
 */
static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
						uint32_t *batch,
						uint32_t index)
{
	struct drm_i915_private *dev_priv = engine->i915;
	uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);

	/*
	 * WaDisableLSQCROPERFforOCL:skl,kbl
	 * This WA is implemented in skl_init_clock_gating() but since
	 * this batch updates GEN8_L3SQCREG4 with default value we need to
	 * set this bit here to retain the WA during flush.
	 */
	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0) ||
	    IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
		l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;

	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
				   MI_SRM_LRM_GLOBAL_GTT));
	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
	wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
	wa_ctx_emit(batch, index, 0);

	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
	wa_ctx_emit(batch, index, l3sqc4_flush);

	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
	wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
				   PIPE_CONTROL_DC_FLUSH_ENABLE));
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);

	wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
				   MI_SRM_LRM_GLOBAL_GTT));
	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
	wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
	wa_ctx_emit(batch, index, 0);

	return index;
}

static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
				    uint32_t offset,
				    uint32_t start_alignment)
{
	return wa_ctx->offset = ALIGN(offset, start_alignment);
}

static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
			     uint32_t offset,
			     uint32_t size_alignment)
{
	wa_ctx->size = offset - wa_ctx->offset;

	WARN(wa_ctx->size % size_alignment,
	     "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
	     wa_ctx->size, size_alignment);
	return 0;
}

/*
 * Typically we only have one indirect_ctx and per_ctx batch buffer which are
 * initialized at the beginning and shared across all contexts but this field
 * helps us to have multiple batches at different offsets and select them based
 * on a criteria. At the moment this batch always starts at the beginning of the page
 * and at this point we don't have multiple wa_ctx batch buffers.
 *
 * The number of WA applied are not known at the beginning; we use this field
 * to return the number of DWORDS written.
 *
 * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
 * so it adds NOOPs as padding to make it cacheline aligned.
 * MI_BATCH_BUFFER_END will be added to perctx batch and both of them together
 * make a complete batch buffer.
 */
static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
				    struct i915_wa_ctx_bb *wa_ctx,
				    uint32_t *batch,
				    uint32_t *offset)
{
	uint32_t scratch_addr;
	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

	/* WaDisableCtxRestoreArbitration:bdw,chv */
	wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);

	/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
	if (IS_BROADWELL(engine->i915)) {
		int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
		if (rc < 0)
			return rc;
		index = rc;
	}

	/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
	/* Actual scratch location is at 128 bytes offset */
	scratch_addr = i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;

	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
	wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
				   PIPE_CONTROL_GLOBAL_GTT_IVB |
				   PIPE_CONTROL_CS_STALL |
				   PIPE_CONTROL_QW_WRITE));
	wa_ctx_emit(batch, index, scratch_addr);
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);

	/* Pad to end of cacheline */
	while (index % CACHELINE_DWORDS)
		wa_ctx_emit(batch, index, MI_NOOP);

	/*
	 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
	 * execution depends on the length specified in terms of cache lines
	 * in the register CTX_RCS_INDIRECT_CTX
	 */

	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
}

/*
 * This batch is started immediately after indirect_ctx batch. Since we ensure
 * that indirect_ctx ends on a cacheline this batch is aligned automatically.
 *
 * The number of DWORDS written is returned using this field.
 *
 * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
 * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
 */
static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
			       struct i915_wa_ctx_bb *wa_ctx,
			       uint32_t *batch,
			       uint32_t *offset)
{
	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

	/* WaDisableCtxRestoreArbitration:bdw,chv */
	wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);

	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);

	return wa_ctx_end(wa_ctx, *offset = index, 1);
}

static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
				    struct i915_wa_ctx_bb *wa_ctx,
				    uint32_t *batch,
				    uint32_t *offset)
{
	int ret;
	struct drm_i915_private *dev_priv = engine->i915;
	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

	/* WaDisableCtxRestoreArbitration:skl,bxt */
	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);

	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
	ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
	if (ret < 0)
		return ret;
	index = ret;

	/* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl */
	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
	wa_ctx_emit_reg(batch, index, COMMON_SLICE_CHICKEN2);
	wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(
			    GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE));
	wa_ctx_emit(batch, index, MI_NOOP);

	/* WaClearSlmSpaceAtContextSwitch:kbl */
	/* Actual scratch location is at 128 bytes offset */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
		u32 scratch_addr =
			i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;

		wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
		wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
					   PIPE_CONTROL_GLOBAL_GTT_IVB |
					   PIPE_CONTROL_CS_STALL |
					   PIPE_CONTROL_QW_WRITE));
		wa_ctx_emit(batch, index, scratch_addr);
		wa_ctx_emit(batch, index, 0);
		wa_ctx_emit(batch, index, 0);
		wa_ctx_emit(batch, index, 0);
	}

	/* WaMediaPoolStateCmdInWABB:bxt */
	if (HAS_POOLED_EU(engine->i915)) {
		/*
		 * EU pool configuration is setup along with golden context
		 * during context initialization. This value depends on
		 * device type (2x6 or 3x6) and needs to be updated based
		 * on which subslice is disabled especially for 2x6
		 * devices, however it is safe to load default
		 * configuration of 3x6 device instead of masking off
		 * corresponding bits because HW ignores bits of a disabled
		 * subslice and drops down to appropriate config. Please
		 * see render_state_setup() in i915_gem_render_state.c for
		 * possible configurations, to avoid duplication they are
		 * not shown here again.
		 */
		u32 eu_pool_config = 0x00777000;
		wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE);
		wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE);
		wa_ctx_emit(batch, index, eu_pool_config);
		wa_ctx_emit(batch, index, 0);
		wa_ctx_emit(batch, index, 0);
		wa_ctx_emit(batch, index, 0);
	}

	/* Pad to end of cacheline */
	while (index % CACHELINE_DWORDS)
		wa_ctx_emit(batch, index, MI_NOOP);

	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
}

static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
			       struct i915_wa_ctx_bb *wa_ctx,
			       uint32_t *batch,
			       uint32_t *offset)
{
	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
	if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
	    IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
		wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
		wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
		wa_ctx_emit(batch, index,
			    _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
		wa_ctx_emit(batch, index, MI_NOOP);
	}

	/* WaClearTdlStateAckDirtyBits:bxt */
	if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
		wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));

		wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
		wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));

		wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1);
		wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));

		wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2);
		wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));

		wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2);
		/* dummy write to CS, mask bits are 0 to ensure the register is not modified */
		wa_ctx_emit(batch, index, 0x0);
		wa_ctx_emit(batch, index, MI_NOOP);
	}

	/* WaDisableCtxRestoreArbitration:skl,bxt */
	if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
	    IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);

	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);

	return wa_ctx_end(wa_ctx, *offset = index, 1);
}

static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create(&engine->i915->drm, PAGE_ALIGN(size));
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, PAGE_SIZE, PIN_GLOBAL | PIN_HIGH);
	if (err)
		goto err;

	engine->wa_ctx.vma = vma;
	return 0;

err:
	i915_gem_object_put(obj);
	return err;
}

static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
{
	i915_vma_unpin_and_release(&engine->wa_ctx.vma);
}

static int intel_init_workaround_bb(struct intel_engine_cs *engine)
{
	struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
	uint32_t *batch;
	uint32_t offset;
	struct page *page;
	int ret;

	WARN_ON(engine->id != RCS);

	/* update this when WA for higher Gen are added */
	if (INTEL_GEN(engine->i915) > 9) {
		DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
			  INTEL_GEN(engine->i915));
		return 0;
	}

	/* some WA perform writes to scratch page, ensure it is valid */
	if (!engine->scratch) {
		DRM_ERROR("scratch page not allocated for %s\n", engine->name);
		return -EINVAL;
	}

	ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
		return ret;
	}

	page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
	batch = kmap_atomic(page);
	offset = 0;

	if (IS_GEN8(engine->i915)) {
		ret = gen8_init_indirectctx_bb(engine,
					       &wa_ctx->indirect_ctx,
					       batch,
					       &offset);
		if (ret)
			goto out;

		ret = gen8_init_perctx_bb(engine,
					  &wa_ctx->per_ctx,
					  batch,
					  &offset);
		if (ret)
			goto out;
	} else if (IS_GEN9(engine->i915)) {
		ret = gen9_init_indirectctx_bb(engine,
					       &wa_ctx->indirect_ctx,
					       batch,
					       &offset);
		if (ret)
			goto out;

		ret = gen9_init_perctx_bb(engine,
					  &wa_ctx->per_ctx,
					  batch,
					  &offset);
		if (ret)
			goto out;
	}

out:
	kunmap_atomic(batch);
	if (ret)
		lrc_destroy_wa_ctx_obj(engine);

	return ret;
}

static void lrc_init_hws(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE(RING_HWS_PGA(engine->mmio_base),
		   engine->status_page.ggtt_offset);
	POSTING_READ(RING_HWS_PGA(engine->mmio_base));
}

static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	unsigned int next_context_status_buffer_hw;

	lrc_init_hws(engine);

	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask | engine->irq_keep_mask));
	I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);

	I915_WRITE(RING_MODE_GEN7(engine),
		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
	POSTING_READ(RING_MODE_GEN7(engine));

	/*
	 * Instead of resetting the Context Status Buffer (CSB) read pointer to
	 * zero, we need to read the write pointer from hardware and use its
	 * value because "this register is power context save restored".
	 * Effectively, these states have been observed:
	 *
	 *      | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
	 * BDW  | CSB regs not reset       | CSB regs reset       |
	 * CHT  | CSB regs not reset       | CSB regs not reset   |
	 * SKL  |          ?               |          ?           |
	 * BXT  |          ?               |          ?           |
	 */
	next_context_status_buffer_hw =
		GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));

	/*
	 * When the CSB registers are reset (also after power-up / gpu reset),
	 * CSB write pointer is set to all 1's, which is not valid, use '5' in
	 * this special case, so the first element read is CSB[0].
	 */
	if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
		next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);

	engine->next_context_status_buffer = next_context_status_buffer_hw;
	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);

	intel_engine_init_hangcheck(engine);

	return intel_mocs_init_engine(engine);
}

static int gen8_init_render_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	ret = gen8_init_common_ring(engine);
	if (ret)
		return ret;

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
	 */
	I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	return init_workarounds_ring(engine);
}

static int gen9_init_render_ring(struct intel_engine_cs *engine)
{
	int ret;

	ret = gen8_init_common_ring(engine);
	if (ret)
		return ret;

	return init_workarounds_ring(engine);
}

static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
{
	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
	struct intel_ring *ring = req->ring;
	struct intel_engine_cs *engine = req->engine;
	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
	int i, ret;

	ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

		intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i));
		intel_ring_emit(ring, upper_32_bits(pd_daddr));
		intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i));
		intel_ring_emit(ring, lower_32_bits(pd_daddr));
	}

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

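/*
 * Editor's sketch: with GEN8_LEGACY_PDPES == 4, the function above emits a
 * single LRI block of the following shape (highest PDP first):
 *
 *	MI_LOAD_REGISTER_IMM(8)
 *	  { PDP3_UDW, upper_32_bits(pd3) } { PDP3_LDW, lower_32_bits(pd3) }
 *	  { PDP2_UDW, upper_32_bits(pd2) } { PDP2_LDW, lower_32_bits(pd2) }
 *	  { PDP1_UDW, upper_32_bits(pd1) } { PDP1_LDW, lower_32_bits(pd1) }
 *	  { PDP0_UDW, upper_32_bits(pd0) } { PDP0_LDW, lower_32_bits(pd0) }
 *	MI_NOOP
 *
 * i.e. num_lri_cmds == 8 register/value pairs, padded to an even dword
 * count with the trailing MI_NOOP (18 dwords total, matching the
 * intel_ring_begin() reservation).
 */
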
static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
			      u64 offset, u32 len,
			      unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
	int ret;

	/* Don't rely on hw updating PDPs, especially in lite-restore.
	 * Ideally, we should set Force PD Restore in ctx descriptor,
	 * but we can't. Force Restore would be a second option, but
	 * it is unsafe in case of lite-restore (because the ctx is
	 * not idle). PML4 is allocated during ppgtt init so this is
	 * not needed in 48-bit. */
	if (req->ctx->ppgtt &&
	    (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
		if (!USES_FULL_48BIT_PPGTT(req->i915) &&
		    !intel_vgpu_active(req->i915)) {
			ret = intel_logical_ring_emit_pdps(req);
			if (ret)
				return ret;
		}

		req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	/* FIXME(BDW): Address space and security selectors. */
	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
			(ppgtt<<8) |
			(dispatch_flags & I915_DISPATCH_RS ?
			 MI_BATCH_RESOURCE_STREAMER : 0));
	intel_ring_emit(ring, lower_32_bits(offset));
	intel_ring_emit(ring, upper_32_bits(offset));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask | engine->irq_keep_mask));
	POSTING_READ_FW(RING_IMR(engine->mmio_base));
}

static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
}

7c9cf4e3 1423static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
4712274c 1424{
7e37f889
CW
1425 struct intel_ring *ring = request->ring;
1426 u32 cmd;
4712274c
OM
1427 int ret;
1428
987046ad 1429 ret = intel_ring_begin(request, 4);
4712274c
OM
1430 if (ret)
1431 return ret;
1432
1433 cmd = MI_FLUSH_DW + 1;
1434
f0a1fb10
CW
1435 /* We always require a command barrier so that subsequent
1436 * commands, such as breadcrumb interrupts, are strictly ordered
1437 * wrt the contents of the write cache being flushed to memory
1438 * (and thus being coherent from the CPU).
1439 */
1440 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1441
7c9cf4e3 1442 if (mode & EMIT_INVALIDATE) {
f0a1fb10 1443 cmd |= MI_INVALIDATE_TLB;
1dae2dfb 1444 if (request->engine->id == VCS)
f0a1fb10 1445 cmd |= MI_INVALIDATE_BSD;
4712274c
OM
1446 }
1447
b5321f30
CW
1448 intel_ring_emit(ring, cmd);
1449 intel_ring_emit(ring,
1450 I915_GEM_HWS_SCRATCH_ADDR |
1451 MI_FLUSH_DW_USE_GTT);
1452 intel_ring_emit(ring, 0); /* upper addr */
1453 intel_ring_emit(ring, 0); /* value */
1454 intel_ring_advance(ring);
4712274c
OM
1455
1456 return 0;
1457}
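/*
 * The flush emitted above is a single 4-dword MI_FLUSH_DW: the command
 * dword, a GTT post-sync address pointing at the scratch slot of the
 * HWS page (I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT), an upper
 * address dword and a value dword (both zero). EMIT_INVALIDATE adds a
 * TLB invalidate, plus a BSD invalidate when running on the VCS engine.
 */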
1458
7deb4d39 1459static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
7c9cf4e3 1460 u32 mode)
4712274c 1461{
7e37f889 1462 struct intel_ring *ring = request->ring;
b5321f30 1463 struct intel_engine_cs *engine = request->engine;
bde13ebd
CW
1464 u32 scratch_addr =
1465 i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
0b2d0934 1466 bool vf_flush_wa = false, dc_flush_wa = false;
4712274c
OM
1467 u32 flags = 0;
1468 int ret;
0b2d0934 1469 int len;
4712274c
OM
1470
1471 flags |= PIPE_CONTROL_CS_STALL;
1472
7c9cf4e3 1473 if (mode & EMIT_FLUSH) {
4712274c
OM
1474 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1475 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
965fd602 1476 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
40a24488 1477 flags |= PIPE_CONTROL_FLUSH_ENABLE;
4712274c
OM
1478 }
1479
7c9cf4e3 1480 if (mode & EMIT_INVALIDATE) {
4712274c
OM
1481 flags |= PIPE_CONTROL_TLB_INVALIDATE;
1482 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
1483 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
1484 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
1485 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
1486 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
1487 flags |= PIPE_CONTROL_QW_WRITE;
1488 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
4712274c 1489
1a5a9ce7
BW
1490 /*
1491 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
1492 * pipe control.
1493 */
c033666a 1494 if (IS_GEN9(request->i915))
1a5a9ce7 1495 vf_flush_wa = true;
0b2d0934
MK
1496
1497 /* WaForGAMHang:kbl */
1498 if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
1499 dc_flush_wa = true;
1a5a9ce7 1500 }
9647ff36 1501
0b2d0934
MK
1502 len = 6;
1503
1504 if (vf_flush_wa)
1505 len += 6;
1506
1507 if (dc_flush_wa)
1508 len += 12;
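/*
 * Dword accounting: the main flush below is one 6-dword PIPE_CONTROL;
 * vf_flush_wa prepends a 6-dword NULL PIPE_CONTROL, and dc_flush_wa
 * brackets the flush with a DC-flush PIPE_CONTROL before it and a
 * CS-stall PIPE_CONTROL after it, i.e. two more 6-dword packets,
 * hence the extra 12 dwords.
 */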
1509
1510 ret = intel_ring_begin(request, len);
4712274c
OM
1511 if (ret)
1512 return ret;
1513
9647ff36 1514 if (vf_flush_wa) {
b5321f30
CW
1515 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1516 intel_ring_emit(ring, 0);
1517 intel_ring_emit(ring, 0);
1518 intel_ring_emit(ring, 0);
1519 intel_ring_emit(ring, 0);
1520 intel_ring_emit(ring, 0);
9647ff36
ID
1521 }
1522
0b2d0934 1523 if (dc_flush_wa) {
b5321f30
CW
1524 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1525 intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
1526 intel_ring_emit(ring, 0);
1527 intel_ring_emit(ring, 0);
1528 intel_ring_emit(ring, 0);
1529 intel_ring_emit(ring, 0);
0b2d0934
MK
1530 }
1531
b5321f30
CW
1532 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1533 intel_ring_emit(ring, flags);
1534 intel_ring_emit(ring, scratch_addr);
1535 intel_ring_emit(ring, 0);
1536 intel_ring_emit(ring, 0);
1537 intel_ring_emit(ring, 0);
0b2d0934
MK
1538
1539 if (dc_flush_wa) {
b5321f30
CW
1540 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1541 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
1542 intel_ring_emit(ring, 0);
1543 intel_ring_emit(ring, 0);
1544 intel_ring_emit(ring, 0);
1545 intel_ring_emit(ring, 0);
0b2d0934
MK
1546 }
1547
b5321f30 1548 intel_ring_advance(ring);
4712274c
OM
1549
1550 return 0;
1551}
1552
c04e0f3b 1553static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
319404df 1554{
319404df
ID
1555 /*
1556 * On BXT A steppings there is a HW coherency issue whereby the
1557 * MI_STORE_DATA_IMM storing the completed request's seqno
1558 * occasionally doesn't invalidate the CPU cache. Work around this by
1559 * clflushing the corresponding cacheline whenever the caller wants
1560 * the coherency to be guaranteed. Note that this cacheline is known
1561 * to be clean at this point, since we only write it in
1562 * bxt_a_set_seqno(), where we also do a clflush after the write. So
1563 * this clflush in practice becomes an invalidate operation.
1564 */
c04e0f3b 1565 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
319404df
ID
1566}
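/*
 * This barrier is only wired up for early Broxton steppings:
 * logical_ring_default_vfuncs() below installs it as irq_seqno_barrier
 * when IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1) matches.
 */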
1567
7c17d377
CW
1568/*
1569 * Reserve space for 2 NOOPs at the end of each request to be
1570 * used as a workaround for not being allowed to do lite
1571 * restore with HEAD==TAIL (WaIdleLiteRestore).
1572 */
1573#define WA_TAIL_DWORDS 2
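/*
 * Both request-emission paths below budget for this reservation:
 * gen8_emit_request() begins the ring with 6 + WA_TAIL_DWORDS and
 * gen8_emit_request_render() with 8 + WA_TAIL_DWORDS. The trailing
 * NOOP padding itself is assumed to be appended by
 * intel_logical_ring_advance(), whose body is not part of this hunk.
 */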
1574
c4e76638 1575static int gen8_emit_request(struct drm_i915_gem_request *request)
4da46e1e 1576{
7e37f889 1577 struct intel_ring *ring = request->ring;
4da46e1e
OM
1578 int ret;
1579
987046ad 1580 ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
4da46e1e
OM
1581 if (ret)
1582 return ret;
1583
7c17d377
CW
1584 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
1585 BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
4da46e1e 1586
b5321f30
CW
1587 intel_ring_emit(ring, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
1588 intel_ring_emit(ring,
1589 intel_hws_seqno_address(request->engine) |
1590 MI_FLUSH_DW_USE_GTT);
1591 intel_ring_emit(ring, 0);
1592 intel_ring_emit(ring, request->fence.seqno);
1593 intel_ring_emit(ring, MI_USER_INTERRUPT);
1594 intel_ring_emit(ring, MI_NOOP);
ddd66c51 1595 return intel_logical_ring_advance(request);
7c17d377 1596}
4da46e1e 1597
7c17d377
CW
1598static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1599{
7e37f889 1600 struct intel_ring *ring = request->ring;
7c17d377 1601 int ret;
53292cdb 1602
987046ad 1603 ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
7c17d377
CW
1604 if (ret)
1605 return ret;
1606
ce81a65c
MW
1607 /* We're using qword write, seqno should be aligned to 8 bytes. */
1608 BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
1609
7c17d377
CW
1610 /* w/a for post sync ops: following a GPGPU operation we
1611 * need a prior CS_STALL, which is emitted by the flush
1612 * following the batch.
1613 */
b5321f30
CW
1614 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1615 intel_ring_emit(ring,
1616 (PIPE_CONTROL_GLOBAL_GTT_IVB |
1617 PIPE_CONTROL_CS_STALL |
1618 PIPE_CONTROL_QW_WRITE));
1619 intel_ring_emit(ring, intel_hws_seqno_address(request->engine));
1620 intel_ring_emit(ring, 0);
1621 intel_ring_emit(ring, i915_gem_request_get_seqno(request));
ce81a65c 1622 /* We're thrashing one dword of HWS. */
b5321f30
CW
1623 intel_ring_emit(ring, 0);
1624 intel_ring_emit(ring, MI_USER_INTERRUPT);
1625 intel_ring_emit(ring, MI_NOOP);
ddd66c51 1626 return intel_logical_ring_advance(request);
4da46e1e
OM
1627}
1628
8753181e 1629static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
e7778be1
TD
1630{
1631 int ret;
1632
e2be4faf 1633 ret = intel_logical_ring_workarounds_emit(req);
e7778be1
TD
1634 if (ret)
1635 return ret;
1636
3bbaba0c
PA
1637 ret = intel_rcs_context_init_mocs(req);
1638 /*
1639 * Failing to program the MOCS is non-fatal. The system will not
1640 * run at peak performance. So generate an error and carry on.
1641 */
1642 if (ret)
1643 DRM_ERROR("MOCS failed to program: expect performance issues.\n");
1644
e40f9ee6 1645 return i915_gem_render_state_init(req);
e7778be1
TD
1646}
1647
73e4d07f
OM
1648/**
1649 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
14bb2c11 1650 * @engine: Engine Command Streamer.
73e4d07f 1651 */
0bc40be8 1652void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
454afebd 1653{
6402c330 1654 struct drm_i915_private *dev_priv;
9832b9da 1655
117897f4 1656 if (!intel_engine_initialized(engine))
48d82387
OM
1657 return;
1658
27af5eea
TU
1659 /*
1660 * Tasklet cannot be active at this point due to intel_mark_active/idle,
1661 * so this is just for documentation.
1662 */
1663 if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
1664 tasklet_kill(&engine->irq_tasklet);
1665
c033666a 1666 dev_priv = engine->i915;
6402c330 1667
0bc40be8 1668 if (engine->buffer) {
0bc40be8 1669 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
b0366a54 1670 }
48d82387 1671
0bc40be8
TU
1672 if (engine->cleanup)
1673 engine->cleanup(engine);
48d82387 1674
96a945aa 1675 intel_engine_cleanup_common(engine);
688e6c72 1676
57e88531
CW
1677 if (engine->status_page.vma) {
1678 i915_gem_object_unpin_map(engine->status_page.vma->obj);
1679 engine->status_page.vma = NULL;
48d82387 1680 }
24f1d3cc 1681 intel_lr_context_unpin(dev_priv->kernel_context, engine);
17ee950d 1682
0bc40be8
TU
1683 engine->idle_lite_restore_wa = 0;
1684 engine->disable_lite_restore_wa = false;
1685 engine->ctx_desc_template = 0;
ca82580c 1686
0bc40be8 1687 lrc_destroy_wa_ctx_obj(engine);
c033666a 1688 engine->i915 = NULL;
454afebd
OM
1689}
1690
ddd66c51
CW
1691void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
1692{
1693 struct intel_engine_cs *engine;
1694
1695 for_each_engine(engine, dev_priv)
f4ea6bdd 1696 engine->submit_request = execlists_submit_request;
ddd66c51
CW
1697}
1698
c9cacf93 1699static void
e1382efb 1700logical_ring_default_vfuncs(struct intel_engine_cs *engine)
c9cacf93
TU
1701{
1702 /* Default vfuncs which can be overridden by each engine. */
0bc40be8 1703 engine->init_hw = gen8_init_common_ring;
0bc40be8 1704 engine->emit_flush = gen8_emit_flush;
ddd66c51 1705 engine->emit_request = gen8_emit_request;
f4ea6bdd 1706 engine->submit_request = execlists_submit_request;
ddd66c51 1707
31bb59cc
CW
1708 engine->irq_enable = gen8_logical_ring_enable_irq;
1709 engine->irq_disable = gen8_logical_ring_disable_irq;
0bc40be8 1710 engine->emit_bb_start = gen8_emit_bb_start;
1b7744e7 1711 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
c04e0f3b 1712 engine->irq_seqno_barrier = bxt_a_seqno_barrier;
c9cacf93
TU
1713}
1714
d9f3af96 1715static inline void
c2c7f240 1716logical_ring_default_irqs(struct intel_engine_cs *engine)
d9f3af96 1717{
c2c7f240 1718 unsigned shift = engine->irq_shift;
0bc40be8
TU
1719 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
1720 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
d9f3af96
TU
1721}
1722
7d774cac 1723static int
bf3783e5 1724lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma)
04794adb 1725{
57e88531 1726 const int hws_offset = LRC_PPHWSP_PN * PAGE_SIZE;
7d774cac 1727 void *hws;
04794adb
TU
1728
1729 /* The HWSP is part of the default context object in LRC mode. */
bf3783e5 1730 hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
7d774cac
TU
1731 if (IS_ERR(hws))
1732 return PTR_ERR(hws);
57e88531
CW
1733
1734 engine->status_page.page_addr = hws + hws_offset;
bde13ebd 1735 engine->status_page.ggtt_offset = i915_ggtt_offset(vma) + hws_offset;
57e88531 1736 engine->status_page.vma = vma;
7d774cac
TU
1737
1738 return 0;
04794adb
TU
1739}
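/*
 * The status page is carved out of the per-process HWSP page
 * (LRC_PPHWSP_PN) of the context image: both the CPU pointer
 * (page_addr) and the GGTT offset handed to the hardware are derived
 * from that same page of the pinned context vma.
 */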
1740
bb45438f
TU
1741static void
1742logical_ring_setup(struct intel_engine_cs *engine)
1743{
1744 struct drm_i915_private *dev_priv = engine->i915;
1745 enum forcewake_domains fw_domains;
1746
019bf277
TU
1747 intel_engine_setup_common(engine);
1748
bb45438f
TU
1749 /* Intentionally left blank. */
1750 engine->buffer = NULL;
1751
1752 fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
1753 RING_ELSP(engine),
1754 FW_REG_WRITE);
1755
1756 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
1757 RING_CONTEXT_STATUS_PTR(engine),
1758 FW_REG_READ | FW_REG_WRITE);
1759
1760 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
1761 RING_CONTEXT_STATUS_BUF_BASE(engine),
1762 FW_REG_READ);
1763
1764 engine->fw_domains = fw_domains;
1765
bb45438f
TU
1766 tasklet_init(&engine->irq_tasklet,
1767 intel_lrc_irq_handler, (unsigned long)engine);
1768
1769 logical_ring_init_platform_invariants(engine);
1770 logical_ring_default_vfuncs(engine);
1771 logical_ring_default_irqs(engine);
bb45438f
TU
1772}
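/*
 * The forcewake domains collected above cover the three register ranges
 * touched on the execlists submission path: ELSP writes, the context
 * status pointer (read/write) and the context status buffer (read).
 * Caching them in engine->fw_domains lets the submission code grab all
 * required domains in one go; the consumer is not shown in this hunk.
 */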
1773
a19d6ff2
TU
1774static int
1775logical_ring_init(struct intel_engine_cs *engine)
1776{
1777 struct i915_gem_context *dctx = engine->i915->kernel_context;
1778 int ret;
1779
019bf277 1780 ret = intel_engine_init_common(engine);
a19d6ff2
TU
1781 if (ret)
1782 goto error;
1783
1784 ret = execlists_context_deferred_alloc(dctx, engine);
1785 if (ret)
1786 goto error;
1787
1788 /* As this is the default context, always pin it */
1789 ret = intel_lr_context_pin(dctx, engine);
1790 if (ret) {
1791 DRM_ERROR("Failed to pin context for %s: %d\n",
1792 engine->name, ret);
1793 goto error;
1794 }
1795
1796 /* And setup the hardware status page. */
1797 ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
1798 if (ret) {
1799 DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
1800 goto error;
1801 }
1802
1803 return 0;
1804
1805error:
1806 intel_logical_ring_cleanup(engine);
1807 return ret;
1808}
1809
88d2ba2e 1810int logical_render_ring_init(struct intel_engine_cs *engine)
a19d6ff2
TU
1811{
1812 struct drm_i915_private *dev_priv = engine->i915;
1813 int ret;
1814
bb45438f
TU
1815 logical_ring_setup(engine);
1816
a19d6ff2
TU
1817 if (HAS_L3_DPF(dev_priv))
1818 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
1819
1820 /* Override some for render ring. */
1821 if (INTEL_GEN(dev_priv) >= 9)
1822 engine->init_hw = gen9_init_render_ring;
1823 else
1824 engine->init_hw = gen8_init_render_ring;
1825 engine->init_context = gen8_init_rcs_context;
a19d6ff2
TU
1826 engine->emit_flush = gen8_emit_flush_render;
1827 engine->emit_request = gen8_emit_request_render;
1828
56c0f1a7 1829 ret = intel_engine_create_scratch(engine, 4096);
a19d6ff2
TU
1830 if (ret)
1831 return ret;
1832
1833 ret = intel_init_workaround_bb(engine);
1834 if (ret) {
1835 /*
1836 * We continue even if we fail to initialize the WA batch
1837 * because we only expect rare glitches but nothing
1838 * critical enough to prevent us from using the GPU.
1839 */
1840 DRM_ERROR("WA batch buffer initialization failed: %d\n",
1841 ret);
1842 }
1843
1844 ret = logical_ring_init(engine);
1845 if (ret) {
1846 lrc_destroy_wa_ctx_obj(engine);
1847 }
1848
1849 return ret;
1850}
1851
88d2ba2e 1852int logical_xcs_ring_init(struct intel_engine_cs *engine)
bb45438f
TU
1853{
1854 logical_ring_setup(engine);
1855
1856 return logical_ring_init(engine);
454afebd
OM
1857}
1858
0cea6502 1859static u32
c033666a 1860make_rpcs(struct drm_i915_private *dev_priv)
0cea6502
JM
1861{
1862 u32 rpcs = 0;
1863
1864 /*
1865 * No explicit RPCS request is needed to ensure full
1866 * slice/subslice/EU enablement prior to Gen9.
1867 */
c033666a 1868 if (INTEL_GEN(dev_priv) < 9)
0cea6502
JM
1869 return 0;
1870
1871 /*
1872 * Starting in Gen9, render power gating can leave
1873 * slice/subslice/EU in a partially enabled state. We
1874 * must make an explicit request through RPCS for full
1875 * enablement.
1876 */
43b67998 1877 if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
0cea6502 1878 rpcs |= GEN8_RPCS_S_CNT_ENABLE;
f08a0c92 1879 rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask) <<
0cea6502
JM
1880 GEN8_RPCS_S_CNT_SHIFT;
1881 rpcs |= GEN8_RPCS_ENABLE;
1882 }
1883
43b67998 1884 if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) {
0cea6502 1885 rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
57ec171e 1886 rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask) <<
0cea6502
JM
1887 GEN8_RPCS_SS_CNT_SHIFT;
1888 rpcs |= GEN8_RPCS_ENABLE;
1889 }
1890
43b67998
ID
1891 if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
1892 rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
0cea6502 1893 GEN8_RPCS_EU_MIN_SHIFT;
43b67998 1894 rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
0cea6502
JM
1895 GEN8_RPCS_EU_MAX_SHIFT;
1896 rpcs |= GEN8_RPCS_ENABLE;
1897 }
1898
1899 return rpcs;
1900}
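/*
 * The RPCS value computed here is only consumed for the render engine:
 * populate_lr_context() writes it into the CTX_R_PWR_CLK_STATE slot
 * (GEN8_R_PWR_CLK_STATE) of the context image when engine->id == RCS.
 */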
1901
0bc40be8 1902static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
71562919
MT
1903{
1904 u32 indirect_ctx_offset;
1905
c033666a 1906 switch (INTEL_GEN(engine->i915)) {
71562919 1907 default:
c033666a 1908 MISSING_CASE(INTEL_GEN(engine->i915));
71562919
MT
1909 /* fall through */
1910 case 9:
1911 indirect_ctx_offset =
1912 GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
1913 break;
1914 case 8:
1915 indirect_ctx_offset =
1916 GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
1917 break;
1918 }
1919
1920 return indirect_ctx_offset;
1921}
1922
8670d6f9 1923static int
e2efd130 1924populate_lr_context(struct i915_gem_context *ctx,
7d774cac 1925 struct drm_i915_gem_object *ctx_obj,
0bc40be8 1926 struct intel_engine_cs *engine,
7e37f889 1927 struct intel_ring *ring)
8670d6f9 1928{
c033666a 1929 struct drm_i915_private *dev_priv = ctx->i915;
ae6c4806 1930 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
7d774cac
TU
1931 void *vaddr;
1932 u32 *reg_state;
8670d6f9
OM
1933 int ret;
1934
2d965536
TD
1935 if (!ppgtt)
1936 ppgtt = dev_priv->mm.aliasing_ppgtt;
1937
8670d6f9
OM
1938 ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
1939 if (ret) {
1940 DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
1941 return ret;
1942 }
1943
d31d7cb1 1944 vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
7d774cac
TU
1945 if (IS_ERR(vaddr)) {
1946 ret = PTR_ERR(vaddr);
1947 DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
8670d6f9
OM
1948 return ret;
1949 }
7d774cac 1950 ctx_obj->dirty = true;
8670d6f9
OM
1951
1952 /* The second page of the context object contains some fields which must
1953 * be set up prior to the first execution. */
7d774cac 1954 reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
8670d6f9
OM
1955
1956 /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
1957 * commands followed by (reg, value) pairs. The values we are setting here are
1958 * only for the first context restore: on a subsequent save, the GPU will
1959 * recreate this batchbuffer with new values (including all the missing
1960 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
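/*
 * Layout sketch, assuming ASSIGN_CTX_REG(reg_state, pos, reg, val)
 * stores the register offset at reg_state[pos] and the value at
 * reg_state[pos + 1] (consistent with the explicit reg_state[...+1]
 * writes further down; the macro body is not part of this hunk):
 *
 *   reg_state[CTX_LRI_HEADER_0]  = MI_LOAD_REGISTER_IMM(n) | MI_LRI_FORCE_POSTED;
 *   reg_state[CTX_RING_TAIL]     = offset of RING_TAIL(engine->mmio_base);
 *   reg_state[CTX_RING_TAIL + 1] = 0;   (initial tail value)
 */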
0d925ea0 1961 reg_state[CTX_LRI_HEADER_0] =
0bc40be8
TU
1962 MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
1963 ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
1964 RING_CONTEXT_CONTROL(engine),
0d925ea0
VS
1965 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
1966 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
c033666a 1967 (HAS_RESOURCE_STREAMER(dev_priv) ?
99cf8ea1 1968 CTX_CTRL_RS_CTX_ENABLE : 0)));
0bc40be8
TU
1969 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
1970 0);
1971 ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
1972 0);
7ba717cf
TD
1973 /* Ring buffer start address is not known until the buffer is pinned.
1974 * It is written to the context image in execlists_update_context()
1975 */
0bc40be8
TU
1976 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
1977 RING_START(engine->mmio_base), 0);
1978 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
1979 RING_CTL(engine->mmio_base),
7e37f889 1980 ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
0bc40be8
TU
1981 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
1982 RING_BBADDR_UDW(engine->mmio_base), 0);
1983 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
1984 RING_BBADDR(engine->mmio_base), 0);
1985 ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
1986 RING_BBSTATE(engine->mmio_base),
0d925ea0 1987 RING_BB_PPGTT);
0bc40be8
TU
1988 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
1989 RING_SBBADDR_UDW(engine->mmio_base), 0);
1990 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
1991 RING_SBBADDR(engine->mmio_base), 0);
1992 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
1993 RING_SBBSTATE(engine->mmio_base), 0);
1994 if (engine->id == RCS) {
1995 ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
1996 RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
1997 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
1998 RING_INDIRECT_CTX(engine->mmio_base), 0);
1999 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
2000 RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
48bb74e4 2001 if (engine->wa_ctx.vma) {
0bc40be8 2002 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
bde13ebd 2003 u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
17ee950d
AS
2004
2005 reg_state[CTX_RCS_INDIRECT_CTX+1] =
2006 (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
2007 (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
2008
2009 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
0bc40be8 2010 intel_lr_indirect_ctx_offset(engine) << 6;
17ee950d
AS
2011
2012 reg_state[CTX_BB_PER_CTX_PTR+1] =
2013 (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
2014 0x01;
2015 }
8670d6f9 2016 }
0d925ea0 2017 reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
0bc40be8
TU
2018 ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
2019 RING_CTX_TIMESTAMP(engine->mmio_base), 0);
0d925ea0 2020 /* PDP values will be assigned later if needed */
0bc40be8
TU
2021 ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
2022 0);
2023 ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
2024 0);
2025 ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
2026 0);
2027 ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
2028 0);
2029 ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
2030 0);
2031 ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
2032 0);
2033 ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
2034 0);
2035 ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
2036 0);
d7b2633d 2037
2dba3239
MT
2038 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
2039 /* 64b PPGTT (48bit canonical)
2040 * PDP0_DESCRIPTOR contains the base address to PML4 and
2041 * other PDP Descriptors are ignored.
2042 */
2043 ASSIGN_CTX_PML4(ppgtt, reg_state);
2044 } else {
2045 /* 32b PPGTT
2046 * PDP*_DESCRIPTOR contains the base address of space supported.
2047 * With dynamic page allocation, PDPs may not be allocated at
2048 * this point. Point the unallocated PDPs to the scratch page
2049 */
c6a2ac71 2050 execlists_update_context_pdps(ppgtt, reg_state);
2dba3239
MT
2051 }
2052
0bc40be8 2053 if (engine->id == RCS) {
8670d6f9 2054 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
0d925ea0 2055 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
c033666a 2056 make_rpcs(dev_priv));
8670d6f9
OM
2057 }
2058
7d774cac 2059 i915_gem_object_unpin_map(ctx_obj);
8670d6f9
OM
2060
2061 return 0;
2062}
2063
c5d46ee2
DG
2064/**
2065 * intel_lr_context_size() - return the size of the context for an engine
14bb2c11 2066 * @engine: which engine to find the context size for
c5d46ee2
DG
2067 *
2068 * Each engine may require a different amount of space for a context image,
2069 * so when allocating (or copying) an image, this function can be used to
2070 * find the right size for the specific engine.
2071 *
2072 * Return: size (in bytes) of an engine-specific context image
2073 *
2074 * Note: this size includes the HWSP, which is part of the context image
2075 * in LRC mode, but does not include the "shared data page" used with
2076 * GuC submission. The caller should account for this if using the GuC.
2077 */
0bc40be8 2078uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
8c857917
OM
2079{
2080 int ret = 0;
2081
c033666a 2082 WARN_ON(INTEL_GEN(engine->i915) < 8);
8c857917 2083
0bc40be8 2084 switch (engine->id) {
8c857917 2085 case RCS:
c033666a 2086 if (INTEL_GEN(engine->i915) >= 9)
468c6816
MN
2087 ret = GEN9_LR_CONTEXT_RENDER_SIZE;
2088 else
2089 ret = GEN8_LR_CONTEXT_RENDER_SIZE;
8c857917
OM
2090 break;
2091 case VCS:
2092 case BCS:
2093 case VECS:
2094 case VCS2:
2095 ret = GEN8_LR_CONTEXT_OTHER_SIZE;
2096 break;
2097 }
2098
2099 return ret;
ede7d42b
OM
2100}
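/*
 * Callers add their own overhead on top of this value:
 * execlists_context_deferred_alloc() below rounds it up to 4096 and
 * adds PAGE_SIZE * LRC_PPHWSP_PN for the driver/GuC shared page before
 * allocating the backing object.
 */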
2101
e2efd130 2102static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
978f1e09 2103 struct intel_engine_cs *engine)
ede7d42b 2104{
8c857917 2105 struct drm_i915_gem_object *ctx_obj;
9021ad03 2106 struct intel_context *ce = &ctx->engine[engine->id];
bf3783e5 2107 struct i915_vma *vma;
8c857917 2108 uint32_t context_size;
7e37f889 2109 struct intel_ring *ring;
8c857917
OM
2110 int ret;
2111
9021ad03 2112 WARN_ON(ce->state);
ede7d42b 2113
0bc40be8 2114 context_size = round_up(intel_lr_context_size(engine), 4096);
8c857917 2115
d1675198
AD
2116 /* One extra page as the sharing data between driver and GuC */
2117 context_size += PAGE_SIZE * LRC_PPHWSP_PN;
2118
91c8a326 2119 ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size);
fe3db79b 2120 if (IS_ERR(ctx_obj)) {
3126a660 2121 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
fe3db79b 2122 return PTR_ERR(ctx_obj);
8c857917
OM
2123 }
2124
bf3783e5
CW
2125 vma = i915_vma_create(ctx_obj, &ctx->i915->ggtt.base, NULL);
2126 if (IS_ERR(vma)) {
2127 ret = PTR_ERR(vma);
2128 goto error_deref_obj;
2129 }
2130
7e37f889 2131 ring = intel_engine_create_ring(engine, ctx->ring_size);
dca33ecc
CW
2132 if (IS_ERR(ring)) {
2133 ret = PTR_ERR(ring);
e84fe803 2134 goto error_deref_obj;
8670d6f9
OM
2135 }
2136
dca33ecc 2137 ret = populate_lr_context(ctx, ctx_obj, engine, ring);
8670d6f9
OM
2138 if (ret) {
2139 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
dca33ecc 2140 goto error_ring_free;
84c2377f
OM
2141 }
2142
dca33ecc 2143 ce->ring = ring;
bf3783e5 2144 ce->state = vma;
9021ad03 2145 ce->initialised = engine->init_context == NULL;
ede7d42b
OM
2146
2147 return 0;
8670d6f9 2148
dca33ecc 2149error_ring_free:
7e37f889 2150 intel_ring_free(ring);
e84fe803 2151error_deref_obj:
f8c417cd 2152 i915_gem_object_put(ctx_obj);
8670d6f9 2153 return ret;
ede7d42b 2154}
3e5b6f05 2155
7d774cac 2156void intel_lr_context_reset(struct drm_i915_private *dev_priv,
e2efd130 2157 struct i915_gem_context *ctx)
3e5b6f05 2158{
e2f80391 2159 struct intel_engine_cs *engine;
3e5b6f05 2160
b4ac5afc 2161 for_each_engine(engine, dev_priv) {
9021ad03 2162 struct intel_context *ce = &ctx->engine[engine->id];
7d774cac 2163 void *vaddr;
3e5b6f05 2164 uint32_t *reg_state;
3e5b6f05 2165
bf3783e5 2166 if (!ce->state)
3e5b6f05
TD
2167 continue;
2168
bf3783e5 2169 vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
7d774cac 2170 if (WARN_ON(IS_ERR(vaddr)))
3e5b6f05 2171 continue;
7d774cac
TU
2172
2173 reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
3e5b6f05
TD
2174
2175 reg_state[CTX_RING_HEAD+1] = 0;
2176 reg_state[CTX_RING_TAIL+1] = 0;
2177
bf3783e5
CW
2178 ce->state->obj->dirty = true;
2179 i915_gem_object_unpin_map(ce->state->obj);
3e5b6f05 2180
dca33ecc
CW
2181 ce->ring->head = 0;
2182 ce->ring->tail = 0;
3e5b6f05
TD
2183 }
2184}