drm/i915: Extract context unpinning to its own function
drivers/gpu/drm/i915/intel_lrc.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/**
 * DOC: Logical Rings, Logical Ring Contexts and Execlists
 *
 * Motivation:
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * One of the main differences with the legacy HW contexts is that logical
 * ring contexts incorporate many more things to the context's state, like
 * PDPs or ringbuffer control registers:
 *
 * The reason why PDPs are included in the context is straightforward: as
 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 * contained there means you don't need to do a ppgtt->switch_mm yourself;
 * instead, the GPU will do it for you on the context switch.
 *
 * But, what about the ringbuffer control registers (head, tail, etc..)?
 * shouldn't we just need a set of those per engine command streamer? This is
 * where the name "Logical Rings" starts to make sense: by virtualizing the
 * rings, the engine cs shifts to a new "ring buffer" with every context
 * switch. When you want to submit a workload to the GPU you: A) choose your
 * context, B) find its appropriate virtualized ring, C) write commands to it
 * and then, finally, D) tell the GPU to switch to that context.
 *
 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 * to a context is via a context execution list, ergo "Execlists".
 *
 * LRC implementation:
 * Regarding the creation of contexts, we have:
 *
 * - One global default context.
 * - One local default context for each opened fd.
 * - One local extra context for each context create ioctl call.
 *
 * Now that ringbuffers belong per-context (and not per-engine, like before)
 * and that contexts are uniquely tied to a given engine (and not reusable,
 * like before) we need:
 *
 * - One ringbuffer per-engine inside each context.
 * - One backing object per-engine inside each context.
 *
 * The global default context starts its life with these new objects fully
 * allocated and populated. The local default context for each opened fd is
 * more complex, because we don't know at creation time which engine is going
 * to use them. To handle this, we have implemented a deferred creation of LR
 * contexts:
 *
 * The local context starts its life as a hollow or blank holder, that only
 * gets populated for a given engine once we receive an execbuffer. If later
 * on we receive another execbuffer ioctl for the same context but a different
 * engine, we allocate/populate a new ringbuffer and context backing object and
 * so on.
 *
 * Finally, regarding local contexts created using the ioctl call: as they are
 * only allowed with the render ring, we can allocate & populate them right
 * away (no need to defer anything, at least for now).
 *
 * Execlists implementation:
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 * This method works as follows:
 *
 * When a request is committed, its commands (the BB start and any leading or
 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
 * for the appropriate context. The tail pointer in the hardware context is not
 * updated at this time, but instead, kept by the driver in the ringbuffer
 * structure. A structure representing this request is added to a request queue
 * for the appropriate engine: this structure contains a copy of the context's
 * tail after the request was written to the ring buffer and a pointer to the
 * context itself.
 *
 * If the engine's request queue was empty before the request was added, the
 * queue is processed immediately. Otherwise the queue will be processed during
 * a context switch interrupt. In any case, elements on the queue will get sent
 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 * globally unique 20-bit submission ID.
 *
 * When execution of a request completes, the GPU updates the context status
 * buffer with a context complete event and generates a context switch interrupt.
 * During the interrupt handling, the driver examines the events in the buffer:
 * for each context complete event, if the announced ID matches that on the head
 * of the request queue, then that request is retired and removed from the queue.
 *
 * After processing, if any requests were retired and the queue is not empty
 * then a new execution list can be submitted. The two requests at the front of
 * the queue are next to be submitted but since a context may not occur twice in
 * an execution list, if subsequent requests have the same ID as the first then
 * the two requests must be combined. This is done simply by discarding requests
 * at the head of the queue until either only one request is left (in which case
 * we use a NULL second context) or the first two requests have unique IDs.
 *
 * By always executing the first two requests in the queue the driver ensures
 * that the GPU is kept as busy as possible. In the case where a single context
 * completes but a second context is still executing, the request for this second
 * context will be at the head of the queue when we remove the first one. This
 * request will then be resubmitted along with a new request for a different context,
 * which will cause the hardware to continue executing the second request and queue
 * the new request (the GPU detects the condition of a context getting preempted
 * with the same context and optimizes the context switch flow by not doing
 * preemption, but just sampling the new tail pointer).
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_mocs.h"

#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define RING_EXECLIST_QFULL		(1 << 0x2)
#define RING_EXECLIST1_VALID		(1 << 0x3)
#define RING_EXECLIST0_VALID		(1 << 0x4)
#define RING_EXECLIST_ACTIVE_STATUS	(3 << 0xE)
#define RING_EXECLIST1_ACTIVE		(1 << 0x11)
#define RING_EXECLIST0_ACTIVE		(1 << 0x12)

#define GEN8_CTX_STATUS_IDLE_ACTIVE	(1 << 0)
#define GEN8_CTX_STATUS_PREEMPTED	(1 << 1)
#define GEN8_CTX_STATUS_ELEMENT_SWITCH	(1 << 2)
#define GEN8_CTX_STATUS_ACTIVE_IDLE	(1 << 3)
#define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
#define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)

#define CTX_LRI_HEADER_0		0x01
#define CTX_CONTEXT_CONTROL		0x02
#define CTX_RING_HEAD			0x04
#define CTX_RING_TAIL			0x06
#define CTX_RING_BUFFER_START		0x08
#define CTX_RING_BUFFER_CONTROL		0x0a
#define CTX_BB_HEAD_U			0x0c
#define CTX_BB_HEAD_L			0x0e
#define CTX_BB_STATE			0x10
#define CTX_SECOND_BB_HEAD_U		0x12
#define CTX_SECOND_BB_HEAD_L		0x14
#define CTX_SECOND_BB_STATE		0x16
#define CTX_BB_PER_CTX_PTR		0x18
#define CTX_RCS_INDIRECT_CTX		0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
#define CTX_LRI_HEADER_1		0x21
#define CTX_CTX_TIMESTAMP		0x22
#define CTX_PDP3_UDW			0x24
#define CTX_PDP3_LDW			0x26
#define CTX_PDP2_UDW			0x28
#define CTX_PDP2_LDW			0x2a
#define CTX_PDP1_UDW			0x2c
#define CTX_PDP1_LDW			0x2e
#define CTX_PDP0_UDW			0x30
#define CTX_PDP0_LDW			0x32
#define CTX_LRI_HEADER_2		0x41
#define CTX_R_PWR_CLK_STATE		0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44

#define GEN8_CTX_VALID (1<<0)
#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
#define GEN8_CTX_FORCE_RESTORE (1<<2)
#define GEN8_CTX_L3LLC_COHERENT (1<<5)
#define GEN8_CTX_PRIVILEGE (1<<8)

#define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
	(reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
	(reg_state)[(pos)+1] = (val); \
} while (0)

#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
	const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
	reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
	reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
} while (0)

#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
	reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
	reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
} while (0)

enum {
	ADVANCED_CONTEXT = 0,
	LEGACY_32B_CONTEXT,
	ADVANCED_AD_CONTEXT,
	LEGACY_64B_CONTEXT
};
#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
#define GEN8_CTX_ADDRESSING_MODE(dev)  (USES_FULL_48BIT_PPGTT(dev) ?\
		LEGACY_64B_CONTEXT :\
		LEGACY_32B_CONTEXT)
enum {
	FAULT_AND_HANG = 0,
	FAULT_AND_HALT, /* Debug only */
	FAULT_AND_STREAM,
	FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17

static int intel_lr_context_pin(struct intel_context *ctx,
				struct intel_engine_cs *engine);
static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
		struct drm_i915_gem_object *default_ctx_obj);

/**
 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
 * @dev: DRM device.
 * @enable_execlists: value of i915.enable_execlists module parameter.
 *
 * Only certain platforms support Execlists (the prerequisites being
 * support for Logical Ring Contexts and Aliasing PPGTT or better).
 *
 * Return: 1 if Execlists is supported and has to be enabled.
 */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
{
	WARN_ON(i915.enable_ppgtt == -1);

	/* On platforms with execlist available, vGPU will only
	 * support execlist mode, no ring buffer mode.
	 */
	if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev))
		return 1;

	if (INTEL_INFO(dev)->gen >= 9)
		return 1;

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
	    i915.use_mmio_flip >= 0)
		return 1;

	return 0;
}

static void
logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;

	ring->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
					IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
					(ring->id == VCS || ring->id == VCS2);

	ring->ctx_desc_template = GEN8_CTX_VALID;
	ring->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
				   GEN8_CTX_ADDRESSING_MODE_SHIFT;
	if (IS_GEN8(dev))
		ring->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
	ring->ctx_desc_template |= GEN8_CTX_PRIVILEGE;

	/* TODO: WaDisableLiteRestore when we start using semaphore
	 * signalling between Command Streamers */
	/* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */

	/* WaEnableForceRestoreInCtxtDescForVCS:skl */
	/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
	if (ring->disable_lite_restore_wa)
		ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
}

/**
 * intel_lr_context_descriptor_update() - calculate & cache the descriptor
 *					   for a pinned context
 *
 * @ctx: Context to work on
 * @ring: Engine the descriptor will be used with
 *
 * The context descriptor encodes various attributes of a context,
 * including its GTT address and some flags. Because it's fairly
 * expensive to calculate, we'll just do it once and cache the result,
 * which remains valid until the context is unpinned.
 *
 * This is what a descriptor looks like, from LSB to MSB:
 *    bits 0-11:  flags, GEN8_CTX_* (cached in ctx_desc_template)
 *    bits 12-31: LRCA, GTT address of (the HWSP of) this context
 *    bits 32-51: ctx ID, a globally unique tag (the LRCA again!)
 *    bits 52-63: reserved, may encode the engine ID (for GuC)
 */
static void
intel_lr_context_descriptor_update(struct intel_context *ctx,
				   struct intel_engine_cs *ring)
{
	uint64_t lrca, desc;

	lrca = ctx->engine[ring->id].lrc_vma->node.start +
	       LRC_PPHWSP_PN * PAGE_SIZE;

	desc = ring->ctx_desc_template;			   /* bits  0-11 */
	desc |= lrca;					   /* bits 12-31 */
	desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */

	ctx->engine[ring->id].lrc_desc = desc;
}

uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
				     struct intel_engine_cs *ring)
{
	return ctx->engine[ring->id].lrc_desc;
}

/**
 * intel_execlists_ctx_id() - get the Execlists Context ID
 * @ctx: Context to get the ID for
 * @ring: Engine to get the ID for
 *
 * Do not confuse with ctx->id! Unfortunately we have a name overload
 * here: the old context ID we pass to userspace as a handle so that
 * they can refer to a context, and the new context ID we pass to the
 * ELSP so that the GPU can inform us of the context status via
 * interrupts.
 *
 * The context ID is a portion of the context descriptor, so we can
 * just extract the required part from the cached descriptor.
 *
 * Return: 20-bit globally unique context ID.
 */
u32 intel_execlists_ctx_id(struct intel_context *ctx,
			   struct intel_engine_cs *ring)
{
	return intel_lr_context_descriptor(ctx, ring) >> GEN8_CTX_ID_SHIFT;
}

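/*
 * Write a pair of context descriptors to the engine's ExecList Submit Port.
 * The second descriptor may be zero for a single-context submission; both
 * halves of each descriptor are written under the uncore lock with forcewake
 * held, in the order the hardware expects.
 */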
static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
				 struct drm_i915_gem_request *rq1)
{
	struct intel_engine_cs *ring = rq0->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint64_t desc[2];

	if (rq1) {
		desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->ring);
		rq1->elsp_submitted++;
	} else {
		desc[1] = 0;
	}

	desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->ring);
	rq0->elsp_submitted++;

	/* You must always write both descriptors in the order below. */
	spin_lock(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
	I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1]));
	I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1]));

	I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0]));
	/* The context is automatically loaded after the following */
	I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));

	/* ELSP is a wo register, use another nearby reg for posting */
	POSTING_READ_FW(RING_EXECLIST_STATUS_LO(ring));
	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
	spin_unlock(&dev_priv->uncore.lock);
}

static int execlists_update_context(struct drm_i915_gem_request *rq)
{
	struct intel_engine_cs *ring = rq->ring;
	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
	uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;

	reg_state[CTX_RING_TAIL+1] = rq->tail;

	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
		/* True 32b PPGTT with dynamic page allocation: update PDP
		 * registers and point the unallocated PDPs to scratch page.
		 * PML4 is allocated during ppgtt init, so this is not needed
		 * in 48-bit mode.
		 */
		ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
		ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
		ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
		ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
	}

	return 0;
}

static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
				      struct drm_i915_gem_request *rq1)
{
	execlists_update_context(rq0);

	if (rq1)
		execlists_update_context(rq1);

	execlists_elsp_write(rq0, rq1);
}

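/*
 * Take the next two requests off the execlist queue, coalescing consecutive
 * requests that belong to the same context, and submit the resulting pair to
 * the ELSP. Called with ring->execlist_lock held.
 */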
static void execlists_context_unqueue(struct intel_engine_cs *ring)
{
	struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
	struct drm_i915_gem_request *cursor = NULL, *tmp = NULL;

	assert_spin_locked(&ring->execlist_lock);

	/*
	 * If irqs are not active generate a warning as batches that finish
	 * without the irqs may get lost and a GPU Hang may occur.
	 */
	WARN_ON(!intel_irqs_enabled(ring->dev->dev_private));

	if (list_empty(&ring->execlist_queue))
		return;

	/* Try to read in pairs */
	list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue,
				 execlist_link) {
		if (!req0) {
			req0 = cursor;
		} else if (req0->ctx == cursor->ctx) {
			/* Same ctx: ignore first request, as second request
			 * will update tail past first request's workload */
			cursor->elsp_submitted = req0->elsp_submitted;
			list_move_tail(&req0->execlist_link,
				       &ring->execlist_retired_req_list);
			req0 = cursor;
		} else {
			req1 = cursor;
			break;
		}
	}

	if (IS_GEN8(ring->dev) || IS_GEN9(ring->dev)) {
		/*
		 * WaIdleLiteRestore: make sure we never cause a lite
		 * restore with HEAD==TAIL
		 */
		if (req0->elsp_submitted) {
			/*
			 * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL
			 * as we resubmit the request. See gen8_emit_request()
			 * for where we prepare the padding after the end of the
			 * request.
			 */
			struct intel_ringbuffer *ringbuf;

			ringbuf = req0->ctx->engine[ring->id].ringbuf;
			req0->tail += 8; /* skip the two MI_NOOP padding dwords */
			req0->tail &= ringbuf->size - 1;
		}
	}

	WARN_ON(req1 && req1->elsp_submitted);

	execlists_submit_requests(req0, req1);
}

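/*
 * If the context ID announced in a CSB event matches the request at the head
 * of the execlist queue, drop one ELSP submission reference and, once none
 * remain, move that request to the retired list. Returns true if the head
 * request was retired.
 */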
static bool execlists_check_remove_request(struct intel_engine_cs *ring,
					   u32 request_id)
{
	struct drm_i915_gem_request *head_req;

	assert_spin_locked(&ring->execlist_lock);

	head_req = list_first_entry_or_null(&ring->execlist_queue,
					    struct drm_i915_gem_request,
					    execlist_link);

	if (head_req != NULL) {
		if (intel_execlists_ctx_id(head_req->ctx, ring) == request_id) {
			WARN(head_req->elsp_submitted == 0,
			     "Never submitted head request\n");

			if (--head_req->elsp_submitted <= 0) {
				list_move_tail(&head_req->execlist_link,
					       &ring->execlist_retired_req_list);
				return true;
			}
		}
	}

	return false;
}

static void get_context_status(struct intel_engine_cs *ring,
			       u8 read_pointer,
			       u32 *status, u32 *context_id)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (WARN_ON(read_pointer >= GEN8_CSB_ENTRIES))
		return;

	*status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer));
	*context_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer));
}

/**
 * intel_lrc_irq_handler() - handle Context Switch interrupts
 * @ring: Engine Command Streamer to handle.
 *
 * Check the unread Context Status Buffers and manage the submission of new
 * contexts to the ELSP accordingly.
 */
void intel_lrc_irq_handler(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 status_pointer;
	u8 read_pointer;
	u8 write_pointer;
	u32 status = 0;
	u32 status_id;
	u32 submit_contexts = 0;

	status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));

	read_pointer = ring->next_context_status_buffer;
	write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
	if (read_pointer > write_pointer)
		write_pointer += GEN8_CSB_ENTRIES;

	spin_lock(&ring->execlist_lock);

	while (read_pointer < write_pointer) {

		get_context_status(ring, ++read_pointer % GEN8_CSB_ENTRIES,
				   &status, &status_id);

		if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
			continue;

		if (status & GEN8_CTX_STATUS_PREEMPTED) {
			if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
				if (execlists_check_remove_request(ring, status_id))
					WARN(1, "Lite Restored request removed from queue\n");
			} else
				WARN(1, "Preemption without Lite Restore\n");
		}

		if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
		    (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
			if (execlists_check_remove_request(ring, status_id))
				submit_contexts++;
		}
	}

	if (ring->disable_lite_restore_wa) {
		/* Prevent a ctx from preempting itself */
		if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
		    (submit_contexts != 0))
			execlists_context_unqueue(ring);
	} else if (submit_contexts != 0) {
		execlists_context_unqueue(ring);
	}

	spin_unlock(&ring->execlist_lock);

	if (unlikely(submit_contexts > 2))
		DRM_ERROR("More than two context complete events?\n");

	ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;

	/* Update the read pointer to the old write pointer. Manual ringbuffer
	 * management ftw </sarcasm> */
	I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
		   _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
				 ring->next_context_status_buffer << 8));
}

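/*
 * Add a request to the engine's execlist queue, pinning its context (unless
 * it is the kernel default context) and taking a reference on the request,
 * then kick the ELSP immediately if the queue was previously empty.
 */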
static int execlists_context_queue(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *ring = request->ring;
	struct drm_i915_gem_request *cursor;
	int num_elements = 0;

	if (request->ctx != request->i915->kernel_context)
		intel_lr_context_pin(request->ctx, ring);

	i915_gem_request_reference(request);

	spin_lock_irq(&ring->execlist_lock);

	list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
		if (++num_elements > 2)
			break;

	if (num_elements > 2) {
		struct drm_i915_gem_request *tail_req;

		tail_req = list_last_entry(&ring->execlist_queue,
					   struct drm_i915_gem_request,
					   execlist_link);

		if (request->ctx == tail_req->ctx) {
			WARN(tail_req->elsp_submitted != 0,
				"More than 2 already-submitted reqs queued\n");
			list_move_tail(&tail_req->execlist_link,
				       &ring->execlist_retired_req_list);
		}
	}

	list_add_tail(&request->execlist_link, &ring->execlist_queue);
	if (num_elements == 0)
		execlists_context_unqueue(ring);

	spin_unlock_irq(&ring->execlist_lock);

	return 0;
}

static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (ring->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	ring->gpu_caches_dirty = false;
	return 0;
}

static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
				 struct list_head *vmas)
{
	const unsigned other_rings = ~intel_ring_flag(req->ring);
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (obj->active & other_rings) {
			ret = i915_gem_object_sync(obj, req->ring, &req);
			if (ret)
				return ret;
		}

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return logical_ring_invalidate_all_caches(req);
}

int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
	int ret = 0;

	request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;

	if (i915.enable_guc_submission) {
		/*
		 * Check that the GuC has space for the request before
		 * going any further, as the i915_add_request() call
		 * later on mustn't fail ...
		 */
		struct intel_guc *guc = &request->i915->guc;

		ret = i915_guc_wq_check_space(guc->execbuf_client);
		if (ret)
			return ret;
	}

	if (request->ctx != request->i915->kernel_context)
		ret = intel_lr_context_pin(request->ctx, request->ring);

	return ret;
}

static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
				       int bytes)
{
	struct intel_ringbuffer *ringbuf = req->ringbuf;
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_gem_request *target;
	unsigned space;
	int ret;

	if (intel_ring_space(ringbuf) >= bytes)
		return 0;

	/* The whole point of reserving space is to not wait! */
	WARN_ON(ringbuf->reserved_in_use);

	list_for_each_entry(target, &ring->request_list, list) {
		/*
		 * The request queue is per-engine, so can contain requests
		 * from multiple ringbuffers. Here, we must ignore any that
		 * aren't from the ringbuffer we're considering.
		 */
		if (target->ringbuf != ringbuf)
			continue;

		/* Would completion of this request free enough space? */
		space = __intel_ring_space(target->postfix, ringbuf->tail,
					   ringbuf->size);
		if (space >= bytes)
			break;
	}

	if (WARN_ON(&target->list == &ring->request_list))
		return -ENOSPC;

	ret = i915_wait_request(target);
	if (ret)
		return ret;

	ringbuf->space = space;
	return 0;
}

/*
 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
 * @request: Request to advance the logical ringbuffer of.
 *
 * The tail is updated in our logical ringbuffer struct, not in the actual context. What
 * really happens during submission is that the context and current tail will be placed
 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
 * point, the tail *inside* the context is updated and the ELSP written to.
 */
static int
intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
	struct intel_ringbuffer *ringbuf = request->ringbuf;
	struct drm_i915_private *dev_priv = request->i915;

	intel_logical_ring_advance(ringbuf);
	request->tail = ringbuf->tail;

	/*
	 * Here we add two extra NOOPs as padding to avoid
	 * lite restore of a context with HEAD==TAIL.
	 *
	 * Caller must reserve WA_TAIL_DWORDS for us!
	 */
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance(ringbuf);

	if (intel_ring_stopped(request->ring))
		return 0;

	if (dev_priv->guc.execbuf_client)
		i915_guc_submit(dev_priv->guc.execbuf_client, request);
	else
		execlists_context_queue(request);

	return 0;
}

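/*
 * Fill the remainder of the ringbuffer, from the current tail to the end,
 * with MI_NOOPs and wrap the software tail back to the start of the buffer.
 */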
static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
{
	uint32_t __iomem *virt;
	int rem = ringbuf->size - ringbuf->tail;

	virt = ringbuf->virtual_start + ringbuf->tail;
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ringbuf->tail = 0;
	intel_ring_update_space(ringbuf);
}

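/*
 * Make sure there is room in the ringbuffer for @bytes plus any reserved
 * space, wrapping the buffer and/or waiting for older requests to complete
 * as required.
 */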
static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
{
	struct intel_ringbuffer *ringbuf = req->ringbuf;
	int remain_usable = ringbuf->effective_size - ringbuf->tail;
	int remain_actual = ringbuf->size - ringbuf->tail;
	int ret, total_bytes, wait_bytes = 0;
	bool need_wrap = false;

	if (ringbuf->reserved_in_use)
		total_bytes = bytes;
	else
		total_bytes = bytes + ringbuf->reserved_size;

	if (unlikely(bytes > remain_usable)) {
		/*
		 * Not enough space for the basic request. So need to flush
		 * out the remainder and then wait for base + reserved.
		 */
		wait_bytes = remain_actual + total_bytes;
		need_wrap = true;
	} else {
		if (unlikely(total_bytes > remain_usable)) {
			/*
			 * The base request will fit but the reserved space
			 * falls off the end. So we only need to wait for the
			 * reserved size after flushing out the remainder.
			 */
			wait_bytes = remain_actual + ringbuf->reserved_size;
			need_wrap = true;
		} else if (total_bytes > ringbuf->space) {
			/* No wrapping required, just waiting. */
			wait_bytes = total_bytes;
		}
	}

	if (wait_bytes) {
		ret = logical_ring_wait_for_space(req, wait_bytes);
		if (unlikely(ret))
			return ret;

		if (need_wrap)
			__wrap_ring_buffer(ringbuf);
	}

	return 0;
}

/**
 * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
 *
 * @req: The request to start some new work for
 * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
 *
 * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
 * be wrapped, or wait a bit for the tail to be updated). This function takes care of that
 * and also preallocates a request (every workload submission is still mediated through
 * requests, same as it did with legacy ringbuffer submission).
 *
 * Return: non-zero if the ringbuffer is not ready to be written to.
 */
int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
	struct drm_i915_private *dev_priv;
	int ret;

	WARN_ON(req == NULL);
	dev_priv = req->ring->dev->dev_private;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
				   dev_priv->mm.interruptible);
	if (ret)
		return ret;

	ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
	if (ret)
		return ret;

	req->ringbuf->space -= num_dwords * sizeof(uint32_t);
	return 0;
}

int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
{
	/*
	 * The first call merely notes the reserve request and is common for
	 * all back ends. The subsequent localised _begin() call actually
	 * ensures that the reservation is available. Without the begin, if
	 * the request creator immediately submitted the request without
	 * adding any commands to it then there might not actually be
	 * sufficient room for the submission commands.
	 */
	intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);

	return intel_logical_ring_begin(request, 0);
}

/**
 * intel_execlists_submission() - submit a batchbuffer for execution, Execlists style
 * @dev: DRM device.
 * @file: DRM file.
 * @ring: Engine Command Streamer to submit to.
 * @ctx: Context to employ for this submission.
 * @args: execbuffer call arguments.
 * @vmas: list of vmas.
 * @batch_obj: the batchbuffer to submit.
 * @exec_start: batchbuffer start virtual address pointer.
 * @dispatch_flags: translated execbuffer call flags.
 *
 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
 * away the submission details of the execbuffer ioctl call.
 *
 * Return: non-zero if the submission fails.
 */
int intel_execlists_submission(struct i915_execbuffer_params *params,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas)
{
	struct drm_device *dev = params->dev;
	struct intel_engine_cs *ring = params->ring;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf;
	u64 exec_start;
	int instp_mode;
	u32 instp_mask;
	int ret;

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			return -EINVAL;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				return -EINVAL;
			}

			/* The HW changed the meaning on this bit on gen6 */
			instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		return -EINVAL;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		DRM_DEBUG("sol reset is gen7 only\n");
		return -EINVAL;
	}

	ret = execlists_move_to_gpu(params->request, vmas);
	if (ret)
		return ret;

	if (ring == &dev_priv->ring[RCS] &&
	    instp_mode != dev_priv->relative_constants_mode) {
		ret = intel_logical_ring_begin(params->request, 4);
		if (ret)
			return ret;

		intel_logical_ring_emit(ringbuf, MI_NOOP);
		intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
		intel_logical_ring_emit_reg(ringbuf, INSTPM);
		intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
		intel_logical_ring_advance(ringbuf);

		dev_priv->relative_constants_mode = instp_mode;
	}

	exec_start = params->batch_obj_vm_offset +
		     args->batch_start_offset;

	ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
	if (ret)
		return ret;

	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);

	i915_gem_execbuffer_move_to_active(vmas, params->request);
	i915_gem_execbuffer_retire_commands(params);

	return 0;
}

void intel_execlists_retire_requests(struct intel_engine_cs *ring)
{
	struct drm_i915_gem_request *req, *tmp;
	struct list_head retired_list;

	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
	if (list_empty(&ring->execlist_retired_req_list))
		return;

	INIT_LIST_HEAD(&retired_list);
	spin_lock_irq(&ring->execlist_lock);
	list_replace_init(&ring->execlist_retired_req_list, &retired_list);
	spin_unlock_irq(&ring->execlist_lock);

	list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
		struct intel_context *ctx = req->ctx;
		struct drm_i915_gem_object *ctx_obj =
				ctx->engine[ring->id].state;

		if (ctx_obj && (ctx != req->i915->kernel_context))
			intel_lr_context_unpin(ctx, ring);

		list_del(&req->execlist_link);
		i915_gem_request_unreference(req);
	}
}

void intel_logical_ring_stop(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int ret;

	if (!intel_ring_initialized(ring))
		return;

	ret = intel_ring_idle(ring);
	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	/* TODO: Is this correct with Execlists enabled? */
	I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
	if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
		DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
		return;
	}
	I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
}

int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	if (!ring->gpu_caches_dirty)
		return 0;

	ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	ring->gpu_caches_dirty = false;
	return 0;
}

static int intel_lr_context_do_pin(struct intel_context *ctx,
				   struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
	struct page *lrc_state_page;
	uint32_t *lrc_reg_state;
	int ret;

	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
			PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
	if (ret)
		return ret;

	lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
	if (WARN_ON(!lrc_state_page)) {
		ret = -ENODEV;
		goto unpin_ctx_obj;
	}

	ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
	if (ret)
		goto unpin_ctx_obj;

	ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
	intel_lr_context_descriptor_update(ctx, ring);
	lrc_reg_state = kmap(lrc_state_page);
	lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
	ctx->engine[ring->id].lrc_reg_state = lrc_reg_state;
	ctx_obj->dirty = true;

	/* Invalidate GuC TLB. */
	if (i915.enable_guc_submission)
		I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);

	return ret;

unpin_ctx_obj:
	i915_gem_object_ggtt_unpin(ctx_obj);

	return ret;
}

static int intel_lr_context_pin(struct intel_context *ctx,
				struct intel_engine_cs *engine)
{
	int ret = 0;

	if (ctx->engine[engine->id].pin_count++ == 0) {
		ret = intel_lr_context_do_pin(ctx, engine);
		if (ret)
			goto reset_pin_count;

		i915_gem_context_reference(ctx);
	}
	return ret;

reset_pin_count:
	ctx->engine[engine->id].pin_count = 0;
	return ret;
}

void intel_lr_context_unpin(struct intel_context *ctx,
			    struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;

	WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));

	if (WARN_ON_ONCE(!ctx_obj))
		return;

	if (--ctx->engine[engine->id].pin_count == 0) {
		kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
		intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
		i915_gem_object_ggtt_unpin(ctx_obj);
		ctx->engine[engine->id].lrc_vma = NULL;
		ctx->engine[engine->id].lrc_desc = 0;
		ctx->engine[engine->id].lrc_reg_state = NULL;

		i915_gem_context_unreference(ctx);
	}
}

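/*
 * Emit the register writes recorded in dev_priv->workarounds as a single
 * MI_LOAD_REGISTER_IMM block into the request's ringbuffer, flushing GPU
 * caches before and after.
 */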
static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	int ret, i;
	struct intel_engine_cs *ring = req->ring;
	struct intel_ringbuffer *ringbuf = req->ringbuf;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_workarounds *w = &dev_priv->workarounds;

	if (w->count == 0)
		return 0;

	ring->gpu_caches_dirty = true;
	ret = logical_ring_flush_all_caches(req);
	if (ret)
		return ret;

	ret = intel_logical_ring_begin(req, w->count * 2 + 2);
	if (ret)
		return ret;

	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
	for (i = 0; i < w->count; i++) {
		intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
		intel_logical_ring_emit(ringbuf, w->reg[i].value);
	}
	intel_logical_ring_emit(ringbuf, MI_NOOP);

	intel_logical_ring_advance(ringbuf);

	ring->gpu_caches_dirty = true;
	ret = logical_ring_flush_all_caches(req);
	if (ret)
		return ret;

	return 0;
}

#define wa_ctx_emit(batch, index, cmd)					\
	do {								\
		int __index = (index)++;				\
		if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
			return -ENOSPC;					\
		}							\
		batch[__index] = (cmd);					\
	} while (0)

#define wa_ctx_emit_reg(batch, index, reg) \
	wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))

/*
 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
 * PIPE_CONTROL instruction. This is required for the flush to happen correctly
 * but there is a slight complication as this is applied in WA batch where the
 * values are only initialized once so we cannot take register value at the
 * beginning and reuse it further; hence we save its value to memory, upload a
 * constant value with bit21 set and then we restore it back with the saved value.
 * To simplify the WA, a constant value is formed by using the default value
 * of this register. This shouldn't be a problem because we are only modifying
 * it for a short period and this batch is non-preemptible. We can of course
 * use additional instructions that read the actual value of the register
 * at that time and set our bit of interest but it makes the WA complicated.
 *
 * This WA is also required for Gen9 so extracting as a function avoids
 * code duplication.
 */
static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
						uint32_t *const batch,
						uint32_t index)
{
	uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);

	/*
	 * WaDisableLSQCROPERFforOCL:skl
	 * This WA is implemented in skl_init_clock_gating() but since
	 * this batch updates GEN8_L3SQCREG4 with default value we need to
	 * set this bit here to retain the WA during flush.
	 */
	if (IS_SKL_REVID(ring->dev, 0, SKL_REVID_E0))
		l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;

	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
				   MI_SRM_LRM_GLOBAL_GTT));
	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
	wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
	wa_ctx_emit(batch, index, 0);

	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
	wa_ctx_emit(batch, index, l3sqc4_flush);

	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
	wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
				   PIPE_CONTROL_DC_FLUSH_ENABLE));
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);

	wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
				   MI_SRM_LRM_GLOBAL_GTT));
	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
	wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
	wa_ctx_emit(batch, index, 0);

	return index;
}

static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
				    uint32_t offset,
				    uint32_t start_alignment)
{
	return wa_ctx->offset = ALIGN(offset, start_alignment);
}

static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
			     uint32_t offset,
			     uint32_t size_alignment)
{
	wa_ctx->size = offset - wa_ctx->offset;

	WARN(wa_ctx->size % size_alignment,
	     "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
	     wa_ctx->size, size_alignment);
	return 0;
}

/**
 * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
 *
 * @ring: only applicable for RCS
 * @wa_ctx: structure representing wa_ctx
 *  offset: specifies start of the batch, should be cache-aligned. This is updated
 *    with the offset value received as input.
 *  size: size of the batch in DWORDS but HW expects in terms of cachelines
 * @batch: page in which WA are loaded
 * @offset: This field specifies the start of the batch, it should be
 *  cache-aligned otherwise it is adjusted accordingly.
 *  Typically we only have one indirect_ctx and per_ctx batch buffer which are
 *  initialized at the beginning and shared across all contexts but this field
 *  helps us to have multiple batches at different offsets and select them based
 *  on a criteria. At the moment this batch always start at the beginning of the page
 *  and at this point we don't have multiple wa_ctx batch buffers.
 *
 *  The number of WA applied are not known at the beginning; we use this field
 *  to return the no of DWORDS written.
 *
 *  It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
 *  so it adds NOOPs as padding to make it cacheline aligned.
 *  MI_BATCH_BUFFER_END will be added to perctx batch and both of them together
 *  makes a complete batch buffer.
 *
 * Return: non-zero if we exceed the PAGE_SIZE limit.
 */

static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
				    struct i915_wa_ctx_bb *wa_ctx,
				    uint32_t *const batch,
				    uint32_t *offset)
{
	uint32_t scratch_addr;
	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

	/* WaDisableCtxRestoreArbitration:bdw,chv */
	wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);

	/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
	if (IS_BROADWELL(ring->dev)) {
		int rc = gen8_emit_flush_coherentl3_wa(ring, batch, index);
		if (rc < 0)
			return rc;
		index = rc;
	}

	/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
	/* Actual scratch location is at 128 bytes offset */
	scratch_addr = ring->scratch.gtt_offset + 2*CACHELINE_BYTES;

	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
	wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
				   PIPE_CONTROL_GLOBAL_GTT_IVB |
				   PIPE_CONTROL_CS_STALL |
				   PIPE_CONTROL_QW_WRITE));
	wa_ctx_emit(batch, index, scratch_addr);
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);

	/* Pad to end of cacheline */
	while (index % CACHELINE_DWORDS)
		wa_ctx_emit(batch, index, MI_NOOP);

	/*
	 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
	 * execution depends on the length specified in terms of cache lines
	 * in the register CTX_RCS_INDIRECT_CTX
	 */

	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
}

/**
 * gen8_init_perctx_bb() - initialize per ctx batch with WA
 *
 * @ring: only applicable for RCS
 * @wa_ctx: structure representing wa_ctx
 *  offset: specifies start of the batch, should be cache-aligned.
 *  size: size of the batch in DWORDS but HW expects in terms of cachelines
 * @batch: page in which WA are loaded
 * @offset: This field specifies the start of this batch.
 *  This batch is started immediately after indirect_ctx batch. Since we ensure
 *  that indirect_ctx ends on a cacheline this batch is aligned automatically.
 *
 *  The number of DWORDS written are returned using this field.
 *
 * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
 * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
 */
static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
			       struct i915_wa_ctx_bb *wa_ctx,
			       uint32_t *const batch,
			       uint32_t *offset)
{
	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

	/* WaDisableCtxRestoreArbitration:bdw,chv */
	wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);

	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);

	return wa_ctx_end(wa_ctx, *offset = index, 1);
}

static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
				    struct i915_wa_ctx_bb *wa_ctx,
				    uint32_t *const batch,
				    uint32_t *offset)
{
	int ret;
	struct drm_device *dev = ring->dev;
	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

	/* WaDisableCtxRestoreArbitration:skl,bxt */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);

	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
	ret = gen8_emit_flush_coherentl3_wa(ring, batch, index);
	if (ret < 0)
		return ret;
	index = ret;

	/* Pad to end of cacheline */
	while (index % CACHELINE_DWORDS)
		wa_ctx_emit(batch, index, MI_NOOP);

	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
}

static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
			       struct i915_wa_ctx_bb *wa_ctx,
			       uint32_t *const batch,
			       uint32_t *offset)
{
	struct drm_device *dev = ring->dev;
	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
		wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
		wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
		wa_ctx_emit(batch, index,
			    _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
		wa_ctx_emit(batch, index, MI_NOOP);
	}

	/* WaDisableCtxRestoreArbitration:skl,bxt */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);

	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);

	return wa_ctx_end(wa_ctx, *offset = index, 1);
}

static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size)
{
	int ret;

	ring->wa_ctx.obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size));
	if (!ring->wa_ctx.obj) {
		DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
		return -ENOMEM;
	}

	ret = i915_gem_obj_ggtt_pin(ring->wa_ctx.obj, PAGE_SIZE, 0);
	if (ret) {
		DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
				 ret);
		drm_gem_object_unreference(&ring->wa_ctx.obj->base);
		return ret;
	}

	return 0;
}

static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *ring)
{
	if (ring->wa_ctx.obj) {
		i915_gem_object_ggtt_unpin(ring->wa_ctx.obj);
		drm_gem_object_unreference(&ring->wa_ctx.obj->base);
		ring->wa_ctx.obj = NULL;
	}
}

static int intel_init_workaround_bb(struct intel_engine_cs *ring)
{
	int ret;
	uint32_t *batch;
	uint32_t offset;
	struct page *page;
	struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;

	WARN_ON(ring->id != RCS);

	/* update this when WA for higher Gen are added */
	if (INTEL_INFO(ring->dev)->gen > 9) {
		DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
			  INTEL_INFO(ring->dev)->gen);
		return 0;
	}

	/* some WA perform writes to scratch page, ensure it is valid */
	if (ring->scratch.obj == NULL) {
		DRM_ERROR("scratch page not allocated for %s\n", ring->name);
		return -EINVAL;
	}

	ret = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
		return ret;
	}

	page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
	batch = kmap_atomic(page);
	offset = 0;

	if (INTEL_INFO(ring->dev)->gen == 8) {
		ret = gen8_init_indirectctx_bb(ring,
					       &wa_ctx->indirect_ctx,
					       batch,
					       &offset);
		if (ret)
			goto out;

		ret = gen8_init_perctx_bb(ring,
					  &wa_ctx->per_ctx,
					  batch,
					  &offset);
		if (ret)
			goto out;
	} else if (INTEL_INFO(ring->dev)->gen == 9) {
		ret = gen9_init_indirectctx_bb(ring,
					       &wa_ctx->indirect_ctx,
					       batch,
					       &offset);
		if (ret)
			goto out;

		ret = gen9_init_perctx_bb(ring,
					  &wa_ctx->per_ctx,
					  batch,
					  &offset);
		if (ret)
			goto out;
	}

out:
	kunmap_atomic(batch);
	if (ret)
		lrc_destroy_wa_ctx_obj(ring);

	return ret;
}

9b1136d5
OM
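/*
 * Common engine initialization for Execlists: point the HWSP at the
 * kernel context, unmask the interrupts we care about, enable execlist
 * submission via RING_MODE and resynchronize our CSB read pointer with
 * the write pointer reported by the hardware.
 */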
1540static int gen8_init_common_ring(struct intel_engine_cs *ring)
1541{
1542 struct drm_device *dev = ring->dev;
1543 struct drm_i915_private *dev_priv = dev->dev_private;
dfc53c5e 1544 u8 next_context_status_buffer_hw;
9b1136d5 1545
e84fe803 1546 lrc_setup_hardware_status_page(ring,
ed54c1a1 1547 dev_priv->kernel_context->engine[ring->id].state);
e84fe803 1548
73d477f6
OM
1549 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
1550 I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
1551
9b1136d5
OM
1552 I915_WRITE(RING_MODE_GEN7(ring),
1553 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1554 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
1555 POSTING_READ(RING_MODE_GEN7(ring));
dfc53c5e
MT
1556
1557 /*
1558 * Instead of resetting the Context Status Buffer (CSB) read pointer to
1559 * zero, we need to read the write pointer from hardware and use its
1560 * value because "this register is power context save restored".
1561 * Effectively, these states have been observed:
1562 *
1563 * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
1564 * BDW | CSB regs not reset | CSB regs reset |
1565 * CHT | CSB regs not reset | CSB regs not reset |
1566 * SKL | ? | ? |
1567 * BXT | ? | ? |
dfc53c5e 1568 */
5590a5f0
BW
1569 next_context_status_buffer_hw =
1570 GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(ring)));
dfc53c5e
MT
1571
1572 /*
1573 * When the CSB registers are reset (also after power-up / gpu reset),
1574 * CSB write pointer is set to all 1's, which is not valid, use '5' in
1575 * this special case, so the first element read is CSB[0].
1576 */
1577 if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
1578 next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
1579
1580 ring->next_context_status_buffer = next_context_status_buffer_hw;
9b1136d5
OM
1581 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
1582
1583 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
1584
1585 return 0;
1586}
1587
1588static int gen8_init_render_ring(struct intel_engine_cs *ring)
1589{
1590 struct drm_device *dev = ring->dev;
1591 struct drm_i915_private *dev_priv = dev->dev_private;
1592 int ret;
1593
1594 ret = gen8_init_common_ring(ring);
1595 if (ret)
1596 return ret;
1597
1598 /* We need to disable the AsyncFlip performance optimisations in order
1599 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1600 * programmed to '1' on all products.
1601 *
1602 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
1603 */
1604 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1605
9b1136d5
OM
1606 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1607
771b9a53 1608 return init_workarounds_ring(ring);
9b1136d5
OM
1609}
1610
82ef822e
DL
1611static int gen9_init_render_ring(struct intel_engine_cs *ring)
1612{
1613 int ret;
1614
1615 ret = gen8_init_common_ring(ring);
1616 if (ret)
1617 return ret;
1618
1619 return init_workarounds_ring(ring);
1620}
1621
7a01a0a2
MT
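/*
 * Emit MI_LOAD_REGISTER_IMM commands that reload all four PDP register
 * pairs (upper and lower dwords) from the request's PPGTT, for the case
 * where we cannot rely on the context image to restore them.
 */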
1622static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
1623{
1624 struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
1625 struct intel_engine_cs *ring = req->ring;
1626 struct intel_ringbuffer *ringbuf = req->ringbuf;
1627 const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
1628 int i, ret;
1629
1630 ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2);
1631 if (ret)
1632 return ret;
1633
1634 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
1635 for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
1636 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1637
f92a9162 1638 intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(ring, i));
7a01a0a2 1639 intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
f92a9162 1640 intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(ring, i));
7a01a0a2
MT
1641 intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
1642 }
1643
1644 intel_logical_ring_emit(ringbuf, MI_NOOP);
1645 intel_logical_ring_advance(ringbuf);
1646
1647 return 0;
1648}
1649
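/*
 * Emit a batch buffer start for Execlists: reload the PDPs first when
 * the PPGTT page directories are dirty (32-bit PPGTT only), then issue
 * MI_BATCH_BUFFER_START_GEN8 with the PPGTT and resource-streamer bits
 * derived from the dispatch flags.
 */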
be795fc1 1650static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
8e004efc 1651 u64 offset, unsigned dispatch_flags)
15648585 1652{
be795fc1 1653 struct intel_ringbuffer *ringbuf = req->ringbuf;
8e004efc 1654 bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
15648585
OM
1655 int ret;
1656
7a01a0a2
MT
1657	/* Don't rely on hw updating PDPs, especially in lite-restore.
1658	 * Ideally, we should set Force PD Restore in ctx descriptor,
1659	 * but we can't. Force Restore would be a second option, but
1660	 * it is unsafe in case of lite-restore (because the ctx is
1661	 * not idle). PML4 is allocated during ppgtt init so this is
1662	 * not needed in 48-bit. */
7a01a0a2
MT
1663 if (req->ctx->ppgtt &&
1664 (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
331f38e7
ZL
1665 if (!USES_FULL_48BIT_PPGTT(req->i915) &&
1666 !intel_vgpu_active(req->i915->dev)) {
2dba3239
MT
1667 ret = intel_logical_ring_emit_pdps(req);
1668 if (ret)
1669 return ret;
1670 }
7a01a0a2
MT
1671
1672 req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
1673 }
1674
4d616a29 1675 ret = intel_logical_ring_begin(req, 4);
15648585
OM
1676 if (ret)
1677 return ret;
1678
1679 /* FIXME(BDW): Address space and security selectors. */
6922528a
AJ
1680 intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
1681 (ppgtt<<8) |
1682 (dispatch_flags & I915_DISPATCH_RS ?
1683 MI_BATCH_RESOURCE_STREAMER : 0));
15648585
OM
1684 intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
1685 intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
1686 intel_logical_ring_emit(ringbuf, MI_NOOP);
1687 intel_logical_ring_advance(ringbuf);
1688
1689 return 0;
1690}
1691
73d477f6
OM
1692static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
1693{
1694 struct drm_device *dev = ring->dev;
1695 struct drm_i915_private *dev_priv = dev->dev_private;
1696 unsigned long flags;
1697
7cd512f1 1698 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
73d477f6
OM
1699 return false;
1700
1701 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1702 if (ring->irq_refcount++ == 0) {
1703 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
1704 POSTING_READ(RING_IMR(ring->mmio_base));
1705 }
1706 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1707
1708 return true;
1709}
1710
1711static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
1712{
1713 struct drm_device *dev = ring->dev;
1714 struct drm_i915_private *dev_priv = dev->dev_private;
1715 unsigned long flags;
1716
1717 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1718 if (--ring->irq_refcount == 0) {
1719 I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
1720 POSTING_READ(RING_IMR(ring->mmio_base));
1721 }
1722 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1723}
1724
7deb4d39 1725static int gen8_emit_flush(struct drm_i915_gem_request *request,
1726 u32 invalidate_domains,
1727 u32 unused)
1728{
7deb4d39 1729 struct intel_ringbuffer *ringbuf = request->ringbuf;
4712274c
OM
1730 struct intel_engine_cs *ring = ringbuf->ring;
1731 struct drm_device *dev = ring->dev;
1732 struct drm_i915_private *dev_priv = dev->dev_private;
1733 uint32_t cmd;
1734 int ret;
1735
4d616a29 1736 ret = intel_logical_ring_begin(request, 4);
4712274c
OM
1737 if (ret)
1738 return ret;
1739
1740 cmd = MI_FLUSH_DW + 1;
1741
f0a1fb10
CW
1742 /* We always require a command barrier so that subsequent
1743 * commands, such as breadcrumb interrupts, are strictly ordered
1744 * wrt the contents of the write cache being flushed to memory
1745 * (and thus being coherent from the CPU).
1746 */
1747 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1748
1749 if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
1750 cmd |= MI_INVALIDATE_TLB;
1751 if (ring == &dev_priv->ring[VCS])
1752 cmd |= MI_INVALIDATE_BSD;
4712274c
OM
1753 }
1754
1755 intel_logical_ring_emit(ringbuf, cmd);
1756 intel_logical_ring_emit(ringbuf,
1757 I915_GEM_HWS_SCRATCH_ADDR |
1758 MI_FLUSH_DW_USE_GTT);
1759 intel_logical_ring_emit(ringbuf, 0); /* upper addr */
1760 intel_logical_ring_emit(ringbuf, 0); /* value */
1761 intel_logical_ring_advance(ringbuf);
1762
1763 return 0;
1764}
1765
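/*
 * Render-engine flush using PIPE_CONTROL: translate the flush/invalidate
 * domains into PIPE_CONTROL flags and, on Gen9, emit the extra NULL
 * PIPE_CONTROL required before VF_CACHE_INVALIDATE.
 */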
7deb4d39 1766static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1767 u32 invalidate_domains,
1768 u32 flush_domains)
1769{
7deb4d39 1770 struct intel_ringbuffer *ringbuf = request->ringbuf;
4712274c
OM
1771 struct intel_engine_cs *ring = ringbuf->ring;
1772 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
1a5a9ce7 1773 bool vf_flush_wa = false;
4712274c
OM
1774 u32 flags = 0;
1775 int ret;
1776
1777 flags |= PIPE_CONTROL_CS_STALL;
1778
1779 if (flush_domains) {
1780 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1781 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
965fd602 1782 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
40a24488 1783 flags |= PIPE_CONTROL_FLUSH_ENABLE;
4712274c
OM
1784 }
1785
1786 if (invalidate_domains) {
1787 flags |= PIPE_CONTROL_TLB_INVALIDATE;
1788 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
1789 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
1790 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
1791 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
1792 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
1793 flags |= PIPE_CONTROL_QW_WRITE;
1794 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
4712274c 1795
1a5a9ce7
BW
1796 /*
1797 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
1798 * pipe control.
1799 */
1800 if (IS_GEN9(ring->dev))
1801 vf_flush_wa = true;
1802 }
9647ff36 1803
4d616a29 1804 ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
4712274c
OM
1805 if (ret)
1806 return ret;
1807
9647ff36
ID
1808 if (vf_flush_wa) {
1809 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1810 intel_logical_ring_emit(ringbuf, 0);
1811 intel_logical_ring_emit(ringbuf, 0);
1812 intel_logical_ring_emit(ringbuf, 0);
1813 intel_logical_ring_emit(ringbuf, 0);
1814 intel_logical_ring_emit(ringbuf, 0);
1815 }
1816
4712274c
OM
1817 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1818 intel_logical_ring_emit(ringbuf, flags);
1819 intel_logical_ring_emit(ringbuf, scratch_addr);
1820 intel_logical_ring_emit(ringbuf, 0);
1821 intel_logical_ring_emit(ringbuf, 0);
1822 intel_logical_ring_emit(ringbuf, 0);
1823 intel_logical_ring_advance(ringbuf);
1824
1825 return 0;
1826}
1827
e94e37ad
OM
1828static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
1829{
1830 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
1831}
1832
1833static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
1834{
1835 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
1836}
1837
319404df
ID
1838static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
1839{
1841 /*
1842 * On BXT A steppings there is a HW coherency issue whereby the
1843 * MI_STORE_DATA_IMM storing the completed request's seqno
1844 * occasionally doesn't invalidate the CPU cache. Work around this by
1845 * clflushing the corresponding cacheline whenever the caller wants
1846 * the coherency to be guaranteed. Note that this cacheline is known
1847 * to be clean at this point, since we only write it in
1848 * bxt_a_set_seqno(), where we also do a clflush after the write. So
1849 * this clflush in practice becomes an invalidate operation.
1850 */
1851
1852 if (!lazy_coherency)
1853 intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
1854
1855 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
1856}
1857
1858static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno)
1859{
1860 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
1861
1862 /* See bxt_a_get_seqno() explaining the reason for the clflush. */
1863 intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
1864}
1865
7c17d377
CW
1866/*
1867 * Reserve space for 2 NOOPs at the end of each request to be
1868 * used as a workaround for not being allowed to do lite
1869 * restore with HEAD==TAIL (WaIdleLiteRestore).
1870 */
1871#define WA_TAIL_DWORDS 2
1872
1873static inline u32 hws_seqno_address(struct intel_engine_cs *engine)
1874{
1875 return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
1876}
1877
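/*
 * Emit the request breadcrumb: an MI_FLUSH_DW that post-sync writes the
 * request's seqno into the HWSP, followed by MI_USER_INTERRUPT. The ring
 * space reserved here also covers the WaIdleLiteRestore tail NOOPs.
 */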
c4e76638 1878static int gen8_emit_request(struct drm_i915_gem_request *request)
4da46e1e 1879{
c4e76638 1880 struct intel_ringbuffer *ringbuf = request->ringbuf;
4da46e1e
OM
1881 int ret;
1882
7c17d377 1883 ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
4da46e1e
OM
1884 if (ret)
1885 return ret;
1886
7c17d377
CW
1887 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
1888 BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
4da46e1e 1889
4da46e1e 1890 intel_logical_ring_emit(ringbuf,
1891 (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
1892 intel_logical_ring_emit(ringbuf,
1893 hws_seqno_address(request->ring) |
1894 MI_FLUSH_DW_USE_GTT);
4da46e1e 1895 intel_logical_ring_emit(ringbuf, 0);
c4e76638 1896 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
4da46e1e
OM
1897 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1898 intel_logical_ring_emit(ringbuf, MI_NOOP);
7c17d377
CW
1899 return intel_logical_ring_advance_and_submit(request);
1900}
4da46e1e 1901
7c17d377
CW
1902static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1903{
1904 struct intel_ringbuffer *ringbuf = request->ringbuf;
1905 int ret;
53292cdb 1906
7c17d377
CW
1907 ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
1908 if (ret)
1909 return ret;
1910
1911	/* w/a: for post-sync ops following a GPGPU operation we
1912 * need a prior CS_STALL, which is emitted by the flush
1913 * following the batch.
1914 */
1915 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
1916 intel_logical_ring_emit(ringbuf,
1917 (PIPE_CONTROL_GLOBAL_GTT_IVB |
1918 PIPE_CONTROL_CS_STALL |
1919 PIPE_CONTROL_QW_WRITE));
1920 intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
1921 intel_logical_ring_emit(ringbuf, 0);
1922 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
1923 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1924 return intel_logical_ring_advance_and_submit(request);
4da46e1e
OM
1925}
1926
be01363f 1927static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
cef437ad 1928{
cef437ad 1929 struct render_state so;
cef437ad
DL
1930 int ret;
1931
be01363f 1932 ret = i915_gem_render_state_prepare(req->ring, &so);
cef437ad
DL
1933 if (ret)
1934 return ret;
1935
1936 if (so.rodata == NULL)
1937 return 0;
1938
be795fc1 1939 ret = req->ring->emit_bb_start(req, so.ggtt_offset,
be01363f 1940 I915_DISPATCH_SECURE);
cef437ad
DL
1941 if (ret)
1942 goto out;
1943
84e81020
AS
1944 ret = req->ring->emit_bb_start(req,
1945 (so.ggtt_offset + so.aux_batch_offset),
1946 I915_DISPATCH_SECURE);
1947 if (ret)
1948 goto out;
1949
b2af0376 1950 i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
cef437ad 1951
cef437ad
DL
1952out:
1953 i915_gem_render_state_fini(&so);
1954 return ret;
1955}
1956
8753181e 1957static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
e7778be1
TD
1958{
1959 int ret;
1960
e2be4faf 1961 ret = intel_logical_ring_workarounds_emit(req);
e7778be1
TD
1962 if (ret)
1963 return ret;
1964
3bbaba0c
PA
1965 ret = intel_rcs_context_init_mocs(req);
1966 /*
1967	 * Failing to program the MOCS is non-fatal. The system will not
1968 * run at peak performance. So generate an error and carry on.
1969 */
1970 if (ret)
1971 DRM_ERROR("MOCS failed to program: expect performance issues.\n");
1972
be01363f 1973 return intel_lr_context_render_state_init(req);
e7778be1
TD
1974}
1975
73e4d07f
OM
1976/**
1977 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
1978 *
1979 * @ring: Engine Command Streamer.
1980 *
1981 */
454afebd
OM
1982void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
1983{
6402c330 1984 struct drm_i915_private *dev_priv;
9832b9da 1985
48d82387
OM
1986 if (!intel_ring_initialized(ring))
1987 return;
1988
6402c330
JH
1989 dev_priv = ring->dev->dev_private;
1990
b0366a54
DG
1991 if (ring->buffer) {
1992 intel_logical_ring_stop(ring);
1993 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
1994 }
48d82387
OM
1995
1996 if (ring->cleanup)
1997 ring->cleanup(ring);
1998
1999 i915_cmd_parser_fini_ring(ring);
06fbca71 2000 i915_gem_batch_pool_fini(&ring->batch_pool);
48d82387
OM
2001
2002 if (ring->status_page.obj) {
2003 kunmap(sg_page(ring->status_page.obj->pages->sgl));
2004 ring->status_page.obj = NULL;
2005 }
17ee950d 2006
ca82580c
TU
2007 ring->disable_lite_restore_wa = false;
2008 ring->ctx_desc_template = 0;
2009
17ee950d 2010 lrc_destroy_wa_ctx_obj(ring);
b0366a54 2011 ring->dev = NULL;
454afebd
OM
2012}
2013
c9cacf93
TU
2014static void
2015logical_ring_default_vfuncs(struct drm_device *dev,
2016 struct intel_engine_cs *ring)
2017{
2018	/* Default vfuncs which can be overridden by each engine. */
2019 ring->init_hw = gen8_init_common_ring;
2020 ring->emit_request = gen8_emit_request;
2021 ring->emit_flush = gen8_emit_flush;
2022 ring->irq_get = gen8_logical_ring_get_irq;
2023 ring->irq_put = gen8_logical_ring_put_irq;
2024 ring->emit_bb_start = gen8_emit_bb_start;
2025 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
2026 ring->get_seqno = bxt_a_get_seqno;
2027 ring->set_seqno = bxt_a_set_seqno;
2028 } else {
2029 ring->get_seqno = gen8_get_seqno;
2030 ring->set_seqno = gen8_set_seqno;
2031 }
2032}
2033
d9f3af96
TU
2034static inline void
2035logical_ring_default_irqs(struct intel_engine_cs *ring, unsigned shift)
2036{
2037 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
2038 ring->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
2039}
2040
c9cacf93
TU
2041static int
2042logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
454afebd 2043{
ed54c1a1 2044 struct intel_context *dctx = to_i915(dev)->kernel_context;
48d82387 2045 int ret;
48d82387
OM
2046
2047 /* Intentionally left blank. */
2048 ring->buffer = NULL;
2049
2050 ring->dev = dev;
2051 INIT_LIST_HEAD(&ring->active_list);
2052 INIT_LIST_HEAD(&ring->request_list);
06fbca71 2053 i915_gem_batch_pool_init(dev, &ring->batch_pool);
48d82387
OM
2054 init_waitqueue_head(&ring->irq_queue);
2055
608c1a52 2056 INIT_LIST_HEAD(&ring->buffers);
acdd884a 2057 INIT_LIST_HEAD(&ring->execlist_queue);
c86ee3a9 2058 INIT_LIST_HEAD(&ring->execlist_retired_req_list);
acdd884a
MT
2059 spin_lock_init(&ring->execlist_lock);
2060
ca82580c
TU
2061 logical_ring_init_platform_invariants(ring);
2062
48d82387
OM
2063 ret = i915_cmd_parser_init_ring(ring);
2064 if (ret)
b0366a54 2065 goto error;
48d82387 2066
ed54c1a1 2067 ret = intel_lr_context_deferred_alloc(dctx, ring);
e84fe803 2068 if (ret)
b0366a54 2069 goto error;
e84fe803
NH
2070
2071 /* As this is the default context, always pin it */
e5292823 2072 ret = intel_lr_context_do_pin(dctx, ring);
e84fe803
NH
2073 if (ret) {
2074 DRM_ERROR(
2075 "Failed to pin and map ringbuffer %s: %d\n",
2076 ring->name, ret);
b0366a54 2077 goto error;
e84fe803 2078 }
564ddb2f 2079
b0366a54
DG
2080 return 0;
2081
2082error:
2083 intel_logical_ring_cleanup(ring);
564ddb2f 2084 return ret;
454afebd
OM
2085}
2086
2087static int logical_render_ring_init(struct drm_device *dev)
2088{
2089 struct drm_i915_private *dev_priv = dev->dev_private;
2090 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
99be1dfe 2091 int ret;
454afebd
OM
2092
2093 ring->name = "render ring";
2094 ring->id = RCS;
426960be 2095 ring->exec_id = I915_EXEC_RENDER;
397097b0 2096 ring->guc_id = GUC_RENDER_ENGINE;
454afebd 2097 ring->mmio_base = RENDER_RING_BASE;
d9f3af96
TU
2098
2099 logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT);
73d477f6
OM
2100 if (HAS_L3_DPF(dev))
2101 ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
454afebd 2102
c9cacf93
TU
2103 logical_ring_default_vfuncs(dev, ring);
2104
2105 /* Override some for render ring. */
82ef822e
DL
2106 if (INTEL_INFO(dev)->gen >= 9)
2107 ring->init_hw = gen9_init_render_ring;
2108 else
2109 ring->init_hw = gen8_init_render_ring;
e7778be1 2110 ring->init_context = gen8_init_rcs_context;
9b1136d5 2111 ring->cleanup = intel_fini_pipe_control;
4712274c 2112 ring->emit_flush = gen8_emit_flush_render;
7c17d377 2113 ring->emit_request = gen8_emit_request_render;
9b1136d5 2114
99be1dfe 2115 ring->dev = dev;
c4db7599
AS
2116
2117 ret = intel_init_pipe_control(ring);
99be1dfe
DV
2118 if (ret)
2119 return ret;
2120
17ee950d
AS
2121 ret = intel_init_workaround_bb(ring);
2122 if (ret) {
2123 /*
2124		 * We continue even if we fail to initialize the WA batch
2125		 * because we only expect rare glitches and nothing critical
2126		 * enough to prevent us from using the GPU.
2127 */
2128 DRM_ERROR("WA batch buffer initialization failed: %d\n",
2129 ret);
2130 }
2131
c4db7599
AS
2132 ret = logical_ring_init(dev, ring);
2133 if (ret) {
17ee950d 2134 lrc_destroy_wa_ctx_obj(ring);
c4db7599 2135 }
17ee950d
AS
2136
2137 return ret;
454afebd
OM
2138}
2139
2140static int logical_bsd_ring_init(struct drm_device *dev)
2141{
2142 struct drm_i915_private *dev_priv = dev->dev_private;
2143 struct intel_engine_cs *ring = &dev_priv->ring[VCS];
2144
2145 ring->name = "bsd ring";
2146 ring->id = VCS;
426960be 2147 ring->exec_id = I915_EXEC_BSD;
397097b0 2148 ring->guc_id = GUC_VIDEO_ENGINE;
454afebd 2149 ring->mmio_base = GEN6_BSD_RING_BASE;
454afebd 2150
d9f3af96 2151 logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT);
c9cacf93 2152 logical_ring_default_vfuncs(dev, ring);
9b1136d5 2153
454afebd
OM
2154 return logical_ring_init(dev, ring);
2155}
2156
2157static int logical_bsd2_ring_init(struct drm_device *dev)
2158{
2159 struct drm_i915_private *dev_priv = dev->dev_private;
2160 struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
2161
ec8a9776 2162 ring->name = "bsd2 ring";
454afebd 2163 ring->id = VCS2;
426960be 2164 ring->exec_id = I915_EXEC_BSD;
397097b0 2165 ring->guc_id = GUC_VIDEO_ENGINE2;
454afebd 2166 ring->mmio_base = GEN8_BSD2_RING_BASE;
454afebd 2167
d9f3af96 2168 logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT);
c9cacf93 2169 logical_ring_default_vfuncs(dev, ring);
9b1136d5 2170
454afebd
OM
2171 return logical_ring_init(dev, ring);
2172}
2173
2174static int logical_blt_ring_init(struct drm_device *dev)
2175{
2176 struct drm_i915_private *dev_priv = dev->dev_private;
2177 struct intel_engine_cs *ring = &dev_priv->ring[BCS];
2178
2179 ring->name = "blitter ring";
2180 ring->id = BCS;
426960be 2181 ring->exec_id = I915_EXEC_BLT;
397097b0 2182 ring->guc_id = GUC_BLITTER_ENGINE;
454afebd 2183 ring->mmio_base = BLT_RING_BASE;
454afebd 2184
d9f3af96 2185 logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT);
c9cacf93 2186 logical_ring_default_vfuncs(dev, ring);
9b1136d5 2187
454afebd
OM
2188 return logical_ring_init(dev, ring);
2189}
2190
2191static int logical_vebox_ring_init(struct drm_device *dev)
2192{
2193 struct drm_i915_private *dev_priv = dev->dev_private;
2194 struct intel_engine_cs *ring = &dev_priv->ring[VECS];
2195
2196 ring->name = "video enhancement ring";
2197 ring->id = VECS;
426960be 2198 ring->exec_id = I915_EXEC_VEBOX;
397097b0 2199 ring->guc_id = GUC_VIDEOENHANCE_ENGINE;
454afebd 2200 ring->mmio_base = VEBOX_RING_BASE;
454afebd 2201
d9f3af96 2202 logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT);
c9cacf93 2203 logical_ring_default_vfuncs(dev, ring);
9b1136d5 2204
454afebd
OM
2205 return logical_ring_init(dev, ring);
2206}
2207
73e4d07f
OM
2208/**
2209 * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
2210 * @dev: DRM device.
2211 *
2212 * This function inits the engines for an Execlists submission style (the equivalent in the
2213 * legacy ringbuffer submission world would be i915_gem_init_rings). It does so only for
2214 * those engines that are present in the hardware.
2215 *
2216 * Return: non-zero if the initialization failed.
2217 */
454afebd
OM
2218int intel_logical_rings_init(struct drm_device *dev)
2219{
2220 struct drm_i915_private *dev_priv = dev->dev_private;
2221 int ret;
2222
2223 ret = logical_render_ring_init(dev);
2224 if (ret)
2225 return ret;
2226
2227 if (HAS_BSD(dev)) {
2228 ret = logical_bsd_ring_init(dev);
2229 if (ret)
2230 goto cleanup_render_ring;
2231 }
2232
2233 if (HAS_BLT(dev)) {
2234 ret = logical_blt_ring_init(dev);
2235 if (ret)
2236 goto cleanup_bsd_ring;
2237 }
2238
2239 if (HAS_VEBOX(dev)) {
2240 ret = logical_vebox_ring_init(dev);
2241 if (ret)
2242 goto cleanup_blt_ring;
2243 }
2244
2245 if (HAS_BSD2(dev)) {
2246 ret = logical_bsd2_ring_init(dev);
2247 if (ret)
2248 goto cleanup_vebox_ring;
2249 }
2250
454afebd
OM
2251 return 0;
2252
454afebd
OM
2253cleanup_vebox_ring:
2254 intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
2255cleanup_blt_ring:
2256 intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
2257cleanup_bsd_ring:
2258 intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
2259cleanup_render_ring:
2260 intel_logical_ring_cleanup(&dev_priv->ring[RCS]);
2261
2262 return ret;
2263}
2264
0cea6502
JM
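/*
 * Build the RPCS register value that requests full slice/subslice/EU
 * enablement on parts where render power gating could otherwise leave
 * them partially enabled (Gen9+).
 */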
2265static u32
2266make_rpcs(struct drm_device *dev)
2267{
2268 u32 rpcs = 0;
2269
2270 /*
2271 * No explicit RPCS request is needed to ensure full
2272 * slice/subslice/EU enablement prior to Gen9.
2273 */
2274 if (INTEL_INFO(dev)->gen < 9)
2275 return 0;
2276
2277 /*
2278 * Starting in Gen9, render power gating can leave
2279 * slice/subslice/EU in a partially enabled state. We
2280 * must make an explicit request through RPCS for full
2281 * enablement.
2282 */
2283 if (INTEL_INFO(dev)->has_slice_pg) {
2284 rpcs |= GEN8_RPCS_S_CNT_ENABLE;
2285 rpcs |= INTEL_INFO(dev)->slice_total <<
2286 GEN8_RPCS_S_CNT_SHIFT;
2287 rpcs |= GEN8_RPCS_ENABLE;
2288 }
2289
2290 if (INTEL_INFO(dev)->has_subslice_pg) {
2291 rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
2292 rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
2293 GEN8_RPCS_SS_CNT_SHIFT;
2294 rpcs |= GEN8_RPCS_ENABLE;
2295 }
2296
2297 if (INTEL_INFO(dev)->has_eu_pg) {
2298 rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
2299 GEN8_RPCS_EU_MIN_SHIFT;
2300 rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
2301 GEN8_RPCS_EU_MAX_SHIFT;
2302 rpcs |= GEN8_RPCS_ENABLE;
2303 }
2304
2305 return rpcs;
2306}
2307
8670d6f9
OM
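/*
 * Fill in the register-state page of a freshly allocated LRC image:
 * ring registers, the RCS workaround batch pointers, the PDP/PML4
 * entries for the context's PPGTT and, for the render engine, the
 * power-clock state register built by make_rpcs().
 */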
2308static int
2309populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
2310 struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
2311{
2d965536
TD
2312 struct drm_device *dev = ring->dev;
2313 struct drm_i915_private *dev_priv = dev->dev_private;
ae6c4806 2314 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
8670d6f9
OM
2315 struct page *page;
2316 uint32_t *reg_state;
2317 int ret;
2318
2d965536
TD
2319 if (!ppgtt)
2320 ppgtt = dev_priv->mm.aliasing_ppgtt;
2321
8670d6f9
OM
2322 ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
2323 if (ret) {
2324 DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
2325 return ret;
2326 }
2327
2328 ret = i915_gem_object_get_pages(ctx_obj);
2329 if (ret) {
2330 DRM_DEBUG_DRIVER("Could not get object pages\n");
2331 return ret;
2332 }
2333
2334 i915_gem_object_pin_pages(ctx_obj);
2335
2336 /* The second page of the context object contains some fields which must
2337 * be set up prior to the first execution. */
033908ae 2338 page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
8670d6f9
OM
2339 reg_state = kmap_atomic(page);
2340
2341 /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
2342 * commands followed by (reg, value) pairs. The values we are setting here are
2343 * only for the first context restore: on a subsequent save, the GPU will
2344 * recreate this batchbuffer with new values (including all the missing
2345 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
0d925ea0
VS
2346 reg_state[CTX_LRI_HEADER_0] =
2347 MI_LOAD_REGISTER_IMM(ring->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
2348 ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring),
2349 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
2350 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
2351 CTX_CTRL_RS_CTX_ENABLE));
2352 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0);
2353 ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0);
7ba717cf
TD
2354 /* Ring buffer start address is not known until the buffer is pinned.
2355 * It is written to the context image in execlists_update_context()
2356 */
0d925ea0
VS
2357 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START, RING_START(ring->mmio_base), 0);
2358 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, RING_CTL(ring->mmio_base),
2359 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
2360 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, RING_BBADDR_UDW(ring->mmio_base), 0);
2361 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, RING_BBADDR(ring->mmio_base), 0);
2362 ASSIGN_CTX_REG(reg_state, CTX_BB_STATE, RING_BBSTATE(ring->mmio_base),
2363 RING_BB_PPGTT);
2364 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(ring->mmio_base), 0);
2365 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(ring->mmio_base), 0);
2366 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE, RING_SBBSTATE(ring->mmio_base), 0);
8670d6f9 2367 if (ring->id == RCS) {
0d925ea0
VS
2368 ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(ring->mmio_base), 0);
2369 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(ring->mmio_base), 0);
2370 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, RING_INDIRECT_CTX_OFFSET(ring->mmio_base), 0);
17ee950d
AS
2371 if (ring->wa_ctx.obj) {
2372 struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
2373 uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
2374
2375 reg_state[CTX_RCS_INDIRECT_CTX+1] =
2376 (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
2377 (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
2378
2379 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
2380 CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT << 6;
2381
2382 reg_state[CTX_BB_PER_CTX_PTR+1] =
2383 (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
2384 0x01;
2385 }
8670d6f9 2386 }
0d925ea0
VS
2387 reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
2388 ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(ring->mmio_base), 0);
2389	/* PDP values will be assigned later if needed */
2390 ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(ring, 3), 0);
2391 ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(ring, 3), 0);
2392 ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(ring, 2), 0);
2393 ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(ring, 2), 0);
2394 ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(ring, 1), 0);
2395 ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(ring, 1), 0);
2396 ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(ring, 0), 0);
2397 ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(ring, 0), 0);
d7b2633d 2398
2dba3239
MT
2399 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
2400 /* 64b PPGTT (48bit canonical)
2401 * PDP0_DESCRIPTOR contains the base address to PML4 and
2402 * other PDP Descriptors are ignored.
2403 */
2404 ASSIGN_CTX_PML4(ppgtt, reg_state);
2405 } else {
2406 /* 32b PPGTT
2407 * PDP*_DESCRIPTOR contains the base address of space supported.
2408 * With dynamic page allocation, PDPs may not be allocated at
2409 * this point. Point the unallocated PDPs to the scratch page
2410 */
2411 ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
2412 ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
2413 ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
2414 ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
2415 }
2416
8670d6f9
OM
2417 if (ring->id == RCS) {
2418 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
0d925ea0
VS
2419 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
2420 make_rpcs(dev));
8670d6f9
OM
2421 }
2422
2423 kunmap_atomic(reg_state);
8670d6f9
OM
2424 i915_gem_object_unpin_pages(ctx_obj);
2425
2426 return 0;
2427}
2428
73e4d07f
OM
2429/**
2430 * intel_lr_context_free() - free the LRC specific bits of a context
2431 * @ctx: the LR context to free.
2432 *
2433 * The real context freeing is done in i915_gem_context_free: this only
2434 * takes care of the bits that are LRC related: the per-engine backing
2435 * objects and the logical ringbuffer.
2436 */
ede7d42b
OM
2437void intel_lr_context_free(struct intel_context *ctx)
2438{
8c857917
OM
2439 int i;
2440
e28e404c
DG
2441 for (i = I915_NUM_RINGS; --i >= 0; ) {
2442 struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
8c857917 2443 struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
84c2377f 2444
e28e404c
DG
2445 if (!ctx_obj)
2446 continue;
dcb4c12a 2447
e28e404c
DG
2448 if (ctx == ctx->i915->kernel_context) {
2449 intel_unpin_ringbuffer_obj(ringbuf);
2450 i915_gem_object_ggtt_unpin(ctx_obj);
8c857917 2451 }
e28e404c
DG
2452
2453 WARN_ON(ctx->engine[i].pin_count);
2454 intel_ringbuffer_free(ringbuf);
2455 drm_gem_object_unreference(&ctx_obj->base);
8c857917
OM
2456 }
2457}
2458
c5d46ee2
DG
2459/**
2460 * intel_lr_context_size() - return the size of the context for an engine
2461 * @ring: which engine to find the context size for
2462 *
2463 * Each engine may require a different amount of space for a context image,
2464 * so when allocating (or copying) an image, this function can be used to
2465 * find the right size for the specific engine.
2466 *
2467 * Return: size (in bytes) of an engine-specific context image
2468 *
2469 * Note: this size includes the HWSP, which is part of the context image
2470 * in LRC mode, but does not include the "shared data page" used with
2471 * GuC submission. The caller should account for this if using the GuC.
2472 */
95a66f7e 2473uint32_t intel_lr_context_size(struct intel_engine_cs *ring)
8c857917
OM
2474{
2475 int ret = 0;
2476
468c6816 2477 WARN_ON(INTEL_INFO(ring->dev)->gen < 8);
8c857917
OM
2478
2479 switch (ring->id) {
2480 case RCS:
468c6816
MN
2481 if (INTEL_INFO(ring->dev)->gen >= 9)
2482 ret = GEN9_LR_CONTEXT_RENDER_SIZE;
2483 else
2484 ret = GEN8_LR_CONTEXT_RENDER_SIZE;
8c857917
OM
2485 break;
2486 case VCS:
2487 case BCS:
2488 case VECS:
2489 case VCS2:
2490 ret = GEN8_LR_CONTEXT_OTHER_SIZE;
2491 break;
2492 }
2493
2494 return ret;
ede7d42b
OM
2495}
2496
70b0ea86 2497static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
2498 struct drm_i915_gem_object *default_ctx_obj)
2499{
2500 struct drm_i915_private *dev_priv = ring->dev->dev_private;
d1675198 2501 struct page *page;
1df06b75 2502
d1675198
AD
2503 /* The HWSP is part of the default context object in LRC mode. */
2504 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
2505 + LRC_PPHWSP_PN * PAGE_SIZE;
2506 page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
2507 ring->status_page.page_addr = kmap(page);
1df06b75
TD
2508 ring->status_page.obj = default_ctx_obj;
2509
2510 I915_WRITE(RING_HWS_PGA(ring->mmio_base),
2511 (u32)ring->status_page.gfx_addr);
2512 POSTING_READ(RING_HWS_PGA(ring->mmio_base));
1df06b75
TD
2513}
2514
73e4d07f 2515/**
e84fe803 2516 * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
73e4d07f
OM
2517 * @ctx: LR context to create.
2518 * @ring: engine to be used with the context.
2519 *
2520 * This function can be called more than once, with different engines, if we plan
2521 * to use the context with them. The context backing objects and the ringbuffers
2522 * (especially the ringbuffer backing objects) suck a lot of memory up, and that's why
2523 * the creation is a deferred call: it's better to make sure first that we need to use
2524 * a given ring with the context.
2525 *
32197aab 2526 * Return: non-zero on error.
73e4d07f 2527 */
e84fe803
NH
2529int intel_lr_context_deferred_alloc(struct intel_context *ctx,
e28e404c 2530 struct intel_engine_cs *ring)
ede7d42b 2531{
8c857917
OM
2532 struct drm_device *dev = ring->dev;
2533 struct drm_i915_gem_object *ctx_obj;
2534 uint32_t context_size;
84c2377f 2535 struct intel_ringbuffer *ringbuf;
8c857917
OM
2536 int ret;
2537
ede7d42b 2538 WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
bfc882b4 2539 WARN_ON(ctx->engine[ring->id].state);
ede7d42b 2540
95a66f7e 2541 context_size = round_up(intel_lr_context_size(ring), 4096);
8c857917 2542
d1675198
AD
2543	/* One extra page for the data shared between the driver and the GuC */
2544 context_size += PAGE_SIZE * LRC_PPHWSP_PN;
2545
149c86e7 2546 ctx_obj = i915_gem_alloc_object(dev, context_size);
3126a660
DC
2547 if (!ctx_obj) {
2548 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
2549 return -ENOMEM;
8c857917
OM
2550 }
2551
01101fa7
CW
2552 ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE);
2553 if (IS_ERR(ringbuf)) {
2554 ret = PTR_ERR(ringbuf);
e84fe803 2555 goto error_deref_obj;
8670d6f9
OM
2556 }
2557
2558 ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
2559 if (ret) {
2560 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
e84fe803 2561 goto error_ringbuf;
84c2377f
OM
2562 }
2563
2564 ctx->engine[ring->id].ringbuf = ringbuf;
8c857917 2565 ctx->engine[ring->id].state = ctx_obj;
ede7d42b 2566
ed54c1a1 2567 if (ctx != ctx->i915->kernel_context && ring->init_context) {
e84fe803 2568 struct drm_i915_gem_request *req;
76c39168 2569
26827088
DG
2570 req = i915_gem_request_alloc(ring, ctx);
2571 if (IS_ERR(req)) {
2572 ret = PTR_ERR(req);
2573 DRM_ERROR("ring create req: %d\n", ret);
e84fe803 2574 goto error_ringbuf;
771b9a53
MT
2575 }
2576
e84fe803
NH
2577 ret = ring->init_context(req);
2578 if (ret) {
2579 DRM_ERROR("ring init context: %d\n",
2580 ret);
2581 i915_gem_request_cancel(req);
2582 goto error_ringbuf;
2583 }
2584 i915_add_request_no_flush(req);
564ddb2f 2585 }
ede7d42b 2586 return 0;
8670d6f9 2587
01101fa7
CW
2588error_ringbuf:
2589 intel_ringbuffer_free(ringbuf);
e84fe803 2590error_deref_obj:
8670d6f9 2591 drm_gem_object_unreference(&ctx_obj->base);
e84fe803
NH
2592 ctx->engine[ring->id].ringbuf = NULL;
2593 ctx->engine[ring->id].state = NULL;
8670d6f9 2594 return ret;
ede7d42b 2595}
3e5b6f05
TD
2596
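/*
 * After a GPU reset, zero the ring HEAD/TAIL both in each engine's
 * context image and in the corresponding ringbuffer bookkeeping so the
 * contexts restart from a clean ring.
 */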
2597void intel_lr_context_reset(struct drm_device *dev,
2598 struct intel_context *ctx)
2599{
2600 struct drm_i915_private *dev_priv = dev->dev_private;
2601 struct intel_engine_cs *ring;
2602 int i;
2603
2604 for_each_ring(ring, dev_priv, i) {
2605 struct drm_i915_gem_object *ctx_obj =
2606 ctx->engine[ring->id].state;
2607 struct intel_ringbuffer *ringbuf =
2608 ctx->engine[ring->id].ringbuf;
2609 uint32_t *reg_state;
2610 struct page *page;
2611
2612 if (!ctx_obj)
2613 continue;
2614
2615 if (i915_gem_object_get_pages(ctx_obj)) {
2616 WARN(1, "Failed get_pages for context obj\n");
2617 continue;
2618 }
033908ae 2619 page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
3e5b6f05
TD
2620 reg_state = kmap_atomic(page);
2621
2622 reg_state[CTX_RING_HEAD+1] = 0;
2623 reg_state[CTX_RING_TAIL+1] = 0;
2624
2625 kunmap_atomic(reg_state);
2626
2627 ringbuf->head = 0;
2628 ringbuf->tail = 0;
2629 }
2630}