drm/i915: Use ordered seqno write interrupt generation on gen8+ execlists
drivers/gpu/drm/i915/intel_lrc.c
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Ben Widawsky <ben@bwidawsk.net>
25 * Michel Thierry <michel.thierry@intel.com>
26 * Thomas Daniel <thomas.daniel@intel.com>
27 * Oscar Mateo <oscar.mateo@intel.com>
28 *
29 */
30
31/**
32 * DOC: Logical Rings, Logical Ring Contexts and Execlists
33 *
34 * Motivation:
35 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
36 * These expanded contexts enable a number of new abilities, especially
37 * "Execlists" (also implemented in this file).
38 *
39 * One of the main differences from the legacy HW contexts is that logical
40 * ring contexts incorporate many more things into the context's state, like
41 * PDPs or ringbuffer control registers:
42 *
43 * The reason why PDPs are included in the context is straightforward: as
44 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
45 * contained there means you don't need to do a ppgtt->switch_mm yourself;
46 * instead, the GPU will do it for you on the context switch.
47 *
48 * But what about the ringbuffer control registers (head, tail, etc.)?
49 * Shouldn't we just need a set of those per engine command streamer? This is
50 * where the name "Logical Rings" starts to make sense: by virtualizing the
51 * rings, the engine cs shifts to a new "ring buffer" with every context
52 * switch. When you want to submit a workload to the GPU you: A) choose your
53 * context, B) find its appropriate virtualized ring, C) write commands to it
54 * and then, finally, D) tell the GPU to switch to that context.
55 *
56 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
57 * to a context is via a context execution list, ergo "Execlists".
58 *
59 * LRC implementation:
60 * Regarding the creation of contexts, we have:
61 *
62 * - One global default context.
63 * - One local default context for each opened fd.
64 * - One local extra context for each context create ioctl call.
65 *
66 * Now that ringbuffers belong per-context (and not per-engine, like before)
67 * and that contexts are uniquely tied to a given engine (and not reusable,
68 * like before) we need:
69 *
70 * - One ringbuffer per-engine inside each context.
71 * - One backing object per-engine inside each context.
72 *
73 * The global default context starts its life with these new objects fully
74 * allocated and populated. The local default context for each opened fd is
75 * more complex, because we don't know at creation time which engine is going
76 * to use them. To handle this, we have implemented a deferred creation of LR
77 * contexts:
78 *
79 * The local context starts its life as a hollow or blank holder, that only
80 * gets populated for a given engine once we receive an execbuffer. If later
81 * on we receive another execbuffer ioctl for the same context but a different
82 * engine, we allocate/populate a new ringbuffer and context backing object and
83 * so on.
84 *
85 * Finally, regarding local contexts created using the ioctl call: as they are
86 * only allowed with the render ring, we can allocate & populate them right
87 * away (no need to defer anything, at least for now).
88 *
89 * Execlists implementation:
90 * Execlists are the new method by which, on gen8+ hardware, workloads are
91 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
92 * This method works as follows:
93 *
94 * When a request is committed, its commands (the BB start and any leading or
95 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
96 * for the appropriate context. The tail pointer in the hardware context is not
97 * updated at this time, but instead, kept by the driver in the ringbuffer
98 * structure. A structure representing this request is added to a request queue
99 * for the appropriate engine: this structure contains a copy of the context's
100 * tail after the request was written to the ring buffer and a pointer to the
101 * context itself.
102 *
103 * If the engine's request queue was empty before the request was added, the
104 * queue is processed immediately. Otherwise the queue will be processed during
105 * a context switch interrupt. In any case, elements on the queue will get sent
106 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
107 * globally unique 20-bit submission ID.
108 *
109 * When execution of a request completes, the GPU updates the context status
110 * buffer with a context complete event and generates a context switch interrupt.
111 * During the interrupt handling, the driver examines the events in the buffer:
112 * for each context complete event, if the announced ID matches that on the head
113 * of the request queue, then that request is retired and removed from the queue.
114 *
115 * After processing, if any requests were retired and the queue is not empty
116 * then a new execution list can be submitted. The two requests at the front of
117 * the queue are next to be submitted but since a context may not occur twice in
118 * an execution list, if subsequent requests have the same ID as the first then
119 * the two requests must be combined. This is done simply by discarding requests
120 * at the head of the queue until either only one request is left (in which case
121 * we use a NULL second context) or the first two requests have unique IDs.
122 *
123 * By always executing the first two requests in the queue the driver ensures
124 * that the GPU is kept as busy as possible. In the case where a single context
125 * completes but a second context is still executing, the request for this second
126 * context will be at the head of the queue when we remove the first one. This
127 * request will then be resubmitted along with a new request for a different context,
128 * which will cause the hardware to continue executing the second request and queue
129 * the new request (the GPU detects the condition of a context getting preempted
130 * with the same context and optimizes the context switch flow by not doing
131 * preemption, but just sampling the new tail pointer).
132 *
133 */
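The pairing rules described above can be condensed into a small, self-contained sketch (illustrative only, not the driver's code; the simplified request structure and helper name are invented for the example):

/* Illustrative model of the ELSP pairing policy described above. */
struct toy_request {
	u32 ctx_id;	/* 20-bit submission ID of the request's context */
	u32 tail;	/* ring tail recorded when the request was written */
};

/*
 * Pick at most two requests for the next execution list: the head of the
 * queue plus the first later request that uses a different context.
 * Consecutive requests for the same context are folded together by keeping
 * only the newest tail, mirroring execlists_context_unqueue() below.
 */
static int toy_pick_elsp_pair(const struct toy_request *queue, int count,
			      struct toy_request *slot0,
			      struct toy_request *slot1)
{
	int i, found = 0;

	for (i = 0; i < count && found < 2; i++) {
		if (found == 1 && queue[i].ctx_id == slot0->ctx_id) {
			slot0->tail = queue[i].tail;	/* merge, keep newest tail */
			continue;
		}
		if (found++ == 0)
			*slot0 = queue[i];
		else
			*slot1 = queue[i];
	}

	return found;	/* 0, 1 (second ELSP port left empty) or 2 */
}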
134
135#include <drm/drmP.h>
136#include <drm/i915_drm.h>
137#include "i915_drv.h"
138#include "intel_mocs.h"
139
140#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
141#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
142#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
143
144#define RING_EXECLIST_QFULL (1 << 0x2)
145#define RING_EXECLIST1_VALID (1 << 0x3)
146#define RING_EXECLIST0_VALID (1 << 0x4)
147#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
148#define RING_EXECLIST1_ACTIVE (1 << 0x11)
149#define RING_EXECLIST0_ACTIVE (1 << 0x12)
150
151#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
152#define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
153#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
154#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
155#define GEN8_CTX_STATUS_COMPLETE (1 << 4)
156#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
157
158#define CTX_LRI_HEADER_0 0x01
159#define CTX_CONTEXT_CONTROL 0x02
160#define CTX_RING_HEAD 0x04
161#define CTX_RING_TAIL 0x06
162#define CTX_RING_BUFFER_START 0x08
163#define CTX_RING_BUFFER_CONTROL 0x0a
164#define CTX_BB_HEAD_U 0x0c
165#define CTX_BB_HEAD_L 0x0e
166#define CTX_BB_STATE 0x10
167#define CTX_SECOND_BB_HEAD_U 0x12
168#define CTX_SECOND_BB_HEAD_L 0x14
169#define CTX_SECOND_BB_STATE 0x16
170#define CTX_BB_PER_CTX_PTR 0x18
171#define CTX_RCS_INDIRECT_CTX 0x1a
172#define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c
173#define CTX_LRI_HEADER_1 0x21
174#define CTX_CTX_TIMESTAMP 0x22
175#define CTX_PDP3_UDW 0x24
176#define CTX_PDP3_LDW 0x26
177#define CTX_PDP2_UDW 0x28
178#define CTX_PDP2_LDW 0x2a
179#define CTX_PDP1_UDW 0x2c
180#define CTX_PDP1_LDW 0x2e
181#define CTX_PDP0_UDW 0x30
182#define CTX_PDP0_LDW 0x32
183#define CTX_LRI_HEADER_2 0x41
184#define CTX_R_PWR_CLK_STATE 0x42
185#define CTX_GPGPU_CSR_BASE_ADDRESS 0x44
186
187#define GEN8_CTX_VALID (1<<0)
188#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
189#define GEN8_CTX_FORCE_RESTORE (1<<2)
190#define GEN8_CTX_L3LLC_COHERENT (1<<5)
191#define GEN8_CTX_PRIVILEGE (1<<8)
192
193#define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
194 (reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
195 (reg_state)[(pos)+1] = (val); \
196} while (0)
197
198#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
199 const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
200 reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
201 reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
202} while (0)
203
204#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
205 reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
206 reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
207} while (0)
208
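To make the macro plumbing concrete, here is a worked expansion (a sketch only; the DMA address is made up):

/*
 * Worked example (values invented): ASSIGN_CTX_PDP(ppgtt, reg_state, 3)
 * with a page-directory DMA address of 0x123456000 behaves like
 *
 *	reg_state[CTX_PDP3_UDW + 1] = 0x00000001;
 *	reg_state[CTX_PDP3_LDW + 1] = 0x23456000;
 *
 * i.e. only the value slot of each LRI (reg, value) pair in the context
 * image is touched; the register offsets themselves are written with
 * ASSIGN_CTX_REG() when the context image is first populated.
 */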
209enum {
210 ADVANCED_CONTEXT = 0,
211 LEGACY_32B_CONTEXT,
212 ADVANCED_AD_CONTEXT,
213 LEGACY_64B_CONTEXT
214};
215#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
216#define GEN8_CTX_ADDRESSING_MODE(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
217 LEGACY_64B_CONTEXT :\
218 LEGACY_32B_CONTEXT)
219enum {
220 FAULT_AND_HANG = 0,
221 FAULT_AND_HALT, /* Debug only */
222 FAULT_AND_STREAM,
223 FAULT_AND_CONTINUE /* Unsupported */
224};
225#define GEN8_CTX_ID_SHIFT 32
226#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
227
228static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
229static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
230 struct drm_i915_gem_object *default_ctx_obj);
231
232
233/**
234 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
235 * @dev: DRM device.
236 * @enable_execlists: value of i915.enable_execlists module parameter.
237 *
238 * Only certain platforms support Execlists (the prerequisites being
239 * support for Logical Ring Contexts and Aliasing PPGTT or better).
240 *
241 * Return: 1 if Execlists is supported and has to be enabled.
242 */
243int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
244{
245 WARN_ON(i915.enable_ppgtt == -1);
246
247 /* On platforms with execlist available, vGPU will only
248 * support execlist mode, no ring buffer mode.
249 */
250 if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev))
251 return 1;
252
253 if (INTEL_INFO(dev)->gen >= 9)
254 return 1;
255
256 if (enable_execlists == 0)
257 return 0;
258
259 if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
260 i915.use_mmio_flip >= 0)
261 return 1;
262
263 return 0;
264}
265
266static void
267logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
268{
269 struct drm_device *dev = ring->dev;
270
271 ring->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
272 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
273 (ring->id == VCS || ring->id == VCS2);
274
275 ring->ctx_desc_template = GEN8_CTX_VALID;
276 ring->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
277 GEN8_CTX_ADDRESSING_MODE_SHIFT;
278 if (IS_GEN8(dev))
279 ring->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
280 ring->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
281
282 /* TODO: WaDisableLiteRestore when we start using semaphore
283 * signalling between Command Streamers */
284 /* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */
285
286 /* WaEnableForceRestoreInCtxtDescForVCS:skl */
287 /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
288 if (ring->disable_lite_restore_wa)
289 ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
290}
291
292/**
293 * intel_lr_context_descriptor_update() - calculate & cache the descriptor
294 * for a pinned context
295 *
296 * @ctx: Context to work on
297 * @ring: Engine the descriptor will be used with
298 *
299 * The context descriptor encodes various attributes of a context,
300 * including its GTT address and some flags. Because it's fairly
301 * expensive to calculate, we'll just do it once and cache the result,
302 * which remains valid until the context is unpinned.
303 *
304 * This is what a descriptor looks like, from LSB to MSB:
305 * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
306 * bits 12-31: LRCA, GTT address of (the HWSP of) this context
307 * bits 32-51: ctx ID, a globally unique tag (the LRCA again!)
308 * bits 52-63: reserved, may encode the engine ID (for GuC)
309 */
310static void
311intel_lr_context_descriptor_update(struct intel_context *ctx,
312 struct intel_engine_cs *ring)
313{
314 uint64_t lrca, desc;
315
316 lrca = ctx->engine[ring->id].lrc_vma->node.start +
317 LRC_PPHWSP_PN * PAGE_SIZE;
318
319 desc = ring->ctx_desc_template; /* bits 0-11 */
320 desc |= lrca; /* bits 12-31 */
321 desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
322
323 ctx->engine[ring->id].lrc_desc = desc;
324}
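As a worked illustration of the bit layout documented above (the LRCA value is invented for the example):

/*
 * Example: a context whose per-engine state starts at GGTT offset
 * 0x00234000 (the LRCA), with only GEN8_CTX_VALID set in the template:
 *
 *	desc  = 0x1;				 bits  0-11 (flags)
 *	desc |= 0x00234000;			 bits 12-31 (LRCA)
 *	desc |= (0x00234000 >> 12) << 32;	 bits 32-51 (ctx ID = 0x234)
 *
 * giving a cached lrc_desc of 0x0000023400234001, later written to the
 * ELSP as two 32-bit halves by execlists_elsp_write().
 */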
325
326uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
327 struct intel_engine_cs *ring)
328{
329 return ctx->engine[ring->id].lrc_desc;
330}
331
332/**
333 * intel_execlists_ctx_id() - get the Execlists Context ID
334 * @ctx: Context to get the ID for
335 * @ring: Engine to get the ID for
336 *
337 * Do not confuse with ctx->id! Unfortunately we have a name overload
338 * here: the old context ID we pass to userspace as a handle so that
339 * they can refer to a context, and the new context ID we pass to the
340 * ELSP so that the GPU can inform us of the context status via
341 * interrupts.
342 *
343 * The context ID is a portion of the context descriptor, so we can
344 * just extract the required part from the cached descriptor.
345 *
346 * Return: 20-bit globally unique context ID.
347 */
348u32 intel_execlists_ctx_id(struct intel_context *ctx,
349 struct intel_engine_cs *ring)
350{
351 return intel_lr_context_descriptor(ctx, ring) >> GEN8_CTX_ID_SHIFT;
352}
353
354static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
355 struct drm_i915_gem_request *rq1)
356{
357
358 struct intel_engine_cs *ring = rq0->ring;
359 struct drm_device *dev = ring->dev;
360 struct drm_i915_private *dev_priv = dev->dev_private;
361 uint64_t desc[2];
362
363 if (rq1) {
364 desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->ring);
365 rq1->elsp_submitted++;
366 } else {
367 desc[1] = 0;
368 }
369
370 desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->ring);
371 rq0->elsp_submitted++;
372
373 /* You must always write both descriptors in the order below. */
374 spin_lock(&dev_priv->uncore.lock);
375 intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
376 I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1]));
377 I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1]));
378
379 I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0]));
380 /* The context is automatically loaded after the following */
381 I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
382
383 /* ELSP is a wo register, use another nearby reg for posting */
384 POSTING_READ_FW(RING_EXECLIST_STATUS_LO(ring));
385 intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
386 spin_unlock(&dev_priv->uncore.lock);
387}
388
389static int execlists_update_context(struct drm_i915_gem_request *rq)
390{
391 struct intel_engine_cs *ring = rq->ring;
392 struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
393 uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;
394
395 reg_state[CTX_RING_TAIL+1] = rq->tail;
396 reg_state[CTX_RING_BUFFER_START+1] = rq->ringbuf->vma->node.start;
397
398 if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
399 /* True 32b PPGTT with dynamic page allocation: update PDP
400 * registers and point the unallocated PDPs to scratch page.
401 * PML4 is allocated during ppgtt init, so this is not needed
402 * in 48-bit mode.
403 */
404 ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
405 ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
406 ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
407 ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
408 }
409
410 return 0;
411}
412
413static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
414 struct drm_i915_gem_request *rq1)
415{
416 execlists_update_context(rq0);
417
418 if (rq1)
419 execlists_update_context(rq1);
420
421 execlists_elsp_write(rq0, rq1);
422}
423
424static void execlists_context_unqueue(struct intel_engine_cs *ring)
425{
426 struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
427 struct drm_i915_gem_request *cursor = NULL, *tmp = NULL;
428
429 assert_spin_locked(&ring->execlist_lock);
430
431 /*
432 * If irqs are not active generate a warning as batches that finish
433 * without the irqs may get lost and a GPU Hang may occur.
434 */
435 WARN_ON(!intel_irqs_enabled(ring->dev->dev_private));
436
437 if (list_empty(&ring->execlist_queue))
438 return;
439
440 /* Try to read in pairs */
441 list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue,
442 execlist_link) {
443 if (!req0) {
444 req0 = cursor;
445 } else if (req0->ctx == cursor->ctx) {
446 /* Same ctx: ignore first request, as second request
447 * will update tail past first request's workload */
448 cursor->elsp_submitted = req0->elsp_submitted;
449 list_move_tail(&req0->execlist_link,
450 &ring->execlist_retired_req_list);
451 req0 = cursor;
452 } else {
453 req1 = cursor;
454 break;
455 }
456 }
457
458 if (IS_GEN8(ring->dev) || IS_GEN9(ring->dev)) {
459 /*
460 * WaIdleLiteRestore: make sure we never cause a lite
461 * restore with HEAD==TAIL
462 */
463 if (req0->elsp_submitted) {
464 /*
465 * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL
466 * as we resubmit the request. See gen8_emit_request()
467 * for where we prepare the padding after the end of the
468 * request.
469 */
470 struct intel_ringbuffer *ringbuf;
471
472 ringbuf = req0->ctx->engine[ring->id].ringbuf;
473 req0->tail += 8;
474 req0->tail &= ringbuf->size - 1;
475 }
476 }
477
478 WARN_ON(req1 && req1->elsp_submitted);
479
480 execlists_submit_requests(req0, req1);
481}
482
483static bool execlists_check_remove_request(struct intel_engine_cs *ring,
484 u32 request_id)
485{
486 struct drm_i915_gem_request *head_req;
487
488 assert_spin_locked(&ring->execlist_lock);
489
490 head_req = list_first_entry_or_null(&ring->execlist_queue,
491 struct drm_i915_gem_request,
492 execlist_link);
493
494 if (head_req != NULL) {
495 if (intel_execlists_ctx_id(head_req->ctx, ring) == request_id) {
496 WARN(head_req->elsp_submitted == 0,
497 "Never submitted head request\n");
498
499 if (--head_req->elsp_submitted <= 0) {
500 list_move_tail(&head_req->execlist_link,
501 &ring->execlist_retired_req_list);
502 return true;
503 }
504 }
505 }
506
507 return false;
508}
509
510static void get_context_status(struct intel_engine_cs *ring,
511 u8 read_pointer,
512 u32 *status, u32 *context_id)
513{
514 struct drm_i915_private *dev_priv = ring->dev->dev_private;
515
516 if (WARN_ON(read_pointer >= GEN8_CSB_ENTRIES))
517 return;
518
519 *status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer));
520 *context_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer));
521}
522
523/**
524 * intel_lrc_irq_handler() - handle Context Switch interrupts
525 * @ring: Engine Command Streamer to handle.
526 *
527 * Check the unread Context Status Buffers and manage the submission of new
528 * contexts to the ELSP accordingly.
529 */
530void intel_lrc_irq_handler(struct intel_engine_cs *ring)
531{
532 struct drm_i915_private *dev_priv = ring->dev->dev_private;
533 u32 status_pointer;
534 u8 read_pointer;
535 u8 write_pointer;
536 u32 status = 0;
537 u32 status_id;
538 u32 submit_contexts = 0;
539
540 status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
541
542 read_pointer = ring->next_context_status_buffer;
543 write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
544 if (read_pointer > write_pointer)
545 write_pointer += GEN8_CSB_ENTRIES;
546
547 spin_lock(&ring->execlist_lock);
548
549 while (read_pointer < write_pointer) {
550
551 get_context_status(ring, ++read_pointer % GEN8_CSB_ENTRIES,
552 &status, &status_id);
553
554 if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
555 continue;
556
557 if (status & GEN8_CTX_STATUS_PREEMPTED) {
558 if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
559 if (execlists_check_remove_request(ring, status_id))
560 WARN(1, "Lite Restored request removed from queue\n");
561 } else
562 WARN(1, "Preemption without Lite Restore\n");
563 }
564
565 if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
566 (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
567 if (execlists_check_remove_request(ring, status_id))
568 submit_contexts++;
569 }
570 }
571
572 if (ring->disable_lite_restore_wa) {
573 /* Prevent a ctx from preempting itself */
574 if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
575 (submit_contexts != 0))
576 execlists_context_unqueue(ring);
577 } else if (submit_contexts != 0) {
e981e7b1 578 execlists_context_unqueue(ring);
5af05fef 579 }
e981e7b1
TD
580
581 spin_unlock(&ring->execlist_lock);
582
583 if (unlikely(submit_contexts > 2))
584 DRM_ERROR("More than two context complete events?\n");
585
586 ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
587
588 /* Update the read pointer to the old write pointer. Manual ringbuffer
589 * management ftw </sarcasm> */
590 I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
591 _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
592 ring->next_context_status_buffer << 8));
593}
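A short worked example of the context status buffer (CSB) pointer arithmetic used above, assuming the usual six-entry buffer (GEN8_CSB_ENTRIES == 6 on gen8/gen9):

/*
 * Example: the driver last consumed CSB[4] (next_context_status_buffer == 4)
 * and the hardware write pointer now reads 1, meaning CSB[5], CSB[0] and
 * CSB[1] hold unread events:
 *
 *	read_pointer  = 4;
 *	write_pointer = 1;		read_pointer > write_pointer, so
 *	write_pointer += 6;		write_pointer becomes 7
 *
 * The while loop then reads entries (++read_pointer % 6) = 5, 0, 1, and the
 * read pointer written back to the hardware is 7 % 6 = 1.
 */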
594
595static int execlists_context_queue(struct drm_i915_gem_request *request)
596{
597 struct intel_engine_cs *ring = request->ring;
598 struct drm_i915_gem_request *cursor;
599 int num_elements = 0;
600
601 if (request->ctx != request->i915->kernel_context)
602 intel_lr_context_pin(request);
603
604 i915_gem_request_reference(request);
605
606 spin_lock_irq(&ring->execlist_lock);
607
608 list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
609 if (++num_elements > 2)
610 break;
611
612 if (num_elements > 2) {
613 struct drm_i915_gem_request *tail_req;
614
615 tail_req = list_last_entry(&ring->execlist_queue,
616 struct drm_i915_gem_request,
617 execlist_link);
618
619 if (request->ctx == tail_req->ctx) {
620 WARN(tail_req->elsp_submitted != 0,
621 "More than 2 already-submitted reqs queued\n");
622 list_move_tail(&tail_req->execlist_link,
623 &ring->execlist_retired_req_list);
624 }
625 }
626
627 list_add_tail(&request->execlist_link, &ring->execlist_queue);
628 if (num_elements == 0)
629 execlists_context_unqueue(ring);
630
631 spin_unlock_irq(&ring->execlist_lock);
632
633 return 0;
634}
635
636static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
637{
638 struct intel_engine_cs *ring = req->ring;
639 uint32_t flush_domains;
640 int ret;
641
642 flush_domains = 0;
643 if (ring->gpu_caches_dirty)
644 flush_domains = I915_GEM_GPU_DOMAINS;
645
646 ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
647 if (ret)
648 return ret;
649
650 ring->gpu_caches_dirty = false;
651 return 0;
652}
653
654static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
655 struct list_head *vmas)
656{
657 const unsigned other_rings = ~intel_ring_flag(req->ring);
658 struct i915_vma *vma;
659 uint32_t flush_domains = 0;
660 bool flush_chipset = false;
661 int ret;
662
663 list_for_each_entry(vma, vmas, exec_list) {
664 struct drm_i915_gem_object *obj = vma->obj;
665
666 if (obj->active & other_rings) {
667 ret = i915_gem_object_sync(obj, req->ring, &req);
668 if (ret)
669 return ret;
670 }
671
672 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
673 flush_chipset |= i915_gem_clflush_object(obj, false);
674
675 flush_domains |= obj->base.write_domain;
676 }
677
678 if (flush_domains & I915_GEM_DOMAIN_GTT)
679 wmb();
680
681 /* Unconditionally invalidate gpu caches and ensure that we do flush
682 * any residual writes from the previous batch.
683 */
684 return logical_ring_invalidate_all_caches(req);
685}
686
687int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
688{
689 int ret = 0;
690
691 request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
692
693 if (i915.enable_guc_submission) {
694 /*
695 * Check that the GuC has space for the request before
696 * going any further, as the i915_add_request() call
697 * later on mustn't fail ...
698 */
699 struct intel_guc *guc = &request->i915->guc;
700
701 ret = i915_guc_wq_check_space(guc->execbuf_client);
702 if (ret)
703 return ret;
704 }
705
706 if (request->ctx != request->i915->kernel_context)
707 ret = intel_lr_context_pin(request);
708
709 return ret;
710}
711
712static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
713 int bytes)
714{
715 struct intel_ringbuffer *ringbuf = req->ringbuf;
716 struct intel_engine_cs *ring = req->ring;
717 struct drm_i915_gem_request *target;
718 unsigned space;
719 int ret;
720
721 if (intel_ring_space(ringbuf) >= bytes)
722 return 0;
723
724 /* The whole point of reserving space is to not wait! */
725 WARN_ON(ringbuf->reserved_in_use);
726
727 list_for_each_entry(target, &ring->request_list, list) {
728 /*
729 * The request queue is per-engine, so can contain requests
730 * from multiple ringbuffers. Here, we must ignore any that
731 * aren't from the ringbuffer we're considering.
732 */
733 if (target->ringbuf != ringbuf)
734 continue;
735
736 /* Would completion of this request free enough space? */
737 space = __intel_ring_space(target->postfix, ringbuf->tail,
738 ringbuf->size);
739 if (space >= bytes)
740 break;
741 }
742
743 if (WARN_ON(&target->list == &ring->request_list))
744 return -ENOSPC;
745
746 ret = i915_wait_request(target);
747 if (ret)
748 return ret;
749
750 ringbuf->space = space;
751 return 0;
752}
753
754/*
755 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
756 * @request: Request to advance the logical ringbuffer of.
757 *
758 * The tail is updated in our logical ringbuffer struct, not in the actual context. What
759 * really happens during submission is that the context and current tail will be placed
760 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
761 * point, the tail *inside* the context is updated and the ELSP written to.
762 */
763static int
764intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
765{
766 struct intel_ringbuffer *ringbuf = request->ringbuf;
767 struct drm_i915_private *dev_priv = request->i915;
768
769 intel_logical_ring_advance(ringbuf);
770 request->tail = ringbuf->tail;
771
772 /*
773 * Here we add two extra NOOPs as padding to avoid
774 * lite restore of a context with HEAD==TAIL.
775 *
776 * Caller must reserve WA_TAIL_DWORDS for us!
777 */
778 intel_logical_ring_emit(ringbuf, MI_NOOP);
779 intel_logical_ring_emit(ringbuf, MI_NOOP);
780 intel_logical_ring_advance(ringbuf);
781
782 if (intel_ring_stopped(request->ring))
783 return 0;
784
785 if (dev_priv->guc.execbuf_client)
786 i915_guc_submit(dev_priv->guc.execbuf_client, request);
787 else
788 execlists_context_queue(request);
789
790 return 0;
791}
792
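A minimal sketch of how a caller is expected to honour the "reserve WA_TAIL_DWORDS" rule mentioned above (hypothetical caller shown only for illustration; in this file the real user is the gen8 request-emission path):

/*
 * Hypothetical caller, for illustration only:
 *
 *	ret = intel_logical_ring_begin(request, n_dwords + WA_TAIL_DWORDS);
 *	if (ret)
 *		return ret;
 *	... emit n_dwords worth of commands ...
 *	return intel_logical_ring_advance_and_submit(request);
 *
 * The two reserved dwords are consumed here by the MI_NOOP padding that
 * guards against a lite restore with HEAD == TAIL.
 */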
793static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
794{
795 uint32_t __iomem *virt;
796 int rem = ringbuf->size - ringbuf->tail;
797
798 virt = ringbuf->virtual_start + ringbuf->tail;
799 rem /= 4;
800 while (rem--)
801 iowrite32(MI_NOOP, virt++);
802
803 ringbuf->tail = 0;
804 intel_ring_update_space(ringbuf);
805}
806
807static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
808{
809 struct intel_ringbuffer *ringbuf = req->ringbuf;
810 int remain_usable = ringbuf->effective_size - ringbuf->tail;
811 int remain_actual = ringbuf->size - ringbuf->tail;
812 int ret, total_bytes, wait_bytes = 0;
813 bool need_wrap = false;
814
815 if (ringbuf->reserved_in_use)
816 total_bytes = bytes;
817 else
818 total_bytes = bytes + ringbuf->reserved_size;
819
820 if (unlikely(bytes > remain_usable)) {
821 /*
822 * Not enough space for the basic request. So need to flush
823 * out the remainder and then wait for base + reserved.
824 */
825 wait_bytes = remain_actual + total_bytes;
826 need_wrap = true;
827 } else {
828 if (unlikely(total_bytes > remain_usable)) {
829 /*
830 * The base request will fit but the reserved space
831 * falls off the end. So only need to wait for the
832 * reserved size after flushing out the remainder.
833 */
834 wait_bytes = remain_actual + ringbuf->reserved_size;
835 need_wrap = true;
836 } else if (total_bytes > ringbuf->space) {
837 /* No wrapping required, just waiting. */
838 wait_bytes = total_bytes;
839 }
840 }
841
842 if (wait_bytes) {
843 ret = logical_ring_wait_for_space(req, wait_bytes);
844 if (unlikely(ret))
845 return ret;
846
847 if (need_wrap)
848 __wrap_ring_buffer(ringbuf);
849 }
850
851 return 0;
852}
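A worked example of the space accounting above (all numbers invented):

/*
 * Example: size = 16384, effective_size = 16256, tail = 16000, so
 * remain_usable = 256 and remain_actual = 384.  A request needing
 * bytes = 512 with reserved_size = 160 (not yet in use) gives
 * total_bytes = 672.  Since bytes > remain_usable we must wrap:
 * wait_bytes = remain_actual + total_bytes = 1056, and once that much
 * space is free the 384 bytes at the end of the ring are filled with
 * MI_NOOPs by __wrap_ring_buffer() before emission restarts at 0.
 */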
853
854/**
855 * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
856 *
857 * @req: The request to start some new work for
858 * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
859 *
860 * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
861 * be wrapped, or wait a bit for the tail to be updated). This function takes care of that
862 * and also preallocates a request (every workload submission is still mediated through
863 * requests, same as it did with legacy ringbuffer submission).
864 *
865 * Return: non-zero if the ringbuffer is not ready to be written to.
866 */
867int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
868{
869 struct drm_i915_private *dev_priv;
870 int ret;
871
872 WARN_ON(req == NULL);
873 dev_priv = req->ring->dev->dev_private;
874
875 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
876 dev_priv->mm.interruptible);
877 if (ret)
878 return ret;
879
880 ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
881 if (ret)
882 return ret;
883
884 req->ringbuf->space -= num_dwords * sizeof(uint32_t);
885 return 0;
886}
887
888int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
889{
890 /*
891 * The first call merely notes the reserve request and is common for
892 * all back ends. The subsequent localised _begin() call actually
893 * ensures that the reservation is available. Without the begin, if
894 * the request creator immediately submitted the request without
895 * adding any commands to it then there might not actually be
896 * sufficient room for the submission commands.
897 */
898 intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
899
900 return intel_logical_ring_begin(request, 0);
901}
902
903/**
904 * execlists_submission() - submit a batchbuffer for execution, Execlists style
905 * @dev: DRM device.
906 * @file: DRM file.
907 * @ring: Engine Command Streamer to submit to.
908 * @ctx: Context to employ for this submission.
909 * @args: execbuffer call arguments.
910 * @vmas: list of vmas.
911 * @batch_obj: the batchbuffer to submit.
912 * @exec_start: batchbuffer start virtual address pointer.
913 * @dispatch_flags: translated execbuffer call flags.
914 *
915 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
916 * away the submission details of the execbuffer ioctl call.
917 *
918 * Return: non-zero if the submission fails.
919 */
920int intel_execlists_submission(struct i915_execbuffer_params *params,
921 struct drm_i915_gem_execbuffer2 *args,
922 struct list_head *vmas)
923{
924 struct drm_device *dev = params->dev;
925 struct intel_engine_cs *ring = params->ring;
926 struct drm_i915_private *dev_priv = dev->dev_private;
927 struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf;
928 u64 exec_start;
929 int instp_mode;
930 u32 instp_mask;
931 int ret;
932
933 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
934 instp_mask = I915_EXEC_CONSTANTS_MASK;
935 switch (instp_mode) {
936 case I915_EXEC_CONSTANTS_REL_GENERAL:
937 case I915_EXEC_CONSTANTS_ABSOLUTE:
938 case I915_EXEC_CONSTANTS_REL_SURFACE:
939 if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
940 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
941 return -EINVAL;
942 }
943
944 if (instp_mode != dev_priv->relative_constants_mode) {
945 if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
946 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
947 return -EINVAL;
948 }
949
950 /* The HW changed the meaning on this bit on gen6 */
951 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
952 }
953 break;
954 default:
955 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
956 return -EINVAL;
957 }
958
959 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
960 DRM_DEBUG("sol reset is gen7 only\n");
961 return -EINVAL;
962 }
963
964 ret = execlists_move_to_gpu(params->request, vmas);
965 if (ret)
966 return ret;
967
968 if (ring == &dev_priv->ring[RCS] &&
969 instp_mode != dev_priv->relative_constants_mode) {
970 ret = intel_logical_ring_begin(params->request, 4);
971 if (ret)
972 return ret;
973
974 intel_logical_ring_emit(ringbuf, MI_NOOP);
975 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
976 intel_logical_ring_emit_reg(ringbuf, INSTPM);
977 intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
978 intel_logical_ring_advance(ringbuf);
979
980 dev_priv->relative_constants_mode = instp_mode;
981 }
982
983 exec_start = params->batch_obj_vm_offset +
984 args->batch_start_offset;
985
986 ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
987 if (ret)
988 return ret;
989
990 trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
991
992 i915_gem_execbuffer_move_to_active(vmas, params->request);
993 i915_gem_execbuffer_retire_commands(params);
994
995 return 0;
996}
997
998void intel_execlists_retire_requests(struct intel_engine_cs *ring)
999{
1000 struct drm_i915_gem_request *req, *tmp;
1001 struct list_head retired_list;
1002
1003 WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
1004 if (list_empty(&ring->execlist_retired_req_list))
1005 return;
1006
1007 INIT_LIST_HEAD(&retired_list);
1008 spin_lock_irq(&ring->execlist_lock);
1009 list_replace_init(&ring->execlist_retired_req_list, &retired_list);
1010 spin_unlock_irq(&ring->execlist_lock);
1011
1012 list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
1013 struct intel_context *ctx = req->ctx;
1014 struct drm_i915_gem_object *ctx_obj =
1015 ctx->engine[ring->id].state;
1016
1017 if (ctx_obj && (ctx != req->i915->kernel_context))
1018 intel_lr_context_unpin(req);
1019 list_del(&req->execlist_link);
1020 i915_gem_request_unreference(req);
1021 }
1022}
1023
1024void intel_logical_ring_stop(struct intel_engine_cs *ring)
1025{
1026 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1027 int ret;
1028
1029 if (!intel_ring_initialized(ring))
1030 return;
1031
1032 ret = intel_ring_idle(ring);
1033 if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
1034 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
1035 ring->name, ret);
1036
1037 /* TODO: Is this correct with Execlists enabled? */
1038 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
1039 if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
1040 DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
1041 return;
1042 }
1043 I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
1044}
1045
1046int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
1047{
1048 struct intel_engine_cs *ring = req->ring;
1049 int ret;
1050
1051 if (!ring->gpu_caches_dirty)
1052 return 0;
1053
1054 ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
1055 if (ret)
1056 return ret;
1057
1058 ring->gpu_caches_dirty = false;
1059 return 0;
1060}
1061
1062static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
1063 struct intel_context *ctx)
1064{
1065 struct drm_device *dev = ring->dev;
1066 struct drm_i915_private *dev_priv = dev->dev_private;
1067 struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
1068 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
1069 struct page *lrc_state_page;
1070 int ret;
1071
1072 WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
1073
1074 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
1075 PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
1076 if (ret)
1077 return ret;
1078
1079 lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
1080 if (WARN_ON(!lrc_state_page)) {
1081 ret = -ENODEV;
1082 goto unpin_ctx_obj;
1083 }
1084
1085 ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
1086 if (ret)
1087 goto unpin_ctx_obj;
1088
1089 ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
1090 intel_lr_context_descriptor_update(ctx, ring);
1091 ctx->engine[ring->id].lrc_reg_state = kmap(lrc_state_page);
1092 ctx_obj->dirty = true;
1093
1094 /* Invalidate GuC TLB. */
1095 if (i915.enable_guc_submission)
1096 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
1097
1098 return ret;
1099
1100unpin_ctx_obj:
1101 i915_gem_object_ggtt_unpin(ctx_obj);
1102
1103 return ret;
1104}
1105
1106static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
1107{
1108 int ret = 0;
1109 struct intel_engine_cs *ring = rq->ring;
1110
1111 if (rq->ctx->engine[ring->id].pin_count++ == 0) {
1112 ret = intel_lr_context_do_pin(ring, rq->ctx);
1113 if (ret)
1114 goto reset_pin_count;
1115 }
1116 return ret;
1117
1118reset_pin_count:
1119 rq->ctx->engine[ring->id].pin_count = 0;
1120 return ret;
1121}
1122
1123void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
1124{
1125 struct intel_engine_cs *ring = rq->ring;
1126 struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
1127 struct intel_ringbuffer *ringbuf = rq->ringbuf;
1128
1129 WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
1130
1131 if (!ctx_obj)
1132 return;
1133
1134 if (--rq->ctx->engine[ring->id].pin_count == 0) {
1135 kunmap(kmap_to_page(rq->ctx->engine[ring->id].lrc_reg_state));
1136 intel_unpin_ringbuffer_obj(ringbuf);
1137 i915_gem_object_ggtt_unpin(ctx_obj);
1138 rq->ctx->engine[ring->id].lrc_vma = NULL;
1139 rq->ctx->engine[ring->id].lrc_desc = 0;
1140 rq->ctx->engine[ring->id].lrc_reg_state = NULL;
1141 }
1142}
1143
1144static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
1145{
1146 int ret, i;
1147 struct intel_engine_cs *ring = req->ring;
1148 struct intel_ringbuffer *ringbuf = req->ringbuf;
1149 struct drm_device *dev = ring->dev;
1150 struct drm_i915_private *dev_priv = dev->dev_private;
1151 struct i915_workarounds *w = &dev_priv->workarounds;
1152
1153 if (w->count == 0)
1154 return 0;
1155
1156 ring->gpu_caches_dirty = true;
1157 ret = logical_ring_flush_all_caches(req);
1158 if (ret)
1159 return ret;
1160
1161 ret = intel_logical_ring_begin(req, w->count * 2 + 2);
1162 if (ret)
1163 return ret;
1164
1165 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
1166 for (i = 0; i < w->count; i++) {
1167 intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
1168 intel_logical_ring_emit(ringbuf, w->reg[i].value);
1169 }
1170 intel_logical_ring_emit(ringbuf, MI_NOOP);
1171
1172 intel_logical_ring_advance(ringbuf);
1173
1174 ring->gpu_caches_dirty = true;
1175 ret = logical_ring_flush_all_caches(req);
1176 if (ret)
1177 return ret;
1178
1179 return 0;
1180}
1181
1182#define wa_ctx_emit(batch, index, cmd) \
1183 do { \
1184 int __index = (index)++; \
1185 if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
1186 return -ENOSPC; \
1187 } \
1188 batch[__index] = (cmd); \
1189 } while (0)
1190
1191#define wa_ctx_emit_reg(batch, index, reg) \
1192 wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))
1193
1194/*
1195 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
1196 * PIPE_CONTROL instruction. This is required for the flush to happen correctly
1197 * but there is a slight complication as this is applied in WA batch where the
1198 * values are only initialized once so we cannot take register value at the
1199 * beginning and reuse it further; hence we save its value to memory, upload a
1200 * constant value with bit21 set and then we restore it back with the saved value.
1201 * To simplify the WA, a constant value is formed by using the default value
1202 * of this register. This shouldn't be a problem because we are only modifying
1203 * it for a short period and this batch is non-preemptible. We can of course
1204 * use additional instructions that read the actual value of the register
1205 * at that time and set our bit of interest but it makes the WA complicated.
1206 *
1207 * This WA is also required for Gen9 so extracting as a function avoids
1208 * code duplication.
1209 */
1210static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
1211 uint32_t *const batch,
1212 uint32_t index)
1213{
1214 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
1215
1216 /*
1217 * WaDisableLSQCROPERFforOCL:skl
1218 * This WA is implemented in skl_init_clock_gating() but since
1219 * this batch updates GEN8_L3SQCREG4 with default value we need to
1220 * set this bit here to retain the WA during flush.
1221 */
1222 if (IS_SKL_REVID(ring->dev, 0, SKL_REVID_E0))
1223 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
1224
1225 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
1226 MI_SRM_LRM_GLOBAL_GTT));
1227 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
1228 wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
1229 wa_ctx_emit(batch, index, 0);
1230
1231 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1232 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
1233 wa_ctx_emit(batch, index, l3sqc4_flush);
1234
1235 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1236 wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
1237 PIPE_CONTROL_DC_FLUSH_ENABLE));
1238 wa_ctx_emit(batch, index, 0);
1239 wa_ctx_emit(batch, index, 0);
1240 wa_ctx_emit(batch, index, 0);
1241 wa_ctx_emit(batch, index, 0);
1242
1243 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
1244 MI_SRM_LRM_GLOBAL_GTT));
1245 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
1246 wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
1247 wa_ctx_emit(batch, index, 0);
1248
1249 return index;
1250}
1251
1252static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
1253 uint32_t offset,
1254 uint32_t start_alignment)
1255{
1256 return wa_ctx->offset = ALIGN(offset, start_alignment);
1257}
1258
1259static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
1260 uint32_t offset,
1261 uint32_t size_alignment)
1262{
1263 wa_ctx->size = offset - wa_ctx->offset;
1264
1265 WARN(wa_ctx->size % size_alignment,
1266 "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
1267 wa_ctx->size, size_alignment);
1268 return 0;
1269}
1270
1271/**
1272 * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
1273 *
1274 * @ring: only applicable for RCS
1275 * @wa_ctx: structure representing wa_ctx
1276 * offset: specifies start of the batch, should be cache-aligned. This is updated
1277 * with the offset value received as input.
1278 * size: size of the batch in DWORDS but HW expects in terms of cachelines
1279 * @batch: page in which WA are loaded
1280 * @offset: This field specifies the start of the batch, it should be
1281 * cache-aligned otherwise it is adjusted accordingly.
1282 * Typically we only have one indirect_ctx and per_ctx batch buffer which are
1283 * initialized at the beginning and shared across all contexts but this field
1284 * helps us to have multiple batches at different offsets and select them based
1285 * on a criteria. At the moment this batch always starts at the beginning of the page
1286 * and at this point we don't have multiple wa_ctx batch buffers.
1287 *
1288 * The number of WAs applied is not known at the beginning; we use this field
1289 * to return the number of DWORDS written.
1290 *
1291 * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
1292 * so it adds NOOPs as padding to make it cacheline aligned.
1293 * MI_BATCH_BUFFER_END will be added to perctx batch and both of them together
1294 * makes a complete batch buffer.
1295 *
1296 * Return: non-zero if we exceed the PAGE_SIZE limit.
1297 */
1298
1299static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
1300 struct i915_wa_ctx_bb *wa_ctx,
1301 uint32_t *const batch,
1302 uint32_t *offset)
1303{
1304 uint32_t scratch_addr;
1305 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1306
1307 /* WaDisableCtxRestoreArbitration:bdw,chv */
1308 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1309
1310 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
1311 if (IS_BROADWELL(ring->dev)) {
1312 int rc = gen8_emit_flush_coherentl3_wa(ring, batch, index);
1313 if (rc < 0)
1314 return rc;
1315 index = rc;
1316 }
1317
1318 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
1319 /* Actual scratch location is at 128 bytes offset */
1320 scratch_addr = ring->scratch.gtt_offset + 2*CACHELINE_BYTES;
1321
1322 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1323 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
1324 PIPE_CONTROL_GLOBAL_GTT_IVB |
1325 PIPE_CONTROL_CS_STALL |
1326 PIPE_CONTROL_QW_WRITE));
1327 wa_ctx_emit(batch, index, scratch_addr);
1328 wa_ctx_emit(batch, index, 0);
1329 wa_ctx_emit(batch, index, 0);
1330 wa_ctx_emit(batch, index, 0);
1331
1332 /* Pad to end of cacheline */
1333 while (index % CACHELINE_DWORDS)
1334 wa_ctx_emit(batch, index, MI_NOOP);
1335
1336 /*
1337 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
1338 * execution depends on the length specified in terms of cache lines
1339 * in the register CTX_RCS_INDIRECT_CTX
1340 */
1341
1342 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1343}
1344
1345/**
1346 * gen8_init_perctx_bb() - initialize per ctx batch with WA
1347 *
1348 * @ring: only applicable for RCS
1349 * @wa_ctx: structure representing wa_ctx
1350 * offset: specifies start of the batch, should be cache-aligned.
1351 * size: size of the batch in DWORDS but HW expects in terms of cachelines
1352 * @batch: page in which WA are loaded
1353 * @offset: This field specifies the start of this batch.
1354 * This batch is started immediately after indirect_ctx batch. Since we ensure
1355 * that indirect_ctx ends on a cacheline this batch is aligned automatically.
1356 *
1357 * The number of DWORDS written are returned using this field.
1358 *
1359 * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
1360 * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
1361 */
1362static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
1363 struct i915_wa_ctx_bb *wa_ctx,
1364 uint32_t *const batch,
1365 uint32_t *offset)
1366{
1367 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1368
1369 /* WaDisableCtxRestoreArbitration:bdw,chv */
1370 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1371
1372 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
1373
1374 return wa_ctx_end(wa_ctx, *offset = index, 1);
1375}
1376
1377static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
1378 struct i915_wa_ctx_bb *wa_ctx,
1379 uint32_t *const batch,
1380 uint32_t *offset)
1381{
1382 int ret;
1383 struct drm_device *dev = ring->dev;
1384 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1385
1386 /* WaDisableCtxRestoreArbitration:skl,bxt */
1387 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
1388 IS_BXT_REVID(dev, 0, BXT_REVID_A1))
1389 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1390
1391 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
1392 ret = gen8_emit_flush_coherentl3_wa(ring, batch, index);
1393 if (ret < 0)
1394 return ret;
1395 index = ret;
1396
1397 /* Pad to end of cacheline */
1398 while (index % CACHELINE_DWORDS)
1399 wa_ctx_emit(batch, index, MI_NOOP);
1400
1401 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1402}
1403
1404static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
1405 struct i915_wa_ctx_bb *wa_ctx,
1406 uint32_t *const batch,
1407 uint32_t *offset)
1408{
1409 struct drm_device *dev = ring->dev;
1410 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1411
1412 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
1413 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
1414 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
1415 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1416 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
1417 wa_ctx_emit(batch, index,
1418 _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
1419 wa_ctx_emit(batch, index, MI_NOOP);
1420 }
1421
1422 /* WaDisableCtxRestoreArbitration:skl,bxt */
1423 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
1424 IS_BXT_REVID(dev, 0, BXT_REVID_A1))
1425 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1426
1427 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
1428
1429 return wa_ctx_end(wa_ctx, *offset = index, 1);
1430}
1431
1432static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size)
1433{
1434 int ret;
1435
1436 ring->wa_ctx.obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size));
1437 if (!ring->wa_ctx.obj) {
1438 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
1439 return -ENOMEM;
1440 }
1441
1442 ret = i915_gem_obj_ggtt_pin(ring->wa_ctx.obj, PAGE_SIZE, 0);
1443 if (ret) {
1444 DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
1445 ret);
1446 drm_gem_object_unreference(&ring->wa_ctx.obj->base);
1447 return ret;
1448 }
1449
1450 return 0;
1451}
1452
1453static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *ring)
1454{
1455 if (ring->wa_ctx.obj) {
1456 i915_gem_object_ggtt_unpin(ring->wa_ctx.obj);
1457 drm_gem_object_unreference(&ring->wa_ctx.obj->base);
1458 ring->wa_ctx.obj = NULL;
1459 }
1460}
1461
1462static int intel_init_workaround_bb(struct intel_engine_cs *ring)
1463{
1464 int ret;
1465 uint32_t *batch;
1466 uint32_t offset;
1467 struct page *page;
1468 struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
1469
1470 WARN_ON(ring->id != RCS);
1471
1472 /* update this when WA for higher Gen are added */
1473 if (INTEL_INFO(ring->dev)->gen > 9) {
1474 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
1475 INTEL_INFO(ring->dev)->gen);
1476 return 0;
1477 }
1478
1479 /* some WA perform writes to scratch page, ensure it is valid */
1480 if (ring->scratch.obj == NULL) {
1481 DRM_ERROR("scratch page not allocated for %s\n", ring->name);
1482 return -EINVAL;
1483 }
1484
1485 ret = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE);
1486 if (ret) {
1487 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
1488 return ret;
1489 }
1490
1491 page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
1492 batch = kmap_atomic(page);
1493 offset = 0;
1494
1495 if (INTEL_INFO(ring->dev)->gen == 8) {
1496 ret = gen8_init_indirectctx_bb(ring,
1497 &wa_ctx->indirect_ctx,
1498 batch,
1499 &offset);
1500 if (ret)
1501 goto out;
1502
1503 ret = gen8_init_perctx_bb(ring,
1504 &wa_ctx->per_ctx,
1505 batch,
1506 &offset);
1507 if (ret)
1508 goto out;
0504cffc
AS
1509 } else if (INTEL_INFO(ring->dev)->gen == 9) {
1510 ret = gen9_init_indirectctx_bb(ring,
1511 &wa_ctx->indirect_ctx,
1512 batch,
1513 &offset);
1514 if (ret)
1515 goto out;
1516
1517 ret = gen9_init_perctx_bb(ring,
1518 &wa_ctx->per_ctx,
1519 batch,
1520 &offset);
1521 if (ret)
1522 goto out;
17ee950d
AS
1523 }
1524
1525out:
1526 kunmap_atomic(batch);
1527 if (ret)
1528 lrc_destroy_wa_ctx_obj(ring);
1529
1530 return ret;
1531}
1532
9b1136d5
OM
1533static int gen8_init_common_ring(struct intel_engine_cs *ring)
1534{
1535 struct drm_device *dev = ring->dev;
1536 struct drm_i915_private *dev_priv = dev->dev_private;
dfc53c5e 1537 u8 next_context_status_buffer_hw;
9b1136d5 1538
e84fe803 1539 lrc_setup_hardware_status_page(ring,
ed54c1a1 1540 dev_priv->kernel_context->engine[ring->id].state);
e84fe803 1541
73d477f6
OM
1542 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
1543 I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
1544
9b1136d5
OM
1545 I915_WRITE(RING_MODE_GEN7(ring),
1546 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1547 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
1548 POSTING_READ(RING_MODE_GEN7(ring));
dfc53c5e
MT
1549
1550 /*
1551 * Instead of resetting the Context Status Buffer (CSB) read pointer to
1552 * zero, we need to read the write pointer from hardware and use its
1553 * value because "this register is power context save restored".
1554 * Effectively, these states have been observed:
1555 *
1556 * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
1557 * BDW | CSB regs not reset | CSB regs reset |
1558 * CHT | CSB regs not reset | CSB regs not reset |
5590a5f0
BW
1559 * SKL | ? | ? |
1560 * BXT | ? | ? |
dfc53c5e 1561 */
5590a5f0
BW
1562 next_context_status_buffer_hw =
1563 GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(ring)));
dfc53c5e
MT
1564
1565 /*
1566 * When the CSB registers are reset (also after power-up / gpu reset),
1567 * CSB write pointer is set to all 1's, which is not valid, use '5' in
1568 * this special case, so the first element read is CSB[0].
1569 */
1570 if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
1571 next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
1572
1573 ring->next_context_status_buffer = next_context_status_buffer_hw;
9b1136d5
OM
1574 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
1575
1576 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
1577
1578 return 0;
1579}
1580
1581static int gen8_init_render_ring(struct intel_engine_cs *ring)
1582{
1583 struct drm_device *dev = ring->dev;
1584 struct drm_i915_private *dev_priv = dev->dev_private;
1585 int ret;
1586
1587 ret = gen8_init_common_ring(ring);
1588 if (ret)
1589 return ret;
1590
1591 /* We need to disable the AsyncFlip performance optimisations in order
1592 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1593 * programmed to '1' on all products.
1594 *
1595 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
1596 */
1597 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1598
9b1136d5
OM
1599 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1600
771b9a53 1601 return init_workarounds_ring(ring);
9b1136d5
OM
1602}
1603
82ef822e
DL
1604static int gen9_init_render_ring(struct intel_engine_cs *ring)
1605{
1606 int ret;
1607
1608 ret = gen8_init_common_ring(ring);
1609 if (ret)
1610 return ret;
1611
1612 return init_workarounds_ring(ring);
1613}
1614
7a01a0a2
MT
1615static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
1616{
1617 struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
1618 struct intel_engine_cs *ring = req->ring;
1619 struct intel_ringbuffer *ringbuf = req->ringbuf;
1620 const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
1621 int i, ret;
1622
1623 ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2);
1624 if (ret)
1625 return ret;
1626
1627 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
1628 for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
1629 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1630
f92a9162 1631 intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(ring, i));
7a01a0a2 1632 intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
f92a9162 1633 intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(ring, i));
7a01a0a2
MT
1634 intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
1635 }
1636
1637 intel_logical_ring_emit(ringbuf, MI_NOOP);
1638 intel_logical_ring_advance(ringbuf);
1639
1640 return 0;
1641}
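/*
 * The sequence emitted above is a single MI_LOAD_REGISTER_IMM(num_lri_cmds)
 * header followed, for each of the GEN8_LEGACY_PDPES page directories from
 * the highest index down to 0, by the UDW register/value pair and then the
 * LDW register/value pair, terminated with an MI_NOOP. That is
 * num_lri_cmds * 2 + 2 dwords in total, matching the space reserved by
 * intel_logical_ring_begin() at the top of the function.
 */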
1642
be795fc1 1643static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
8e004efc 1644 u64 offset, unsigned dispatch_flags)
15648585 1645{
be795fc1 1646 struct intel_ringbuffer *ringbuf = req->ringbuf;
8e004efc 1647 bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
15648585
OM
1648 int ret;
1649
7a01a0a2
MT
1650	/* Don't rely on hw updating PDPs, especially in lite-restore.
1651 * Ideally, we should set Force PD Restore in ctx descriptor,
1652 * but we can't. Force Restore would be a second option, but
1653 * it is unsafe in case of lite-restore (because the ctx is
2dba3239
MT
1654 * not idle). PML4 is allocated during ppgtt init so this is
1655	 * not needed in 48-bit. */
7a01a0a2
MT
1656 if (req->ctx->ppgtt &&
1657 (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
331f38e7
ZL
1658 if (!USES_FULL_48BIT_PPGTT(req->i915) &&
1659 !intel_vgpu_active(req->i915->dev)) {
2dba3239
MT
1660 ret = intel_logical_ring_emit_pdps(req);
1661 if (ret)
1662 return ret;
1663 }
7a01a0a2
MT
1664
1665 req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
1666 }
1667
4d616a29 1668 ret = intel_logical_ring_begin(req, 4);
15648585
OM
1669 if (ret)
1670 return ret;
1671
1672 /* FIXME(BDW): Address space and security selectors. */
6922528a
AJ
1673 intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
1674 (ppgtt<<8) |
1675 (dispatch_flags & I915_DISPATCH_RS ?
1676 MI_BATCH_RESOURCE_STREAMER : 0));
15648585
OM
1677 intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
1678 intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
1679 intel_logical_ring_emit(ringbuf, MI_NOOP);
1680 intel_logical_ring_advance(ringbuf);
1681
1682 return 0;
1683}
1684
73d477f6
OM
1685static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
1686{
1687 struct drm_device *dev = ring->dev;
1688 struct drm_i915_private *dev_priv = dev->dev_private;
1689 unsigned long flags;
1690
7cd512f1 1691 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
73d477f6
OM
1692 return false;
1693
1694 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1695 if (ring->irq_refcount++ == 0) {
1696 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
1697 POSTING_READ(RING_IMR(ring->mmio_base));
1698 }
1699 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1700
1701 return true;
1702}
1703
1704static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
1705{
1706 struct drm_device *dev = ring->dev;
1707 struct drm_i915_private *dev_priv = dev->dev_private;
1708 unsigned long flags;
1709
1710 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1711 if (--ring->irq_refcount == 0) {
1712 I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
1713 POSTING_READ(RING_IMR(ring->mmio_base));
1714 }
1715 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1716}
1717
7deb4d39 1718static int gen8_emit_flush(struct drm_i915_gem_request *request,
4712274c
OM
1719 u32 invalidate_domains,
1720 u32 unused)
1721{
7deb4d39 1722 struct intel_ringbuffer *ringbuf = request->ringbuf;
4712274c
OM
1723 struct intel_engine_cs *ring = ringbuf->ring;
1724 struct drm_device *dev = ring->dev;
1725 struct drm_i915_private *dev_priv = dev->dev_private;
1726 uint32_t cmd;
1727 int ret;
1728
4d616a29 1729 ret = intel_logical_ring_begin(request, 4);
4712274c
OM
1730 if (ret)
1731 return ret;
1732
1733 cmd = MI_FLUSH_DW + 1;
1734
f0a1fb10
CW
1735 /* We always require a command barrier so that subsequent
1736 * commands, such as breadcrumb interrupts, are strictly ordered
1737 * wrt the contents of the write cache being flushed to memory
1738 * (and thus being coherent from the CPU).
1739 */
1740 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1741
1742 if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
1743 cmd |= MI_INVALIDATE_TLB;
1744 if (ring == &dev_priv->ring[VCS])
1745 cmd |= MI_INVALIDATE_BSD;
4712274c
OM
1746 }
1747
1748 intel_logical_ring_emit(ringbuf, cmd);
1749 intel_logical_ring_emit(ringbuf,
1750 I915_GEM_HWS_SCRATCH_ADDR |
1751 MI_FLUSH_DW_USE_GTT);
1752 intel_logical_ring_emit(ringbuf, 0); /* upper addr */
1753 intel_logical_ring_emit(ringbuf, 0); /* value */
1754 intel_logical_ring_advance(ringbuf);
1755
1756 return 0;
1757}
1758
7deb4d39 1759static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
4712274c
OM
1760 u32 invalidate_domains,
1761 u32 flush_domains)
1762{
7deb4d39 1763 struct intel_ringbuffer *ringbuf = request->ringbuf;
4712274c
OM
1764 struct intel_engine_cs *ring = ringbuf->ring;
1765 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
1a5a9ce7 1766 bool vf_flush_wa = false;
4712274c
OM
1767 u32 flags = 0;
1768 int ret;
1769
1770 flags |= PIPE_CONTROL_CS_STALL;
1771
1772 if (flush_domains) {
1773 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1774 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
965fd602 1775 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
40a24488 1776 flags |= PIPE_CONTROL_FLUSH_ENABLE;
4712274c
OM
1777 }
1778
1779 if (invalidate_domains) {
1780 flags |= PIPE_CONTROL_TLB_INVALIDATE;
1781 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
1782 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
1783 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
1784 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
1785 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
1786 flags |= PIPE_CONTROL_QW_WRITE;
1787 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
4712274c 1788
1a5a9ce7
BW
1789 /*
1790 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
1791 * pipe control.
1792 */
1793 if (IS_GEN9(ring->dev))
1794 vf_flush_wa = true;
1795 }
9647ff36 1796
4d616a29 1797 ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
4712274c
OM
1798 if (ret)
1799 return ret;
1800
9647ff36
ID
1801 if (vf_flush_wa) {
1802 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1803 intel_logical_ring_emit(ringbuf, 0);
1804 intel_logical_ring_emit(ringbuf, 0);
1805 intel_logical_ring_emit(ringbuf, 0);
1806 intel_logical_ring_emit(ringbuf, 0);
1807 intel_logical_ring_emit(ringbuf, 0);
1808 }
1809
4712274c
OM
1810 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1811 intel_logical_ring_emit(ringbuf, flags);
1812 intel_logical_ring_emit(ringbuf, scratch_addr);
1813 intel_logical_ring_emit(ringbuf, 0);
1814 intel_logical_ring_emit(ringbuf, 0);
1815 intel_logical_ring_emit(ringbuf, 0);
1816 intel_logical_ring_advance(ringbuf);
1817
1818 return 0;
1819}
1820
e94e37ad
OM
1821static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
1822{
1823 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
1824}
1825
1826static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
1827{
1828 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
1829}
1830
319404df
ID
1831static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
1832{
1833
1834 /*
1835 * On BXT A steppings there is a HW coherency issue whereby the
1836 * MI_STORE_DATA_IMM storing the completed request's seqno
1837 * occasionally doesn't invalidate the CPU cache. Work around this by
1838 * clflushing the corresponding cacheline whenever the caller wants
1839 * the coherency to be guaranteed. Note that this cacheline is known
1840 * to be clean at this point, since we only write it in
1841 * bxt_a_set_seqno(), where we also do a clflush after the write. So
1842 * this clflush in practice becomes an invalidate operation.
1843 */
1844
1845 if (!lazy_coherency)
1846 intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
1847
1848 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
1849}
1850
1851static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno)
1852{
1853 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
1854
1855 /* See bxt_a_get_seqno() explaining the reason for the clflush. */
1856 intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
1857}
1858
7c17d377
CW
1859/*
1860 * Reserve space for 2 NOOPs at the end of each request to be
1861 * used as a workaround for not being allowed to do lite
1862 * restore with HEAD==TAIL (WaIdleLiteRestore).
1863 */
1864#define WA_TAIL_DWORDS 2
1865
1866static inline u32 hws_seqno_address(struct intel_engine_cs *engine)
1867{
1868 return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
1869}
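/*
 * gen8_emit_request() and gen8_emit_request_render() below write the
 * request's seqno to this HWS address with a post-sync store (MI_FLUSH_DW
 * with STOREDW, or PIPE_CONTROL with QW_WRITE on the render ring) and only
 * afterwards emit MI_USER_INTERRUPT, so the completion interrupt is
 * generated after the seqno write has been issued.
 */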
1870
c4e76638 1871static int gen8_emit_request(struct drm_i915_gem_request *request)
4da46e1e 1872{
c4e76638 1873 struct intel_ringbuffer *ringbuf = request->ringbuf;
4da46e1e
OM
1874 int ret;
1875
7c17d377 1876 ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
4da46e1e
OM
1877 if (ret)
1878 return ret;
1879
7c17d377
CW
1880 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
1881 BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
4da46e1e 1882
4da46e1e 1883 intel_logical_ring_emit(ringbuf,
7c17d377
CW
1884 (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
1885 intel_logical_ring_emit(ringbuf,
1886 hws_seqno_address(request->ring) |
1887 MI_FLUSH_DW_USE_GTT);
4da46e1e 1888 intel_logical_ring_emit(ringbuf, 0);
c4e76638 1889 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
4da46e1e
OM
1890 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1891 intel_logical_ring_emit(ringbuf, MI_NOOP);
7c17d377
CW
1892 return intel_logical_ring_advance_and_submit(request);
1893}
4da46e1e 1894
7c17d377
CW
1895static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1896{
1897 struct intel_ringbuffer *ringbuf = request->ringbuf;
1898 int ret;
53292cdb 1899
7c17d377
CW
1900 ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
1901 if (ret)
1902 return ret;
1903
1904	/* w/a: for post-sync ops following a GPGPU operation we
1905 * need a prior CS_STALL, which is emitted by the flush
1906 * following the batch.
1907 */
1908 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
1909 intel_logical_ring_emit(ringbuf,
1910 (PIPE_CONTROL_GLOBAL_GTT_IVB |
1911 PIPE_CONTROL_CS_STALL |
1912 PIPE_CONTROL_QW_WRITE));
1913 intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
1914 intel_logical_ring_emit(ringbuf, 0);
1915 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
1916 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1917 return intel_logical_ring_advance_and_submit(request);
4da46e1e
OM
1918}
1919
be01363f 1920static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
cef437ad 1921{
cef437ad 1922 struct render_state so;
cef437ad
DL
1923 int ret;
1924
be01363f 1925 ret = i915_gem_render_state_prepare(req->ring, &so);
cef437ad
DL
1926 if (ret)
1927 return ret;
1928
1929 if (so.rodata == NULL)
1930 return 0;
1931
be795fc1 1932 ret = req->ring->emit_bb_start(req, so.ggtt_offset,
be01363f 1933 I915_DISPATCH_SECURE);
cef437ad
DL
1934 if (ret)
1935 goto out;
1936
84e81020
AS
1937 ret = req->ring->emit_bb_start(req,
1938 (so.ggtt_offset + so.aux_batch_offset),
1939 I915_DISPATCH_SECURE);
1940 if (ret)
1941 goto out;
1942
b2af0376 1943 i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
cef437ad 1944
cef437ad
DL
1945out:
1946 i915_gem_render_state_fini(&so);
1947 return ret;
1948}
1949
8753181e 1950static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
e7778be1
TD
1951{
1952 int ret;
1953
e2be4faf 1954 ret = intel_logical_ring_workarounds_emit(req);
e7778be1
TD
1955 if (ret)
1956 return ret;
1957
3bbaba0c
PA
1958 ret = intel_rcs_context_init_mocs(req);
1959 /*
1960	 * Failing to program the MOCS is non-fatal. The system will not
1961 * run at peak performance. So generate an error and carry on.
1962 */
1963 if (ret)
1964 DRM_ERROR("MOCS failed to program: expect performance issues.\n");
1965
be01363f 1966 return intel_lr_context_render_state_init(req);
e7778be1
TD
1967}
1968
73e4d07f
OM
1969/**
1970 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
1971 *
1972 * @ring: Engine Command Streamer.
1973 *
1974 */
454afebd
OM
1975void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
1976{
6402c330 1977 struct drm_i915_private *dev_priv;
9832b9da 1978
48d82387
OM
1979 if (!intel_ring_initialized(ring))
1980 return;
1981
6402c330
JH
1982 dev_priv = ring->dev->dev_private;
1983
b0366a54
DG
1984 if (ring->buffer) {
1985 intel_logical_ring_stop(ring);
1986 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
1987 }
48d82387
OM
1988
1989 if (ring->cleanup)
1990 ring->cleanup(ring);
1991
1992 i915_cmd_parser_fini_ring(ring);
06fbca71 1993 i915_gem_batch_pool_fini(&ring->batch_pool);
48d82387
OM
1994
1995 if (ring->status_page.obj) {
1996 kunmap(sg_page(ring->status_page.obj->pages->sgl));
1997 ring->status_page.obj = NULL;
1998 }
17ee950d 1999
ca82580c
TU
2000 ring->disable_lite_restore_wa = false;
2001 ring->ctx_desc_template = 0;
2002
17ee950d 2003 lrc_destroy_wa_ctx_obj(ring);
b0366a54 2004 ring->dev = NULL;
454afebd
OM
2005}
2006
c9cacf93
TU
2007static void
2008logical_ring_default_vfuncs(struct drm_device *dev,
2009 struct intel_engine_cs *ring)
2010{
2011	/* Default vfuncs which can be overridden by each engine. */
2012 ring->init_hw = gen8_init_common_ring;
2013 ring->emit_request = gen8_emit_request;
2014 ring->emit_flush = gen8_emit_flush;
2015 ring->irq_get = gen8_logical_ring_get_irq;
2016 ring->irq_put = gen8_logical_ring_put_irq;
2017 ring->emit_bb_start = gen8_emit_bb_start;
2018 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
2019 ring->get_seqno = bxt_a_get_seqno;
2020 ring->set_seqno = bxt_a_set_seqno;
2021 } else {
2022 ring->get_seqno = gen8_get_seqno;
2023 ring->set_seqno = gen8_set_seqno;
2024 }
2025}
2026
d9f3af96
TU
2027static inline void
2028logical_ring_default_irqs(struct intel_engine_cs *ring, unsigned shift)
2029{
2030 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
2031 ring->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
2032}
2033
c9cacf93
TU
2034static int
2035logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
454afebd 2036{
ed54c1a1 2037 struct intel_context *dctx = to_i915(dev)->kernel_context;
48d82387 2038 int ret;
48d82387
OM
2039
2040 /* Intentionally left blank. */
2041 ring->buffer = NULL;
2042
2043 ring->dev = dev;
2044 INIT_LIST_HEAD(&ring->active_list);
2045 INIT_LIST_HEAD(&ring->request_list);
06fbca71 2046 i915_gem_batch_pool_init(dev, &ring->batch_pool);
48d82387
OM
2047 init_waitqueue_head(&ring->irq_queue);
2048
608c1a52 2049 INIT_LIST_HEAD(&ring->buffers);
acdd884a 2050 INIT_LIST_HEAD(&ring->execlist_queue);
c86ee3a9 2051 INIT_LIST_HEAD(&ring->execlist_retired_req_list);
acdd884a
MT
2052 spin_lock_init(&ring->execlist_lock);
2053
ca82580c
TU
2054 logical_ring_init_platform_invariants(ring);
2055
48d82387
OM
2056 ret = i915_cmd_parser_init_ring(ring);
2057 if (ret)
b0366a54 2058 goto error;
48d82387 2059
ed54c1a1 2060 ret = intel_lr_context_deferred_alloc(dctx, ring);
e84fe803 2061 if (ret)
b0366a54 2062 goto error;
e84fe803
NH
2063
2064 /* As this is the default context, always pin it */
ed54c1a1 2065 ret = intel_lr_context_do_pin(ring, dctx);
e84fe803
NH
2066 if (ret) {
2067 DRM_ERROR(
2068 "Failed to pin and map ringbuffer %s: %d\n",
2069 ring->name, ret);
b0366a54 2070 goto error;
e84fe803 2071 }
564ddb2f 2072
b0366a54
DG
2073 return 0;
2074
2075error:
2076 intel_logical_ring_cleanup(ring);
564ddb2f 2077 return ret;
454afebd
OM
2078}
2079
2080static int logical_render_ring_init(struct drm_device *dev)
2081{
2082 struct drm_i915_private *dev_priv = dev->dev_private;
2083 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
99be1dfe 2084 int ret;
454afebd
OM
2085
2086 ring->name = "render ring";
2087 ring->id = RCS;
2088 ring->mmio_base = RENDER_RING_BASE;
d9f3af96
TU
2089
2090 logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT);
73d477f6
OM
2091 if (HAS_L3_DPF(dev))
2092 ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
454afebd 2093
c9cacf93
TU
2094 logical_ring_default_vfuncs(dev, ring);
2095
2096 /* Override some for render ring. */
82ef822e
DL
2097 if (INTEL_INFO(dev)->gen >= 9)
2098 ring->init_hw = gen9_init_render_ring;
2099 else
2100 ring->init_hw = gen8_init_render_ring;
e7778be1 2101 ring->init_context = gen8_init_rcs_context;
9b1136d5 2102 ring->cleanup = intel_fini_pipe_control;
4712274c 2103 ring->emit_flush = gen8_emit_flush_render;
7c17d377 2104 ring->emit_request = gen8_emit_request_render;
9b1136d5 2105
99be1dfe 2106 ring->dev = dev;
c4db7599
AS
2107
2108 ret = intel_init_pipe_control(ring);
99be1dfe
DV
2109 if (ret)
2110 return ret;
2111
17ee950d
AS
2112 ret = intel_init_workaround_bb(ring);
2113 if (ret) {
2114 /*
2115 * We continue even if we fail to initialize WA batch
2116 * because we only expect rare glitches but nothing
2117		 * critical enough to prevent us from using the GPU.
2118 */
2119 DRM_ERROR("WA batch buffer initialization failed: %d\n",
2120 ret);
2121 }
2122
c4db7599
AS
2123 ret = logical_ring_init(dev, ring);
2124 if (ret) {
17ee950d 2125 lrc_destroy_wa_ctx_obj(ring);
c4db7599 2126 }
17ee950d
AS
2127
2128 return ret;
454afebd
OM
2129}
2130
2131static int logical_bsd_ring_init(struct drm_device *dev)
2132{
2133 struct drm_i915_private *dev_priv = dev->dev_private;
2134 struct intel_engine_cs *ring = &dev_priv->ring[VCS];
2135
2136 ring->name = "bsd ring";
2137 ring->id = VCS;
2138 ring->mmio_base = GEN6_BSD_RING_BASE;
454afebd 2139
d9f3af96 2140 logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT);
c9cacf93 2141 logical_ring_default_vfuncs(dev, ring);
9b1136d5 2142
454afebd
OM
2143 return logical_ring_init(dev, ring);
2144}
2145
2146static int logical_bsd2_ring_init(struct drm_device *dev)
2147{
2148 struct drm_i915_private *dev_priv = dev->dev_private;
2149 struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
2150
ec8a9776 2151 ring->name = "bsd2 ring";
454afebd
OM
2152 ring->id = VCS2;
2153 ring->mmio_base = GEN8_BSD2_RING_BASE;
454afebd 2154
d9f3af96 2155 logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT);
c9cacf93 2156 logical_ring_default_vfuncs(dev, ring);
9b1136d5 2157
454afebd
OM
2158 return logical_ring_init(dev, ring);
2159}
2160
2161static int logical_blt_ring_init(struct drm_device *dev)
2162{
2163 struct drm_i915_private *dev_priv = dev->dev_private;
2164 struct intel_engine_cs *ring = &dev_priv->ring[BCS];
2165
2166 ring->name = "blitter ring";
2167 ring->id = BCS;
2168 ring->mmio_base = BLT_RING_BASE;
454afebd 2169
d9f3af96 2170 logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT);
c9cacf93 2171 logical_ring_default_vfuncs(dev, ring);
9b1136d5 2172
454afebd
OM
2173 return logical_ring_init(dev, ring);
2174}
2175
2176static int logical_vebox_ring_init(struct drm_device *dev)
2177{
2178 struct drm_i915_private *dev_priv = dev->dev_private;
2179 struct intel_engine_cs *ring = &dev_priv->ring[VECS];
2180
2181 ring->name = "video enhancement ring";
2182 ring->id = VECS;
2183 ring->mmio_base = VEBOX_RING_BASE;
454afebd 2184
d9f3af96 2185 logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT);
c9cacf93 2186 logical_ring_default_vfuncs(dev, ring);
9b1136d5 2187
454afebd
OM
2188 return logical_ring_init(dev, ring);
2189}
2190
73e4d07f
OM
2191/**
2192 * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
2193 * @dev: DRM device.
2194 *
2195 * This function inits the engines for an Execlists submission style (the equivalent in the
2196 * legacy ringbuffer submission world would be i915_gem_init_rings). It does this only for
2197 * those engines that are present in the hardware.
2198 *
2199 * Return: non-zero if the initialization failed.
2200 */
454afebd
OM
2201int intel_logical_rings_init(struct drm_device *dev)
2202{
2203 struct drm_i915_private *dev_priv = dev->dev_private;
2204 int ret;
2205
2206 ret = logical_render_ring_init(dev);
2207 if (ret)
2208 return ret;
2209
2210 if (HAS_BSD(dev)) {
2211 ret = logical_bsd_ring_init(dev);
2212 if (ret)
2213 goto cleanup_render_ring;
2214 }
2215
2216 if (HAS_BLT(dev)) {
2217 ret = logical_blt_ring_init(dev);
2218 if (ret)
2219 goto cleanup_bsd_ring;
2220 }
2221
2222 if (HAS_VEBOX(dev)) {
2223 ret = logical_vebox_ring_init(dev);
2224 if (ret)
2225 goto cleanup_blt_ring;
2226 }
2227
2228 if (HAS_BSD2(dev)) {
2229 ret = logical_bsd2_ring_init(dev);
2230 if (ret)
2231 goto cleanup_vebox_ring;
2232 }
2233
454afebd
OM
2234 return 0;
2235
454afebd
OM
2236cleanup_vebox_ring:
2237 intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
2238cleanup_blt_ring:
2239 intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
2240cleanup_bsd_ring:
2241 intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
2242cleanup_render_ring:
2243 intel_logical_ring_cleanup(&dev_priv->ring[RCS]);
2244
2245 return ret;
2246}
2247
0cea6502
JM
2248static u32
2249make_rpcs(struct drm_device *dev)
2250{
2251 u32 rpcs = 0;
2252
2253 /*
2254 * No explicit RPCS request is needed to ensure full
2255 * slice/subslice/EU enablement prior to Gen9.
2256 */
2257 if (INTEL_INFO(dev)->gen < 9)
2258 return 0;
2259
2260 /*
2261 * Starting in Gen9, render power gating can leave
2262 * slice/subslice/EU in a partially enabled state. We
2263 * must make an explicit request through RPCS for full
2264 * enablement.
2265 */
2266 if (INTEL_INFO(dev)->has_slice_pg) {
2267 rpcs |= GEN8_RPCS_S_CNT_ENABLE;
2268 rpcs |= INTEL_INFO(dev)->slice_total <<
2269 GEN8_RPCS_S_CNT_SHIFT;
2270 rpcs |= GEN8_RPCS_ENABLE;
2271 }
2272
2273 if (INTEL_INFO(dev)->has_subslice_pg) {
2274 rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
2275 rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
2276 GEN8_RPCS_SS_CNT_SHIFT;
2277 rpcs |= GEN8_RPCS_ENABLE;
2278 }
2279
2280 if (INTEL_INFO(dev)->has_eu_pg) {
2281 rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
2282 GEN8_RPCS_EU_MIN_SHIFT;
2283 rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
2284 GEN8_RPCS_EU_MAX_SHIFT;
2285 rpcs |= GEN8_RPCS_ENABLE;
2286 }
2287
2288 return rpcs;
2289}
2290
8670d6f9
OM
2291static int
2292populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
2293 struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
2294{
2d965536
TD
2295 struct drm_device *dev = ring->dev;
2296 struct drm_i915_private *dev_priv = dev->dev_private;
ae6c4806 2297 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
8670d6f9
OM
2298 struct page *page;
2299 uint32_t *reg_state;
2300 int ret;
2301
2d965536
TD
2302 if (!ppgtt)
2303 ppgtt = dev_priv->mm.aliasing_ppgtt;
2304
8670d6f9
OM
2305 ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
2306 if (ret) {
2307 DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
2308 return ret;
2309 }
2310
2311 ret = i915_gem_object_get_pages(ctx_obj);
2312 if (ret) {
2313 DRM_DEBUG_DRIVER("Could not get object pages\n");
2314 return ret;
2315 }
2316
2317 i915_gem_object_pin_pages(ctx_obj);
2318
2319 /* The second page of the context object contains some fields which must
2320 * be set up prior to the first execution. */
033908ae 2321 page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
8670d6f9
OM
2322 reg_state = kmap_atomic(page);
2323
2324 /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
2325 * commands followed by (reg, value) pairs. The values we are setting here are
2326 * only for the first context restore: on a subsequent save, the GPU will
2327 * recreate this batchbuffer with new values (including all the missing
2328 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
0d925ea0
VS
2329 reg_state[CTX_LRI_HEADER_0] =
2330 MI_LOAD_REGISTER_IMM(ring->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
2331 ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring),
2332 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
2333 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
2334 CTX_CTRL_RS_CTX_ENABLE));
2335 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0);
2336 ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0);
7ba717cf
TD
2337 /* Ring buffer start address is not known until the buffer is pinned.
2338 * It is written to the context image in execlists_update_context()
2339 */
0d925ea0
VS
2340 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START, RING_START(ring->mmio_base), 0);
2341 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, RING_CTL(ring->mmio_base),
2342 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
2343 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, RING_BBADDR_UDW(ring->mmio_base), 0);
2344 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, RING_BBADDR(ring->mmio_base), 0);
2345 ASSIGN_CTX_REG(reg_state, CTX_BB_STATE, RING_BBSTATE(ring->mmio_base),
2346 RING_BB_PPGTT);
2347 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(ring->mmio_base), 0);
2348 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(ring->mmio_base), 0);
2349 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE, RING_SBBSTATE(ring->mmio_base), 0);
8670d6f9 2350 if (ring->id == RCS) {
0d925ea0
VS
2351 ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(ring->mmio_base), 0);
2352 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(ring->mmio_base), 0);
2353 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, RING_INDIRECT_CTX_OFFSET(ring->mmio_base), 0);
17ee950d
AS
2354 if (ring->wa_ctx.obj) {
2355 struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
2356 uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
2357
2358 reg_state[CTX_RCS_INDIRECT_CTX+1] =
2359 (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
2360 (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
2361
2362 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
2363 CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT << 6;
2364
2365 reg_state[CTX_BB_PER_CTX_PTR+1] =
2366 (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
2367 0x01;
2368 }
8670d6f9 2369 }
0d925ea0
VS
2370 reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
2371 ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(ring->mmio_base), 0);
2372	/* PDP values will be assigned later if needed */
2373 ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(ring, 3), 0);
2374 ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(ring, 3), 0);
2375 ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(ring, 2), 0);
2376 ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(ring, 2), 0);
2377 ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(ring, 1), 0);
2378 ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(ring, 1), 0);
2379 ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(ring, 0), 0);
2380 ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(ring, 0), 0);
d7b2633d 2381
2dba3239
MT
2382 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
2383 /* 64b PPGTT (48bit canonical)
2384 * PDP0_DESCRIPTOR contains the base address to PML4 and
2385 * other PDP Descriptors are ignored.
2386 */
2387 ASSIGN_CTX_PML4(ppgtt, reg_state);
2388 } else {
2389 /* 32b PPGTT
2390 * PDP*_DESCRIPTOR contains the base address of space supported.
2391 * With dynamic page allocation, PDPs may not be allocated at
2392 * this point. Point the unallocated PDPs to the scratch page
2393 */
2394 ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
2395 ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
2396 ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
2397 ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
2398 }
2399
8670d6f9
OM
2400 if (ring->id == RCS) {
2401 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
0d925ea0
VS
2402 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
2403 make_rpcs(dev));
8670d6f9
OM
2404 }
2405
2406 kunmap_atomic(reg_state);
8670d6f9
OM
2407 i915_gem_object_unpin_pages(ctx_obj);
2408
2409 return 0;
2410}
2411
73e4d07f
OM
2412/**
2413 * intel_lr_context_free() - free the LRC specific bits of a context
2414 * @ctx: the LR context to free.
2415 *
2416 * The real context freeing is done in i915_gem_context_free: this only
2417 * takes care of the bits that are LRC related: the per-engine backing
2418 * objects and the logical ringbuffer.
2419 */
ede7d42b
OM
2420void intel_lr_context_free(struct intel_context *ctx)
2421{
8c857917
OM
2422 int i;
2423
e28e404c
DG
2424 for (i = I915_NUM_RINGS; --i >= 0; ) {
2425 struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
8c857917 2426 struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
84c2377f 2427
e28e404c
DG
2428 if (!ctx_obj)
2429 continue;
dcb4c12a 2430
e28e404c
DG
2431 if (ctx == ctx->i915->kernel_context) {
2432 intel_unpin_ringbuffer_obj(ringbuf);
2433 i915_gem_object_ggtt_unpin(ctx_obj);
8c857917 2434 }
e28e404c
DG
2435
2436 WARN_ON(ctx->engine[i].pin_count);
2437 intel_ringbuffer_free(ringbuf);
2438 drm_gem_object_unreference(&ctx_obj->base);
8c857917
OM
2439 }
2440}
2441
c5d46ee2
DG
2442/**
2443 * intel_lr_context_size() - return the size of the context for an engine
2444 * @ring: which engine to find the context size for
2445 *
2446 * Each engine may require a different amount of space for a context image,
2447 * so when allocating (or copying) an image, this function can be used to
2448 * find the right size for the specific engine.
2449 *
2450 * Return: size (in bytes) of an engine-specific context image
2451 *
2452 * Note: this size includes the HWSP, which is part of the context image
2453 * in LRC mode, but does not include the "shared data page" used with
2454 * GuC submission. The caller should account for this if using the GuC.
2455 */
95a66f7e 2456uint32_t intel_lr_context_size(struct intel_engine_cs *ring)
8c857917
OM
2457{
2458 int ret = 0;
2459
468c6816 2460 WARN_ON(INTEL_INFO(ring->dev)->gen < 8);
8c857917
OM
2461
2462 switch (ring->id) {
2463 case RCS:
468c6816
MN
2464 if (INTEL_INFO(ring->dev)->gen >= 9)
2465 ret = GEN9_LR_CONTEXT_RENDER_SIZE;
2466 else
2467 ret = GEN8_LR_CONTEXT_RENDER_SIZE;
8c857917
OM
2468 break;
2469 case VCS:
2470 case BCS:
2471 case VECS:
2472 case VCS2:
2473 ret = GEN8_LR_CONTEXT_OTHER_SIZE;
2474 break;
2475 }
2476
2477 return ret;
ede7d42b
OM
2478}
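/*
 * Callers size the backing object from this value; see
 * intel_lr_context_deferred_alloc() below, which rounds it up to 4096
 * bytes and then adds LRC_PPHWSP_PN * PAGE_SIZE for the page shared
 * between the driver and the GuC before allocating the object.
 */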
2479
70b0ea86 2480static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
1df06b75
TD
2481 struct drm_i915_gem_object *default_ctx_obj)
2482{
2483 struct drm_i915_private *dev_priv = ring->dev->dev_private;
d1675198 2484 struct page *page;
1df06b75 2485
d1675198
AD
2486 /* The HWSP is part of the default context object in LRC mode. */
2487 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
2488 + LRC_PPHWSP_PN * PAGE_SIZE;
2489 page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
2490 ring->status_page.page_addr = kmap(page);
1df06b75
TD
2491 ring->status_page.obj = default_ctx_obj;
2492
2493 I915_WRITE(RING_HWS_PGA(ring->mmio_base),
2494 (u32)ring->status_page.gfx_addr);
2495 POSTING_READ(RING_HWS_PGA(ring->mmio_base));
1df06b75
TD
2496}
2497
73e4d07f 2498/**
e84fe803 2499 * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
73e4d07f
OM
2500 * @ctx: LR context to create.
2501 * @ring: engine to be used with the context.
2502 *
2503 * This function can be called more than once, with different engines, if we plan
2504 * to use the context with them. The context backing objects and the ringbuffers
2505 * (especially the ringbuffer backing objects) use up a lot of memory, and that's why
2506 * the creation is a deferred call: it's better to make sure first that we need to use
2507 * a given ring with the context.
2508 *
32197aab 2509 * Return: non-zero on error.
73e4d07f 2510 */
e84fe803
NH
2511
2512int intel_lr_context_deferred_alloc(struct intel_context *ctx,
e28e404c 2513 struct intel_engine_cs *ring)
ede7d42b 2514{
8c857917
OM
2515 struct drm_device *dev = ring->dev;
2516 struct drm_i915_gem_object *ctx_obj;
2517 uint32_t context_size;
84c2377f 2518 struct intel_ringbuffer *ringbuf;
8c857917
OM
2519 int ret;
2520
ede7d42b 2521 WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
bfc882b4 2522 WARN_ON(ctx->engine[ring->id].state);
ede7d42b 2523
95a66f7e 2524 context_size = round_up(intel_lr_context_size(ring), 4096);
8c857917 2525
d1675198
AD
2526	/* One extra page for the data shared between driver and GuC */
2527 context_size += PAGE_SIZE * LRC_PPHWSP_PN;
2528
149c86e7 2529 ctx_obj = i915_gem_alloc_object(dev, context_size);
3126a660
DC
2530 if (!ctx_obj) {
2531 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
2532 return -ENOMEM;
8c857917
OM
2533 }
2534
01101fa7
CW
2535 ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE);
2536 if (IS_ERR(ringbuf)) {
2537 ret = PTR_ERR(ringbuf);
e84fe803 2538 goto error_deref_obj;
8670d6f9
OM
2539 }
2540
2541 ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
2542 if (ret) {
2543 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
e84fe803 2544 goto error_ringbuf;
84c2377f
OM
2545 }
2546
2547 ctx->engine[ring->id].ringbuf = ringbuf;
8c857917 2548 ctx->engine[ring->id].state = ctx_obj;
ede7d42b 2549
ed54c1a1 2550 if (ctx != ctx->i915->kernel_context && ring->init_context) {
e84fe803 2551 struct drm_i915_gem_request *req;
76c39168 2552
26827088
DG
2553 req = i915_gem_request_alloc(ring, ctx);
2554 if (IS_ERR(req)) {
2555 ret = PTR_ERR(req);
2556 DRM_ERROR("ring create req: %d\n", ret);
e84fe803 2557 goto error_ringbuf;
771b9a53
MT
2558 }
2559
e84fe803
NH
2560 ret = ring->init_context(req);
2561 if (ret) {
2562 DRM_ERROR("ring init context: %d\n",
2563 ret);
2564 i915_gem_request_cancel(req);
2565 goto error_ringbuf;
2566 }
2567 i915_add_request_no_flush(req);
564ddb2f 2568 }
ede7d42b 2569 return 0;
8670d6f9 2570
01101fa7
CW
2571error_ringbuf:
2572 intel_ringbuffer_free(ringbuf);
e84fe803 2573error_deref_obj:
8670d6f9 2574 drm_gem_object_unreference(&ctx_obj->base);
e84fe803
NH
2575 ctx->engine[ring->id].ringbuf = NULL;
2576 ctx->engine[ring->id].state = NULL;
8670d6f9 2577 return ret;
ede7d42b 2578}
3e5b6f05
TD
2579
2580void intel_lr_context_reset(struct drm_device *dev,
2581 struct intel_context *ctx)
2582{
2583 struct drm_i915_private *dev_priv = dev->dev_private;
2584 struct intel_engine_cs *ring;
2585 int i;
2586
2587 for_each_ring(ring, dev_priv, i) {
2588 struct drm_i915_gem_object *ctx_obj =
2589 ctx->engine[ring->id].state;
2590 struct intel_ringbuffer *ringbuf =
2591 ctx->engine[ring->id].ringbuf;
2592 uint32_t *reg_state;
2593 struct page *page;
2594
2595 if (!ctx_obj)
2596 continue;
2597
2598 if (i915_gem_object_get_pages(ctx_obj)) {
2599 WARN(1, "Failed get_pages for context obj\n");
2600 continue;
2601 }
033908ae 2602 page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
3e5b6f05
TD
2603 reg_state = kmap_atomic(page);
2604
2605 reg_state[CTX_RING_HEAD+1] = 0;
2606 reg_state[CTX_RING_TAIL+1] = 0;
2607
2608 kunmap_atomic(reg_state);
2609
2610 ringbuf->head = 0;
2611 ringbuf->tail = 0;
2612 }
2613}