drm/i915: Convert wait_for(I915_READ(reg)) to intel_wait_for_register()
drivers/gpu/drm/i915/intel_ringbuffer.c
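For reference, the conversion named in the commit subject replaces open-coded register polling of the form wait_for(I915_READ(reg) ...) with the intel_wait_for_register() helper; the SyncFlush wait in intel_ring_setup_status_page() below shows the converted form. A minimal sketch of the pattern, using a hypothetical SOME_REG/SOME_MASK rather than a register from this file:

	/* before: open-coded poll of a register, 1000 ms timeout */
	if (wait_for((I915_READ(SOME_REG) & SOME_MASK) == 0, 1000))
		DRM_ERROR("timed out waiting for SOME_REG\n");

	/* after: the helper performs the read/mask/compare loop */
	if (intel_wait_for_register(dev_priv,
				    SOME_REG, SOME_MASK, 0,
				    1000))
		DRM_ERROR("timed out waiting for SOME_REG\n");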
1/*
2 * Copyright © 2008-2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Zou Nan hai <nanhai.zou@intel.com>
26 * Xiang Hai hao<haihao.xiang@intel.com>
27 *
28 */
29
a4d8a0fe 30#include <linux/log2.h>
760285e7 31#include <drm/drmP.h>
62fdfeaf 32#include "i915_drv.h"
760285e7 33#include <drm/i915_drm.h>
62fdfeaf 34#include "i915_trace.h"
881f47b6 35#include "intel_drv.h"
62fdfeaf 36
37/* Rough estimate of the typical request size, performing a flush,
38 * set-context and then emitting the batch.
39 */
40#define LEGACY_REQUEST_SIZE 200
41
82e104cc 42int __intel_ring_space(int head, int tail, int size)
c7dca47b 43{
44 int space = head - tail;
45 if (space <= 0)
1cf0ba14 46 space += size;
4f54741e 47 return space - I915_RING_FREE_SPACE;
48}
49
50void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
51{
52 if (ringbuf->last_retired_head != -1) {
53 ringbuf->head = ringbuf->last_retired_head;
54 ringbuf->last_retired_head = -1;
55 }
56
57 ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
58 ringbuf->tail, ringbuf->size);
59}
60
117897f4 61bool intel_engine_stopped(struct intel_engine_cs *engine)
09246732 62{
c033666a 63 struct drm_i915_private *dev_priv = engine->i915;
666796da 64 return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
88b4aa87 65}
09246732 66
0bc40be8 67static void __intel_ring_advance(struct intel_engine_cs *engine)
88b4aa87 68{
0bc40be8 69 struct intel_ringbuffer *ringbuf = engine->buffer;
93b0a4e0 70 ringbuf->tail &= ringbuf->size - 1;
117897f4 71 if (intel_engine_stopped(engine))
09246732 72 return;
0bc40be8 73 engine->write_tail(engine, ringbuf->tail);
74}
75
b72f3acb 76static int
a84c3ae1 77gen2_render_ring_flush(struct drm_i915_gem_request *req,
78 u32 invalidate_domains,
79 u32 flush_domains)
80{
4a570db5 81 struct intel_engine_cs *engine = req->engine;
82 u32 cmd;
83 int ret;
84
85 cmd = MI_FLUSH;
31b14c9f 86 if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
87 cmd |= MI_NO_WRITE_FLUSH;
88
89 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
90 cmd |= MI_READ_FLUSH;
91
5fb9de1a 92 ret = intel_ring_begin(req, 2);
93 if (ret)
94 return ret;
95
96 intel_ring_emit(engine, cmd);
97 intel_ring_emit(engine, MI_NOOP);
98 intel_ring_advance(engine);
99
100 return 0;
101}
102
103static int
a84c3ae1 104gen4_render_ring_flush(struct drm_i915_gem_request *req,
105 u32 invalidate_domains,
106 u32 flush_domains)
62fdfeaf 107{
4a570db5 108 struct intel_engine_cs *engine = req->engine;
6f392d54 109 u32 cmd;
b72f3acb 110 int ret;
6f392d54 111
112 /*
113 * read/write caches:
114 *
115 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
116 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
117 * also flushed at 2d versus 3d pipeline switches.
118 *
119 * read-only caches:
120 *
121 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
122 * MI_READ_FLUSH is set, and is always flushed on 965.
123 *
124 * I915_GEM_DOMAIN_COMMAND may not exist?
125 *
126 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
127 * invalidated when MI_EXE_FLUSH is set.
128 *
129 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
130 * invalidated with every MI_FLUSH.
131 *
132 * TLBs:
133 *
134 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
135 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
136 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
137 * are flushed at any MI_FLUSH.
138 */
139
140 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
46f0f8d1 141 if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
36d527de 142 cmd &= ~MI_NO_WRITE_FLUSH;
143 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
144 cmd |= MI_EXE_FLUSH;
62fdfeaf 145
36d527de 146 if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
c033666a 147 (IS_G4X(req->i915) || IS_GEN5(req->i915)))
36d527de 148 cmd |= MI_INVALIDATE_ISP;
70eac33e 149
5fb9de1a 150 ret = intel_ring_begin(req, 2);
151 if (ret)
152 return ret;
b72f3acb 153
154 intel_ring_emit(engine, cmd);
155 intel_ring_emit(engine, MI_NOOP);
156 intel_ring_advance(engine);
157
158 return 0;
159}
160
161/**
162 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
163 * implementing two workarounds on gen6. From section 1.4.7.1
164 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
165 *
166 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
167 * produced by non-pipelined state commands), software needs to first
168 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
169 * 0.
170 *
171 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
172 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
173 *
174 * And the workaround for these two requires this workaround first:
175 *
176 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
177 * BEFORE the pipe-control with a post-sync op and no write-cache
178 * flushes.
179 *
180 * And this last workaround is tricky because of the requirements on
181 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
182 * volume 2 part 1:
183 *
184 * "1 of the following must also be set:
185 * - Render Target Cache Flush Enable ([12] of DW1)
186 * - Depth Cache Flush Enable ([0] of DW1)
187 * - Stall at Pixel Scoreboard ([1] of DW1)
188 * - Depth Stall ([13] of DW1)
189 * - Post-Sync Operation ([13] of DW1)
190 * - Notify Enable ([8] of DW1)"
191 *
192 * The cache flushes require the workaround flush that triggered this
193 * one, so we can't use it. Depth stall would trigger the same.
194 * Post-sync nonzero is what triggered this second workaround, so we
195 * can't use that one either. Notify enable is IRQs, which aren't
196 * really our business. That leaves only stall at scoreboard.
197 */
198static int
f2cf1fcc 199intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
8d315287 200{
4a570db5 201 struct intel_engine_cs *engine = req->engine;
e2f80391 202 u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
203 int ret;
204
5fb9de1a 205 ret = intel_ring_begin(req, 6);
206 if (ret)
207 return ret;
208
209 intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
210 intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
8d315287 211 PIPE_CONTROL_STALL_AT_SCOREBOARD);
212 intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
213 intel_ring_emit(engine, 0); /* low dword */
214 intel_ring_emit(engine, 0); /* high dword */
215 intel_ring_emit(engine, MI_NOOP);
216 intel_ring_advance(engine);
8d315287 217
5fb9de1a 218 ret = intel_ring_begin(req, 6);
219 if (ret)
220 return ret;
221
222 intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
223 intel_ring_emit(engine, PIPE_CONTROL_QW_WRITE);
224 intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
225 intel_ring_emit(engine, 0);
226 intel_ring_emit(engine, 0);
227 intel_ring_emit(engine, MI_NOOP);
228 intel_ring_advance(engine);
229
230 return 0;
231}
232
233static int
234gen6_render_ring_flush(struct drm_i915_gem_request *req,
235 u32 invalidate_domains, u32 flush_domains)
8d315287 236{
4a570db5 237 struct intel_engine_cs *engine = req->engine;
8d315287 238 u32 flags = 0;
e2f80391 239 u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
240 int ret;
241
b3111509 242 /* Force SNB workarounds for PIPE_CONTROL flushes */
f2cf1fcc 243 ret = intel_emit_post_sync_nonzero_flush(req);
244 if (ret)
245 return ret;
246
247 /* Just flush everything. Experiments have shown that reducing the
248 * number of bits based on the write domains has little performance
249 * impact.
250 */
251 if (flush_domains) {
252 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
253 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
254 /*
255 * Ensure that any following seqno writes only happen
256 * when the render cache is indeed flushed.
257 */
97f209bc 258 flags |= PIPE_CONTROL_CS_STALL;
259 }
260 if (invalidate_domains) {
261 flags |= PIPE_CONTROL_TLB_INVALIDATE;
262 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
263 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
264 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
265 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
266 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
267 /*
268 * TLB invalidate requires a post-sync write.
269 */
3ac78313 270 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
7d54a904 271 }
8d315287 272
5fb9de1a 273 ret = intel_ring_begin(req, 4);
274 if (ret)
275 return ret;
276
277 intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
278 intel_ring_emit(engine, flags);
279 intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
280 intel_ring_emit(engine, 0);
281 intel_ring_advance(engine);
282
283 return 0;
284}
285
f3987631 286static int
f2cf1fcc 287gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
f3987631 288{
4a570db5 289 struct intel_engine_cs *engine = req->engine;
290 int ret;
291
5fb9de1a 292 ret = intel_ring_begin(req, 4);
293 if (ret)
294 return ret;
295
296 intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
297 intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
f3987631 298 PIPE_CONTROL_STALL_AT_SCOREBOARD);
299 intel_ring_emit(engine, 0);
300 intel_ring_emit(engine, 0);
301 intel_ring_advance(engine);
302
303 return 0;
304}
305
4772eaeb 306static int
a84c3ae1 307gen7_render_ring_flush(struct drm_i915_gem_request *req,
308 u32 invalidate_domains, u32 flush_domains)
309{
4a570db5 310 struct intel_engine_cs *engine = req->engine;
4772eaeb 311 u32 flags = 0;
e2f80391 312 u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
313 int ret;
314
315 /*
316 * Ensure that any following seqno writes only happen when the render
317 * cache is indeed flushed.
318 *
319 * Workaround: 4th PIPE_CONTROL command (except the ones with only
320 * read-cache invalidate bits set) must have the CS_STALL bit set. We
321 * don't try to be clever and just set it unconditionally.
322 */
323 flags |= PIPE_CONTROL_CS_STALL;
324
325 /* Just flush everything. Experiments have shown that reducing the
326 * number of bits based on the write domains has little performance
327 * impact.
328 */
329 if (flush_domains) {
330 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
331 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
965fd602 332 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
40a24488 333 flags |= PIPE_CONTROL_FLUSH_ENABLE;
334 }
335 if (invalidate_domains) {
336 flags |= PIPE_CONTROL_TLB_INVALIDATE;
337 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
338 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
339 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
340 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
341 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
148b83d0 342 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
343 /*
344 * TLB invalidate requires a post-sync write.
345 */
346 flags |= PIPE_CONTROL_QW_WRITE;
b9e1faa7 347 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
f3987631 348
349 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
350
351 /* Workaround: we must issue a pipe_control with CS-stall bit
352 * set before a pipe_control command that has the state cache
353 * invalidate bit set. */
f2cf1fcc 354 gen7_render_ring_cs_stall_wa(req);
355 }
356
5fb9de1a 357 ret = intel_ring_begin(req, 4);
358 if (ret)
359 return ret;
360
361 intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
362 intel_ring_emit(engine, flags);
363 intel_ring_emit(engine, scratch_addr);
364 intel_ring_emit(engine, 0);
365 intel_ring_advance(engine);
366
367 return 0;
368}
369
884ceace 370static int
f2cf1fcc 371gen8_emit_pipe_control(struct drm_i915_gem_request *req,
372 u32 flags, u32 scratch_addr)
373{
4a570db5 374 struct intel_engine_cs *engine = req->engine;
375 int ret;
376
5fb9de1a 377 ret = intel_ring_begin(req, 6);
378 if (ret)
379 return ret;
380
381 intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
382 intel_ring_emit(engine, flags);
383 intel_ring_emit(engine, scratch_addr);
384 intel_ring_emit(engine, 0);
385 intel_ring_emit(engine, 0);
386 intel_ring_emit(engine, 0);
387 intel_ring_advance(engine);
388
389 return 0;
390}
391
a5f3d68e 392static int
a84c3ae1 393gen8_render_ring_flush(struct drm_i915_gem_request *req,
394 u32 invalidate_domains, u32 flush_domains)
395{
396 u32 flags = 0;
4a570db5 397 u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
02c9f7e3 398 int ret;
399
400 flags |= PIPE_CONTROL_CS_STALL;
401
402 if (flush_domains) {
403 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
404 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
965fd602 405 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
40a24488 406 flags |= PIPE_CONTROL_FLUSH_ENABLE;
407 }
408 if (invalidate_domains) {
409 flags |= PIPE_CONTROL_TLB_INVALIDATE;
410 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
411 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
412 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
413 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
414 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
415 flags |= PIPE_CONTROL_QW_WRITE;
416 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
417
418 /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
f2cf1fcc 419 ret = gen8_emit_pipe_control(req,
420 PIPE_CONTROL_CS_STALL |
421 PIPE_CONTROL_STALL_AT_SCOREBOARD,
422 0);
423 if (ret)
424 return ret;
425 }
426
f2cf1fcc 427 return gen8_emit_pipe_control(req, flags, scratch_addr);
428}
429
0bc40be8 430static void ring_write_tail(struct intel_engine_cs *engine,
297b0c5b 431 u32 value)
d46eefa2 432{
c033666a 433 struct drm_i915_private *dev_priv = engine->i915;
0bc40be8 434 I915_WRITE_TAIL(engine, value);
435}
436
0bc40be8 437u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
8187a2b7 438{
c033666a 439 struct drm_i915_private *dev_priv = engine->i915;
50877445 440 u64 acthd;
8187a2b7 441
c033666a 442 if (INTEL_GEN(dev_priv) >= 8)
443 acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
444 RING_ACTHD_UDW(engine->mmio_base));
c033666a 445 else if (INTEL_GEN(dev_priv) >= 4)
0bc40be8 446 acthd = I915_READ(RING_ACTHD(engine->mmio_base));
447 else
448 acthd = I915_READ(ACTHD);
449
450 return acthd;
451}
452
0bc40be8 453static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
035dc1e0 454{
c033666a 455 struct drm_i915_private *dev_priv = engine->i915;
456 u32 addr;
457
458 addr = dev_priv->status_page_dmah->busaddr;
c033666a 459 if (INTEL_GEN(dev_priv) >= 4)
460 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
461 I915_WRITE(HWS_PGA, addr);
462}
463
0bc40be8 464static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
af75f269 465{
c033666a 466 struct drm_i915_private *dev_priv = engine->i915;
f0f59a00 467 i915_reg_t mmio;
468
469 /* The ring status page addresses are no longer next to the rest of
470 * the ring registers as of gen7.
471 */
c033666a 472 if (IS_GEN7(dev_priv)) {
0bc40be8 473 switch (engine->id) {
474 case RCS:
475 mmio = RENDER_HWS_PGA_GEN7;
476 break;
477 case BCS:
478 mmio = BLT_HWS_PGA_GEN7;
479 break;
480 /*
481 * VCS2 actually doesn't exist on Gen7. Only shut up
482 * gcc switch check warning
483 */
484 case VCS2:
485 case VCS:
486 mmio = BSD_HWS_PGA_GEN7;
487 break;
488 case VECS:
489 mmio = VEBOX_HWS_PGA_GEN7;
490 break;
491 }
c033666a 492 } else if (IS_GEN6(dev_priv)) {
0bc40be8 493 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
494 } else {
495 /* XXX: gen8 returns to sanity */
0bc40be8 496 mmio = RING_HWS_PGA(engine->mmio_base);
497 }
498
0bc40be8 499 I915_WRITE(mmio, (u32)engine->status_page.gfx_addr);
500 POSTING_READ(mmio);
501
502 /*
503 * Flush the TLB for this page
504 *
505 * FIXME: These two bits have disappeared on gen8, so a question
506 * arises: do we still need this and if so how should we go about
507 * invalidating the TLB?
508 */
ac657f64 509 if (IS_GEN(dev_priv, 6, 7)) {
0bc40be8 510 i915_reg_t reg = RING_INSTPM(engine->mmio_base);
511
512 /* ring should be idle before issuing a sync flush */
0bc40be8 513 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
514
515 I915_WRITE(reg,
516 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
517 INSTPM_SYNC_FLUSH));
518 if (intel_wait_for_register(dev_priv,
519 reg, INSTPM_SYNC_FLUSH, 0,
520 1000))
af75f269 521 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
0bc40be8 522 engine->name);
523 }
524}
525
0bc40be8 526static bool stop_ring(struct intel_engine_cs *engine)
8187a2b7 527{
c033666a 528 struct drm_i915_private *dev_priv = engine->i915;
8187a2b7 529
c033666a 530 if (!IS_GEN2(dev_priv)) {
531 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
532 if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
533 DRM_ERROR("%s : timed out trying to stop ring\n",
534 engine->name);
535 /* Sometimes we observe that the idle flag is not
536 * set even though the ring is empty. So double
537 * check before giving up.
538 */
0bc40be8 539 if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
9bec9b13 540 return false;
541 }
542 }
b7884eb4 543
544 I915_WRITE_CTL(engine, 0);
545 I915_WRITE_HEAD(engine, 0);
546 engine->write_tail(engine, 0);
8187a2b7 547
c033666a 548 if (!IS_GEN2(dev_priv)) {
549 (void)I915_READ_CTL(engine);
550 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
9991ae78 551 }
a51435a3 552
0bc40be8 553 return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
9991ae78 554}
8187a2b7 555
556void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
557{
558 memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
559}
560
0bc40be8 561static int init_ring_common(struct intel_engine_cs *engine)
9991ae78 562{
c033666a 563 struct drm_i915_private *dev_priv = engine->i915;
0bc40be8 564 struct intel_ringbuffer *ringbuf = engine->buffer;
93b0a4e0 565 struct drm_i915_gem_object *obj = ringbuf->obj;
566 int ret = 0;
567
59bad947 568 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
9991ae78 569
0bc40be8 570 if (!stop_ring(engine)) {
9991ae78 571 /* G45 ring initialization often fails to reset head to zero */
572 DRM_DEBUG_KMS("%s head not reset to zero "
573 "ctl %08x head %08x tail %08x start %08x\n",
574 engine->name,
575 I915_READ_CTL(engine),
576 I915_READ_HEAD(engine),
577 I915_READ_TAIL(engine),
578 I915_READ_START(engine));
8187a2b7 579
0bc40be8 580 if (!stop_ring(engine)) {
581 DRM_ERROR("failed to set %s head to zero "
582 "ctl %08x head %08x tail %08x start %08x\n",
583 engine->name,
584 I915_READ_CTL(engine),
585 I915_READ_HEAD(engine),
586 I915_READ_TAIL(engine),
587 I915_READ_START(engine));
588 ret = -EIO;
589 goto out;
6fd0d56e 590 }
591 }
592
c033666a 593 if (I915_NEED_GFX_HWS(dev_priv))
0bc40be8 594 intel_ring_setup_status_page(engine);
9991ae78 595 else
0bc40be8 596 ring_setup_phys_status_page(engine);
9991ae78 597
ece4a17d 598 /* Enforce ordering by reading HEAD register back */
0bc40be8 599 I915_READ_HEAD(engine);
ece4a17d 600
601 /* Initialize the ring. This must happen _after_ we've cleared the ring
602 * registers with the above sequence (the readback of the HEAD registers
603 * also enforces ordering), otherwise the hw might lose the new ring
604 * register values. */
0bc40be8 605 I915_WRITE_START(engine, i915_gem_obj_ggtt_offset(obj));
606
607 /* WaClearRingBufHeadRegAtInit:ctg,elk */
0bc40be8 608 if (I915_READ_HEAD(engine))
95468892 609 DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
610 engine->name, I915_READ_HEAD(engine));
611 I915_WRITE_HEAD(engine, 0);
612 (void)I915_READ_HEAD(engine);
95468892 613
0bc40be8 614 I915_WRITE_CTL(engine,
93b0a4e0 615 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
5d031e5b 616 | RING_VALID);
8187a2b7 617
8187a2b7 618 /* If the head is still not zero, the ring is dead */
619 if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 &&
620 I915_READ_START(engine) == i915_gem_obj_ggtt_offset(obj) &&
621 (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) {
e74cfed5 622 DRM_ERROR("%s initialization failed "
48e48a0b 623 "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
624 engine->name,
625 I915_READ_CTL(engine),
626 I915_READ_CTL(engine) & RING_VALID,
627 I915_READ_HEAD(engine), I915_READ_TAIL(engine),
628 I915_READ_START(engine),
629 (unsigned long)i915_gem_obj_ggtt_offset(obj));
630 ret = -EIO;
631 goto out;
632 }
633
ebd0fd4b 634 ringbuf->last_retired_head = -1;
635 ringbuf->head = I915_READ_HEAD(engine);
636 ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
ebd0fd4b 637 intel_ring_update_space(ringbuf);
1ec14ad3 638
fc0768ce 639 intel_engine_init_hangcheck(engine);
50f018df 640
b7884eb4 641out:
59bad947 642 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
643
644 return ret;
645}
646
9b1136d5 647void
0bc40be8 648intel_fini_pipe_control(struct intel_engine_cs *engine)
9b1136d5 649{
0bc40be8 650 if (engine->scratch.obj == NULL)
651 return;
652
c033666a 653 if (INTEL_GEN(engine->i915) >= 5) {
654 kunmap(sg_page(engine->scratch.obj->pages->sgl));
655 i915_gem_object_ggtt_unpin(engine->scratch.obj);
656 }
657
658 drm_gem_object_unreference(&engine->scratch.obj->base);
659 engine->scratch.obj = NULL;
660}
661
662int
0bc40be8 663intel_init_pipe_control(struct intel_engine_cs *engine)
c6df541c 664{
665 int ret;
666
0bc40be8 667 WARN_ON(engine->scratch.obj);
c6df541c 668
c033666a 669 engine->scratch.obj = i915_gem_object_create(engine->i915->dev, 4096);
fe3db79b 670 if (IS_ERR(engine->scratch.obj)) {
c6df541c 671 DRM_ERROR("Failed to allocate seqno page\n");
672 ret = PTR_ERR(engine->scratch.obj);
673 engine->scratch.obj = NULL;
674 goto err;
675 }
e4ffd173 676
677 ret = i915_gem_object_set_cache_level(engine->scratch.obj,
678 I915_CACHE_LLC);
679 if (ret)
680 goto err_unref;
c6df541c 681
0bc40be8 682 ret = i915_gem_obj_ggtt_pin(engine->scratch.obj, 4096, 0);
683 if (ret)
684 goto err_unref;
685
686 engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(engine->scratch.obj);
687 engine->scratch.cpu_page = kmap(sg_page(engine->scratch.obj->pages->sgl));
688 if (engine->scratch.cpu_page == NULL) {
56b085a0 689 ret = -ENOMEM;
c6df541c 690 goto err_unpin;
56b085a0 691 }
c6df541c 692
2b1086cc 693 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
0bc40be8 694 engine->name, engine->scratch.gtt_offset);
695 return 0;
696
697err_unpin:
0bc40be8 698 i915_gem_object_ggtt_unpin(engine->scratch.obj);
c6df541c 699err_unref:
0bc40be8 700 drm_gem_object_unreference(&engine->scratch.obj->base);
c6df541c 701err:
702 return ret;
703}
704
e2be4faf 705static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
86d7f238 706{
4a570db5 707 struct intel_engine_cs *engine = req->engine;
708 struct i915_workarounds *w = &req->i915->workarounds;
709 int ret, i;
888b5995 710
02235808 711 if (w->count == 0)
7225342a 712 return 0;
888b5995 713
e2f80391 714 engine->gpu_caches_dirty = true;
4866d729 715 ret = intel_ring_flush_all_caches(req);
716 if (ret)
717 return ret;
888b5995 718
5fb9de1a 719 ret = intel_ring_begin(req, (w->count * 2 + 2));
720 if (ret)
721 return ret;
722
e2f80391 723 intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(w->count));
7225342a 724 for (i = 0; i < w->count; i++) {
725 intel_ring_emit_reg(engine, w->reg[i].addr);
726 intel_ring_emit(engine, w->reg[i].value);
7225342a 727 }
e2f80391 728 intel_ring_emit(engine, MI_NOOP);
7225342a 729
e2f80391 730 intel_ring_advance(engine);
7225342a 731
e2f80391 732 engine->gpu_caches_dirty = true;
4866d729 733 ret = intel_ring_flush_all_caches(req);
734 if (ret)
735 return ret;
888b5995 736
7225342a 737 DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
888b5995 738
7225342a 739 return 0;
740}
741
8753181e 742static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
743{
744 int ret;
745
e2be4faf 746 ret = intel_ring_workarounds_emit(req);
747 if (ret != 0)
748 return ret;
749
be01363f 750 ret = i915_gem_render_state_init(req);
8f0e2b9d 751 if (ret)
e26e1b97 752 return ret;
8f0e2b9d 753
e26e1b97 754 return 0;
755}
756
7225342a 757static int wa_add(struct drm_i915_private *dev_priv,
758 i915_reg_t addr,
759 const u32 mask, const u32 val)
760{
761 const u32 idx = dev_priv->workarounds.count;
762
763 if (WARN_ON(idx >= I915_MAX_WA_REGS))
764 return -ENOSPC;
765
766 dev_priv->workarounds.reg[idx].addr = addr;
767 dev_priv->workarounds.reg[idx].value = val;
768 dev_priv->workarounds.reg[idx].mask = mask;
769
770 dev_priv->workarounds.count++;
771
772 return 0;
773}
774
ca5a0fbd 775#define WA_REG(addr, mask, val) do { \
cf4b0de6 776 const int r = wa_add(dev_priv, (addr), (mask), (val)); \
777 if (r) \
778 return r; \
ca5a0fbd 779 } while (0)
780
781#define WA_SET_BIT_MASKED(addr, mask) \
26459343 782 WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
783
784#define WA_CLR_BIT_MASKED(addr, mask) \
26459343 785 WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
7225342a 786
98533251 787#define WA_SET_FIELD_MASKED(addr, mask, value) \
cf4b0de6 788 WA_REG(addr, mask, _MASKED_FIELD(mask, value))
7225342a 789
790#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
791#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
7225342a 792
cf4b0de6 793#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
7225342a 794
795static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
796 i915_reg_t reg)
33136b06 797{
c033666a 798 struct drm_i915_private *dev_priv = engine->i915;
33136b06 799 struct i915_workarounds *wa = &dev_priv->workarounds;
0bc40be8 800 const uint32_t index = wa->hw_whitelist_count[engine->id];
801
802 if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
803 return -EINVAL;
804
0bc40be8 805 WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
33136b06 806 i915_mmio_reg_offset(reg));
0bc40be8 807 wa->hw_whitelist_count[engine->id]++;
808
809 return 0;
810}
811
0bc40be8 812static int gen8_init_workarounds(struct intel_engine_cs *engine)
e9a64ada 813{
c033666a 814 struct drm_i915_private *dev_priv = engine->i915;
815
816 WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
e9a64ada 817
818 /* WaDisableAsyncFlipPerfMode:bdw,chv */
819 WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
820
821 /* WaDisablePartialInstShootdown:bdw,chv */
822 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
823 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
824
825 /* Use Force Non-Coherent whenever executing a 3D context. This is a
826 * workaround for a possible hang in the unlikely event a TLB
827 * invalidation occurs during a PSD flush.
828 */
829 /* WaForceEnableNonCoherent:bdw,chv */
120f5d28 830 /* WaHdcDisableFetchWhenMasked:bdw,chv */
a340af58 831 WA_SET_BIT_MASKED(HDC_CHICKEN0,
120f5d28 832 HDC_DONOT_FETCH_MEM_WHEN_MASKED |
833 HDC_FORCE_NON_COHERENT);
834
835 /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
836 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
837 * polygons in the same 8x4 pixel/sample area to be processed without
838 * stalling waiting for the earlier ones to write to Hierarchical Z
839 * buffer."
840 *
841 * This optimization is off by default for BDW and CHV; turn it on.
842 */
843 WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
844
845 /* Wa4x4STCOptimizationDisable:bdw,chv */
846 WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
847
848 /*
849 * BSpec recommends 8x4 when MSAA is used,
850 * however in practice 16x4 seems fastest.
851 *
852 * Note that PS/WM thread counts depend on the WIZ hashing
853 * disable bit, which we don't touch here, but it's good
854 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
855 */
856 WA_SET_FIELD_MASKED(GEN7_GT_MODE,
857 GEN6_WIZ_HASHING_MASK,
858 GEN6_WIZ_HASHING_16x4);
859
860 return 0;
861}
862
0bc40be8 863static int bdw_init_workarounds(struct intel_engine_cs *engine)
86d7f238 864{
c033666a 865 struct drm_i915_private *dev_priv = engine->i915;
e9a64ada 866 int ret;
86d7f238 867
0bc40be8 868 ret = gen8_init_workarounds(engine);
869 if (ret)
870 return ret;
871
101b376d 872 /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
d0581194 873 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
86d7f238 874
101b376d 875 /* WaDisableDopClockGating:bdw */
876 WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
877 DOP_CLOCK_GATING_DISABLE);
86d7f238 878
879 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
880 GEN8_SAMPLER_POWER_BYPASS_DIS);
86d7f238 881
7225342a 882 WA_SET_BIT_MASKED(HDC_CHICKEN0,
883 /* WaForceContextSaveRestoreNonCoherent:bdw */
884 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
35cb6f3b 885 /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
c033666a 886 (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
86d7f238 887
888 return 0;
889}
890
0bc40be8 891static int chv_init_workarounds(struct intel_engine_cs *engine)
00e1e623 892{
c033666a 893 struct drm_i915_private *dev_priv = engine->i915;
e9a64ada 894 int ret;
00e1e623 895
0bc40be8 896 ret = gen8_init_workarounds(engine);
897 if (ret)
898 return ret;
899
00e1e623 900 /* WaDisableThreadStallDopClockGating:chv */
d0581194 901 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
00e1e623 902
903 /* Improve HiZ throughput on CHV. */
904 WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
905
906 return 0;
907}
908
0bc40be8 909static int gen9_init_workarounds(struct intel_engine_cs *engine)
3b106531 910{
c033666a 911 struct drm_i915_private *dev_priv = engine->i915;
e0f3fa09 912 int ret;
ab0dfafe 913
914 /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
915 I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
916
e5f81d65 917 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
918 I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
919 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
920
e5f81d65 921 /* WaDisableKillLogic:bxt,skl,kbl */
922 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
923 ECOCHK_DIS_TLB);
924
925 /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
926 /* WaDisablePartialInstShootdown:skl,bxt,kbl */
ab0dfafe 927 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
950b2aae 928 FLOW_CONTROL_ENABLE |
929 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
930
e5f81d65 931 /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
932 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
933 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
934
e87a005d 935 /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
936 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
937 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
938 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
939 GEN9_DG_MIRROR_FIX_ENABLE);
1de4582f 940
e87a005d 941 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
942 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
943 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
944 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
945 GEN9_RHWO_OPTIMIZATION_DISABLE);
946 /*
947 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
948 * but we do that in per ctx batchbuffer as there is an issue
949 * with this register not getting restored on ctx restore
950 */
951 }
952
953 /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
954 /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
955 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
956 GEN9_ENABLE_YV12_BUGFIX |
957 GEN9_ENABLE_GPGPU_PREEMPTION);
cac23df4 958
959 /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
960 /* WaDisablePartialResolveInVc:skl,bxt,kbl */
961 WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
962 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
9370cd98 963
e5f81d65 964 /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
965 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
966 GEN9_CCS_TLB_PREFETCH_ENABLE);
967
5a2ae95e 968 /* WaDisableMaskBasedCammingInRCC:skl,bxt */
969 if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
970 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
971 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
972 PIXEL_MASK_CAMMING_DISABLE);
973
974 /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
975 WA_SET_BIT_MASKED(HDC_CHICKEN0,
976 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
977 HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
8ea6f892 978
979 /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
980 * both tied to WaForceContextSaveRestoreNonCoherent
981 * in some hsds for skl. We keep the tie for all gen9. The
982 * documentation is a bit hazy and so we want to get common behaviour,
983 * even though there is no clear evidence we would need both on kbl/bxt.
984 * This area has been a source of system hangs so we play it safe
985 * and mimic the skl regardless of what bspec says.
986 *
987 * Use Force Non-Coherent whenever executing a 3D context. This
988 * is a workaround for a possible hang in the unlikely event
989 * a TLB invalidation occurs during a PSD flush.
990 */
991
992 /* WaForceEnableNonCoherent:skl,bxt,kbl */
993 WA_SET_BIT_MASKED(HDC_CHICKEN0,
994 HDC_FORCE_NON_COHERENT);
995
996 /* WaDisableHDCInvalidation:skl,bxt,kbl */
997 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
998 BDW_DISABLE_HDC_INVALIDATION);
999
1000 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
1001 if (IS_SKYLAKE(dev_priv) ||
1002 IS_KABYLAKE(dev_priv) ||
1003 IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
1004 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
1005 GEN8_SAMPLER_POWER_BYPASS_DIS);
8c761609 1006
e5f81d65 1007 /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
1008 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
1009
e5f81d65 1010 /* WaOCLCoherentLineFlush:skl,bxt,kbl */
1011 I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
1012 GEN8_LQSC_FLUSH_COHERENT_LINES));
1013
6bb62855 1014 /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
1015 ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
1016 if (ret)
1017 return ret;
1018
e5f81d65 1019 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
0bc40be8 1020 ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
1021 if (ret)
1022 return ret;
1023
e5f81d65 1024 /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
0bc40be8 1025 ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
1026 if (ret)
1027 return ret;
1028
1029 return 0;
1030}
1031
0bc40be8 1032static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
b7668791 1033{
c033666a 1034 struct drm_i915_private *dev_priv = engine->i915;
1035 u8 vals[3] = { 0, 0, 0 };
1036 unsigned int i;
1037
1038 for (i = 0; i < 3; i++) {
1039 u8 ss;
1040
1041 /*
1042 * Only consider slices where one, and only one, subslice has 7
1043 * EUs
1044 */
a4d8a0fe 1045 if (!is_power_of_2(dev_priv->info.subslice_7eu[i]))
1046 continue;
1047
1048 /*
1049 * subslice_7eu[i] != 0 (because of the check above) and
1050 * ss_max == 4 (maximum number of subslices possible per slice)
1051 *
1052 * -> 0 <= ss <= 3;
1053 */
1054 ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
1055 vals[i] = 3 - ss;
1056 }
1057
1058 if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
1059 return 0;
1060
1061 /* Tune IZ hashing. See intel_device_info_runtime_init() */
1062 WA_SET_FIELD_MASKED(GEN7_GT_MODE,
1063 GEN9_IZ_HASHING_MASK(2) |
1064 GEN9_IZ_HASHING_MASK(1) |
1065 GEN9_IZ_HASHING_MASK(0),
1066 GEN9_IZ_HASHING(2, vals[2]) |
1067 GEN9_IZ_HASHING(1, vals[1]) |
1068 GEN9_IZ_HASHING(0, vals[0]));
1069
1070 return 0;
1071}
1072
0bc40be8 1073static int skl_init_workarounds(struct intel_engine_cs *engine)
8d205494 1074{
c033666a 1075 struct drm_i915_private *dev_priv = engine->i915;
aa0011a8 1076 int ret;
d0bbbc4f 1077
0bc40be8 1078 ret = gen9_init_workarounds(engine);
1079 if (ret)
1080 return ret;
8d205494 1081
1082 /*
1083 * Actual WA is to disable percontext preemption granularity control
1084 * until D0 which is the default case so this is equivalent to
1085 * !WaDisablePerCtxtPreemptionGranularityControl:skl
1086 */
c033666a 1087 if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
1088 I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
1089 _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
1090 }
1091
71dce58c 1092 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) {
1093 /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
1094 I915_WRITE(FF_SLICE_CS_CHICKEN2,
1095 _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
1096 }
1097
1098 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
1099 * involving this register should also be added to WA batch as required.
1100 */
c033666a 1101 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
1102 /* WaDisableLSQCROPERFforOCL:skl */
1103 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
1104 GEN8_LQSC_RO_PERF_DIS);
1105
1106 /* WaEnableGapsTsvCreditFix:skl */
c033666a 1107 if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
1108 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1109 GEN9_GAPS_TSV_CREDIT_DISABLE));
1110 }
1111
d0bbbc4f 1112 /* WaDisablePowerCompilerClockGating:skl */
c033666a 1113 if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
1114 WA_SET_BIT_MASKED(HIZ_CHICKEN,
1115 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
1116
e87a005d 1117 /* WaBarrierPerformanceFixDisable:skl */
c033666a 1118 if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
1119 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1120 HDC_FENCE_DEST_SLM_DISABLE |
1121 HDC_BARRIER_PERFORMANCE_DISABLE);
1122
9bd9dfb4 1123 /* WaDisableSbeCacheDispatchPortSharing:skl */
c033666a 1124 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
1125 WA_SET_BIT_MASKED(
1126 GEN7_HALF_SLICE_CHICKEN1,
1127 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
9bd9dfb4 1128
1129 /* WaDisableGafsUnitClkGating:skl */
1130 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1131
6107497e 1132 /* WaDisableLSQCROPERFforOCL:skl */
0bc40be8 1133 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1134 if (ret)
1135 return ret;
1136
0bc40be8 1137 return skl_tune_iz_hashing(engine);
1138}
1139
0bc40be8 1140static int bxt_init_workarounds(struct intel_engine_cs *engine)
cae0437f 1141{
c033666a 1142 struct drm_i915_private *dev_priv = engine->i915;
aa0011a8 1143 int ret;
dfb601e6 1144
0bc40be8 1145 ret = gen9_init_workarounds(engine);
1146 if (ret)
1147 return ret;
cae0437f 1148
1149 /* WaStoreMultiplePTEenable:bxt */
1150 /* This is a requirement according to Hardware specification */
c033666a 1151 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
1152 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
1153
1154 /* WaSetClckGatingDisableMedia:bxt */
c033666a 1155 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
1156 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
1157 ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
1158 }
1159
1160 /* WaDisableThreadStallDopClockGating:bxt */
1161 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
1162 STALL_DOP_GATING_DISABLE);
1163
780f0aeb 1164 /* WaDisablePooledEuLoadBalancingFix:bxt */
1165 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
1166 WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
1167 GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
1168 }
1169
983b4b9d 1170 /* WaDisableSbeCacheDispatchPortSharing:bxt */
c033666a 1171 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
1172 WA_SET_BIT_MASKED(
1173 GEN7_HALF_SLICE_CHICKEN1,
1174 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1175 }
1176
1177 /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
1178 /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
1179 /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
a786d53a 1180 /* WaDisableLSQCROPERFforOCL:bxt */
c033666a 1181 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
0bc40be8 1182 ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
1183 if (ret)
1184 return ret;
a786d53a 1185
0bc40be8 1186 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1187 if (ret)
1188 return ret;
1189 }
1190
050fc465 1191 /* WaProgramL3SqcReg1DefaultForPerf:bxt */
c033666a 1192 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
1193 I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
1194 L3_HIGH_PRIO_CREDITS(2));
050fc465 1195
1196 /* WaInsertDummyPushConstPs:bxt */
1197 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
1198 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1199 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1200
1201 return 0;
1202}
1203
1204static int kbl_init_workarounds(struct intel_engine_cs *engine)
1205{
e587f6cb 1206 struct drm_i915_private *dev_priv = engine->i915;
1207 int ret;
1208
1209 ret = gen9_init_workarounds(engine);
1210 if (ret)
1211 return ret;
1212
1213 /* WaEnableGapsTsvCreditFix:kbl */
1214 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1215 GEN9_GAPS_TSV_CREDIT_DISABLE));
1216
1217 /* WaDisableDynamicCreditSharing:kbl */
1218 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
1219 WA_SET_BIT(GAMT_CHKN_BIT_REG,
1220 GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
1221
1222 /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
1223 if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
1224 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1225 HDC_FENCE_DEST_SLM_DISABLE);
1226
1227 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
1228 * involving this register should also be added to WA batch as required.
1229 */
1230 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
1231 /* WaDisableLSQCROPERFforOCL:kbl */
1232 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
1233 GEN8_LQSC_RO_PERF_DIS);
1234
1235 /* WaInsertDummyPushConstPs:kbl */
1236 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
1237 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1238 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1239
1240 /* WaDisableGafsUnitClkGating:kbl */
1241 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1242
1243 /* WaDisableSbeCacheDispatchPortSharing:kbl */
1244 WA_SET_BIT_MASKED(
1245 GEN7_HALF_SLICE_CHICKEN1,
1246 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1247
1248 /* WaDisableLSQCROPERFforOCL:kbl */
1249 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1250 if (ret)
1251 return ret;
1252
1253 return 0;
1254}
1255
0bc40be8 1256int init_workarounds_ring(struct intel_engine_cs *engine)
7225342a 1257{
c033666a 1258 struct drm_i915_private *dev_priv = engine->i915;
7225342a 1259
0bc40be8 1260 WARN_ON(engine->id != RCS);
1261
1262 dev_priv->workarounds.count = 0;
33136b06 1263 dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
7225342a 1264
c033666a 1265 if (IS_BROADWELL(dev_priv))
0bc40be8 1266 return bdw_init_workarounds(engine);
7225342a 1267
c033666a 1268 if (IS_CHERRYVIEW(dev_priv))
0bc40be8 1269 return chv_init_workarounds(engine);
00e1e623 1270
c033666a 1271 if (IS_SKYLAKE(dev_priv))
0bc40be8 1272 return skl_init_workarounds(engine);
cae0437f 1273
c033666a 1274 if (IS_BROXTON(dev_priv))
0bc40be8 1275 return bxt_init_workarounds(engine);
3b106531 1276
1277 if (IS_KABYLAKE(dev_priv))
1278 return kbl_init_workarounds(engine);
1279
1280 return 0;
1281}
1282
0bc40be8 1283static int init_render_ring(struct intel_engine_cs *engine)
8187a2b7 1284{
c033666a 1285 struct drm_i915_private *dev_priv = engine->i915;
0bc40be8 1286 int ret = init_ring_common(engine);
1287 if (ret)
1288 return ret;
a69ffdbf 1289
61a563a2 1290 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
ac657f64 1291 if (IS_GEN(dev_priv, 4, 6))
6b26c86d 1292 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
1293
1294 /* We need to disable the AsyncFlip performance optimisations in order
1295 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1296 * programmed to '1' on all products.
8693a824 1297 *
2441f877 1298 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
1c8c38c5 1299 */
ac657f64 1300 if (IS_GEN(dev_priv, 6, 7))
1301 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1302
f05bb0c7 1303 /* Required for the hardware to program scanline values for waiting */
01fa0302 1304 /* WaEnableFlushTlbInvalidationMode:snb */
c033666a 1305 if (IS_GEN6(dev_priv))
f05bb0c7 1306 I915_WRITE(GFX_MODE,
aa83e30d 1307 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
f05bb0c7 1308
01fa0302 1309 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
c033666a 1310 if (IS_GEN7(dev_priv))
1c8c38c5 1311 I915_WRITE(GFX_MODE_GEN7,
01fa0302 1312 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
1c8c38c5 1313 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
78501eac 1314
c033666a 1315 if (IS_GEN6(dev_priv)) {
1316 /* From the Sandybridge PRM, volume 1 part 3, page 24:
1317 * "If this bit is set, STCunit will have LRA as replacement
1318 * policy. [...] This bit must be reset. LRA replacement
1319 * policy is not supported."
1320 */
1321 I915_WRITE(CACHE_MODE_0,
5e13a0c5 1322 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
1323 }
1324
ac657f64 1325 if (IS_GEN(dev_priv, 6, 7))
6b26c86d 1326 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
84f9f938 1327
1328 if (HAS_L3_DPF(dev_priv))
1329 I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
15b9f80e 1330
0bc40be8 1331 return init_workarounds_ring(engine);
1332}
1333
0bc40be8 1334static void render_ring_cleanup(struct intel_engine_cs *engine)
c6df541c 1335{
c033666a 1336 struct drm_i915_private *dev_priv = engine->i915;
1337
1338 if (dev_priv->semaphore_obj) {
1339 i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
1340 drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
1341 dev_priv->semaphore_obj = NULL;
1342 }
b45305fc 1343
0bc40be8 1344 intel_fini_pipe_control(engine);
1345}
1346
f7169687 1347static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
1348 unsigned int num_dwords)
1349{
1350#define MBOX_UPDATE_DWORDS 8
4a570db5 1351 struct intel_engine_cs *signaller = signaller_req->engine;
c033666a 1352 struct drm_i915_private *dev_priv = signaller_req->i915;
3e78998a 1353 struct intel_engine_cs *waiter;
1354 enum intel_engine_id id;
1355 int ret, num_rings;
3e78998a 1356
c033666a 1357 num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
1358 num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
1359#undef MBOX_UPDATE_DWORDS
1360
5fb9de1a 1361 ret = intel_ring_begin(signaller_req, num_dwords);
1362 if (ret)
1363 return ret;
1364
c3232b18 1365 for_each_engine_id(waiter, dev_priv, id) {
6259cead 1366 u32 seqno;
c3232b18 1367 u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
1368 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1369 continue;
1370
f7169687 1371 seqno = i915_gem_request_get_seqno(signaller_req);
1372 intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
1373 intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
1374 PIPE_CONTROL_QW_WRITE |
f9a4ea35 1375 PIPE_CONTROL_CS_STALL);
1376 intel_ring_emit(signaller, lower_32_bits(gtt_offset));
1377 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
6259cead 1378 intel_ring_emit(signaller, seqno);
1379 intel_ring_emit(signaller, 0);
1380 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
215a7e32 1381 MI_SEMAPHORE_TARGET(waiter->hw_id));
1382 intel_ring_emit(signaller, 0);
1383 }
1384
1385 return 0;
1386}
1387
f7169687 1388static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
1389 unsigned int num_dwords)
1390{
1391#define MBOX_UPDATE_DWORDS 6
4a570db5 1392 struct intel_engine_cs *signaller = signaller_req->engine;
c033666a 1393 struct drm_i915_private *dev_priv = signaller_req->i915;
3e78998a 1394 struct intel_engine_cs *waiter;
1395 enum intel_engine_id id;
1396 int ret, num_rings;
3e78998a 1397
c033666a 1398 num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
1399 num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
1400#undef MBOX_UPDATE_DWORDS
1401
5fb9de1a 1402 ret = intel_ring_begin(signaller_req, num_dwords);
1403 if (ret)
1404 return ret;
1405
c3232b18 1406 for_each_engine_id(waiter, dev_priv, id) {
6259cead 1407 u32 seqno;
c3232b18 1408 u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
1409 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1410 continue;
1411
f7169687 1412 seqno = i915_gem_request_get_seqno(signaller_req);
1413 intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
1414 MI_FLUSH_DW_OP_STOREDW);
1415 intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
1416 MI_FLUSH_DW_USE_GTT);
1417 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
6259cead 1418 intel_ring_emit(signaller, seqno);
3e78998a 1419 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
215a7e32 1420 MI_SEMAPHORE_TARGET(waiter->hw_id));
1421 intel_ring_emit(signaller, 0);
1422 }
1423
1424 return 0;
1425}
1426
f7169687 1427static int gen6_signal(struct drm_i915_gem_request *signaller_req,
024a43e1 1428 unsigned int num_dwords)
1ec14ad3 1429{
4a570db5 1430 struct intel_engine_cs *signaller = signaller_req->engine;
c033666a 1431 struct drm_i915_private *dev_priv = signaller_req->i915;
a4872ba6 1432 struct intel_engine_cs *useless;
1433 enum intel_engine_id id;
1434 int ret, num_rings;
78325f2d 1435
a1444b79 1436#define MBOX_UPDATE_DWORDS 3
c033666a 1437 num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
1438 num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
1439#undef MBOX_UPDATE_DWORDS
024a43e1 1440
5fb9de1a 1441 ret = intel_ring_begin(signaller_req, num_dwords);
1442 if (ret)
1443 return ret;
024a43e1 1444
1445 for_each_engine_id(useless, dev_priv, id) {
1446 i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];
1447
1448 if (i915_mmio_reg_valid(mbox_reg)) {
f7169687 1449 u32 seqno = i915_gem_request_get_seqno(signaller_req);
f0f59a00 1450
78325f2d 1451 intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
f92a9162 1452 intel_ring_emit_reg(signaller, mbox_reg);
6259cead 1453 intel_ring_emit(signaller, seqno);
1454 }
1455 }
024a43e1 1456
1457 /* If num_dwords was rounded, make sure the tail pointer is correct */
1458 if (num_rings % 2 == 0)
1459 intel_ring_emit(signaller, MI_NOOP);
1460
024a43e1 1461 return 0;
1462}
1463
1464/**
1465 * gen6_add_request - Update the semaphore mailbox registers
1466 *
1467 * @request - request to write to the ring
1468 *
1469 * Update the mailbox registers in the *other* rings with the current seqno.
1470 * This acts like a signal in the canonical semaphore.
1471 */
1ec14ad3 1472static int
ee044a88 1473gen6_add_request(struct drm_i915_gem_request *req)
1ec14ad3 1474{
4a570db5 1475 struct intel_engine_cs *engine = req->engine;
024a43e1 1476 int ret;
52ed2325 1477
1478 if (engine->semaphore.signal)
1479 ret = engine->semaphore.signal(req, 4);
707d9cf9 1480 else
5fb9de1a 1481 ret = intel_ring_begin(req, 4);
707d9cf9 1482
1483 if (ret)
1484 return ret;
1485
1486 intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
1487 intel_ring_emit(engine,
1488 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1489 intel_ring_emit(engine, i915_gem_request_get_seqno(req));
1490 intel_ring_emit(engine, MI_USER_INTERRUPT);
1491 __intel_ring_advance(engine);
1ec14ad3 1492
1493 return 0;
1494}
1495
1496static int
1497gen8_render_add_request(struct drm_i915_gem_request *req)
1498{
1499 struct intel_engine_cs *engine = req->engine;
1500 int ret;
1501
1502 if (engine->semaphore.signal)
1503 ret = engine->semaphore.signal(req, 8);
1504 else
1505 ret = intel_ring_begin(req, 8);
1506 if (ret)
1507 return ret;
1508
1509 intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
1510 intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB |
1511 PIPE_CONTROL_CS_STALL |
1512 PIPE_CONTROL_QW_WRITE));
1513 intel_ring_emit(engine, intel_hws_seqno_address(req->engine));
1514 intel_ring_emit(engine, 0);
1515 intel_ring_emit(engine, i915_gem_request_get_seqno(req));
1516 /* We're thrashing one dword of HWS. */
1517 intel_ring_emit(engine, 0);
1518 intel_ring_emit(engine, MI_USER_INTERRUPT);
1519 intel_ring_emit(engine, MI_NOOP);
1520 __intel_ring_advance(engine);
1521
1522 return 0;
1523}
1524
c033666a 1525static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
1526 u32 seqno)
1527{
1528 return dev_priv->last_seqno < seqno;
1529}
1530
1531/**
1532 * intel_ring_sync - sync the waiter to the signaller on seqno
1533 *
1534 * @waiter - ring that is waiting
1535 * @signaller - ring which has, or will signal
1536 * @seqno - seqno which the waiter will block on
1537 */
1538
1539static int
599d924c 1540gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
1541 struct intel_engine_cs *signaller,
1542 u32 seqno)
1543{
4a570db5 1544 struct intel_engine_cs *waiter = waiter_req->engine;
c033666a 1545 struct drm_i915_private *dev_priv = waiter_req->i915;
6ef48d7f 1546 struct i915_hw_ppgtt *ppgtt;
1547 int ret;
1548
5fb9de1a 1549 ret = intel_ring_begin(waiter_req, 4);
1550 if (ret)
1551 return ret;
1552
1553 intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
1554 MI_SEMAPHORE_GLOBAL_GTT |
1555 MI_SEMAPHORE_SAD_GTE_SDD);
1556 intel_ring_emit(waiter, seqno);
1557 intel_ring_emit(waiter,
1558 lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
1559 intel_ring_emit(waiter,
1560 upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
1561 intel_ring_advance(waiter);
1562
1563 /* When the !RCS engines idle waiting upon a semaphore, they lose their
1564 * pagetables and we must reload them before executing the batch.
1565 * We do this on the i915_switch_context() following the wait and
1566 * before the dispatch.
1567 */
1568 ppgtt = waiter_req->ctx->ppgtt;
1569 if (ppgtt && waiter_req->engine->id != RCS)
1570 ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
5ee426ca
BW
1571 return 0;
1572}
1573
c8c99b0f 1574static int
599d924c 1575gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
a4872ba6 1576 struct intel_engine_cs *signaller,
686cb5f9 1577 u32 seqno)
1ec14ad3 1578{
4a570db5 1579 struct intel_engine_cs *waiter = waiter_req->engine;
c8c99b0f
BW
1580 u32 dw1 = MI_SEMAPHORE_MBOX |
1581 MI_SEMAPHORE_COMPARE |
1582 MI_SEMAPHORE_REGISTER;
ebc348b2
BW
1583 u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
1584 int ret;
1ec14ad3 1585
1500f7ea
BW
1586 /* Throughout all of the GEM code, seqno passed implies our current
1587 * seqno is >= the last seqno executed. However for hardware the
1588 * comparison is strictly greater than.
1589 */
1590 seqno -= 1;
1591
ebc348b2 1592 WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
686cb5f9 1593
5fb9de1a 1594 ret = intel_ring_begin(waiter_req, 4);
1ec14ad3
CW
1595 if (ret)
1596 return ret;
1597
f72b3435 1598 /* If seqno wrap happened, omit the wait with no-ops */
c033666a 1599 if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
ebc348b2 1600 intel_ring_emit(waiter, dw1 | wait_mbox);
f72b3435
MK
1601 intel_ring_emit(waiter, seqno);
1602 intel_ring_emit(waiter, 0);
1603 intel_ring_emit(waiter, MI_NOOP);
1604 } else {
1605 intel_ring_emit(waiter, MI_NOOP);
1606 intel_ring_emit(waiter, MI_NOOP);
1607 intel_ring_emit(waiter, MI_NOOP);
1608 intel_ring_emit(waiter, MI_NOOP);
1609 }
c8c99b0f 1610 intel_ring_advance(waiter);
1ec14ad3
CW
1611
1612 return 0;
1613}
1614
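/*
 * Illustrative sketch, not part of the original file: why gen6_ring_sync()
 * above programs "seqno - 1".  Software wants to proceed once the signaller
 * has completed seqno X (completed >= X), but the hardware semaphore compare
 * only fires on strictly-greater-than, so the value written into the command
 * is X - 1.  The seqno-wrap check catches a target assigned before the
 * counter was reset, in which case the wait is replaced with no-ops instead
 * of programming a value that may never be reached.  Plain C with standard
 * types; nothing below is driver API.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool hw_semaphore_fires(uint32_t mailbox, uint32_t programmed)
{
        return mailbox > programmed;    /* strictly greater than */
}

static uint32_t programmed_wait_value(uint32_t target)
{
        return target - 1;              /* mirrors "seqno -= 1" above */
}

int main(void)
{
        uint32_t target = 100;

        /* Completion of exactly the target seqno releases the waiter ... */
        assert(hw_semaphore_fires(100, programmed_wait_value(target)));
        /* ... while the previous seqno does not. */
        assert(!hw_semaphore_fires(99, programmed_wait_value(target)));
        return 0;
}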
c6df541c
CW
1615#define PIPE_CONTROL_FLUSH(ring__, addr__) \
1616do { \
fcbc34e4
KG
1617 intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
1618 PIPE_CONTROL_DEPTH_STALL); \
c6df541c
CW
1619 intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
1620 intel_ring_emit(ring__, 0); \
1621 intel_ring_emit(ring__, 0); \
1622} while (0)
1623
1624static int
ee044a88 1625pc_render_add_request(struct drm_i915_gem_request *req)
c6df541c 1626{
4a570db5 1627 struct intel_engine_cs *engine = req->engine;
e2f80391 1628 u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
c6df541c
CW
1629 int ret;
1630
1631 /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
1632 * incoherent with writes to memory, i.e. completely fubar,
1633 * so we need to use PIPE_NOTIFY instead.
1634 *
1635 * However, we also need to workaround the qword write
1636 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
1637 * memory before requesting an interrupt.
1638 */
5fb9de1a 1639 ret = intel_ring_begin(req, 32);
c6df541c
CW
1640 if (ret)
1641 return ret;
1642
e2f80391
TU
1643 intel_ring_emit(engine,
1644 GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
9d971b37
KG
1645 PIPE_CONTROL_WRITE_FLUSH |
1646 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
e2f80391
TU
1647 intel_ring_emit(engine,
1648 engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
1649 intel_ring_emit(engine, i915_gem_request_get_seqno(req));
1650 intel_ring_emit(engine, 0);
1651 PIPE_CONTROL_FLUSH(engine, scratch_addr);
18393f63 1652 scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
e2f80391 1653 PIPE_CONTROL_FLUSH(engine, scratch_addr);
18393f63 1654 scratch_addr += 2 * CACHELINE_BYTES;
e2f80391 1655 PIPE_CONTROL_FLUSH(engine, scratch_addr);
18393f63 1656 scratch_addr += 2 * CACHELINE_BYTES;
e2f80391 1657 PIPE_CONTROL_FLUSH(engine, scratch_addr);
18393f63 1658 scratch_addr += 2 * CACHELINE_BYTES;
e2f80391 1659 PIPE_CONTROL_FLUSH(engine, scratch_addr);
18393f63 1660 scratch_addr += 2 * CACHELINE_BYTES;
e2f80391 1661 PIPE_CONTROL_FLUSH(engine, scratch_addr);
a71d8d94 1662
e2f80391
TU
1663 intel_ring_emit(engine,
1664 GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
9d971b37
KG
1665 PIPE_CONTROL_WRITE_FLUSH |
1666 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
c6df541c 1667 PIPE_CONTROL_NOTIFY);
e2f80391
TU
1668 intel_ring_emit(engine,
1669 engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
1670 intel_ring_emit(engine, i915_gem_request_get_seqno(req));
1671 intel_ring_emit(engine, 0);
1672 __intel_ring_advance(engine);
c6df541c 1673
c6df541c
CW
1674 return 0;
1675}
1676
c04e0f3b
CW
1677static void
1678gen6_seqno_barrier(struct intel_engine_cs *engine)
4cd53c0c 1679{
c033666a 1680 struct drm_i915_private *dev_priv = engine->i915;
bcbdb6d0 1681
4cd53c0c
DV
1682 /* Workaround to force correct ordering between irq and seqno writes on
1683 * ivb (and maybe also on snb) by reading from a CS register (like
9b9ed309
CW
1684 * ACTHD) before reading the status page.
1685 *
1686 * Note that this effectively stalls the read by the time it takes to
1687 * do a memory transaction, which more or less ensures that the write
1688 * from the GPU has sufficient time to invalidate the CPU cacheline.
1689 * Alternatively we could delay the interrupt from the CS ring to give
1690 * the write time to land, but that would incur a delay after every
1691 * batch i.e. much more frequent than a delay when waiting for the
1692 * interrupt (with the same net latency).
bcbdb6d0
CW
1693 *
1694 * Also note that to prevent whole machine hangs on gen7, we have to
1695 * take the spinlock to guard against concurrent cacheline access.
9b9ed309 1696 */
bcbdb6d0 1697 spin_lock_irq(&dev_priv->uncore.lock);
c04e0f3b 1698 POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
bcbdb6d0 1699 spin_unlock_irq(&dev_priv->uncore.lock);
4cd53c0c
DV
1700}
1701
8187a2b7 1702static u32
c04e0f3b 1703ring_get_seqno(struct intel_engine_cs *engine)
8187a2b7 1704{
0bc40be8 1705 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
1ec14ad3
CW
1706}
1707
b70ec5bf 1708static void
0bc40be8 1709ring_set_seqno(struct intel_engine_cs *engine, u32 seqno)
b70ec5bf 1710{
0bc40be8 1711 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
b70ec5bf
MK
1712}
1713
c6df541c 1714static u32
c04e0f3b 1715pc_render_get_seqno(struct intel_engine_cs *engine)
c6df541c 1716{
0bc40be8 1717 return engine->scratch.cpu_page[0];
c6df541c
CW
1718}
1719
b70ec5bf 1720static void
0bc40be8 1721pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
b70ec5bf 1722{
0bc40be8 1723 engine->scratch.cpu_page[0] = seqno;
b70ec5bf
MK
1724}
1725
e48d8634 1726static bool
0bc40be8 1727gen5_ring_get_irq(struct intel_engine_cs *engine)
e48d8634 1728{
c033666a 1729 struct drm_i915_private *dev_priv = engine->i915;
7338aefa 1730 unsigned long flags;
e48d8634 1731
7cd512f1 1732 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
e48d8634
DV
1733 return false;
1734
7338aefa 1735 spin_lock_irqsave(&dev_priv->irq_lock, flags);
0bc40be8
TU
1736 if (engine->irq_refcount++ == 0)
1737 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
7338aefa 1738 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
e48d8634
DV
1739
1740 return true;
1741}
1742
1743static void
0bc40be8 1744gen5_ring_put_irq(struct intel_engine_cs *engine)
e48d8634 1745{
c033666a 1746 struct drm_i915_private *dev_priv = engine->i915;
7338aefa 1747 unsigned long flags;
e48d8634 1748
7338aefa 1749 spin_lock_irqsave(&dev_priv->irq_lock, flags);
0bc40be8
TU
1750 if (--engine->irq_refcount == 0)
1751 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
7338aefa 1752 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
e48d8634
DV
1753}
1754
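/*
 * Illustrative sketch, not part of the original file: the refcounting
 * pattern shared by all of the *_get_irq()/*_put_irq() pairs in this file.
 * Only the 0 -> 1 transition unmasks the interrupt and only the 1 -> 0
 * transition masks it again, so nested get/put pairs stay cheap.  The lock
 * and "register" below are stand-ins, not the driver's real irq_lock/IMR.
 */
#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t model_irq_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int model_irq_refcount;
static uint32_t model_imr = ~0u;                /* all sources masked */

static void model_get_irq(uint32_t enable_mask)
{
        pthread_mutex_lock(&model_irq_lock);
        if (model_irq_refcount++ == 0)
                model_imr &= ~enable_mask;      /* unmask on first user */
        pthread_mutex_unlock(&model_irq_lock);
}

static void model_put_irq(uint32_t enable_mask)
{
        pthread_mutex_lock(&model_irq_lock);
        if (--model_irq_refcount == 0)
                model_imr |= enable_mask;       /* mask again on last user */
        pthread_mutex_unlock(&model_irq_lock);
}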
b13c2b96 1755static bool
0bc40be8 1756i9xx_ring_get_irq(struct intel_engine_cs *engine)
62fdfeaf 1757{
c033666a 1758 struct drm_i915_private *dev_priv = engine->i915;
7338aefa 1759 unsigned long flags;
62fdfeaf 1760
7cd512f1 1761 if (!intel_irqs_enabled(dev_priv))
b13c2b96
CW
1762 return false;
1763
7338aefa 1764 spin_lock_irqsave(&dev_priv->irq_lock, flags);
0bc40be8
TU
1765 if (engine->irq_refcount++ == 0) {
1766 dev_priv->irq_mask &= ~engine->irq_enable_mask;
f637fde4
DV
1767 I915_WRITE(IMR, dev_priv->irq_mask);
1768 POSTING_READ(IMR);
1769 }
7338aefa 1770 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
b13c2b96
CW
1771
1772 return true;
62fdfeaf
EA
1773}
1774
8187a2b7 1775static void
0bc40be8 1776i9xx_ring_put_irq(struct intel_engine_cs *engine)
62fdfeaf 1777{
c033666a 1778 struct drm_i915_private *dev_priv = engine->i915;
7338aefa 1779 unsigned long flags;
62fdfeaf 1780
7338aefa 1781 spin_lock_irqsave(&dev_priv->irq_lock, flags);
0bc40be8
TU
1782 if (--engine->irq_refcount == 0) {
1783 dev_priv->irq_mask |= engine->irq_enable_mask;
f637fde4
DV
1784 I915_WRITE(IMR, dev_priv->irq_mask);
1785 POSTING_READ(IMR);
1786 }
7338aefa 1787 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
62fdfeaf
EA
1788}
1789
c2798b19 1790static bool
0bc40be8 1791i8xx_ring_get_irq(struct intel_engine_cs *engine)
c2798b19 1792{
c033666a 1793 struct drm_i915_private *dev_priv = engine->i915;
7338aefa 1794 unsigned long flags;
c2798b19 1795
7cd512f1 1796 if (!intel_irqs_enabled(dev_priv))
c2798b19
CW
1797 return false;
1798
7338aefa 1799 spin_lock_irqsave(&dev_priv->irq_lock, flags);
0bc40be8
TU
1800 if (engine->irq_refcount++ == 0) {
1801 dev_priv->irq_mask &= ~engine->irq_enable_mask;
c2798b19
CW
1802 I915_WRITE16(IMR, dev_priv->irq_mask);
1803 POSTING_READ16(IMR);
1804 }
7338aefa 1805 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
c2798b19
CW
1806
1807 return true;
1808}
1809
1810static void
0bc40be8 1811i8xx_ring_put_irq(struct intel_engine_cs *engine)
c2798b19 1812{
c033666a 1813 struct drm_i915_private *dev_priv = engine->i915;
7338aefa 1814 unsigned long flags;
c2798b19 1815
7338aefa 1816 spin_lock_irqsave(&dev_priv->irq_lock, flags);
0bc40be8
TU
1817 if (--engine->irq_refcount == 0) {
1818 dev_priv->irq_mask |= engine->irq_enable_mask;
c2798b19
CW
1819 I915_WRITE16(IMR, dev_priv->irq_mask);
1820 POSTING_READ16(IMR);
1821 }
7338aefa 1822 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
c2798b19
CW
1823}
1824
b72f3acb 1825static int
a84c3ae1 1826bsd_ring_flush(struct drm_i915_gem_request *req,
78501eac
CW
1827 u32 invalidate_domains,
1828 u32 flush_domains)
d1b851fc 1829{
4a570db5 1830 struct intel_engine_cs *engine = req->engine;
b72f3acb
CW
1831 int ret;
1832
5fb9de1a 1833 ret = intel_ring_begin(req, 2);
b72f3acb
CW
1834 if (ret)
1835 return ret;
1836
e2f80391
TU
1837 intel_ring_emit(engine, MI_FLUSH);
1838 intel_ring_emit(engine, MI_NOOP);
1839 intel_ring_advance(engine);
b72f3acb 1840 return 0;
d1b851fc
ZN
1841}
1842
3cce469c 1843static int
ee044a88 1844i9xx_add_request(struct drm_i915_gem_request *req)
d1b851fc 1845{
4a570db5 1846 struct intel_engine_cs *engine = req->engine;
3cce469c
CW
1847 int ret;
1848
5fb9de1a 1849 ret = intel_ring_begin(req, 4);
3cce469c
CW
1850 if (ret)
1851 return ret;
6f392d54 1852
e2f80391
TU
1853 intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
1854 intel_ring_emit(engine,
1855 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1856 intel_ring_emit(engine, i915_gem_request_get_seqno(req));
1857 intel_ring_emit(engine, MI_USER_INTERRUPT);
1858 __intel_ring_advance(engine);
d1b851fc 1859
3cce469c 1860 return 0;
d1b851fc
ZN
1861}
1862
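/*
 * Illustrative sketch, not part of the original file: what the
 * MI_STORE_DWORD_INDEX + MI_USER_INTERRUPT pair emitted by i9xx_add_request()
 * and gen6_add_request() achieves.  The request ends with the GPU writing its
 * seqno into a fixed slot of the hardware status page, so ring_get_seqno()
 * only has to read that slot and "is this request done?" becomes a simple
 * comparison.  The array and slot index below are assumptions standing in
 * for the real status page.
 */
#include <stdbool.h>
#include <stdint.h>

#define MODEL_HWS_INDEX 0x30            /* slot index, model assumption */

static uint32_t model_status_page[1024 / sizeof(uint32_t)];

/* What the breadcrumb at the end of a request does. */
static void model_gpu_retire(uint32_t seqno)
{
        model_status_page[MODEL_HWS_INDEX] = seqno;
}

/* What polling via the status page reduces to. */
static bool model_request_completed(uint32_t seqno)
{
        return model_status_page[MODEL_HWS_INDEX] >= seqno;
}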
0f46832f 1863static bool
0bc40be8 1864gen6_ring_get_irq(struct intel_engine_cs *engine)
0f46832f 1865{
c033666a 1866 struct drm_i915_private *dev_priv = engine->i915;
7338aefa 1867 unsigned long flags;
0f46832f 1868
7cd512f1
DV
1869 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1870 return false;
0f46832f 1871
7338aefa 1872 spin_lock_irqsave(&dev_priv->irq_lock, flags);
0bc40be8 1873 if (engine->irq_refcount++ == 0) {
c033666a 1874 if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
0bc40be8
TU
1875 I915_WRITE_IMR(engine,
1876 ~(engine->irq_enable_mask |
c033666a 1877 GT_PARITY_ERROR(dev_priv)));
15b9f80e 1878 else
0bc40be8
TU
1879 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1880 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
0f46832f 1881 }
7338aefa 1882 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
0f46832f
CW
1883
1884 return true;
1885}
1886
1887static void
0bc40be8 1888gen6_ring_put_irq(struct intel_engine_cs *engine)
0f46832f 1889{
c033666a 1890 struct drm_i915_private *dev_priv = engine->i915;
7338aefa 1891 unsigned long flags;
0f46832f 1892
7338aefa 1893 spin_lock_irqsave(&dev_priv->irq_lock, flags);
0bc40be8 1894 if (--engine->irq_refcount == 0) {
c033666a
CW
1895 if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
1896 I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
15b9f80e 1897 else
0bc40be8
TU
1898 I915_WRITE_IMR(engine, ~0);
1899 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
1ec14ad3 1900 }
7338aefa 1901 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
d1b851fc
ZN
1902}
1903
a19d2933 1904static bool
0bc40be8 1905hsw_vebox_get_irq(struct intel_engine_cs *engine)
a19d2933 1906{
c033666a 1907 struct drm_i915_private *dev_priv = engine->i915;
a19d2933
BW
1908 unsigned long flags;
1909
7cd512f1 1910 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
a19d2933
BW
1911 return false;
1912
59cdb63d 1913 spin_lock_irqsave(&dev_priv->irq_lock, flags);
0bc40be8
TU
1914 if (engine->irq_refcount++ == 0) {
1915 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1916 gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
a19d2933 1917 }
59cdb63d 1918 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
a19d2933
BW
1919
1920 return true;
1921}
1922
1923static void
0bc40be8 1924hsw_vebox_put_irq(struct intel_engine_cs *engine)
a19d2933 1925{
c033666a 1926 struct drm_i915_private *dev_priv = engine->i915;
a19d2933
BW
1927 unsigned long flags;
1928
59cdb63d 1929 spin_lock_irqsave(&dev_priv->irq_lock, flags);
0bc40be8
TU
1930 if (--engine->irq_refcount == 0) {
1931 I915_WRITE_IMR(engine, ~0);
1932 gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
a19d2933 1933 }
59cdb63d 1934 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
a19d2933
BW
1935}
1936
abd58f01 1937static bool
0bc40be8 1938gen8_ring_get_irq(struct intel_engine_cs *engine)
abd58f01 1939{
c033666a 1940 struct drm_i915_private *dev_priv = engine->i915;
abd58f01
BW
1941 unsigned long flags;
1942
7cd512f1 1943 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
abd58f01
BW
1944 return false;
1945
1946 spin_lock_irqsave(&dev_priv->irq_lock, flags);
0bc40be8 1947 if (engine->irq_refcount++ == 0) {
c033666a 1948 if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
0bc40be8
TU
1949 I915_WRITE_IMR(engine,
1950 ~(engine->irq_enable_mask |
abd58f01
BW
1951 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
1952 } else {
0bc40be8 1953 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
abd58f01 1954 }
0bc40be8 1955 POSTING_READ(RING_IMR(engine->mmio_base));
abd58f01
BW
1956 }
1957 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1958
1959 return true;
1960}
1961
1962static void
0bc40be8 1963gen8_ring_put_irq(struct intel_engine_cs *engine)
abd58f01 1964{
c033666a 1965 struct drm_i915_private *dev_priv = engine->i915;
abd58f01
BW
1966 unsigned long flags;
1967
1968 spin_lock_irqsave(&dev_priv->irq_lock, flags);
0bc40be8 1969 if (--engine->irq_refcount == 0) {
c033666a 1970 if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
0bc40be8 1971 I915_WRITE_IMR(engine,
abd58f01
BW
1972 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
1973 } else {
0bc40be8 1974 I915_WRITE_IMR(engine, ~0);
abd58f01 1975 }
0bc40be8 1976 POSTING_READ(RING_IMR(engine->mmio_base));
abd58f01
BW
1977 }
1978 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1979}
1980
d1b851fc 1981static int
53fddaf7 1982i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
9bcb144c 1983 u64 offset, u32 length,
8e004efc 1984 unsigned dispatch_flags)
d1b851fc 1985{
4a570db5 1986 struct intel_engine_cs *engine = req->engine;
e1f99ce6 1987 int ret;
78501eac 1988
5fb9de1a 1989 ret = intel_ring_begin(req, 2);
e1f99ce6
CW
1990 if (ret)
1991 return ret;
1992
e2f80391 1993 intel_ring_emit(engine,
65f56876
CW
1994 MI_BATCH_BUFFER_START |
1995 MI_BATCH_GTT |
8e004efc
JH
1996 (dispatch_flags & I915_DISPATCH_SECURE ?
1997 0 : MI_BATCH_NON_SECURE_I965));
e2f80391
TU
1998 intel_ring_emit(engine, offset);
1999 intel_ring_advance(engine);
78501eac 2000
d1b851fc
ZN
2001 return 0;
2002}
2003
b45305fc
DV
2004/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
2005#define I830_BATCH_LIMIT (256*1024)
c4d69da1
CW
2006#define I830_TLB_ENTRIES (2)
2007#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
8187a2b7 2008static int
53fddaf7 2009i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
8e004efc
JH
2010 u64 offset, u32 len,
2011 unsigned dispatch_flags)
62fdfeaf 2012{
4a570db5 2013 struct intel_engine_cs *engine = req->engine;
e2f80391 2014 u32 cs_offset = engine->scratch.gtt_offset;
c4e7a414 2015 int ret;
62fdfeaf 2016
5fb9de1a 2017 ret = intel_ring_begin(req, 6);
c4d69da1
CW
2018 if (ret)
2019 return ret;
62fdfeaf 2020
c4d69da1 2021 /* Evict the invalid PTE TLBs */
e2f80391
TU
2022 intel_ring_emit(engine, COLOR_BLT_CMD | BLT_WRITE_RGBA);
2023 intel_ring_emit(engine, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
2024 intel_ring_emit(engine, I830_TLB_ENTRIES << 16 | 4); /* load each page */
2025 intel_ring_emit(engine, cs_offset);
2026 intel_ring_emit(engine, 0xdeadbeef);
2027 intel_ring_emit(engine, MI_NOOP);
2028 intel_ring_advance(engine);
b45305fc 2029
8e004efc 2030 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
b45305fc
DV
2031 if (len > I830_BATCH_LIMIT)
2032 return -ENOSPC;
2033
5fb9de1a 2034 ret = intel_ring_begin(req, 6 + 2);
b45305fc
DV
2035 if (ret)
2036 return ret;
c4d69da1
CW
2037
2038 /* Blit the batch (which has now all relocs applied) to the
2039 * stable batch scratch bo area (so that the CS never
2040 * stumbles over its tlb invalidation bug) ...
2041 */
e2f80391
TU
2042 intel_ring_emit(engine, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
2043 intel_ring_emit(engine,
2044 BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
2045 intel_ring_emit(engine, DIV_ROUND_UP(len, 4096) << 16 | 4096);
2046 intel_ring_emit(engine, cs_offset);
2047 intel_ring_emit(engine, 4096);
2048 intel_ring_emit(engine, offset);
2049
2050 intel_ring_emit(engine, MI_FLUSH);
2051 intel_ring_emit(engine, MI_NOOP);
2052 intel_ring_advance(engine);
b45305fc
DV
2053
2054 /* ... and execute it. */
c4d69da1 2055 offset = cs_offset;
b45305fc 2056 }
e1f99ce6 2057
9d611c03 2058 ret = intel_ring_begin(req, 2);
c4d69da1
CW
2059 if (ret)
2060 return ret;
2061
e2f80391
TU
2062 intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
2063 intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
2064 0 : MI_BATCH_NON_SECURE));
2065 intel_ring_advance(engine);
c4d69da1 2066
fb3256da
DV
2067 return 0;
2068}
2069
2070static int
53fddaf7 2071i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
9bcb144c 2072 u64 offset, u32 len,
8e004efc 2073 unsigned dispatch_flags)
fb3256da 2074{
4a570db5 2075 struct intel_engine_cs *engine = req->engine;
fb3256da
DV
2076 int ret;
2077
5fb9de1a 2078 ret = intel_ring_begin(req, 2);
fb3256da
DV
2079 if (ret)
2080 return ret;
2081
e2f80391
TU
2082 intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
2083 intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
2084 0 : MI_BATCH_NON_SECURE));
2085 intel_ring_advance(engine);
62fdfeaf 2086
62fdfeaf
EA
2087 return 0;
2088}
2089
0bc40be8 2090static void cleanup_phys_status_page(struct intel_engine_cs *engine)
7d3fdfff 2091{
c033666a 2092 struct drm_i915_private *dev_priv = engine->i915;
7d3fdfff
VS
2093
2094 if (!dev_priv->status_page_dmah)
2095 return;
2096
c033666a 2097 drm_pci_free(dev_priv->dev, dev_priv->status_page_dmah);
0bc40be8 2098 engine->status_page.page_addr = NULL;
7d3fdfff
VS
2099}
2100
0bc40be8 2101static void cleanup_status_page(struct intel_engine_cs *engine)
62fdfeaf 2102{
05394f39 2103 struct drm_i915_gem_object *obj;
62fdfeaf 2104
0bc40be8 2105 obj = engine->status_page.obj;
8187a2b7 2106 if (obj == NULL)
62fdfeaf 2107 return;
62fdfeaf 2108
9da3da66 2109 kunmap(sg_page(obj->pages->sgl));
d7f46fc4 2110 i915_gem_object_ggtt_unpin(obj);
05394f39 2111 drm_gem_object_unreference(&obj->base);
0bc40be8 2112 engine->status_page.obj = NULL;
62fdfeaf
EA
2113}
2114
0bc40be8 2115static int init_status_page(struct intel_engine_cs *engine)
62fdfeaf 2116{
0bc40be8 2117 struct drm_i915_gem_object *obj = engine->status_page.obj;
62fdfeaf 2118
7d3fdfff 2119 if (obj == NULL) {
1f767e02 2120 unsigned flags;
e3efda49 2121 int ret;
e4ffd173 2122
c033666a 2123 obj = i915_gem_object_create(engine->i915->dev, 4096);
fe3db79b 2124 if (IS_ERR(obj)) {
e3efda49 2125 DRM_ERROR("Failed to allocate status page\n");
fe3db79b 2126 return PTR_ERR(obj);
e3efda49 2127 }
62fdfeaf 2128
e3efda49
CW
2129 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
2130 if (ret)
2131 goto err_unref;
2132
1f767e02 2133 flags = 0;
c033666a 2134 if (!HAS_LLC(engine->i915))
1f767e02
CW
2135 /* On g33, we cannot place HWS above 256MiB, so
2136 * restrict its pinning to the low mappable arena.
2137 * Though this restriction is not documented for
2138 * gen4, gen5, or byt, they also behave similarly
2139 * and hang if the HWS is placed at the top of the
2140 * GTT. To generalise, it appears that all !llc
2141 * platforms have issues with us placing the HWS
2142 * above the mappable region (even though we never
2143 * actually map it).
2144 */
2145 flags |= PIN_MAPPABLE;
2146 ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
e3efda49
CW
2147 if (ret) {
2148err_unref:
2149 drm_gem_object_unreference(&obj->base);
2150 return ret;
2151 }
2152
0bc40be8 2153 engine->status_page.obj = obj;
e3efda49 2154 }
62fdfeaf 2155
0bc40be8
TU
2156 engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
2157 engine->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
2158 memset(engine->status_page.page_addr, 0, PAGE_SIZE);
62fdfeaf 2159
8187a2b7 2160 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
0bc40be8 2161 engine->name, engine->status_page.gfx_addr);
62fdfeaf
EA
2162
2163 return 0;
62fdfeaf
EA
2164}
2165
0bc40be8 2166static int init_phys_status_page(struct intel_engine_cs *engine)
6b8294a4 2167{
c033666a 2168 struct drm_i915_private *dev_priv = engine->i915;
6b8294a4
CW
2169
2170 if (!dev_priv->status_page_dmah) {
2171 dev_priv->status_page_dmah =
c033666a 2172 drm_pci_alloc(dev_priv->dev, PAGE_SIZE, PAGE_SIZE);
6b8294a4
CW
2173 if (!dev_priv->status_page_dmah)
2174 return -ENOMEM;
2175 }
2176
0bc40be8
TU
2177 engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
2178 memset(engine->status_page.page_addr, 0, PAGE_SIZE);
6b8294a4
CW
2179
2180 return 0;
2181}
2182
7ba717cf 2183void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
2919d291 2184{
3d77e9be
CW
2185 GEM_BUG_ON(ringbuf->vma == NULL);
2186 GEM_BUG_ON(ringbuf->virtual_start == NULL);
2187
def0c5f6 2188 if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
0a798eb9 2189 i915_gem_object_unpin_map(ringbuf->obj);
def0c5f6 2190 else
3d77e9be 2191 i915_vma_unpin_iomap(ringbuf->vma);
8305216f 2192 ringbuf->virtual_start = NULL;
3d77e9be 2193
2919d291 2194 i915_gem_object_ggtt_unpin(ringbuf->obj);
3d77e9be 2195 ringbuf->vma = NULL;
7ba717cf
TD
2196}
2197
c033666a 2198int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
7ba717cf
TD
2199 struct intel_ringbuffer *ringbuf)
2200{
7ba717cf 2201 struct drm_i915_gem_object *obj = ringbuf->obj;
a687a43a
CW
2202 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
2203 unsigned flags = PIN_OFFSET_BIAS | 4096;
8305216f 2204 void *addr;
7ba717cf
TD
2205 int ret;
2206
def0c5f6 2207 if (HAS_LLC(dev_priv) && !obj->stolen) {
a687a43a 2208 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
def0c5f6
CW
2209 if (ret)
2210 return ret;
7ba717cf 2211
def0c5f6 2212 ret = i915_gem_object_set_to_cpu_domain(obj, true);
d2cad535
CW
2213 if (ret)
2214 goto err_unpin;
def0c5f6 2215
8305216f
DG
2216 addr = i915_gem_object_pin_map(obj);
2217 if (IS_ERR(addr)) {
2218 ret = PTR_ERR(addr);
d2cad535 2219 goto err_unpin;
def0c5f6
CW
2220 }
2221 } else {
a687a43a
CW
2222 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
2223 flags | PIN_MAPPABLE);
def0c5f6
CW
2224 if (ret)
2225 return ret;
7ba717cf 2226
def0c5f6 2227 ret = i915_gem_object_set_to_gtt_domain(obj, true);
d2cad535
CW
2228 if (ret)
2229 goto err_unpin;
def0c5f6 2230
ff3dc087
DCS
2231 /* Access through the GTT requires the device to be awake. */
2232 assert_rpm_wakelock_held(dev_priv);
2233
3d77e9be
CW
2234 addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj));
2235 if (IS_ERR(addr)) {
2236 ret = PTR_ERR(addr);
d2cad535 2237 goto err_unpin;
def0c5f6 2238 }
7ba717cf
TD
2239 }
2240
8305216f 2241 ringbuf->virtual_start = addr;
0eb973d3 2242 ringbuf->vma = i915_gem_obj_to_ggtt(obj);
7ba717cf 2243 return 0;
d2cad535
CW
2244
2245err_unpin:
2246 i915_gem_object_ggtt_unpin(obj);
2247 return ret;
7ba717cf
TD
2248}
2249
01101fa7 2250static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
7ba717cf 2251{
2919d291
OM
2252 drm_gem_object_unreference(&ringbuf->obj->base);
2253 ringbuf->obj = NULL;
2254}
2255
01101fa7
CW
2256static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
2257 struct intel_ringbuffer *ringbuf)
62fdfeaf 2258{
05394f39 2259 struct drm_i915_gem_object *obj;
62fdfeaf 2260
ebc052e0
CW
2261 obj = NULL;
2262 if (!HAS_LLC(dev))
93b0a4e0 2263 obj = i915_gem_object_create_stolen(dev, ringbuf->size);
ebc052e0 2264 if (obj == NULL)
d37cd8a8 2265 obj = i915_gem_object_create(dev, ringbuf->size);
fe3db79b
CW
2266 if (IS_ERR(obj))
2267 return PTR_ERR(obj);
8187a2b7 2268
24f3a8cf
AG
2269 /* mark ring buffers as read-only from GPU side by default */
2270 obj->gt_ro = 1;
2271
93b0a4e0 2272 ringbuf->obj = obj;
e3efda49 2273
7ba717cf 2274 return 0;
e3efda49
CW
2275}
2276
01101fa7
CW
2277struct intel_ringbuffer *
2278intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
2279{
2280 struct intel_ringbuffer *ring;
2281 int ret;
2282
2283 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
608c1a52
CW
2284 if (ring == NULL) {
2285 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
2286 engine->name);
01101fa7 2287 return ERR_PTR(-ENOMEM);
608c1a52 2288 }
01101fa7 2289
4a570db5 2290 ring->engine = engine;
608c1a52 2291 list_add(&ring->link, &engine->buffers);
01101fa7
CW
2292
2293 ring->size = size;
2294 /* Workaround an erratum on the i830 which causes a hang if
2295 * the TAIL pointer points to within the last 2 cachelines
2296 * of the buffer.
2297 */
2298 ring->effective_size = size;
c033666a 2299 if (IS_I830(engine->i915) || IS_845G(engine->i915))
01101fa7
CW
2300 ring->effective_size -= 2 * CACHELINE_BYTES;
2301
2302 ring->last_retired_head = -1;
2303 intel_ring_update_space(ring);
2304
c033666a 2305 ret = intel_alloc_ringbuffer_obj(engine->i915->dev, ring);
01101fa7 2306 if (ret) {
608c1a52
CW
2307 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
2308 engine->name, ret);
2309 list_del(&ring->link);
01101fa7
CW
2310 kfree(ring);
2311 return ERR_PTR(ret);
2312 }
2313
2314 return ring;
2315}
2316
2317void
2318intel_ringbuffer_free(struct intel_ringbuffer *ring)
2319{
2320 intel_destroy_ringbuffer_obj(ring);
608c1a52 2321 list_del(&ring->link);
01101fa7
CW
2322 kfree(ring);
2323}
2324
0cb26a8e
CW
2325static int intel_ring_context_pin(struct i915_gem_context *ctx,
2326 struct intel_engine_cs *engine)
2327{
2328 struct intel_context *ce = &ctx->engine[engine->id];
2329 int ret;
2330
2331 lockdep_assert_held(&ctx->i915->dev->struct_mutex);
2332
2333 if (ce->pin_count++)
2334 return 0;
2335
2336 if (ce->state) {
2337 ret = i915_gem_obj_ggtt_pin(ce->state, ctx->ggtt_alignment, 0);
2338 if (ret)
2339 goto error;
2340 }
2341
c7c3c07d
CW
2342 /* The kernel context is only used as a placeholder for flushing the
2343 * active context. It is never used for submitting user rendering and
2344 * as such never requires the golden render context, and so we can skip
2345 * emitting it when we switch to the kernel context. This is required
2346 * as during eviction we cannot allocate and pin the renderstate in
2347 * order to initialise the context.
2348 */
2349 if (ctx == ctx->i915->kernel_context)
2350 ce->initialised = true;
2351
0cb26a8e
CW
2352 i915_gem_context_reference(ctx);
2353 return 0;
2354
2355error:
2356 ce->pin_count = 0;
2357 return ret;
2358}
2359
2360static void intel_ring_context_unpin(struct i915_gem_context *ctx,
2361 struct intel_engine_cs *engine)
2362{
2363 struct intel_context *ce = &ctx->engine[engine->id];
2364
2365 lockdep_assert_held(&ctx->i915->dev->struct_mutex);
2366
2367 if (--ce->pin_count)
2368 return;
2369
2370 if (ce->state)
2371 i915_gem_object_ggtt_unpin(ce->state);
2372
2373 i915_gem_context_unreference(ctx);
2374}
2375
e3efda49 2376static int intel_init_ring_buffer(struct drm_device *dev,
0bc40be8 2377 struct intel_engine_cs *engine)
e3efda49 2378{
c033666a 2379 struct drm_i915_private *dev_priv = to_i915(dev);
bfc882b4 2380 struct intel_ringbuffer *ringbuf;
e3efda49
CW
2381 int ret;
2382
0bc40be8 2383 WARN_ON(engine->buffer);
bfc882b4 2384
c033666a 2385 engine->i915 = dev_priv;
0bc40be8
TU
2386 INIT_LIST_HEAD(&engine->active_list);
2387 INIT_LIST_HEAD(&engine->request_list);
2388 INIT_LIST_HEAD(&engine->execlist_queue);
2389 INIT_LIST_HEAD(&engine->buffers);
2390 i915_gem_batch_pool_init(dev, &engine->batch_pool);
2391 memset(engine->semaphore.sync_seqno, 0,
2392 sizeof(engine->semaphore.sync_seqno));
e3efda49 2393
0bc40be8 2394 init_waitqueue_head(&engine->irq_queue);
e3efda49 2395
0cb26a8e
CW
2396 /* We may need to do things with the shrinker which
2397 * require us to immediately switch back to the default
2398 * context. This can cause a problem as pinning the
2399 * default context also requires GTT space which may not
2400 * be available. To avoid this we always pin the default
2401 * context.
2402 */
2403 ret = intel_ring_context_pin(dev_priv->kernel_context, engine);
2404 if (ret)
2405 goto error;
2406
0bc40be8 2407 ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
b0366a54
DG
2408 if (IS_ERR(ringbuf)) {
2409 ret = PTR_ERR(ringbuf);
2410 goto error;
2411 }
0bc40be8 2412 engine->buffer = ringbuf;
01101fa7 2413
c033666a 2414 if (I915_NEED_GFX_HWS(dev_priv)) {
0bc40be8 2415 ret = init_status_page(engine);
e3efda49 2416 if (ret)
8ee14975 2417 goto error;
e3efda49 2418 } else {
0bc40be8
TU
2419 WARN_ON(engine->id != RCS);
2420 ret = init_phys_status_page(engine);
e3efda49 2421 if (ret)
8ee14975 2422 goto error;
e3efda49
CW
2423 }
2424
c033666a 2425 ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
bfc882b4
DV
2426 if (ret) {
2427 DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
0bc40be8 2428 engine->name, ret);
bfc882b4
DV
2429 intel_destroy_ringbuffer_obj(ringbuf);
2430 goto error;
e3efda49 2431 }
62fdfeaf 2432
0bc40be8 2433 ret = i915_cmd_parser_init_ring(engine);
44e895a8 2434 if (ret)
8ee14975
OM
2435 goto error;
2436
8ee14975 2437 return 0;
351e3db2 2438
8ee14975 2439error:
117897f4 2440 intel_cleanup_engine(engine);
8ee14975 2441 return ret;
62fdfeaf
EA
2442}
2443
117897f4 2444void intel_cleanup_engine(struct intel_engine_cs *engine)
62fdfeaf 2445{
6402c330 2446 struct drm_i915_private *dev_priv;
33626e6a 2447
117897f4 2448 if (!intel_engine_initialized(engine))
62fdfeaf
EA
2449 return;
2450
c033666a 2451 dev_priv = engine->i915;
6402c330 2452
0bc40be8 2453 if (engine->buffer) {
117897f4 2454 intel_stop_engine(engine);
c033666a 2455 WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
33626e6a 2456
0bc40be8
TU
2457 intel_unpin_ringbuffer_obj(engine->buffer);
2458 intel_ringbuffer_free(engine->buffer);
2459 engine->buffer = NULL;
b0366a54 2460 }
78501eac 2461
0bc40be8
TU
2462 if (engine->cleanup)
2463 engine->cleanup(engine);
8d19215b 2464
c033666a 2465 if (I915_NEED_GFX_HWS(dev_priv)) {
0bc40be8 2466 cleanup_status_page(engine);
7d3fdfff 2467 } else {
0bc40be8
TU
2468 WARN_ON(engine->id != RCS);
2469 cleanup_phys_status_page(engine);
7d3fdfff 2470 }
44e895a8 2471
0bc40be8
TU
2472 i915_cmd_parser_fini_ring(engine);
2473 i915_gem_batch_pool_fini(&engine->batch_pool);
0cb26a8e
CW
2474
2475 intel_ring_context_unpin(dev_priv->kernel_context, engine);
2476
c033666a 2477 engine->i915 = NULL;
62fdfeaf
EA
2478}
2479
666796da 2480int intel_engine_idle(struct intel_engine_cs *engine)
3e960501 2481{
a4b3a571 2482 struct drm_i915_gem_request *req;
3e960501 2483
3e960501 2484 /* Wait upon the last request to be completed */
0bc40be8 2485 if (list_empty(&engine->request_list))
3e960501
CW
2486 return 0;
2487
0bc40be8
TU
2488 req = list_entry(engine->request_list.prev,
2489 struct drm_i915_gem_request,
2490 list);
b4716185
CW
2491
2492 /* Make sure we do not trigger any retires */
2493 return __i915_wait_request(req,
c19ae989 2494 req->i915->mm.interruptible,
b4716185 2495 NULL, NULL);
3e960501
CW
2496}
2497
6689cb2b 2498int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
9d773091 2499{
6310346e
CW
2500 int ret;
2501
2502 /* Flush enough space to reduce the likelihood of waiting after
2503 * we start building the request - in which case we will just
2504 * have to repeat work.
2505 */
a0442461 2506 request->reserved_space += LEGACY_REQUEST_SIZE;
6310346e 2507
4a570db5 2508 request->ringbuf = request->engine->buffer;
6310346e
CW
2509
2510 ret = intel_ring_begin(request, 0);
2511 if (ret)
2512 return ret;
2513
a0442461 2514 request->reserved_space -= LEGACY_REQUEST_SIZE;
6310346e 2515 return 0;
9d773091
CW
2516}
2517
987046ad
CW
2518static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
2519{
2520 struct intel_ringbuffer *ringbuf = req->ringbuf;
2521 struct intel_engine_cs *engine = req->engine;
2522 struct drm_i915_gem_request *target;
2523
2524 intel_ring_update_space(ringbuf);
2525 if (ringbuf->space >= bytes)
2526 return 0;
2527
2528 /*
2529 * Space is reserved in the ringbuffer for finalising the request,
2530 * as that cannot be allowed to fail. During request finalisation,
2531 * reserved_space is set to 0 to stop the overallocation and the
2532 * assumption is that then we never need to wait (which has the
2533 * risk of failing with EINTR).
2534 *
2535 * See also i915_gem_request_alloc() and i915_add_request().
2536 */
0251a963 2537 GEM_BUG_ON(!req->reserved_space);
987046ad
CW
2538
2539 list_for_each_entry(target, &engine->request_list, list) {
2540 unsigned space;
2541
79bbcc29 2542 /*
987046ad
CW
2543 * The request queue is per-engine, so can contain requests
2544 * from multiple ringbuffers. Here, we must ignore any that
2545 * aren't from the ringbuffer we're considering.
79bbcc29 2546 */
987046ad
CW
2547 if (target->ringbuf != ringbuf)
2548 continue;
2549
2550 /* Would completion of this request free enough space? */
2551 space = __intel_ring_space(target->postfix, ringbuf->tail,
2552 ringbuf->size);
2553 if (space >= bytes)
2554 break;
79bbcc29 2555 }
29b1b415 2556
987046ad
CW
2557 if (WARN_ON(&target->list == &engine->request_list))
2558 return -ENOSPC;
2559
2560 return i915_wait_request(target);
29b1b415
JH
2561}
2562
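/*
 * Illustrative sketch, not part of the original file: the search performed
 * by wait_for_space() above.  Outstanding requests are kept in submission
 * order and each records the ring offset (postfix) at which it ends; the
 * loop finds the oldest request whose retirement would hand back at least
 * the number of bytes the caller needs, and only that request is waited
 * upon.  The types below are simplified stand-ins (the real code also keeps
 * a small guard of free space in reserve).
 */
#include <stddef.h>

struct model_request {
        int postfix;                    /* ring offset where the request ends */
};

static int model_ring_space(int head, int tail, int size)
{
        /* free bytes in the circular buffer once head advances to "head" */
        int space = head - tail;
        if (space <= 0)
                space += size;
        return space;
}

/* Returns the index of the first request whose completion frees enough
 * space, or -1 if even draining everything would not be enough. */
static int model_pick_request(const struct model_request *reqs, size_t count,
                              int tail, int size, int bytes)
{
        size_t i;

        for (i = 0; i < count; i++)
                if (model_ring_space(reqs[i].postfix, tail, size) >= bytes)
                        return (int)i;
        return -1;
}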
987046ad 2563int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
cbcc80df 2564{
987046ad 2565 struct intel_ringbuffer *ringbuf = req->ringbuf;
79bbcc29 2566 int remain_actual = ringbuf->size - ringbuf->tail;
987046ad
CW
2567 int remain_usable = ringbuf->effective_size - ringbuf->tail;
2568 int bytes = num_dwords * sizeof(u32);
2569 int total_bytes, wait_bytes;
79bbcc29 2570 bool need_wrap = false;
29b1b415 2571
0251a963 2572 total_bytes = bytes + req->reserved_space;
29b1b415 2573
79bbcc29
JH
2574 if (unlikely(bytes > remain_usable)) {
2575 /*
2576 * Not enough space for the basic request. So need to flush
2577 * out the remainder and then wait for base + reserved.
2578 */
2579 wait_bytes = remain_actual + total_bytes;
2580 need_wrap = true;
987046ad
CW
2581 } else if (unlikely(total_bytes > remain_usable)) {
2582 /*
2583 * The base request will fit but the reserved space
2584 * falls off the end. So we don't need an immediate wrap
2585 * and only need to effectively wait for the reserved
2586 * size space from the start of ringbuffer.
2587 */
0251a963 2588 wait_bytes = remain_actual + req->reserved_space;
79bbcc29 2589 } else {
987046ad
CW
2590 /* No wrapping required, just waiting. */
2591 wait_bytes = total_bytes;
cbcc80df
MK
2592 }
2593
987046ad
CW
2594 if (wait_bytes > ringbuf->space) {
2595 int ret = wait_for_space(req, wait_bytes);
cbcc80df
MK
2596 if (unlikely(ret))
2597 return ret;
79bbcc29 2598
987046ad 2599 intel_ring_update_space(ringbuf);
e075a32f
CW
2600 if (unlikely(ringbuf->space < wait_bytes))
2601 return -EAGAIN;
cbcc80df
MK
2602 }
2603
987046ad
CW
2604 if (unlikely(need_wrap)) {
2605 GEM_BUG_ON(remain_actual > ringbuf->space);
2606 GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size);
78501eac 2607
987046ad
CW
2608 /* Fill the tail with MI_NOOP */
2609 memset(ringbuf->virtual_start + ringbuf->tail,
2610 0, remain_actual);
2611 ringbuf->tail = 0;
2612 ringbuf->space -= remain_actual;
2613 }
304d695c 2614
987046ad
CW
2615 ringbuf->space -= bytes;
2616 GEM_BUG_ON(ringbuf->space < 0);
304d695c 2617 return 0;
8187a2b7 2618}
78501eac 2619
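/*
 * Illustrative sketch, not part of the original file: the three cases that
 * intel_ring_begin() distinguishes above, reduced to plain arithmetic.
 * "effective" is the usable size (the i830 erratum shaves two cachelines off
 * the real size), "reserved" is the space kept back for finalising the
 * request.  The helper reports how many bytes must be free before the caller
 * may proceed and whether the tail has to wrap to offset 0 first; all names
 * are model-only assumptions.
 */
#include <stdbool.h>

struct model_begin {
        int wait_bytes;
        bool need_wrap;
};

static struct model_begin model_ring_begin(int tail, int size, int effective,
                                           int bytes, int reserved)
{
        int remain_actual = size - tail;        /* bytes to the physical end */
        int remain_usable = effective - tail;   /* bytes to the usable end */
        int total = bytes + reserved;
        struct model_begin out = { .need_wrap = false };

        if (bytes > remain_usable) {
                /* Not even the request itself fits: pad out the rest of the
                 * ring and wait for request + reserved space from offset 0. */
                out.wait_bytes = remain_actual + total;
                out.need_wrap = true;
        } else if (total > remain_usable) {
                /* The request fits but the reserved portion would fall off
                 * the end, so budget for it wrapping to the start. */
                out.wait_bytes = remain_actual + reserved;
        } else {
                /* Everything fits in-line. */
                out.wait_bytes = total;
        }
        return out;
}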
753b1ad4 2620/* Align the ring tail to a cacheline boundary */
bba09b12 2621int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
753b1ad4 2622{
4a570db5 2623 struct intel_engine_cs *engine = req->engine;
e2f80391 2624 int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
753b1ad4
VS
2625 int ret;
2626
2627 if (num_dwords == 0)
2628 return 0;
2629
18393f63 2630 num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
5fb9de1a 2631 ret = intel_ring_begin(req, num_dwords);
753b1ad4
VS
2632 if (ret)
2633 return ret;
2634
2635 while (num_dwords--)
e2f80391 2636 intel_ring_emit(engine, MI_NOOP);
753b1ad4 2637
e2f80391 2638 intel_ring_advance(engine);
753b1ad4
VS
2639
2640 return 0;
2641}
2642
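/*
 * Illustrative sketch, not part of the original file: the amount of MI_NOOP
 * padding intel_ring_cacheline_align() above emits.  With 64-byte cachelines
 * a tail ending in 0x14 sits 20 bytes (5 dwords) into a cacheline and needs
 * 16 - 5 = 11 no-op dwords; an already aligned tail needs none and the
 * function returns early.
 */
#include <assert.h>

#define MODEL_CACHELINE_BYTES 64

static int model_align_noops(unsigned int tail)
{
        int partial = (tail & (MODEL_CACHELINE_BYTES - 1)) / 4;

        return partial ? MODEL_CACHELINE_BYTES / 4 - partial : 0;
}

int main(void)
{
        assert(model_align_noops(0x1214) == 11);
        assert(model_align_noops(0x1200) == 0);
        return 0;
}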
0bc40be8 2643void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
498d2ac1 2644{
c033666a 2645 struct drm_i915_private *dev_priv = engine->i915;
498d2ac1 2646
29dcb570
CW
2647 /* Our semaphore implementation is strictly monotonic (i.e. we proceed
2648 * so long as the semaphore value in the register/page is greater
2649 * than the sync value), so whenever we reset the seqno,
2650 * so long as we reset the tracking semaphore value to 0, it will
2651 * always be before the next request's seqno. If we don't reset
2652 * the semaphore value, then when the seqno moves backwards all
2653 * future waits will complete instantly (causing rendering corruption).
2654 */
7e22dbbb 2655 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
0bc40be8
TU
2656 I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
2657 I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
d04bce48 2658 if (HAS_VEBOX(dev_priv))
0bc40be8 2659 I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
e1f99ce6 2660 }
a058d934
CW
2661 if (dev_priv->semaphore_obj) {
2662 struct drm_i915_gem_object *obj = dev_priv->semaphore_obj;
2663 struct page *page = i915_gem_object_get_dirty_page(obj, 0);
2664 void *semaphores = kmap(page);
2665 memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
2666 0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
2667 kunmap(page);
2668 }
29dcb570
CW
2669 memset(engine->semaphore.sync_seqno, 0,
2670 sizeof(engine->semaphore.sync_seqno));
d97ed339 2671
0bc40be8 2672 engine->set_seqno(engine, seqno);
01347126 2673 engine->last_submitted_seqno = seqno;
29dcb570 2674
0bc40be8 2675 engine->hangcheck.seqno = seqno;
8187a2b7 2676}
62fdfeaf 2677
0bc40be8 2678static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
297b0c5b 2679 u32 value)
881f47b6 2680{
c033666a 2681 struct drm_i915_private *dev_priv = engine->i915;
881f47b6
XH
2682
2683 /* Every tail move must follow the sequence below */
12f55818
CW
2684
2685 /* Disable notification that the ring is IDLE. The GT
2686 * will then assume that it is busy and bring it out of rc6.
2687 */
0206e353 2688 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
12f55818
CW
2689 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2690
2691 /* Clear the context id. Here be magic! */
2692 I915_WRITE64(GEN6_BSD_RNCID, 0x0);
0206e353 2693
12f55818 2694 /* Wait for the ring not to be idle, i.e. for it to wake up. */
0206e353 2695 if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
12f55818
CW
2696 GEN6_BSD_SLEEP_INDICATOR) == 0,
2697 50))
2698 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
0206e353 2699
12f55818 2700 /* Now that the ring is fully powered up, update the tail */
0bc40be8
TU
2701 I915_WRITE_TAIL(engine, value);
2702 POSTING_READ(RING_TAIL(engine->mmio_base));
12f55818
CW
2703
2704 /* Let the ring send IDLE messages to the GT again,
2705 * and so let it sleep to conserve power when idle.
2706 */
0206e353 2707 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
12f55818 2708 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
881f47b6
XH
2709}
2710
a84c3ae1 2711static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
ea251324 2712 u32 invalidate, u32 flush)
881f47b6 2713{
4a570db5 2714 struct intel_engine_cs *engine = req->engine;
71a77e07 2715 uint32_t cmd;
b72f3acb
CW
2716 int ret;
2717
5fb9de1a 2718 ret = intel_ring_begin(req, 4);
b72f3acb
CW
2719 if (ret)
2720 return ret;
2721
71a77e07 2722 cmd = MI_FLUSH_DW;
c033666a 2723 if (INTEL_GEN(req->i915) >= 8)
075b3bba 2724 cmd += 1;
f0a1fb10
CW
2725
2726 /* We always require a command barrier so that subsequent
2727 * commands, such as breadcrumb interrupts, are strictly ordered
2728 * wrt the contents of the write cache being flushed to memory
2729 * (and thus being coherent from the CPU).
2730 */
2731 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2732
9a289771
JB
2733 /*
2734 * Bspec vol 1c.5 - video engine command streamer:
2735 * "If ENABLED, all TLBs will be invalidated once the flush
2736 * operation is complete. This bit is only valid when the
2737 * Post-Sync Operation field is a value of 1h or 3h."
2738 */
71a77e07 2739 if (invalidate & I915_GEM_GPU_DOMAINS)
f0a1fb10
CW
2740 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
2741
e2f80391
TU
2742 intel_ring_emit(engine, cmd);
2743 intel_ring_emit(engine,
2744 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
c033666a 2745 if (INTEL_GEN(req->i915) >= 8) {
e2f80391
TU
2746 intel_ring_emit(engine, 0); /* upper addr */
2747 intel_ring_emit(engine, 0); /* value */
075b3bba 2748 } else {
e2f80391
TU
2749 intel_ring_emit(engine, 0);
2750 intel_ring_emit(engine, MI_NOOP);
075b3bba 2751 }
e2f80391 2752 intel_ring_advance(engine);
b72f3acb 2753 return 0;
881f47b6
XH
2754}
2755
1c7a0623 2756static int
53fddaf7 2757gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
9bcb144c 2758 u64 offset, u32 len,
8e004efc 2759 unsigned dispatch_flags)
1c7a0623 2760{
4a570db5 2761 struct intel_engine_cs *engine = req->engine;
e2f80391 2762 bool ppgtt = USES_PPGTT(engine->dev) &&
8e004efc 2763 !(dispatch_flags & I915_DISPATCH_SECURE);
1c7a0623
BW
2764 int ret;
2765
5fb9de1a 2766 ret = intel_ring_begin(req, 4);
1c7a0623
BW
2767 if (ret)
2768 return ret;
2769
2770 /* FIXME(BDW): Address space and security selectors. */
e2f80391 2771 intel_ring_emit(engine, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
919032ec
AJ
2772 (dispatch_flags & I915_DISPATCH_RS ?
2773 MI_BATCH_RESOURCE_STREAMER : 0));
e2f80391
TU
2774 intel_ring_emit(engine, lower_32_bits(offset));
2775 intel_ring_emit(engine, upper_32_bits(offset));
2776 intel_ring_emit(engine, MI_NOOP);
2777 intel_ring_advance(engine);
1c7a0623
BW
2778
2779 return 0;
2780}
2781
d7d4eedd 2782static int
53fddaf7 2783hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
8e004efc
JH
2784 u64 offset, u32 len,
2785 unsigned dispatch_flags)
d7d4eedd 2786{
4a570db5 2787 struct intel_engine_cs *engine = req->engine;
d7d4eedd
CW
2788 int ret;
2789
5fb9de1a 2790 ret = intel_ring_begin(req, 2);
d7d4eedd
CW
2791 if (ret)
2792 return ret;
2793
e2f80391 2794 intel_ring_emit(engine,
77072258 2795 MI_BATCH_BUFFER_START |
8e004efc 2796 (dispatch_flags & I915_DISPATCH_SECURE ?
919032ec
AJ
2797 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
2798 (dispatch_flags & I915_DISPATCH_RS ?
2799 MI_BATCH_RESOURCE_STREAMER : 0));
d7d4eedd 2800 /* bit0-7 is the length on GEN6+ */
e2f80391
TU
2801 intel_ring_emit(engine, offset);
2802 intel_ring_advance(engine);
d7d4eedd
CW
2803
2804 return 0;
2805}
2806
881f47b6 2807static int
53fddaf7 2808gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
9bcb144c 2809 u64 offset, u32 len,
8e004efc 2810 unsigned dispatch_flags)
881f47b6 2811{
4a570db5 2812 struct intel_engine_cs *engine = req->engine;
0206e353 2813 int ret;
ab6f8e32 2814
5fb9de1a 2815 ret = intel_ring_begin(req, 2);
0206e353
AJ
2816 if (ret)
2817 return ret;
e1f99ce6 2818
e2f80391 2819 intel_ring_emit(engine,
d7d4eedd 2820 MI_BATCH_BUFFER_START |
8e004efc
JH
2821 (dispatch_flags & I915_DISPATCH_SECURE ?
2822 0 : MI_BATCH_NON_SECURE_I965));
0206e353 2823 /* bit0-7 is the length on GEN6+ */
e2f80391
TU
2824 intel_ring_emit(engine, offset);
2825 intel_ring_advance(engine);
ab6f8e32 2826
0206e353 2827 return 0;
881f47b6
XH
2828}
2829
549f7365
CW
2830/* Blitter support (SandyBridge+) */
2831
a84c3ae1 2832static int gen6_ring_flush(struct drm_i915_gem_request *req,
ea251324 2833 u32 invalidate, u32 flush)
8d19215b 2834{
4a570db5 2835 struct intel_engine_cs *engine = req->engine;
71a77e07 2836 uint32_t cmd;
b72f3acb
CW
2837 int ret;
2838
5fb9de1a 2839 ret = intel_ring_begin(req, 4);
b72f3acb
CW
2840 if (ret)
2841 return ret;
2842
71a77e07 2843 cmd = MI_FLUSH_DW;
c033666a 2844 if (INTEL_GEN(req->i915) >= 8)
075b3bba 2845 cmd += 1;
f0a1fb10
CW
2846
2847 /* We always require a command barrier so that subsequent
2848 * commands, such as breadcrumb interrupts, are strictly ordered
2849 * wrt the contents of the write cache being flushed to memory
2850 * (and thus being coherent from the CPU).
2851 */
2852 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2853
9a289771
JB
2854 /*
2855 * Bspec vol 1c.3 - blitter engine command streamer:
2856 * "If ENABLED, all TLBs will be invalidated once the flush
2857 * operation is complete. This bit is only valid when the
2858 * Post-Sync Operation field is a value of 1h or 3h."
2859 */
71a77e07 2860 if (invalidate & I915_GEM_DOMAIN_RENDER)
f0a1fb10 2861 cmd |= MI_INVALIDATE_TLB;
e2f80391
TU
2862 intel_ring_emit(engine, cmd);
2863 intel_ring_emit(engine,
2864 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
c033666a 2865 if (INTEL_GEN(req->i915) >= 8) {
e2f80391
TU
2866 intel_ring_emit(engine, 0); /* upper addr */
2867 intel_ring_emit(engine, 0); /* value */
075b3bba 2868 } else {
e2f80391
TU
2869 intel_ring_emit(engine, 0);
2870 intel_ring_emit(engine, MI_NOOP);
075b3bba 2871 }
e2f80391 2872 intel_ring_advance(engine);
fd3da6c9 2873
b72f3acb 2874 return 0;
8d19215b
ZN
2875}
2876
5c1143bb
XH
2877int intel_init_render_ring_buffer(struct drm_device *dev)
2878{
4640c4ff 2879 struct drm_i915_private *dev_priv = dev->dev_private;
4a570db5 2880 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
3e78998a
BW
2881 struct drm_i915_gem_object *obj;
2882 int ret;
5c1143bb 2883
e2f80391
TU
2884 engine->name = "render ring";
2885 engine->id = RCS;
2886 engine->exec_id = I915_EXEC_RENDER;
215a7e32 2887 engine->hw_id = 0;
e2f80391 2888 engine->mmio_base = RENDER_RING_BASE;
59465b5f 2889
c033666a
CW
2890 if (INTEL_GEN(dev_priv) >= 8) {
2891 if (i915_semaphore_is_enabled(dev_priv)) {
d37cd8a8 2892 obj = i915_gem_object_create(dev, 4096);
fe3db79b 2893 if (IS_ERR(obj)) {
3e78998a
BW
2894 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
2895 i915.semaphores = 0;
2896 } else {
2897 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
2898 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
2899 if (ret != 0) {
2900 drm_gem_object_unreference(&obj->base);
2901 DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
2902 i915.semaphores = 0;
2903 } else
2904 dev_priv->semaphore_obj = obj;
2905 }
2906 }
7225342a 2907
e2f80391 2908 engine->init_context = intel_rcs_ctx_init;
a58c01aa 2909 engine->add_request = gen8_render_add_request;
e2f80391
TU
2910 engine->flush = gen8_render_ring_flush;
2911 engine->irq_get = gen8_ring_get_irq;
2912 engine->irq_put = gen8_ring_put_irq;
2913 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
c04e0f3b 2914 engine->get_seqno = ring_get_seqno;
e2f80391 2915 engine->set_seqno = ring_set_seqno;
c033666a 2916 if (i915_semaphore_is_enabled(dev_priv)) {
3e78998a 2917 WARN_ON(!dev_priv->semaphore_obj);
e2f80391
TU
2918 engine->semaphore.sync_to = gen8_ring_sync;
2919 engine->semaphore.signal = gen8_rcs_signal;
2920 GEN8_RING_SEMAPHORE_INIT(engine);
707d9cf9 2921 }
c033666a 2922 } else if (INTEL_GEN(dev_priv) >= 6) {
e2f80391
TU
2923 engine->init_context = intel_rcs_ctx_init;
2924 engine->add_request = gen6_add_request;
2925 engine->flush = gen7_render_ring_flush;
c033666a 2926 if (IS_GEN6(dev_priv))
e2f80391
TU
2927 engine->flush = gen6_render_ring_flush;
2928 engine->irq_get = gen6_ring_get_irq;
2929 engine->irq_put = gen6_ring_put_irq;
2930 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
c04e0f3b
CW
2931 engine->irq_seqno_barrier = gen6_seqno_barrier;
2932 engine->get_seqno = ring_get_seqno;
e2f80391 2933 engine->set_seqno = ring_set_seqno;
c033666a 2934 if (i915_semaphore_is_enabled(dev_priv)) {
e2f80391
TU
2935 engine->semaphore.sync_to = gen6_ring_sync;
2936 engine->semaphore.signal = gen6_signal;
707d9cf9
BW
2937 /*
2938 * The current semaphore is only applied on pre-gen8
2939 * platform. And there is no VCS2 ring on the pre-gen8
2940 * platform. So the semaphore between RCS and VCS2 is
2941 * initialized as INVALID. Gen8 will initialize the
2942 * sema between VCS2 and RCS later.
2943 */
e2f80391
TU
2944 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
2945 engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
2946 engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
2947 engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
2948 engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2949 engine->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
2950 engine->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
2951 engine->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
2952 engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
2953 engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
707d9cf9 2954 }
c033666a 2955 } else if (IS_GEN5(dev_priv)) {
e2f80391
TU
2956 engine->add_request = pc_render_add_request;
2957 engine->flush = gen4_render_ring_flush;
2958 engine->get_seqno = pc_render_get_seqno;
2959 engine->set_seqno = pc_render_set_seqno;
2960 engine->irq_get = gen5_ring_get_irq;
2961 engine->irq_put = gen5_ring_put_irq;
2962 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
cc609d5d 2963 GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
59465b5f 2964 } else {
e2f80391 2965 engine->add_request = i9xx_add_request;
c033666a 2966 if (INTEL_GEN(dev_priv) < 4)
e2f80391 2967 engine->flush = gen2_render_ring_flush;
46f0f8d1 2968 else
e2f80391
TU
2969 engine->flush = gen4_render_ring_flush;
2970 engine->get_seqno = ring_get_seqno;
2971 engine->set_seqno = ring_set_seqno;
c033666a 2972 if (IS_GEN2(dev_priv)) {
e2f80391
TU
2973 engine->irq_get = i8xx_ring_get_irq;
2974 engine->irq_put = i8xx_ring_put_irq;
c2798b19 2975 } else {
e2f80391
TU
2976 engine->irq_get = i9xx_ring_get_irq;
2977 engine->irq_put = i9xx_ring_put_irq;
c2798b19 2978 }
e2f80391 2979 engine->irq_enable_mask = I915_USER_INTERRUPT;
1ec14ad3 2980 }
e2f80391 2981 engine->write_tail = ring_write_tail;
707d9cf9 2982
c033666a 2983 if (IS_HASWELL(dev_priv))
e2f80391 2984 engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
c033666a 2985 else if (IS_GEN8(dev_priv))
e2f80391 2986 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
c033666a 2987 else if (INTEL_GEN(dev_priv) >= 6)
e2f80391 2988 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
c033666a 2989 else if (INTEL_GEN(dev_priv) >= 4)
e2f80391 2990 engine->dispatch_execbuffer = i965_dispatch_execbuffer;
c033666a 2991 else if (IS_I830(dev_priv) || IS_845G(dev_priv))
e2f80391 2992 engine->dispatch_execbuffer = i830_dispatch_execbuffer;
fb3256da 2993 else
e2f80391
TU
2994 engine->dispatch_execbuffer = i915_dispatch_execbuffer;
2995 engine->init_hw = init_render_ring;
2996 engine->cleanup = render_ring_cleanup;
59465b5f 2997
b45305fc 2998 /* Workaround batchbuffer to combat CS tlb bug. */
c033666a 2999 if (HAS_BROKEN_CS_TLB(dev_priv)) {
d37cd8a8 3000 obj = i915_gem_object_create(dev, I830_WA_SIZE);
fe3db79b 3001 if (IS_ERR(obj)) {
b45305fc 3002 DRM_ERROR("Failed to allocate batch bo\n");
fe3db79b 3003 return PTR_ERR(obj);
b45305fc
DV
3004 }
3005
be1fa129 3006 ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
b45305fc
DV
3007 if (ret != 0) {
3008 drm_gem_object_unreference(&obj->base);
3009 DRM_ERROR("Failed to ping batch bo\n");
3010 return ret;
3011 }
3012
e2f80391
TU
3013 engine->scratch.obj = obj;
3014 engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
b45305fc
DV
3015 }
3016
e2f80391 3017 ret = intel_init_ring_buffer(dev, engine);
99be1dfe
DV
3018 if (ret)
3019 return ret;
3020
c033666a 3021 if (INTEL_GEN(dev_priv) >= 5) {
e2f80391 3022 ret = intel_init_pipe_control(engine);
99be1dfe
DV
3023 if (ret)
3024 return ret;
3025 }
3026
3027 return 0;
5c1143bb
XH
3028}
3029
3030int intel_init_bsd_ring_buffer(struct drm_device *dev)
3031{
4640c4ff 3032 struct drm_i915_private *dev_priv = dev->dev_private;
4a570db5 3033 struct intel_engine_cs *engine = &dev_priv->engine[VCS];
5c1143bb 3034
e2f80391
TU
3035 engine->name = "bsd ring";
3036 engine->id = VCS;
3037 engine->exec_id = I915_EXEC_BSD;
215a7e32 3038 engine->hw_id = 1;
58fa3835 3039
e2f80391 3040 engine->write_tail = ring_write_tail;
c033666a 3041 if (INTEL_GEN(dev_priv) >= 6) {
e2f80391 3042 engine->mmio_base = GEN6_BSD_RING_BASE;
0fd2c201 3043 /* gen6 bsd needs a special wa for tail updates */
c033666a 3044 if (IS_GEN6(dev_priv))
e2f80391
TU
3045 engine->write_tail = gen6_bsd_ring_write_tail;
3046 engine->flush = gen6_bsd_ring_flush;
3047 engine->add_request = gen6_add_request;
c04e0f3b
CW
3048 engine->irq_seqno_barrier = gen6_seqno_barrier;
3049 engine->get_seqno = ring_get_seqno;
e2f80391 3050 engine->set_seqno = ring_set_seqno;
c033666a 3051 if (INTEL_GEN(dev_priv) >= 8) {
e2f80391 3052 engine->irq_enable_mask =
abd58f01 3053 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
e2f80391
TU
3054 engine->irq_get = gen8_ring_get_irq;
3055 engine->irq_put = gen8_ring_put_irq;
3056 engine->dispatch_execbuffer =
1c7a0623 3057 gen8_ring_dispatch_execbuffer;
c033666a 3058 if (i915_semaphore_is_enabled(dev_priv)) {
e2f80391
TU
3059 engine->semaphore.sync_to = gen8_ring_sync;
3060 engine->semaphore.signal = gen8_xcs_signal;
3061 GEN8_RING_SEMAPHORE_INIT(engine);
707d9cf9 3062 }
abd58f01 3063 } else {
e2f80391
TU
3064 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
3065 engine->irq_get = gen6_ring_get_irq;
3066 engine->irq_put = gen6_ring_put_irq;
3067 engine->dispatch_execbuffer =
1c7a0623 3068 gen6_ring_dispatch_execbuffer;
c033666a 3069 if (i915_semaphore_is_enabled(dev_priv)) {
e2f80391
TU
3070 engine->semaphore.sync_to = gen6_ring_sync;
3071 engine->semaphore.signal = gen6_signal;
3072 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
3073 engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
3074 engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
3075 engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
3076 engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
3077 engine->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
3078 engine->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
3079 engine->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
3080 engine->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
3081 engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
707d9cf9 3082 }
abd58f01 3083 }
58fa3835 3084 } else {
e2f80391
TU
3085 engine->mmio_base = BSD_RING_BASE;
3086 engine->flush = bsd_ring_flush;
3087 engine->add_request = i9xx_add_request;
3088 engine->get_seqno = ring_get_seqno;
3089 engine->set_seqno = ring_set_seqno;
c033666a 3090 if (IS_GEN5(dev_priv)) {
e2f80391
TU
3091 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
3092 engine->irq_get = gen5_ring_get_irq;
3093 engine->irq_put = gen5_ring_put_irq;
e48d8634 3094 } else {
e2f80391
TU
3095 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
3096 engine->irq_get = i9xx_ring_get_irq;
3097 engine->irq_put = i9xx_ring_put_irq;
e48d8634 3098 }
e2f80391 3099 engine->dispatch_execbuffer = i965_dispatch_execbuffer;
58fa3835 3100 }
e2f80391 3101 engine->init_hw = init_ring_common;
58fa3835 3102
e2f80391 3103 return intel_init_ring_buffer(dev, engine);
5c1143bb 3104}
549f7365 3105
845f74a7 3106/**
3107 * Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
845f74a7
ZY
3108 */
3109int intel_init_bsd2_ring_buffer(struct drm_device *dev)
3110{
3111 struct drm_i915_private *dev_priv = dev->dev_private;
4a570db5 3112 struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
e2f80391
TU
3113
3114 engine->name = "bsd2 ring";
3115 engine->id = VCS2;
3116 engine->exec_id = I915_EXEC_BSD;
215a7e32 3117 engine->hw_id = 4;
e2f80391
TU
3118
3119 engine->write_tail = ring_write_tail;
3120 engine->mmio_base = GEN8_BSD2_RING_BASE;
3121 engine->flush = gen6_bsd_ring_flush;
3122 engine->add_request = gen6_add_request;
c04e0f3b
CW
3123 engine->irq_seqno_barrier = gen6_seqno_barrier;
3124 engine->get_seqno = ring_get_seqno;
e2f80391
TU
3125 engine->set_seqno = ring_set_seqno;
3126 engine->irq_enable_mask =
845f74a7 3127 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
e2f80391
TU
3128 engine->irq_get = gen8_ring_get_irq;
3129 engine->irq_put = gen8_ring_put_irq;
3130 engine->dispatch_execbuffer =
845f74a7 3131 gen8_ring_dispatch_execbuffer;
c033666a 3132 if (i915_semaphore_is_enabled(dev_priv)) {
e2f80391
TU
3133 engine->semaphore.sync_to = gen8_ring_sync;
3134 engine->semaphore.signal = gen8_xcs_signal;
3135 GEN8_RING_SEMAPHORE_INIT(engine);
3e78998a 3136 }
e2f80391 3137 engine->init_hw = init_ring_common;
845f74a7 3138
e2f80391 3139 return intel_init_ring_buffer(dev, engine);
845f74a7
ZY
3140}
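/*
 * Editorial sketch, not from this file: only parts with a second video engine,
 * e.g. Broadwell GT3 or Skylake GT3, are expected to call the VCS2 init above.
 * A caller in the driver's engine-setup code might look roughly like this,
 * where HAS_BSD2() is assumed to be provided elsewhere in i915 (error
 * handling elided):
 *
 *	ret = intel_init_bsd_ring_buffer(dev);
 *	if (ret == 0 && HAS_BSD2(dev))
 *		ret = intel_init_bsd2_ring_buffer(dev);
 *
 * Both BSD engines report I915_EXEC_BSD as their exec_id, so work submitted
 * with that execbuffer flag can land on either ring; the selection policy
 * lives outside this file.
 */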
3141
549f7365
CW
3142int intel_init_blt_ring_buffer(struct drm_device *dev)
3143{
4640c4ff 3144 struct drm_i915_private *dev_priv = dev->dev_private;
4a570db5 3145 struct intel_engine_cs *engine = &dev_priv->engine[BCS];
e2f80391
TU
3146
3147 engine->name = "blitter ring";
3148 engine->id = BCS;
3149 engine->exec_id = I915_EXEC_BLT;
215a7e32 3150 engine->hw_id = 2;
e2f80391
TU
3151
3152 engine->mmio_base = BLT_RING_BASE;
3153 engine->write_tail = ring_write_tail;
3154 engine->flush = gen6_ring_flush;
3155 engine->add_request = gen6_add_request;
c04e0f3b
CW
3156 engine->irq_seqno_barrier = gen6_seqno_barrier;
3157 engine->get_seqno = ring_get_seqno;
e2f80391 3158 engine->set_seqno = ring_set_seqno;
c033666a 3159 if (INTEL_GEN(dev_priv) >= 8) {
e2f80391 3160 engine->irq_enable_mask =
abd58f01 3161 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
e2f80391
TU
3162 engine->irq_get = gen8_ring_get_irq;
3163 engine->irq_put = gen8_ring_put_irq;
3164 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
c033666a 3165 if (i915_semaphore_is_enabled(dev_priv)) {
e2f80391
TU
3166 engine->semaphore.sync_to = gen8_ring_sync;
3167 engine->semaphore.signal = gen8_xcs_signal;
3168 GEN8_RING_SEMAPHORE_INIT(engine);
707d9cf9 3169 }
abd58f01 3170 } else {
e2f80391
TU
3171 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
3172 engine->irq_get = gen6_ring_get_irq;
3173 engine->irq_put = gen6_ring_put_irq;
3174 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
c033666a 3175 if (i915_semaphore_is_enabled(dev_priv)) {
e2f80391
TU
3176 engine->semaphore.signal = gen6_signal;
3177 engine->semaphore.sync_to = gen6_ring_sync;
707d9cf9
BW
 3178 /*
 3179 * This style of semaphore is only used on pre-gen8
 3180 * platforms, and there is no VCS2 ring before gen8,
 3181 * so the semaphore between BCS and VCS2 is initialized
 3182 * as INVALID.  Gen8 sets up the BCS/VCS2 semaphore
 3183 * later (see the note after this function).
 3184 */
e2f80391
TU
3185 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
3186 engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
3187 engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
3188 engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
3189 engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
3190 engine->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
3191 engine->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
3192 engine->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
3193 engine->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
3194 engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
707d9cf9 3195 }
abd58f01 3196 }
e2f80391 3197 engine->init_hw = init_ring_common;
549f7365 3198
e2f80391 3199 return intel_init_ring_buffer(dev, engine);
549f7365 3200}
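/*
 * Editorial note on the gen6/7 semaphore tables above (inferred from the
 * tables in this file, not from hardware documentation): the mbox.wait[] and
 * mbox.signal[] arrays pair each engine with every other engine it can
 * synchronise with through the per-pair mailbox registers - wait[] holds the
 * MI_SEMAPHORE_SYNC_* encoding used for a wait involving the indexed engine,
 * and signal[] the register written to wake it.  Pairings that cannot occur
 * (an engine with itself, or anything with VCS2, which does not exist before
 * gen8) are filled with MI_SEMAPHORE_SYNC_INVALID / GEN6_NOSYNC so that the
 * sync code never emits them.
 */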
a7b9761d 3201
9a8a2213
BW
3202int intel_init_vebox_ring_buffer(struct drm_device *dev)
3203{
4640c4ff 3204 struct drm_i915_private *dev_priv = dev->dev_private;
4a570db5 3205 struct intel_engine_cs *engine = &dev_priv->engine[VECS];
9a8a2213 3206
e2f80391
TU
3207 engine->name = "video enhancement ring";
3208 engine->id = VECS;
3209 engine->exec_id = I915_EXEC_VEBOX;
215a7e32 3210 engine->hw_id = 3;
9a8a2213 3211
e2f80391
TU
3212 engine->mmio_base = VEBOX_RING_BASE;
3213 engine->write_tail = ring_write_tail;
3214 engine->flush = gen6_ring_flush;
3215 engine->add_request = gen6_add_request;
c04e0f3b
CW
3216 engine->irq_seqno_barrier = gen6_seqno_barrier;
3217 engine->get_seqno = ring_get_seqno;
e2f80391 3218 engine->set_seqno = ring_set_seqno;
abd58f01 3219
c033666a 3220 if (INTEL_GEN(dev_priv) >= 8) {
e2f80391 3221 engine->irq_enable_mask =
40c499f9 3222 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
e2f80391
TU
3223 engine->irq_get = gen8_ring_get_irq;
3224 engine->irq_put = gen8_ring_put_irq;
3225 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
c033666a 3226 if (i915_semaphore_is_enabled(dev_priv)) {
e2f80391
TU
3227 engine->semaphore.sync_to = gen8_ring_sync;
3228 engine->semaphore.signal = gen8_xcs_signal;
3229 GEN8_RING_SEMAPHORE_INIT(engine);
707d9cf9 3230 }
abd58f01 3231 } else {
e2f80391
TU
3232 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
3233 engine->irq_get = hsw_vebox_get_irq;
3234 engine->irq_put = hsw_vebox_put_irq;
3235 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
c033666a 3236 if (i915_semaphore_is_enabled(dev_priv)) {
e2f80391
TU
3237 engine->semaphore.sync_to = gen6_ring_sync;
3238 engine->semaphore.signal = gen6_signal;
3239 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
3240 engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
3241 engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
3242 engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
3243 engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
3244 engine->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
3245 engine->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
3246 engine->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
3247 engine->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
3248 engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
707d9cf9 3249 }
abd58f01 3250 }
e2f80391 3251 engine->init_hw = init_ring_common;
9a8a2213 3252
e2f80391 3253 return intel_init_ring_buffer(dev, engine);
9a8a2213
BW
3254}
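/*
 * Editorial note: on gen8+ every non-render engine in this file installs the
 * same gen8_ring_sync/gen8_xcs_signal pair and relies on
 * GEN8_RING_SEMAPHORE_INIT(), whereas the gen6/7 paths spell out a per-pair
 * mailbox table by hand.  The usual explanation - that gen8 semaphores live
 * in a shared memory page rather than dedicated register mailboxes, so one
 * macro can derive every slot - is an assumption here; what this file shows
 * directly is only that the gen8 setup is uniform across engines.
 */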
3255
a7b9761d 3256int
4866d729 3257intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
a7b9761d 3258{
4a570db5 3259 struct intel_engine_cs *engine = req->engine;
a7b9761d
CW
3260 int ret;
3261
e2f80391 3262 if (!engine->gpu_caches_dirty)
a7b9761d
CW
3263 return 0;
3264
e2f80391 3265 ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS);
a7b9761d
CW
3266 if (ret)
3267 return ret;
3268
a84c3ae1 3269 trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
a7b9761d 3270
e2f80391 3271 engine->gpu_caches_dirty = false;
a7b9761d
CW
3272 return 0;
3273}
3274
3275int
2f20055d 3276intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
a7b9761d 3277{
4a570db5 3278 struct intel_engine_cs *engine = req->engine;
a7b9761d
CW
3279 uint32_t flush_domains;
3280 int ret;
3281
3282 flush_domains = 0;
e2f80391 3283 if (engine->gpu_caches_dirty)
a7b9761d
CW
3284 flush_domains = I915_GEM_GPU_DOMAINS;
3285
e2f80391 3286 ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
a7b9761d
CW
3287 if (ret)
3288 return ret;
3289
a84c3ae1 3290 trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
a7b9761d 3291
e2f80391 3292 engine->gpu_caches_dirty = false;
a7b9761d
CW
3293 return 0;
3294}
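/*
 * Editorial note, not part of the original source: the two helpers above are
 * thin wrappers around the engine->flush() vfunc installed by the init
 * functions earlier in this file.  intel_ring_flush_all_caches() only writes
 * back (flush_domains = I915_GEM_GPU_DOMAINS) and returns immediately unless
 * gpu_caches_dirty is set, while intel_ring_invalidate_all_caches() always
 * invalidates and additionally writes back when dirty.  A typical pattern,
 * sketched under the assumption that a request brackets one batch buffer
 * (error handling elided):
 *
 *	ret = intel_ring_invalidate_all_caches(req);	// before emitting the batch
 *	...						// dispatch, which may dirty GPU caches
 *	ret = intel_ring_flush_all_caches(req);		// before the seqno/breadcrumb write
 */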
e3efda49
CW
3295
3296void
117897f4 3297intel_stop_engine(struct intel_engine_cs *engine)
e3efda49
CW
3298{
3299 int ret;
3300
117897f4 3301 if (!intel_engine_initialized(engine))
e3efda49
CW
3302 return;
3303
666796da 3304 ret = intel_engine_idle(engine);
f4457ae7 3305 if (ret)
e3efda49 3306 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
0bc40be8 3307 engine->name, ret);
e3efda49 3308
0bc40be8 3309 stop_ring(engine);
e3efda49 3310}