/* drivers/gpu/drm/i915/intel_ringbuffer.c */
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
        struct drm_i915_gem_object *obj;
        volatile u32 *cpu_page;
        u32 gtt_offset;
};

static inline int ring_space(struct intel_ring_buffer *ring)
{
        int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
        if (space < 0)
                space += ring->size;
        return space;
}

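/*
 * Worked example of the space computation above (illustration only):
 * with a 32768-byte ring, head == 0x7f00 and tail == 0x100 give
 * space = 0x7f00 - (0x100 + 8) = 0x7df8 bytes free.  With head == 0x100
 * and tail == 0x7f00 the difference is negative, so one ring size is
 * added back: 0x100 - 0x7f08 + 0x8000 = 0x1f8.  The extra 8 bytes keep
 * tail from ever advancing right up to head, which the hardware would
 * treat as an empty ring.
 */
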
static int
gen2_render_ring_flush(struct intel_ring_buffer *ring,
                       u32 invalidate_domains,
                       u32 flush_domains)
{
        u32 cmd;
        int ret;

        cmd = MI_FLUSH;
        if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
                cmd |= MI_NO_WRITE_FLUSH;

        if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
                cmd |= MI_READ_FLUSH;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}

static int
gen4_render_ring_flush(struct intel_ring_buffer *ring,
                       u32 invalidate_domains,
                       u32 flush_domains)
{
        struct drm_device *dev = ring->dev;
        u32 cmd;
        int ret;

        /*
         * read/write caches:
         *
         * I915_GEM_DOMAIN_RENDER is always invalidated, but is
         * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
         * also flushed at 2d versus 3d pipeline switches.
         *
         * read-only caches:
         *
         * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
         * MI_READ_FLUSH is set, and is always flushed on 965.
         *
         * I915_GEM_DOMAIN_COMMAND may not exist?
         *
         * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
         * invalidated when MI_EXE_FLUSH is set.
         *
         * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
         * invalidated with every MI_FLUSH.
         *
         * TLBs:
         *
         * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
         * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
         * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
         * are flushed at any MI_FLUSH.
         */

        cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
        if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
                cmd &= ~MI_NO_WRITE_FLUSH;
        if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                cmd |= MI_EXE_FLUSH;

        if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
            (IS_G4X(dev) || IS_GEN5(dev)))
                cmd |= MI_INVALIDATE_ISP;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}

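/*
 * Example of the domain mapping above (a sketch, not a real callsite):
 * a caller wanting render-cache writes flushed and the instruction
 * cache invalidated on gen4 would request
 *
 *      ring->flush(ring, I915_GEM_DOMAIN_INSTRUCTION,
 *                  I915_GEM_DOMAIN_RENDER);
 *
 * which the function above encodes as MI_FLUSH with MI_NO_WRITE_FLUSH
 * cleared (so render writes reach memory) and MI_EXE_FLUSH set.
 */
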
/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
        struct pipe_control *pc = ring->private;
        u32 scratch_addr = pc->gtt_offset + 128;
        int ret;

        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
        intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
                        PIPE_CONTROL_STALL_AT_SCOREBOARD);
        intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
        intel_ring_emit(ring, 0); /* low dword */
        intel_ring_emit(ring, 0); /* high dword */
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
        intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
        intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}

static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
                       u32 invalidate_domains, u32 flush_domains)
{
        u32 flags = 0;
        int ret;

        /* Just flush everything.  Experiments have shown that reducing the
         * number of bits based on the write domains has little performance
         * impact.
         */
        flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
        flags |= PIPE_CONTROL_TLB_INVALIDATE;
        flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
        flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
        flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
        flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
        flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
        flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
        /*
         * Ensure that any following seqno writes only happen when the render
         * cache is indeed flushed (but only if the caller actually wants that).
         */
        if (flush_domains)
                flags |= PIPE_CONTROL_CS_STALL;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
        intel_ring_emit(ring, flags);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);

        return 0;
}

static int
gen6_render_ring_flush__wa(struct intel_ring_buffer *ring,
                           u32 invalidate_domains, u32 flush_domains)
{
        int ret;

        /* Force SNB workarounds for PIPE_CONTROL flushes */
        ret = intel_emit_post_sync_nonzero_flush(ring);
        if (ret)
                return ret;

        return gen6_render_ring_flush(ring, invalidate_domains, flush_domains);
}

static void ring_write_tail(struct intel_ring_buffer *ring,
                            u32 value)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
                        RING_ACTHD(ring->mmio_base) : ACTHD;

        return I915_READ(acthd_reg);
}

static int init_ring_common(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj = ring->obj;
        int ret = 0;
        u32 head;

        if (HAS_FORCE_WAKE(dev))
                gen6_gt_force_wake_get(dev_priv);

        /* Stop the ring if it's running. */
        I915_WRITE_CTL(ring, 0);
        I915_WRITE_HEAD(ring, 0);
        ring->write_tail(ring, 0);

        /* Initialize the ring. */
        I915_WRITE_START(ring, obj->gtt_offset);
        head = I915_READ_HEAD(ring) & HEAD_ADDR;

        /* G45 ring initialization fails to reset head to zero */
        if (head != 0) {
                DRM_DEBUG_KMS("%s head not reset to zero "
                              "ctl %08x head %08x tail %08x start %08x\n",
                              ring->name,
                              I915_READ_CTL(ring),
                              I915_READ_HEAD(ring),
                              I915_READ_TAIL(ring),
                              I915_READ_START(ring));

                I915_WRITE_HEAD(ring, 0);

                if (I915_READ_HEAD(ring) & HEAD_ADDR) {
                        DRM_ERROR("failed to set %s head to zero "
                                  "ctl %08x head %08x tail %08x start %08x\n",
                                  ring->name,
                                  I915_READ_CTL(ring),
                                  I915_READ_HEAD(ring),
                                  I915_READ_TAIL(ring),
                                  I915_READ_START(ring));
                }
        }

        I915_WRITE_CTL(ring,
                        ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
                        | RING_VALID);

        /* If the head is still not zero, the ring is dead */
        if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
                     I915_READ_START(ring) == obj->gtt_offset &&
                     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
                DRM_ERROR("%s initialization failed "
                          "ctl %08x head %08x tail %08x start %08x\n",
                          ring->name,
                          I915_READ_CTL(ring),
                          I915_READ_HEAD(ring),
                          I915_READ_TAIL(ring),
                          I915_READ_START(ring));
                ret = -EIO;
                goto out;
        }

        if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
                i915_kernel_lost_context(ring->dev);
        else {
                ring->head = I915_READ_HEAD(ring);
                ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
                ring->space = ring_space(ring);
                ring->last_retired_head = -1;
        }

out:
        if (HAS_FORCE_WAKE(dev))
                gen6_gt_force_wake_put(dev_priv);

        return ret;
}

static int
init_pipe_control(struct intel_ring_buffer *ring)
{
        struct pipe_control *pc;
        struct drm_i915_gem_object *obj;
        int ret;

        if (ring->private)
                return 0;

        pc = kmalloc(sizeof(*pc), GFP_KERNEL);
        if (!pc)
                return -ENOMEM;

        obj = i915_gem_alloc_object(ring->dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate seqno page\n");
                ret = -ENOMEM;
                goto err;
        }

        i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

        ret = i915_gem_object_pin(obj, 4096, true);
        if (ret)
                goto err_unref;

        pc->gtt_offset = obj->gtt_offset;
        pc->cpu_page = kmap(obj->pages[0]);
        if (pc->cpu_page == NULL)
                goto err_unpin;

        pc->obj = obj;
        ring->private = pc;
        return 0;

err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(&obj->base);
err:
        kfree(pc);
        return ret;
}

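/*
 * Layout note for the page allocated above (illustration only): the
 * seqno that pc_render_add_request() writes lands at offset 0 (read
 * back through pc->cpu_page[0]), while the workaround qword writes go
 * to pc->gtt_offset + 128, + 256, ... + 768, one cacheline apart, so
 * the scratch traffic never overlaps the seqno.
 */
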
static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
        struct pipe_control *pc = ring->private;
        struct drm_i915_gem_object *obj;

        if (!ring->private)
                return;

        obj = pc->obj;
        kunmap(obj->pages[0]);
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(&obj->base);

        kfree(pc);
        ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret = init_ring_common(ring);

        if (INTEL_INFO(dev)->gen > 3) {
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
                if (IS_GEN7(dev))
                        I915_WRITE(GFX_MODE_GEN7,
                                   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
                                   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
        }

        if (INTEL_INFO(dev)->gen >= 5) {
                ret = init_pipe_control(ring);
                if (ret)
                        return ret;
        }

        if (IS_GEN6(dev)) {
                /* From the Sandybridge PRM, volume 1 part 3, page 24:
                 * "If this bit is set, STCunit will have LRA as replacement
                 *  policy. [...] This bit must be reset.  LRA replacement
                 *  policy is not supported."
                 */
                I915_WRITE(CACHE_MODE_0,
                           _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

                /* This is not explicitly set for GEN6, so read the register.
                 * see intel_ring_mi_set_context() for why we care.
                 * TODO: consider explicitly setting the bit for GEN5
                 */
                ring->itlb_before_ctx_switch =
                        !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
        }

        if (INTEL_INFO(dev)->gen >= 6)
                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

        if (HAS_L3_GPU_CACHE(dev))
                I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);

        return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
        if (!ring->private)
                return;

        cleanup_pipe_control(ring);
}

static void
update_mboxes(struct intel_ring_buffer *ring,
              u32 seqno,
              u32 mmio_offset)
{
        intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
                              MI_SEMAPHORE_GLOBAL_GTT |
                              MI_SEMAPHORE_REGISTER |
                              MI_SEMAPHORE_UPDATE);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring, mmio_offset);
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring - ring that is adding a request
 * @seqno - return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_ring_buffer *ring,
                 u32 *seqno)
{
        u32 mbox1_reg;
        u32 mbox2_reg;
        int ret;

        ret = intel_ring_begin(ring, 10);
        if (ret)
                return ret;

        mbox1_reg = ring->signal_mbox[0];
        mbox2_reg = ring->signal_mbox[1];

        *seqno = i915_gem_next_request_seqno(ring);

        update_mboxes(ring, *seqno, mbox1_reg);
        update_mboxes(ring, *seqno, mbox2_reg);
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, *seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        intel_ring_advance(ring);

        return 0;
}

/**
 * gen6_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */
static int
gen6_ring_sync(struct intel_ring_buffer *waiter,
               struct intel_ring_buffer *signaller,
               u32 seqno)
{
        int ret;
        u32 dw1 = MI_SEMAPHORE_MBOX |
                  MI_SEMAPHORE_COMPARE |
                  MI_SEMAPHORE_REGISTER;

        /* Throughout all of the GEM code, seqno passed implies our current
         * seqno is >= the last seqno executed. However for hardware the
         * comparison is strictly greater than.
         */
        seqno -= 1;

        WARN_ON(signaller->semaphore_register[waiter->id] ==
                MI_SEMAPHORE_SYNC_INVALID);

        ret = intel_ring_begin(waiter, 4);
        if (ret)
                return ret;

        intel_ring_emit(waiter,
                        dw1 | signaller->semaphore_register[waiter->id]);
        intel_ring_emit(waiter, seqno);
        intel_ring_emit(waiter, 0);
        intel_ring_emit(waiter, MI_NOOP);
        intel_ring_advance(waiter);

        return 0;
}

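/*
 * Worked example of the signal/wait pairing (a sketch, not a real
 * callsite): suppose the render ring emits a request with seqno 42;
 * gen6_add_request() then writes 42 into the other rings' semaphore
 * mailboxes.  A batch on the blitter that depends on it issues
 *
 *      waiter->sync_to(waiter, signaller, 42);
 *
 * which emits the MI_SEMAPHORE_MBOX wait above with the value 41,
 * because the hardware comparison is strictly greater-than: the
 * waiter resumes as soon as the mailbox holds 42 or anything later.
 */
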
#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
        intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
                        PIPE_CONTROL_DEPTH_STALL); \
        intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
        intel_ring_emit(ring__, 0); \
        intel_ring_emit(ring__, 0); \
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
                      u32 *result)
{
        u32 seqno = i915_gem_next_request_seqno(ring);
        struct pipe_control *pc = ring->private;
        u32 scratch_addr = pc->gtt_offset + 128;
        int ret;

        /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
         * incoherent with writes to memory, i.e. completely fubar,
         * so we need to use PIPE_NOTIFY instead.
         *
         * However, we also need to workaround the qword write
         * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
         * memory before requesting an interrupt.
         */
        ret = intel_ring_begin(ring, 32);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
                        PIPE_CONTROL_WRITE_FLUSH |
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
        intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring, 0);
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128; /* write to separate cachelines */
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
                        PIPE_CONTROL_WRITE_FLUSH |
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                        PIPE_CONTROL_NOTIFY);
        intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);

        *result = seqno;
        return 0;
}

static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;

        /* Workaround to force correct ordering between irq and seqno writes on
         * ivb (and maybe also on snb) by reading from a CS register (like
         * ACTHD) before reading the status page. */
        if (IS_GEN6(dev) || IS_GEN7(dev))
                intel_ring_get_active_head(ring);
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
        struct pipe_control *pc = ring->private;
        return pc->cpu_page[0];
}

static bool
gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        if (!dev->irq_enabled)
                return false;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
                dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
                I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
                POSTING_READ(GTIMR);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        return true;
}

static void
gen5_ring_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0) {
                dev_priv->gt_irq_mask |= ring->irq_enable_mask;
                I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
                POSTING_READ(GTIMR);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i9xx_ring_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        if (!dev->irq_enabled)
                return false;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
                dev_priv->irq_mask &= ~ring->irq_enable_mask;
                I915_WRITE(IMR, dev_priv->irq_mask);
                POSTING_READ(IMR);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        return true;
}

static void
i9xx_ring_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0) {
                dev_priv->irq_mask |= ring->irq_enable_mask;
                I915_WRITE(IMR, dev_priv->irq_mask);
                POSTING_READ(IMR);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i8xx_ring_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        if (!dev->irq_enabled)
                return false;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
                dev_priv->irq_mask &= ~ring->irq_enable_mask;
                I915_WRITE16(IMR, dev_priv->irq_mask);
                POSTING_READ16(IMR);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        return true;
}

static void
i8xx_ring_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0) {
                dev_priv->irq_mask |= ring->irq_enable_mask;
                I915_WRITE16(IMR, dev_priv->irq_mask);
                POSTING_READ16(IMR);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        u32 mmio = 0;

        /* The ring status page addresses are no longer next to the rest of
         * the ring registers as of gen7.
         */
        if (IS_GEN7(dev)) {
                switch (ring->id) {
                case RCS:
                        mmio = RENDER_HWS_PGA_GEN7;
                        break;
                case BCS:
                        mmio = BLT_HWS_PGA_GEN7;
                        break;
                case VCS:
                        mmio = BSD_HWS_PGA_GEN7;
                        break;
                }
        } else if (IS_GEN6(ring->dev)) {
                mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
        } else {
                mmio = RING_HWS_PGA(ring->mmio_base);
        }

        I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
        POSTING_READ(mmio);
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
               u32 invalidate_domains,
               u32 flush_domains)
{
        int ret;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_FLUSH);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
        return 0;
}

static int
i9xx_add_request(struct intel_ring_buffer *ring,
                 u32 *result)
{
        u32 seqno;
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        seqno = i915_gem_next_request_seqno(ring);

        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        intel_ring_advance(ring);

        *result = seqno;
        return 0;
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        if (!dev->irq_enabled)
                return false;

        /* It looks like we need to prevent the gt from suspending while
         * waiting for a notify irq, otherwise irqs seem to get lost on at
         * least the blt/bsd rings on ivb. */
        gen6_gt_force_wake_get(dev_priv);

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
                if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
                        I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
                                               GEN6_RENDER_L3_PARITY_ERROR));
                else
                        I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
                dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
                I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
                POSTING_READ(GTIMR);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0) {
                if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
                        I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
                else
                        I915_WRITE_IMR(ring, ~0);
                dev_priv->gt_irq_mask |= ring->irq_enable_mask;
                I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
                POSTING_READ(GTIMR);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        gen6_gt_force_wake_put(dev_priv);
}

static int
i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
        int ret;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring,
                        MI_BATCH_BUFFER_START |
                        MI_BATCH_GTT |
                        MI_BATCH_NON_SECURE_I965);
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);

        return 0;
}

static int
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
                         u32 offset, u32 len)
{
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_BATCH_BUFFER);
        intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
        intel_ring_emit(ring, offset + len - 8);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);

        return 0;
}

static int
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
                         u32 offset, u32 len)
{
        int ret;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
        intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
        intel_ring_advance(ring);

        return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
        struct drm_i915_gem_object *obj;

        obj = ring->status_page.obj;
        if (obj == NULL)
                return;

        kunmap(obj->pages[0]);
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(&obj->base);
        ring->status_page.obj = NULL;
}

static int init_status_page(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_gem_object *obj;
        int ret;

        obj = i915_gem_alloc_object(dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate status page\n");
                ret = -ENOMEM;
                goto err;
        }

        i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

        ret = i915_gem_object_pin(obj, 4096, true);
        if (ret != 0)
                goto err_unref;

        ring->status_page.gfx_addr = obj->gtt_offset;
        ring->status_page.page_addr = kmap(obj->pages[0]);
        if (ring->status_page.page_addr == NULL) {
                ret = -ENOMEM;
                goto err_unpin;
        }
        ring->status_page.obj = obj;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);

        intel_ring_setup_status_page(ring);
        DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                         ring->name, ring->status_page.gfx_addr);

        return 0;

err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(&obj->base);
err:
        return ret;
}

static int intel_init_ring_buffer(struct drm_device *dev,
                                  struct intel_ring_buffer *ring)
{
        struct drm_i915_gem_object *obj;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        ring->size = 32 * PAGE_SIZE;

        init_waitqueue_head(&ring->irq_queue);

        if (I915_NEED_GFX_HWS(dev)) {
                ret = init_status_page(ring);
                if (ret)
                        return ret;
        }

        obj = i915_gem_alloc_object(dev, ring->size);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate ringbuffer\n");
                ret = -ENOMEM;
                goto err_hws;
        }

        ring->obj = obj;

        ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
        if (ret)
                goto err_unref;

        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
                goto err_unpin;

        ring->virtual_start =
                ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
                           ring->size);
        if (ring->virtual_start == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
                ret = -EINVAL;
                goto err_unpin;
        }

        ret = ring->init(ring);
        if (ret)
                goto err_unmap;

        /* Workaround an erratum on the i830 which causes a hang if
         * the TAIL pointer points to within the last 2 cachelines
         * of the buffer.
         */
        ring->effective_size = ring->size;
        if (IS_I830(ring->dev) || IS_845G(ring->dev))
                ring->effective_size -= 128;

        return 0;

err_unmap:
        iounmap(ring->virtual_start);
err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(&obj->base);
        ring->obj = NULL;
err_hws:
        cleanup_status_page(ring);
        return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv;
        int ret;

        if (ring->obj == NULL)
                return;

        /* Disable the ring buffer. The ring must be idle at this point */
        dev_priv = ring->dev->dev_private;
        ret = intel_wait_ring_idle(ring);
        if (ret)
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
                          ring->name, ret);

        I915_WRITE_CTL(ring, 0);

        iounmap(ring->virtual_start);

        i915_gem_object_unpin(ring->obj);
        drm_gem_object_unreference(&ring->obj->base);
        ring->obj = NULL;

        if (ring->cleanup)
                ring->cleanup(ring);

        cleanup_status_page(ring);
}

static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
        uint32_t __iomem *virt;
        int rem = ring->size - ring->tail;

        if (ring->space < rem) {
                int ret = intel_wait_ring_buffer(ring, rem);
                if (ret)
                        return ret;
        }

        virt = ring->virtual_start + ring->tail;
        rem /= 4;
        while (rem--)
                iowrite32(MI_NOOP, virt++);

        ring->tail = 0;
        ring->space = ring_space(ring);

        return 0;
}

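/*
 * Example of the wrap above (numbers for illustration only): on an
 * i830-class ring of 32768 bytes, effective_size is 32640; with tail
 * at 32620 a 40-byte request cannot fit before the end, so the
 * remaining 32768 - 32620 = 148 bytes are filled with MI_NOOP
 * (37 dwords) and emission restarts at tail == 0.
 */
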
static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
        int ret;

        ret = i915_wait_seqno(ring, seqno);
        if (!ret)
                i915_gem_retire_requests_ring(ring);

        return ret;
}

static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
{
        struct drm_i915_gem_request *request;
        u32 seqno = 0;
        int ret;

        i915_gem_retire_requests_ring(ring);

        if (ring->last_retired_head != -1) {
                ring->head = ring->last_retired_head;
                ring->last_retired_head = -1;
                ring->space = ring_space(ring);
                if (ring->space >= n)
                        return 0;
        }

        list_for_each_entry(request, &ring->request_list, list) {
                int space;

                if (request->tail == -1)
                        continue;

                space = request->tail - (ring->tail + 8);
                if (space < 0)
                        space += ring->size;
                if (space >= n) {
                        seqno = request->seqno;
                        break;
                }

                /* Consume this request in case we need more space than
                 * is available and so need to prevent a race between
                 * updating last_retired_head and direct reads of
                 * I915_RING_HEAD. It also provides a nice sanity check.
                 */
                request->tail = -1;
        }

        if (seqno == 0)
                return -ENOSPC;

        ret = intel_ring_wait_seqno(ring, seqno);
        if (ret)
                return ret;

        if (WARN_ON(ring->last_retired_head == -1))
                return -ENOSPC;

        ring->head = ring->last_retired_head;
        ring->last_retired_head = -1;
        ring->space = ring_space(ring);
        if (WARN_ON(ring->space < n))
                return -ENOSPC;

        return 0;
}

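/*
 * Illustration of the reclamation walk above (not driver code): each
 * request records the ring->tail value at the time it was emitted, so
 * the first request whose recorded tail would leave at least n bytes
 * free identifies how far the hardware must progress.  Waiting for
 * that request's seqno then guarantees the head has moved past those
 * commands, and the space can be recomputed without reading
 * I915_RING_HEAD directly.
 */
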
int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long end;
        int ret;

        ret = intel_ring_wait_request(ring, n);
        if (ret != -ENOSPC)
                return ret;

        trace_i915_ring_wait_begin(ring);
        /* With GEM the hangcheck timer should kick us out of the loop,
         * leaving it early runs the risk of corrupting GEM state (due
         * to running on almost untested codepaths). But on resume
         * timers don't work yet, so prevent a complete hang in that
         * case by choosing an insanely large timeout. */
        end = jiffies + 60 * HZ;

        do {
                ring->head = I915_READ_HEAD(ring);
                ring->space = ring_space(ring);
                if (ring->space >= n) {
                        trace_i915_ring_wait_end(ring);
                        return 0;
                }

                if (dev->primary->master) {
                        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
                        if (master_priv->sarea_priv)
                                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
                }

                msleep(1);

                ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
                if (ret)
                        return ret;
        } while (!time_after(jiffies, end));
        trace_i915_ring_wait_end(ring);
        return -EBUSY;
}

int intel_ring_begin(struct intel_ring_buffer *ring,
                     int num_dwords)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        int n = 4*num_dwords;
        int ret;

        ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
        if (ret)
                return ret;

        if (unlikely(ring->tail + n > ring->effective_size)) {
                ret = intel_wrap_ring_buffer(ring);
                if (unlikely(ret))
                        return ret;
        }

        if (unlikely(ring->space < n)) {
                ret = intel_wait_ring_buffer(ring, n);
                if (unlikely(ret))
                        return ret;
        }

        ring->space -= n;
        return 0;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;

        ring->tail &= ring->size - 1;
        if (dev_priv->stop_rings & intel_ring_flag(ring))
                return;
        ring->write_tail(ring, ring->tail);
}

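/*
 * Minimal usage sketch of the begin/emit/advance pattern implemented
 * above (illustration only; real callers live throughout the driver):
 *
 *      ret = intel_ring_begin(ring, 2);
 *      if (ret)
 *              return ret;
 *      intel_ring_emit(ring, MI_NOOP);
 *      intel_ring_emit(ring, MI_NOOP);
 *      intel_ring_advance(ring);
 *
 * intel_ring_begin() reserves space in dwords (wrapping or waiting as
 * needed), the emits write at the cached tail, and intel_ring_advance()
 * publishes the new tail to the hardware, unless the ring has been
 * stopped for debugging via dev_priv->stop_rings.
 */
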
static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
                                     u32 value)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;

        /* Every tail move must follow the sequence below */

        /* Disable notification that the ring is IDLE. The GT
         * will then assume that it is busy and bring it out of rc6.
         */
        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
                   _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

        /* Clear the context id. Here be magic! */
        I915_WRITE64(GEN6_BSD_RNCID, 0x0);

        /* Wait for the ring not to be idle, i.e. for it to wake up. */
        if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
                      GEN6_BSD_SLEEP_INDICATOR) == 0,
                     50))
                DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

        /* Now that the ring is fully powered up, update the tail */
        I915_WRITE_TAIL(ring, value);
        POSTING_READ(RING_TAIL(ring->mmio_base));

        /* Let the ring send IDLE messages to the GT again,
         * and so let it sleep to conserve power when idle.
         */
        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
                   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}

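/*
 * Note on the _MASKED_BIT_* idiom used above: these registers keep a
 * write-enable mask in their upper 16 bits, so _MASKED_BIT_ENABLE(b)
 * expands to ((b) << 16) | (b) (select the bit and set it) and
 * _MASKED_BIT_DISABLE(b) to ((b) << 16) (select it and clear it).
 * For example, if GEN6_BSD_SLEEP_MSG_DISABLE is bit 0:
 *
 *      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)  == 0x00010001
 *      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE) == 0x00010000
 *
 * so each write updates only the selected bit, with no read-modify-
 * write of the whole register.
 */
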
static int gen6_ring_flush(struct intel_ring_buffer *ring,
                           u32 invalidate, u32 flush)
{
        uint32_t cmd;
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        cmd = MI_FLUSH_DW;
        if (invalidate & I915_GEM_GPU_DOMAINS)
                cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
        return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
                              u32 offset, u32 len)
{
        int ret;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
        /* bit0-7 is the length on GEN6+ */
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);

        return 0;
}

/* Blitter support (SandyBridge+) */

static int blt_ring_flush(struct intel_ring_buffer *ring,
                          u32 invalidate, u32 flush)
{
        uint32_t cmd;
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        cmd = MI_FLUSH_DW;
        if (invalidate & I915_GEM_DOMAIN_RENDER)
                cmd |= MI_INVALIDATE_TLB;
        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
        return 0;
}

int intel_init_render_ring_buffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

        ring->name = "render ring";
        ring->id = RCS;
        ring->mmio_base = RENDER_RING_BASE;

        if (INTEL_INFO(dev)->gen >= 6) {
                ring->add_request = gen6_add_request;
                ring->flush = gen6_render_ring_flush;
                if (INTEL_INFO(dev)->gen == 6)
                        ring->flush = gen6_render_ring_flush__wa;
                ring->irq_get = gen6_ring_get_irq;
                ring->irq_put = gen6_ring_put_irq;
                ring->irq_enable_mask = GT_USER_INTERRUPT;
                ring->get_seqno = gen6_ring_get_seqno;
                ring->sync_to = gen6_ring_sync;
                ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
                ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
                ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
                ring->signal_mbox[0] = GEN6_VRSYNC;
                ring->signal_mbox[1] = GEN6_BRSYNC;
        } else if (IS_GEN5(dev)) {
                ring->add_request = pc_render_add_request;
                ring->flush = gen4_render_ring_flush;
                ring->get_seqno = pc_render_get_seqno;
                ring->irq_get = gen5_ring_get_irq;
                ring->irq_put = gen5_ring_put_irq;
                ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
        } else {
                ring->add_request = i9xx_add_request;
                if (INTEL_INFO(dev)->gen < 4)
                        ring->flush = gen2_render_ring_flush;
                else
                        ring->flush = gen4_render_ring_flush;
                ring->get_seqno = ring_get_seqno;
                if (IS_GEN2(dev)) {
                        ring->irq_get = i8xx_ring_get_irq;
                        ring->irq_put = i8xx_ring_put_irq;
                } else {
                        ring->irq_get = i9xx_ring_get_irq;
                        ring->irq_put = i9xx_ring_put_irq;
                }
                ring->irq_enable_mask = I915_USER_INTERRUPT;
        }
        ring->write_tail = ring_write_tail;
        if (INTEL_INFO(dev)->gen >= 6)
                ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
        else if (INTEL_INFO(dev)->gen >= 4)
                ring->dispatch_execbuffer = i965_dispatch_execbuffer;
        else if (IS_I830(dev) || IS_845G(dev))
                ring->dispatch_execbuffer = i830_dispatch_execbuffer;
        else
                ring->dispatch_execbuffer = i915_dispatch_execbuffer;
        ring->init = init_render_ring;
        ring->cleanup = render_ring_cleanup;

        if (!I915_NEED_GFX_HWS(dev)) {
                ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
                memset(ring->status_page.page_addr, 0, PAGE_SIZE);
        }

        return intel_init_ring_buffer(dev, ring);
}

int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

        ring->name = "render ring";
        ring->id = RCS;
        ring->mmio_base = RENDER_RING_BASE;

        if (INTEL_INFO(dev)->gen >= 6) {
                /* non-kms not supported on gen6+ */
                return -ENODEV;
        }

        /* Note: gem is not supported on gen5/ilk without kms (the corresponding
         * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
         * the special gen5 functions. */
        ring->add_request = i9xx_add_request;
        if (INTEL_INFO(dev)->gen < 4)
                ring->flush = gen2_render_ring_flush;
        else
                ring->flush = gen4_render_ring_flush;
        ring->get_seqno = ring_get_seqno;
        if (IS_GEN2(dev)) {
                ring->irq_get = i8xx_ring_get_irq;
                ring->irq_put = i8xx_ring_put_irq;
        } else {
                ring->irq_get = i9xx_ring_get_irq;
                ring->irq_put = i9xx_ring_put_irq;
        }
        ring->irq_enable_mask = I915_USER_INTERRUPT;
        ring->write_tail = ring_write_tail;
        if (INTEL_INFO(dev)->gen >= 4)
                ring->dispatch_execbuffer = i965_dispatch_execbuffer;
        else if (IS_I830(dev) || IS_845G(dev))
                ring->dispatch_execbuffer = i830_dispatch_execbuffer;
        else
                ring->dispatch_execbuffer = i915_dispatch_execbuffer;
        ring->init = init_render_ring;
        ring->cleanup = render_ring_cleanup;

        if (!I915_NEED_GFX_HWS(dev))
                ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;

        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);

        ring->size = size;
        ring->effective_size = ring->size;
        if (IS_I830(ring->dev))
                ring->effective_size -= 128;

        ring->virtual_start = ioremap_wc(start, size);
        if (ring->virtual_start == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }

        return 0;
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

        ring->name = "bsd ring";
        ring->id = VCS;

        ring->write_tail = ring_write_tail;
        if (IS_GEN6(dev) || IS_GEN7(dev)) {
                ring->mmio_base = GEN6_BSD_RING_BASE;
                /* gen6 bsd needs a special wa for tail updates */
                if (IS_GEN6(dev))
                        ring->write_tail = gen6_bsd_ring_write_tail;
                ring->flush = gen6_ring_flush;
                ring->add_request = gen6_add_request;
                ring->get_seqno = gen6_ring_get_seqno;
                ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
                ring->irq_get = gen6_ring_get_irq;
                ring->irq_put = gen6_ring_put_irq;
                ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
                ring->sync_to = gen6_ring_sync;
                ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
                ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
                ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
                ring->signal_mbox[0] = GEN6_RVSYNC;
                ring->signal_mbox[1] = GEN6_BVSYNC;
        } else {
                ring->mmio_base = BSD_RING_BASE;
                ring->flush = bsd_ring_flush;
                ring->add_request = i9xx_add_request;
                ring->get_seqno = ring_get_seqno;
                if (IS_GEN5(dev)) {
                        ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
                        ring->irq_get = gen5_ring_get_irq;
                        ring->irq_put = gen5_ring_put_irq;
                } else {
                        ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
                        ring->irq_get = i9xx_ring_get_irq;
                        ring->irq_put = i9xx_ring_put_irq;
                }
                ring->dispatch_execbuffer = i965_dispatch_execbuffer;
        }
        ring->init = init_ring_common;

        return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

        ring->name = "blitter ring";
        ring->id = BCS;

        ring->mmio_base = BLT_RING_BASE;
        ring->write_tail = ring_write_tail;
        ring->flush = blt_ring_flush;
        ring->add_request = gen6_add_request;
        ring->get_seqno = gen6_ring_get_seqno;
        ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
        ring->irq_get = gen6_ring_get_irq;
        ring->irq_put = gen6_ring_put_irq;
        ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
        ring->sync_to = gen6_ring_sync;
        ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
        ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
        ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
        ring->signal_mbox[0] = GEN6_RBSYNC;
        ring->signal_mbox[1] = GEN6_VBSYNC;
        ring->init = init_ring_common;

        return intel_init_ring_buffer(dev, ring);
}

int
intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
{
        int ret;

        if (!ring->gpu_caches_dirty)
                return 0;

        ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;

        trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);

        ring->gpu_caches_dirty = false;
        return 0;
}

int
intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
{
        uint32_t flush_domains;
        int ret;

        flush_domains = 0;
        if (ring->gpu_caches_dirty)
                flush_domains = I915_GEM_GPU_DOMAINS;

        ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
        if (ret)
                return ret;

        trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);

        ring->gpu_caches_dirty = false;
        return 0;
}
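
/*
 * Pairing sketch for the two helpers above (illustration only,
 * mirroring how execbuffer-style submission uses them): invalidate
 * before a batch that reads through the GPU caches, mark
 * gpu_caches_dirty once the batch may have written, then flush before
 * anything else consumes those writes.
 *
 *      ret = intel_ring_invalidate_all_caches(ring);
 *      ... emit and run the batch ...
 *      ring->gpu_caches_dirty = true;
 *      ret = intel_ring_flush_all_caches(ring);
 */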