/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
	struct drm_i915_gem_object *obj;
	volatile u32 *cpu_page;
	u32 gtt_offset;
};
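
/*
 * Bytes of free space in the ring.  Worked example (illustrative,
 * assuming a 0x20000-byte ring): head == 0x100 and tail == 0x200 give
 * 0x100 - 0x208 = -0x108, wrapped to 0x1fef8.  The extra 8 bytes
 * reserved past the tail keep it from ever catching the head, so an
 * empty ring (head == tail) stays distinguishable from a full one.
 */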
static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
	if (space < 0)
		space += ring->size;
	return space;
}
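
/*
 * Hand out the next breadcrumb value.  Seqno 0 is reserved as the
 * "no request" marker, so the counter skips it when the u32 wraps.
 */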
static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}

static int
render_ring_flush(struct intel_ring_buffer *ring,
		  u32 invalidate_domains,
		  u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) &
	    I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (INTEL_INFO(dev)->gen < 4) {
		/*
		 * On the 965, the sampler cache always gets flushed
		 * and this bit is reserved.
		 */
		if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
			cmd |= MI_READ_FLUSH;
	}
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
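
/*
 * Note: scratch_addr points 128 bytes into the pipe_control page so
 * that the post-sync qword write lands in its own cacheline, clear of
 * the seqno slot kept at offset 0 of that page.
 */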
static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(ring);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
	flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0); /* lower dword */
	intel_ring_emit(ring, 0); /* upper dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
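
/*
 * Default tail write: a single tail-register write.  The gen6 BSD
 * ring overrides this with the PSMI idle sequence further below.
 */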
static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}
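
/*
 * Common ring bring-up: stop the ring, program the start address,
 * force HEAD back to zero (G45 sometimes fails to reset it), then set
 * RING_VALID and verify the hardware agrees before using the ring.
 */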
static int init_ring_common(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	u32 head;

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_REPORT_64K | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
	    I915_READ_START(ring) != obj->gtt_offset ||
	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		return -EIO;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring);
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring_space(ring);
	}

	return 0;
}
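
/*
 * The pipe_control page does double duty: offset 0 holds the seqno
 * written by PIPE_CONTROL on Ironlake (see pc_render_add_request),
 * while offsets 128+ provide scratch cachelines for workaround
 * flushes.
 */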
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc;
	struct drm_i915_gem_object *obj;
	int ret;

	if (ring->private)
		return 0;

	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	obj = i915_gem_alloc_object(ring->dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
	pc->cpu_page = kmap(obj->pages[0]);
	if (pc->cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	pc->obj = obj;
	ring->private = pc;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	kfree(pc);
	return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	struct drm_i915_gem_object *obj;

	if (!ring->private)
		return;

	obj = pc->obj;
	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);

	kfree(pc);
	ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3) {
		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		if (IS_GEN6(dev) || IS_GEN7(dev))
			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
		I915_WRITE(MI_MODE, mode);
		if (IS_GEN7(dev))
			I915_WRITE(GFX_MODE_GEN7,
				   GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
				   GFX_MODE_ENABLE(GFX_REPLAY_MODE));
	}

	if (INTEL_INFO(dev)->gen >= 5) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	cleanup_pipe_control(ring);
}
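
/*
 * Gen6+ inter-ring semaphores: the signalling ring writes its seqno
 * into the other rings' mailbox registers (update_mboxes, driven by
 * gen6_add_request), and a waiting ring stalls on MI_SEMAPHORE_MBOX |
 * MI_SEMAPHORE_COMPARE until the mailbox value catches up with the
 * seqno it needs (intel_ring_sync).
 */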
static void
update_mboxes(struct intel_ring_buffer *ring,
	      u32 seqno,
	      u32 mmio_offset)
{
	intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
			      MI_SEMAPHORE_GLOBAL_GTT |
			      MI_SEMAPHORE_REGISTER |
			      MI_SEMAPHORE_UPDATE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, mmio_offset);
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring: ring that is adding a request
 * @seqno: return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_ring_buffer *ring,
		 u32 *seqno)
{
	u32 mbox1_reg;
	u32 mbox2_reg;
	int ret;

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	mbox1_reg = ring->signal_mbox[0];
	mbox2_reg = ring->signal_mbox[1];

	*seqno = i915_gem_get_seqno(ring->dev);

	update_mboxes(ring, *seqno, mbox1_reg);
	update_mboxes(ring, *seqno, mbox2_reg);
	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, *seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	return 0;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter: ring that is waiting
 * @signaller: ring which has, or will signal
 * @seqno: seqno which the waiter will block on
 */
static int
intel_ring_sync(struct intel_ring_buffer *waiter,
		struct intel_ring_buffer *signaller,
		int ring,
		u32 seqno)
{
	int ret;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter, 0);
	intel_ring_emit(waiter, MI_NOOP);
	intel_ring_advance(waiter);

	return 0;
}

/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
int
render_ring_sync_to(struct intel_ring_buffer *waiter,
		    struct intel_ring_buffer *signaller,
		    u32 seqno)
{
	WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
	return intel_ring_sync(waiter,
			       signaller,
			       RCS,
			       seqno);
}

/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
int
gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
		      struct intel_ring_buffer *signaller,
		      u32 seqno)
{
	WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
	return intel_ring_sync(waiter,
			       signaller,
			       VCS,
			       seqno);
}

/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
int
gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
		      struct intel_ring_buffer *signaller,
		      u32 seqno)
{
	WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
	return intel_ring_sync(waiter,
			       signaller,
			       BCS,
			       seqno);
}
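
/*
 * Emit a single 4-dword PIPE_CONTROL that stalls depth and qword-
 * writes to addr__.  pc_render_add_request chains six of these across
 * separate cachelines to flush Ironlake's PIPE_NOTIFY write buffers
 * out to memory before the interrupt is raised.
 */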
#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | \
			PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_DEPTH_STALL); \
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
	intel_ring_emit(ring__, 0); \
	intel_ring_emit(ring__, 0); \
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
		      u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static int
render_ring_add_request(struct intel_ring_buffer *ring,
			u32 *result)
{
	struct drm_device *dev = ring->dev;
	u32 seqno = i915_gem_get_seqno(dev);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	return pc->cpu_page[0];
}
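
/*
 * IRQ mask bookkeeping: Ironlake and later route ring interrupts
 * through GTIMR, older parts through IMR.  Each helper posts the
 * register write so the mask change reaches the hardware before the
 * caller drops ring->irq_lock.
 */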
static void
ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask &= ~mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->gt_irq_mask |= mask;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask &= ~mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	dev_priv->irq_mask |= mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ(IMR);
}

static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_irq(dev_priv,
					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_irq(dev_priv,
					     GT_USER_INTERRUPT |
					     GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RING_RENDER:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case RING_BLT:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case RING_BSD:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
ring_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_get_seqno(ring->dev);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		ring->irq_mask &= ~rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_enable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		ring->irq_mask |= rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_disable_irq(dev_priv, gflag);
	}
	spin_unlock(&ring->irq_lock);
}

static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&ring->irq_lock);
	if (ring->irq_refcount++ == 0) {
		if (IS_G4X(dev))
			i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);

	return true;
}

static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&ring->irq_lock);
	if (--ring->irq_refcount == 0) {
		if (IS_G4X(dev))
			i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
		else
			ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	}
	spin_unlock(&ring->irq_lock);
}

static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | (2 << 6) |
			MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
				u32 offset, u32 len)
{
	struct drm_device *dev = ring->dev;
	int ret;

	if (IS_I830(dev) || IS_845G(dev)) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, 0);
	} else {
		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		if (INTEL_INFO(dev)->gen >= 4) {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6) |
					MI_BATCH_NON_SECURE_I965);
			intel_ring_emit(ring, offset);
		} else {
			intel_ring_emit(ring,
					MI_BATCH_BUFFER_START | (2 << 6));
			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
		}
	}
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret != 0)
		goto err_unref;

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = kmap(obj->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		ret = -ENOMEM;
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}
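
/*
 * Allocate, pin and map the ring object, then hand off to the
 * per-ring ->init() hook.  The error path unwinds in strict reverse
 * order of these steps.
 */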
int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	init_waitqueue_head(&ring->irq_queue);
	spin_lock_init(&ring->irq_lock);
	ring->irq_mask = ~0;

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	return 0;

err_unmap:
	drm_core_ioremapfree(&ring->map, dev);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_idle(ring);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	I915_WRITE_CTL(ring, 0);

	drm_core_ioremapfree(&ring->map, ring->dev);

	i915_gem_object_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);
}
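
/*
 * The hardware tail pointer cannot wrap mid-command, so when a
 * request would straddle the end of the buffer the remainder is
 * padded with MI_NOOPs (two dwords per loop iteration) and emission
 * restarts at offset 0.
 */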
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring_space(ring);

	return 0;
}

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	u32 head;

	/* If the reported head position has wrapped or hasn't advanced,
	 * fallback to the slow and accurate path.
	 */
	head = intel_read_status_page(ring, 4);
	if (head > ring->head) {
		ring->head = head;
		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

	trace_i915_ring_wait_begin(ring);
	end = jiffies + 3 * HZ;
	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
			trace_i915_ring_wait_end(ring);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);
		if (atomic_read(&dev_priv->mm.wedged))
			return -EAGAIN;
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(ring);
	return -EBUSY;
}
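
/*
 * Reserve, fill, advance: every submission in this file follows the
 * same pattern.  Illustrative sketch of a hypothetical caller:
 *
 *	ret = intel_ring_begin(ring, 2);	(reserve two dwords)
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_FLUSH);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);		(publish new tail to hw)
 */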
int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int n = 4*num_dwords;
	int ret;

	if (unlikely(atomic_read(&dev_priv->mm.wedged)))
		return -EIO;

	if (unlikely(ring->tail + n > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < n)) {
		ret = intel_wait_ring_buffer(ring, n);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= n;
	return 0;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->write_tail(ring, ring->tail);
}
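
/*
 * Template vfunc tables.  The intel_init_*_ring_buffer() functions at
 * the bottom of this file copy one of these into dev_priv->ring[] and
 * then patch individual hooks for the detected generation.
 */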
static const struct intel_ring_buffer render_ring = {
	.name = "render ring",
	.id = RING_RENDER,
	.mmio_base = RENDER_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = init_render_ring,
	.write_tail = ring_write_tail,
	.flush = render_ring_flush,
	.add_request = render_ring_add_request,
	.get_seqno = ring_get_seqno,
	.irq_get = render_ring_get_irq,
	.irq_put = render_ring_put_irq,
	.dispatch_execbuffer = render_ring_dispatch_execbuffer,
	.cleanup = render_ring_cleanup,
	.sync_to = render_ring_sync_to,
	.semaphore_register = {MI_SEMAPHORE_SYNC_INVALID,
			       MI_SEMAPHORE_SYNC_RV,
			       MI_SEMAPHORE_SYNC_RB},
	.signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC},
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
	.name = "bsd ring",
	.id = RING_BSD,
	.mmio_base = BSD_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = init_ring_common,
	.write_tail = ring_write_tail,
	.flush = bsd_ring_flush,
	.add_request = ring_add_request,
	.get_seqno = ring_get_seqno,
	.irq_get = bsd_ring_get_irq,
	.irq_put = bsd_ring_put_irq,
	.dispatch_execbuffer = ring_dispatch_execbuffer,
};

static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	I915_WRITE_TAIL(ring, value);
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static bool
gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_USER_INTERRUPT,
				 GEN6_RENDER_USER_INTERRUPT);
}

static void
gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_USER_INTERRUPT,
			  GEN6_RENDER_USER_INTERRUPT);
}

static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_GEN6_BSD_USER_INTERRUPT,
				 GEN6_BSD_USER_INTERRUPT);
}

static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_GEN6_BSD_USER_INTERRUPT,
			  GEN6_BSD_USER_INTERRUPT);
}

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
	.name = "gen6 bsd ring",
	.id = RING_BSD,
	.mmio_base = GEN6_BSD_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = init_ring_common,
	.write_tail = gen6_bsd_ring_write_tail,
	.flush = gen6_ring_flush,
	.add_request = gen6_add_request,
	.get_seqno = ring_get_seqno,
	.irq_get = gen6_bsd_ring_get_irq,
	.irq_put = gen6_bsd_ring_put_irq,
	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
	.sync_to = gen6_bsd_ring_sync_to,
	.semaphore_register = {MI_SEMAPHORE_SYNC_VR,
			       MI_SEMAPHORE_SYNC_INVALID,
			       MI_SEMAPHORE_SYNC_VB},
	.signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
};

/* Blitter support (SandyBridge+) */

static bool
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
	return gen6_ring_get_irq(ring,
				 GT_BLT_USER_INTERRUPT,
				 GEN6_BLITTER_USER_INTERRUPT);
}

static void
blt_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_BLT_USER_INTERRUPT,
			  GEN6_BLITTER_USER_INTERRUPT);
}

/* Workaround for some steppings of SNB: each time the BLT engine
 * ring tail is moved, the first command in the ring to be parsed
 * must be MI_BATCH_BUFFER_START.
 */
#define NEED_BLT_WORKAROUND(dev) \
	(IS_GEN6(dev) && (dev->pdev->revision < 8))

static inline struct drm_i915_gem_object *
to_blt_workaround(struct intel_ring_buffer *ring)
{
	return ring->private;
}

static int blt_ring_init(struct intel_ring_buffer *ring)
{
	if (NEED_BLT_WORKAROUND(ring->dev)) {
		struct drm_i915_gem_object *obj;
		u32 *ptr;
		int ret;

		obj = i915_gem_alloc_object(ring->dev, 4096);
		if (obj == NULL)
			return -ENOMEM;

		ret = i915_gem_object_pin(obj, 4096, true);
		if (ret) {
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ptr = kmap(obj->pages[0]);
		*ptr++ = MI_BATCH_BUFFER_END;
		*ptr++ = MI_NOOP;
		kunmap(obj->pages[0]);

		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret) {
			i915_gem_object_unpin(obj);
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ring->private = obj;
	}

	return init_ring_common(ring);
}
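
/*
 * With the workaround object installed, every reservation on the BLT
 * ring is prefixed with a MI_BATCH_BUFFER_START pointing at the
 * stored MI_BATCH_BUFFER_END page, so the first command parsed after
 * a tail move is always a batch-buffer start, as required above.
 */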
static int blt_ring_begin(struct intel_ring_buffer *ring,
			  int num_dwords)
{
	if (ring->private) {
		int ret = intel_ring_begin(ring, num_dwords+2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER_START);
		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);

		return 0;
	} else
		return intel_ring_begin(ring, 4);
}

static int blt_ring_flush(struct intel_ring_buffer *ring,
			  u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = blt_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}
static void blt_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	i915_gem_object_unpin(ring->private);
	drm_gem_object_unreference(ring->private);
	ring->private = NULL;
}
1444 | ||
549f7365 | 1445 | static const struct intel_ring_buffer gen6_blt_ring = { |
0206e353 AJ |
1446 | .name = "blt ring", |
1447 | .id = RING_BLT, | |
1448 | .mmio_base = BLT_RING_BASE, | |
1449 | .size = 32 * PAGE_SIZE, | |
1450 | .init = blt_ring_init, | |
1451 | .write_tail = ring_write_tail, | |
1452 | .flush = blt_ring_flush, | |
1453 | .add_request = gen6_add_request, | |
1454 | .get_seqno = ring_get_seqno, | |
c8c99b0f BW |
1455 | .irq_get = blt_ring_get_irq, |
1456 | .irq_put = blt_ring_put_irq, | |
0206e353 | 1457 | .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, |
c8c99b0f BW |
1458 | .cleanup = blt_ring_cleanup, |
1459 | .sync_to = gen6_blt_ring_sync_to, | |
1460 | .semaphore_register = {MI_SEMAPHORE_SYNC_BR, | |
1461 | MI_SEMAPHORE_SYNC_BV, | |
1462 | MI_SEMAPHORE_SYNC_INVALID}, | |
1463 | .signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC}, | |
881f47b6 XH |
1464 | }; |
1465 | ||

int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->flush = gen6_render_ring_flush;
		ring->irq_get = gen6_render_ring_get_irq;
		ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->get_seqno = pc_render_get_seqno;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, ring);
}

int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	*ring = render_ring;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->irq_get = gen6_render_ring_get_irq;
		ring->irq_put = gen6_render_ring_put_irq;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->get_seqno = pc_render_get_seqno;
	}

	if (!I915_NEED_GFX_HWS(dev))
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	ring->size = size;
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	ring->map.offset = start;
	ring->map.size = size;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	ring->virtual_start = (void __force __iomem *)ring->map.handle;
	return 0;
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	if (IS_GEN6(dev) || IS_GEN7(dev))
		*ring = gen6_bsd_ring;
	else
		*ring = bsd_ring;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	*ring = gen6_blt_ring;

	return intel_init_ring_buffer(dev, ring);
}