/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
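
/*
 * Static table of per-engine constants, indexed by enum intel_engine_id.
 * Each entry carries the engine's execbuf and GuC identifiers, the MMIO
 * base of its ring registers, its Gen8+ interrupt shift, and the
 * submission mode specific init functions.
 */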
static const struct engine_info {
	const char *name;
	unsigned exec_id;
	unsigned guc_id;
	u32 mmio_base;
	unsigned irq_shift;
	int (*init_legacy)(struct intel_engine_cs *engine);
	int (*init_execlists)(struct intel_engine_cs *engine);
} intel_engines[] = {
	[RCS] = {
		.name = "render ring",
		.exec_id = I915_EXEC_RENDER,
		.guc_id = GUC_RENDER_ENGINE,
		.mmio_base = RENDER_RING_BASE,
		.irq_shift = GEN8_RCS_IRQ_SHIFT,
		.init_execlists = logical_render_ring_init,
		.init_legacy = intel_init_render_ring_buffer,
	},
	[BCS] = {
		.name = "blitter ring",
		.exec_id = I915_EXEC_BLT,
		.guc_id = GUC_BLITTER_ENGINE,
		.mmio_base = BLT_RING_BASE,
		.irq_shift = GEN8_BCS_IRQ_SHIFT,
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_blt_ring_buffer,
	},
	[VCS] = {
		.name = "bsd ring",
		.exec_id = I915_EXEC_BSD,
		.guc_id = GUC_VIDEO_ENGINE,
		.mmio_base = GEN6_BSD_RING_BASE,
		.irq_shift = GEN8_VCS1_IRQ_SHIFT,
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd_ring_buffer,
	},
	[VCS2] = {
		.name = "bsd2 ring",
		.exec_id = I915_EXEC_BSD,
		.guc_id = GUC_VIDEO_ENGINE2,
		.mmio_base = GEN8_BSD2_RING_BASE,
		.irq_shift = GEN8_VCS2_IRQ_SHIFT,
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd2_ring_buffer,
	},
	[VECS] = {
		.name = "video enhancement ring",
		.exec_id = I915_EXEC_VEBOX,
		.guc_id = GUC_VIDEOENHANCE_ENGINE,
		.mmio_base = VEBOX_RING_BASE,
		.irq_shift = GEN8_VECS_IRQ_SHIFT,
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_vebox_ring_buffer,
	},
};
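
/*
 * intel_engine_setup() copies the constants above into the live
 * intel_engine_cs slot for the given id; it touches no hardware.
 */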
static struct intel_engine_cs *
intel_engine_setup(struct drm_i915_private *dev_priv,
		   enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	struct intel_engine_cs *engine = &dev_priv->engine[id];

	engine->id = id;
	engine->i915 = dev_priv;
	engine->name = info->name;
	engine->exec_id = info->exec_id;
	engine->hw_id = engine->guc_id = info->guc_id;
	engine->mmio_base = info->mmio_base;
	engine->irq_shift = info->irq_shift;

	return engine;
}
/**
 * intel_engines_init() - allocate, populate and init the Engine Command Streamers
 * @dev: DRM device.
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	unsigned int mask = 0;
	int (*init)(struct intel_engine_cs *engine);
	unsigned int i;
	int ret;

	WARN_ON(INTEL_INFO(dev_priv)->ring_mask == 0);
	WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
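
	/*
	 * Probe each engine the device exposes in its ring_mask, taking it
	 * through the init path selected by i915.enable_execlists.
	 */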
	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(dev_priv, i))
			continue;

		if (i915.enable_execlists)
			init = intel_engines[i].init_execlists;
		else
			init = intel_engines[i].init_legacy;

		if (!init)
			continue;

		ret = init(intel_engine_setup(dev_priv, i));
		if (ret)
			goto cleanup;

		mask |= ENGINE_MASK(i);
	}
	/*
	 * Catch failures to update the intel_engines table when new engines
	 * are added to the driver: warn and disable the forgotten engines.
	 */
	if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
		device_info->ring_mask = mask;

	device_info->num_rings = hweight32(mask);

	return 0;
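
/*
 * Error path: walk every engine slot, not just those initialized above;
 * the mode specific cleanup hooks are expected to cope with engines that
 * never went through init.
 */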
cleanup:
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (i915.enable_execlists)
			intel_logical_ring_cleanup(&dev_priv->engine[i]);
		else
			intel_engine_cleanup(&dev_priv->engine[i]);
	}

	return ret;
}
void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
	 * so long as the semaphore value in the register/page is greater
	 * than the sync value), so whenever we reset the seqno, as long as we
	 * also reset the tracking semaphore value to 0, it will always be
	 * before the next request's seqno. If we don't reset the semaphore
	 * value, then when the seqno moves backwards all future waits will
	 * complete instantly (causing rendering corruption).
	 */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}
	if (dev_priv->semaphore) {
		struct page *page = i915_vma_first_page(dev_priv->semaphore);
		void *semaphores;

		/* Semaphores are in noncoherent memory, flush to be safe */
		semaphores = kmap(page);
		memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
		       0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
		drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
				       I915_NUM_ENGINES * gen8_semaphore_seqno_size);
		kunmap(page);
	}
	memset(engine->semaphore.sync_seqno, 0,
	       sizeof(engine->semaphore.sync_seqno));

	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);
	engine->last_submitted_seqno = seqno;

	engine->hangcheck.seqno = seqno;

	/* After manually advancing the seqno, fake the interrupt in case
	 * there are any waiters for that seqno.
	 */
	intel_engine_wakeup(engine);
}
void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
{
	memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
static void intel_engine_init_requests(struct intel_engine_cs *engine)
{
	init_request_active(&engine->last_request, NULL);
	INIT_LIST_HEAD(&engine->request_list);
}
/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
	INIT_LIST_HEAD(&engine->buffers);
	INIT_LIST_HEAD(&engine->execlist_queue);
	spin_lock_init(&engine->execlist_lock);

	engine->fence_context = fence_context_alloc(1);

	intel_engine_init_requests(engine);
	intel_engine_init_hangcheck(engine);
	i915_gem_batch_pool_init(engine, &engine->batch_pool);
}
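
/*
 * Typical backend call order (a sketch; the legacy ringbuffer and execlists
 * init paths are the authoritative sequence):
 *
 *	intel_engine_setup_common(engine);	-> software-only state
 *	... allocate rings/contexts, install engine vfuncs ...
 *	ret = intel_engine_init_common(engine);	-> breadcrumbs + cmd parser
 */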
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	WARN_ON(engine->scratch);

	/* Prefer stolen memory, falling back to a regular shmem object */
	obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
	if (!obj)
		obj = i915_gem_object_create(&engine->i915->drm, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	/* PIN_HIGH keeps the scratch clear of the low, CPU-mappable range */
	ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	engine->scratch = vma;
	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08llx\n",
			 engine->name, vma->node.start);
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}
static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;

	vma = fetch_and_zero(&engine->scratch);
	if (!vma)
		return;

	i915_vma_unpin(vma);
	i915_vma_put(vma);
}
/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	int ret;

	ret = intel_engine_init_breadcrumbs(engine);
	if (ret)
		return ret;

	return intel_engine_init_cmd_parser(engine);
}
/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 *                               the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	intel_engine_cleanup_scratch(engine);

	intel_engine_cleanup_cmd_parser(engine);
	intel_engine_fini_breadcrumbs(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);
}