/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU-accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
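
/*
 * A rough sketch of the lifecycle these helpers implement.  The call
 * sequence below is illustrative only: error handling, job setup and
 * the actual packet contents are elided, and real submissions normally
 * arrive through the CS ioctl rather than calling these directly.
 *
 *      struct amdgpu_ib ib;
 *      struct fence *f;
 *
 *      amdgpu_ib_get(adev, NULL, 256, &ib);
 *      // write command packets into ib.ptr, set ib.length_dw
 *      amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
 *      // wait on f, then release the suballocation
 *      amdgpu_ib_free(adev, &ib, f);
 */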

/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @vm: VM the IB belongs to, or NULL if it doesn't use a VM
 * @size: requested IB size
 * @ib: IB object returned
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                  unsigned size, struct amdgpu_ib *ib)
{
        int r;

        if (size) {
                r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
                                     &ib->sa_bo, size, 256);
                if (r) {
                        dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
                        return r;
                }

                ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);

                /* with a VM the GPU address is set up by the caller */
                if (!vm)
                        ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
        }

        return 0;
}

/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 * @f: fence the suballocation must wait on before the IB memory is reused
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
                    struct fence *f)
{
        amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}

/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are scheduled on
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @last_vm_update: fence of the last VM update
 * @job: job the IBs belong to, or NULL for a ring test
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will already be in the cache when the draw
 * is processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                       struct amdgpu_ib *ibs, struct fence *last_vm_update,
                       struct amdgpu_job *job, struct fence **f)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib *ib = &ibs[0];
        bool skip_preamble, need_ctx_switch;
        unsigned patch_offset = ~0;
        struct amdgpu_vm *vm;
        struct fence *hwf;
        uint64_t ctx;
        unsigned i;
        int r = 0;

        if (num_ibs == 0)
                return -EINVAL;

        /* ring tests don't use a job */
        if (job) {
                vm = job->vm;
                ctx = job->ctx;
        } else {
                vm = NULL;
                ctx = 0;
        }

        if (!ring->ready) {
                dev_err(adev->dev, "couldn't schedule ib\n");
                return -EINVAL;
        }

        if (vm && !job->vm_id) {
                dev_err(adev->dev, "VM IB without ID\n");
                return -EINVAL;
        }

        /* reserve ring space, worst case 256 dwords per IB */
        r = amdgpu_ring_alloc(ring, 256 * num_ibs);
        if (r) {
                dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
                return r;
        }
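
        /*
         * As used here, init_cond_exec emits a placeholder conditional
         * execution packet and returns its offset; that placeholder is
         * fixed up by patch_cond_exec near the end of this function,
         * once the size of the wrapped commands is known.
         */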
        if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec)
                patch_offset = amdgpu_ring_init_cond_exec(ring);

        if (vm) {
                r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
                                    job->gds_base, job->gds_size,
                                    job->gws_base, job->gws_size,
                                    job->oa_base, job->oa_size);
                if (r) {
                        amdgpu_ring_undo(ring);
                        return r;
                }
        }

        if (ring->funcs->emit_hdp_flush)
                amdgpu_ring_emit_hdp_flush(ring);

        /* always set cond_exec_polling to CONTINUE */
        *ring->cond_exe_cpu_addr = 1;
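
        /*
         * Preamble IBs carry context state that only needs to be
         * replayed when the ring switches to a different context; if
         * this submission runs in the same context as the previous one
         * they can be dropped, which is what skip_preamble tracks.
         */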
        skip_preamble = ring->current_ctx == ctx;
        need_ctx_switch = ring->current_ctx != ctx;
        for (i = 0; i < num_ibs; ++i) {
                ib = &ibs[i];

                /* drop preamble IBs if we don't have a context switch */
                if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
                        continue;

                amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
                                    need_ctx_switch);
                need_ctx_switch = false;
        }

        if (ring->funcs->emit_hdp_invalidate)
                amdgpu_ring_emit_hdp_invalidate(ring);
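
        /*
         * The hardware fence signals once everything emitted above has
         * executed; it is the fence handed back to the caller through
         * @f.  If even the fence cannot be emitted, roll everything back.
         */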
        r = amdgpu_fence_emit(ring, &hwf);
        if (r) {
                dev_err(adev->dev, "failed to emit fence (%d)\n", r);
                if (job && job->vm_id)
                        amdgpu_vm_reset_id(adev, job->vm_id);
                amdgpu_ring_undo(ring);
                return r;
        }
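
        /*
         * The user fence is an extra fence write into a userspace
         * supplied BO (uf_bo) at uf_offset, so that userspace can poll
         * for completion of this submission without a kernel round trip.
         */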
        /* wrap the last IB with fence */
        if (job && job->uf_bo) {
                uint64_t addr = amdgpu_bo_gpu_offset(job->uf_bo);

                addr += job->uf_offset;
                amdgpu_ring_emit_fence(ring, addr, job->uf_sequence,
                                       AMDGPU_FENCE_FLAG_64BIT);
        }

        if (f)
                *f = fence_get(hwf);

        if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
                amdgpu_ring_patch_cond_exec(ring, patch_offset);

        ring->current_ctx = ctx;
        amdgpu_ring_commit(ring);
        return 0;
}
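
/*
 * Illustration of the CE/DE split described above: on SI and later,
 * userspace may pass two IBs per draw submission, with the CONST_IB
 * flagged so the ring backend routes it to the Constant Engine.  A
 * hypothetical caller-side sketch (the flag name comes from
 * amdgpu_drm.h; all other setup is elided):
 *
 *      struct amdgpu_ib ibs[2];
 *      struct fence *f;
 *
 *      ibs[0].flags |= AMDGPU_IB_FLAG_CE;      // CONST_IB, primes caches
 *      ibs[1].flags = 0;                       // DE IB with the draw
 *      amdgpu_ib_schedule(ring, 2, ibs, NULL, job, &f);
 */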

/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->ib_pool_ready) {
                return 0;
        }
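        /*
         * The pool is a single GTT buffer of AMDGPU_IB_POOL_SIZE * 64 KiB,
         * carved up on demand by the suballocator; individual IBs come
         * out of it via amdgpu_ib_get() above.
         */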
        r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
                                      AMDGPU_IB_POOL_SIZE*64*1024,
                                      AMDGPU_GPU_PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_GTT);
        if (r) {
                return r;
        }

        r = amdgpu_sa_bo_manager_start(adev, &adev->ring_tmp_bo);
        if (r) {
                return r;
        }

        adev->ib_pool_ready = true;
        if (amdgpu_debugfs_sa_init(adev)) {
                dev_err(adev->dev, "failed to register debugfs file for SA\n");
        }

        return 0;
}

/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
        if (adev->ib_pool_ready) {
                amdgpu_sa_bo_manager_suspend(adev, &adev->ring_tmp_bo);
                amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
                adev->ib_pool_ready = false;
        }
}

/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
        unsigned i;
        int r;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->ready)
                        continue;

                r = amdgpu_ring_test_ib(ring);
                if (r) {
                        /* the IB test failed, so take the ring offline */
                        ring->ready = false;

                        if (ring == &adev->gfx.gfx_ring[0]) {
                                /* oh, oh, that's really bad */
                                DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
                                adev->accel_working = false;
                                return r;
                        } else {
                                /* still not good, but we can live with it */
                                DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
                        }
                }
        }
        return 0;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;

        amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);

        return 0;
}

static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
        {"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
};

#endif

static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
#else
        return 0;
#endif
}
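
/*
 * With debugfs enabled, the suballocator state registered above can be
 * inspected at runtime; assuming the usual DRM debugfs layout, something
 * like:
 *
 *      cat /sys/kernel/debug/dri/0/amdgpu_sa_info
 */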