/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "vi.h"
#include "vid.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "tonga_sdma_pkt_open.h"
static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev);
MODULE_FIRMWARE("amdgpu/tonga_sdma.bin");
MODULE_FIRMWARE("amdgpu/tonga_sdma1.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin");
static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
        SDMA0_REGISTER_OFFSET,
        SDMA1_REGISTER_OFFSET
};
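/*
 * The two SDMA instances expose identical register blocks at a fixed
 * stride, so every mmSDMA0_* register in this file can address either
 * engine as, for example:
 *
 *      WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
 */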
static const u32 golden_settings_tonga_a11[] =
{
        mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
        mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
        mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};
static const u32 tonga_mgcg_cgcg_init[] =
{
        mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
        mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};
static const u32 golden_settings_fiji_a10[] =
{
        mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
        mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};
static const u32 fiji_mgcg_cgcg_init[] =
{
        mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
        mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};
static const u32 cz_golden_settings_a11[] =
{
        mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
        mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
        mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
        mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
        mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
        mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
        mmSDMA1_GFX_IB_CNTL, 0x00000100, 0x00000100,
        mmSDMA1_POWER_CNTL, 0x00000800, 0x0003c800,
        mmSDMA1_RLC0_IB_CNTL, 0x00000100, 0x00000100,
        mmSDMA1_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};
static const u32 cz_mgcg_cgcg_init[] =
{
        mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
        mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};
/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP.  sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things.  It also has support for tiling/detiling of
 * buffers.
 */
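/*
 * Every sDMA packet begins with a 32-bit header dword; the
 * SDMA_PKT_HEADER_OP() and SDMA_PKT_HEADER_SUB_OP() macros from
 * tonga_sdma_pkt_open.h place the opcode and sub-opcode in their header
 * fields, so a linear write packet, for instance, starts with:
 *
 *      SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 *      SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)
 *
 * followed by the packet-specific payload dwords.
 */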
static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_FIJI:
                amdgpu_program_register_sequence(adev,
                                                 fiji_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
                amdgpu_program_register_sequence(adev,
                                                 golden_settings_fiji_a10,
                                                 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
                break;
        case CHIP_TONGA:
                amdgpu_program_register_sequence(adev,
                                                 tonga_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
                amdgpu_program_register_sequence(adev,
                                                 golden_settings_tonga_a11,
                                                 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
                break;
        case CHIP_CARRIZO:
                amdgpu_program_register_sequence(adev,
                                                 cz_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
                amdgpu_program_register_sequence(adev,
                                                 cz_golden_settings_a11,
                                                 (const u32)ARRAY_SIZE(cz_golden_settings_a11));
                break;
        default:
                break;
        }
}
/**
 * sdma_v3_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
{
        const char *chip_name;
        char fw_name[30];
        int err, i;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
        const struct sdma_firmware_header_v1_0 *hdr;

        DRM_DEBUG("\n");

        switch (adev->asic_type) {
        case CHIP_TONGA:
                chip_name = "tonga";
                break;
        case CHIP_FIJI:
                chip_name = "fiji";
                break;
        case CHIP_CARRIZO:
                chip_name = "carrizo";
                break;
        default:
                BUG();
        }

        for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
                if (i == 0)
                        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
                else
                        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
                err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
                if (err)
                        goto out;
                err = amdgpu_ucode_validate(adev->sdma[i].fw);
                if (err)
                        goto out;
                hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
                adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
                adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);

                if (adev->firmware.smu_load) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
                        info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
                        info->fw = adev->sdma[i].fw;
                        header = (const struct common_firmware_header *)info->fw->data;
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
                }
        }
out:
        if (err) {
                printk(KERN_ERR
                       "sdma_v3_0: Failed to load firmware \"%s\"\n",
                       fw_name);
                for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
                        release_firmware(adev->sdma[i].fw);
                        adev->sdma[i].fw = NULL;
                }
        }
        return err;
}
/**
 * sdma_v3_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VI+).
 */
static uint32_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        u32 rptr;

        /* XXX check if swapping is necessary on BE */
        rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2;

        return rptr;
}
/**
 * sdma_v3_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VI+).
 */
static uint32_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u32 wptr;

        if (ring->use_doorbell) {
                /* XXX check if swapping is necessary on BE */
                wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
        } else {
                int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;

                wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
        }

        return wptr;
}
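/*
 * Note that ring->rptr/wptr are kept as dword indices while the
 * RB_RPTR/RB_WPTR registers and the writeback slots hold byte offsets,
 * hence the >> 2 when reading them here and the << 2 when they are
 * written back in sdma_v3_0_ring_set_wptr().
 */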
/**
 * sdma_v3_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VI+).
 */
static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->use_doorbell) {
                /* XXX check if swapping is necessary on BE */
                adev->wb.wb[ring->wptr_offs] = ring->wptr << 2;
                WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
        } else {
                int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;

                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
        }
}
/**
 * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (VI).
 */
static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
                                   struct amdgpu_ib *ib)
{
        u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
        u32 next_rptr = ring->wptr + 5;

        while ((next_rptr & 7) != 2)
                next_rptr++;
        next_rptr += 6;

        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
                          SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
        amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
        amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
        amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
        amdgpu_ring_write(ring, next_rptr);

        /* IB packet must end on an 8 DW boundary */
        while ((ring->wptr & 7) != 2)
                amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP));

        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
                          SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
        /* base must be 32 byte aligned */
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, 0);
}
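/*
 * The NOP padding above appears to work as follows: the next_rptr write
 * consumes 5 dwords, and NOPs are emitted until (ring->wptr & 7) == 2,
 * so the 6-dword INDIRECT packet that follows ends exactly on the 8 DW
 * boundary the engine requires.
 */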
/**
 * sdma_v3_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
        u32 ref_and_mask = 0;

        if (ring == &ring->adev->sdma[0].ring)
                ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
        else
                ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);

        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
        amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
        amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
        amdgpu_ring_write(ring, ref_and_mask); /* reference */
        amdgpu_ring_write(ring, ref_and_mask); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
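/*
 * POLL_REGMEM with HDP_FLUSH(1) both requests the HDP flush and then
 * polls GPU_HDP_FLUSH_DONE until (reg & mask) == reference for this
 * engine's bit; FUNC(3) selects the equality compare, and the final
 * dword caps the retries and sets the poll interval.
 */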
/**
 * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address where the fence value is written
 * @seq: fence sequence number to write
 * @flags: fence flags (e.g. AMDGPU_FENCE_FLAG_64BIT)
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (VI).
 */
static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                      unsigned flags)
{
        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
        /* write the fence */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, lower_32_bits(seq));

        /* optionally write high bits as well */
        if (write64bit) {
                addr += 4;
                amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
                amdgpu_ring_write(ring, lower_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(seq));
        }

        /* generate an interrupt */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
        amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
/**
 * sdma_v3_0_ring_emit_semaphore - emit a semaphore on the dma ring
 *
 * @ring: amdgpu_ring structure holding ring information
 * @semaphore: amdgpu semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (VI).
 */
static bool sdma_v3_0_ring_emit_semaphore(struct amdgpu_ring *ring,
                                          struct amdgpu_semaphore *semaphore,
                                          bool emit_wait)
{
        u64 addr = semaphore->gpu_addr;
        u32 sig = emit_wait ? 0 : 1;

        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SEM) |
                          SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(sig));
        amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffff8);
        amdgpu_ring_write(ring, upper_32_bits(addr));

        return true;
}
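/*
 * The address is masked with 0xfffffff8 because the semaphore location
 * must be 8-byte aligned; sig selects the signal (1) vs. wait (0) form
 * of the packet.
 */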
/**
 * sdma_v3_0_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VI).
 */
static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
{
        struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
        struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
        u32 rb_cntl, ib_cntl;
        int i;

        if ((adev->mman.buffer_funcs_ring == sdma0) ||
            (adev->mman.buffer_funcs_ring == sdma1))
                amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

        for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
                ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
        }
        sdma0->ready = false;
        sdma1->ready = false;
}
/**
 * sdma_v3_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VI).
 */
static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev)
{
        /* XXX todo */
}
/**
 * sdma_v3_0_ctx_switch_enable - enable/disable the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch (VI).
 */
static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
        u32 f32_cntl;
        int i;

        for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
                f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
                if (enable)
                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
                                                 AUTO_CTXSW_ENABLE, 1);
                else
                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
                                                 AUTO_CTXSW_ENABLE, 0);
                WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
        }
}
/**
 * sdma_v3_0_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VI).
 */
static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
{
        u32 f32_cntl;
        int i;

        if (!enable) {
                sdma_v3_0_gfx_stop(adev);
                sdma_v3_0_rlc_stop(adev);
        }

        for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
                f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
                if (enable)
                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
                else
                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
                WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
        }
}
/**
 * sdma_v3_0_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        u32 rb_cntl, ib_cntl;
        u32 rb_bufsz;
        u32 wb_offset;
        u32 doorbell;
        int i, j, r;

        for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
                ring = &adev->sdma[i].ring;
                wb_offset = (ring->rptr_offs * 4);

                mutex_lock(&adev->srbm_mutex);
                for (j = 0; j < 16; j++) {
                        vi_srbm_select(adev, 0, 0, 0, j);
                        /* SDMA GFX */
                        WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
                        WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
                }
                vi_srbm_select(adev, 0, 0, 0, 0);
                mutex_unlock(&adev->srbm_mutex);

                WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

                /* Set ring buffer size in dwords */
                rb_bufsz = order_base_2(ring->ring_size / 4);
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
                                        RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

                /* Initialize the ring buffer's read and write pointers */
                WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);

                /* set the wb address whether it's enabled or not */
                WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
                       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
                WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
                       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

                WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
                WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

                ring->wptr = 0;
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);

                doorbell = RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]);

                if (ring->use_doorbell) {
                        doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL,
                                                 OFFSET, ring->doorbell_index);
                        doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
                } else {
                        doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
                }
                WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell);

                /* enable DMA RB */
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

                ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
                /* enable DMA IBs */
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

                ring->ready = true;

                r = amdgpu_ring_test_ring(ring);
                if (r) {
                        ring->ready = false;
                        return r;
                }

                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
        }

        return 0;
}
/**
 * sdma_v3_0_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev)
{
        /* XXX todo */
        return 0;
}
/**
 * sdma_v3_0_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
{
        const struct sdma_firmware_header_v1_0 *hdr;
        const __le32 *fw_data;
        u32 fw_size;
        int i, j;

        if (!adev->sdma[0].fw || !adev->sdma[1].fw)
                return -EINVAL;

        /* halt the MEs */
        sdma_v3_0_enable(adev, false);

        for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
                hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
                amdgpu_ucode_print_sdma_hdr(&hdr->header);
                fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
                fw_data = (const __le32 *)
                        (adev->sdma[i].fw->data +
                         le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
                for (j = 0; j < fw_size; j++)
                        WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
                WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
        }

        return 0;
}
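/*
 * Upload protocol, as this loop suggests: writing 0 to SDMA0_UCODE_ADDR
 * resets the upload pointer (the address presumably auto-increments on
 * each SDMA0_UCODE_DATA write), the code words are streamed in, and the
 * final UCODE_ADDR write records the firmware version.
 */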
/**
 * sdma_v3_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_start(struct amdgpu_device *adev)
{
        int r;

        if (!adev->firmware.smu_load) {
                r = sdma_v3_0_load_microcode(adev);
                if (r)
                        return r;
        } else {
                r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
                                                AMDGPU_UCODE_ID_SDMA0);
                if (r)
                        return -EINVAL;
                r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
                                                AMDGPU_UCODE_ID_SDMA1);
                if (r)
                        return -EINVAL;
        }

        /* unhalt the MEs */
        sdma_v3_0_enable(adev, true);
        /* enable sdma ring preemption */
        sdma_v3_0_ctx_switch_enable(adev, true);

        /* start the gfx rings and rlc compute queues */
        r = sdma_v3_0_gfx_resume(adev);
        if (r)
                return r;
        r = sdma_v3_0_rlc_resume(adev);
        if (r)
                return r;

        return 0;
}
/**
 * sdma_v3_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned i;
        unsigned index;
        int r;
        u32 tmp;
        u64 gpu_addr;

        r = amdgpu_wb_get(adev, &index);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
                return r;
        }

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);

        r = amdgpu_ring_lock(ring, 5);
        if (r) {
                DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
                amdgpu_wb_free(adev, index);
                return r;
        }

        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
                          SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
        amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
        amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_unlock_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = le32_to_cpu(adev->wb.wb[index]);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        amdgpu_wb_free(adev, index);

        return r;
}
/**
 * sdma_v3_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (VI).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct fence *f = NULL;
        unsigned i;
        unsigned index;
        int r;
        u32 tmp = 0;
        u64 gpu_addr;

        r = amdgpu_wb_get(adev, &index);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
                return r;
        }

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(ring, NULL, 256, &ib);
        if (r) {
                DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
                goto err0;
        }

        ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
                SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
        ib.ptr[1] = lower_32_bits(gpu_addr);
        ib.ptr[2] = upper_32_bits(gpu_addr);
        ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
        ib.ptr[4] = 0xDEADBEEF;
        ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
        ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
        ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
        ib.length_dw = 8;

        r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
                                                 AMDGPU_FENCE_OWNER_UNDEFINED,
                                                 &f);
        if (r)
                goto err1;

        r = fence_wait(f, false);
        if (r) {
                DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
                goto err1;
        }
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = le32_to_cpu(adev->wb.wb[index]);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }
        if (i < adev->usec_timeout) {
                DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
                         ring->idx, i);
                goto err1;
        } else {
                DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
                r = -EINVAL;
        }
err1:
        fence_put(f);
        amdgpu_ib_free(adev, &ib);
err0:
        amdgpu_wb_free(adev, index);
        return r;
}
/**
 * sdma_v3_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (VI).
 */
static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
                                  uint64_t pe, uint64_t src,
                                  unsigned count)
{
        while (count) {
                unsigned bytes = count * 8;
                if (bytes > 0x1FFFF8)
                        bytes = 0x1FFFF8;

                ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
                        SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
                ib->ptr[ib->length_dw++] = bytes;
                ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
                ib->ptr[ib->length_dw++] = lower_32_bits(src);
                ib->ptr[ib->length_dw++] = upper_32_bits(src);
                ib->ptr[ib->length_dw++] = lower_32_bits(pe);
                ib->ptr[ib->length_dw++] = upper_32_bits(pe);

                pe += bytes;
                src += bytes;
                count -= bytes / 8;
        }
}
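/*
 * 0x1FFFF8 is the largest 8-byte-aligned byte count that fits in the
 * copy packet's count field, so each iteration moves at most
 * 0x1FFFF8 / 8 = 0x3FFFF page table entries.
 */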
/**
 * sdma_v3_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using sDMA (VI).
 */
static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
                                   uint64_t pe,
                                   uint64_t addr, unsigned count,
                                   uint32_t incr, uint32_t flags)
{
        uint64_t value;
        unsigned ndw;

        while (count) {
                ndw = count * 2;
                if (ndw > 0xFFFFE)
                        ndw = 0xFFFFE;

                /* for non-physically contiguous pages (system) */
                ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
                        SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
                ib->ptr[ib->length_dw++] = pe;
                ib->ptr[ib->length_dw++] = upper_32_bits(pe);
                ib->ptr[ib->length_dw++] = ndw;
                for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                        if (flags & AMDGPU_PTE_SYSTEM) {
                                value = amdgpu_vm_map_gart(ib->ring->adev, addr);
                                value &= 0xFFFFFFFFFFFFF000ULL;
                        } else if (flags & AMDGPU_PTE_VALID) {
                                value = addr;
                        } else {
                                value = 0;
                        }
                        addr += incr;
                        value |= flags;
                        ib->ptr[ib->length_dw++] = value;
                        ib->ptr[ib->length_dw++] = upper_32_bits(value);
                }
        }
}
/**
 * sdma_v3_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (VI).
 */
static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
                                     uint64_t pe,
                                     uint64_t addr, unsigned count,
                                     uint32_t incr, uint32_t flags)
{
        uint64_t value;
        unsigned ndw;

        while (count) {
                ndw = count;
                if (ndw > 0x7FFFF)
                        ndw = 0x7FFFF;

                if (flags & AMDGPU_PTE_VALID)
                        value = addr;
                else
                        value = 0;

                /* for physically contiguous pages (vram) */
                ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
                ib->ptr[ib->length_dw++] = pe; /* dst addr */
                ib->ptr[ib->length_dw++] = upper_32_bits(pe);
                ib->ptr[ib->length_dw++] = flags; /* mask */
                ib->ptr[ib->length_dw++] = 0;
                ib->ptr[ib->length_dw++] = value; /* value */
                ib->ptr[ib->length_dw++] = upper_32_bits(value);
                ib->ptr[ib->length_dw++] = incr; /* increment size */
                ib->ptr[ib->length_dw++] = 0;
                ib->ptr[ib->length_dw++] = ndw; /* number of entries */

                pe += ndw * 8;
                addr += ndw * incr;
                count -= ndw;
        }
}
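/*
 * GEN_PTEPDE makes the engine generate up to ndw entries from the
 * starting value, mask and increment, so a large contiguous mapping
 * costs one 10-dword packet per 0x7FFFF entries instead of the two
 * dwords per PTE that sdma_v3_0_vm_write_pte() emits.
 */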
/**
 * sdma_v3_0_vm_pad_ib - pad the IB to the required number of dw
 *
 * @ib: indirect buffer to fill with padding
 *
 */
static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib)
{
        while (ib->length_dw & 0x7)
                ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}
/**
 * sdma_v3_0_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vm_id: vm id to flush
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VI).
 */
static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                         unsigned vm_id, uint64_t pd_addr)
{
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
                          SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
        if (vm_id < 8) {
                amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
        } else {
                amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
        }
        amdgpu_ring_write(ring, pd_addr >> 12);

        /* flush TLB */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
                          SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
        amdgpu_ring_write(ring, 1 << vm_id);

        /* wait for flush */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, 0); /* reference */
        amdgpu_ring_write(ring, 0); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
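/*
 * The page table base registers for VM contexts 0-7 and 8-15 live in two
 * separate register blocks, which is why the SRBM write above targets
 * mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id for the low contexts and
 * mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8) for the high ones.
 */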
static int sdma_v3_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        sdma_v3_0_set_ring_funcs(adev);
        sdma_v3_0_set_buffer_funcs(adev);
        sdma_v3_0_set_vm_pte_funcs(adev);
        sdma_v3_0_set_irq_funcs(adev);

        return 0;
}
static int sdma_v3_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* SDMA trap event */
        r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
        if (r)
                return r;

        /* SDMA Privileged inst */
        r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
        if (r)
                return r;

        /* SDMA Privileged inst */
        r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
        if (r)
                return r;

        r = sdma_v3_0_init_microcode(adev);
        if (r) {
                DRM_ERROR("Failed to load sdma firmware!\n");
                return r;
        }

        ring = &adev->sdma[0].ring;
        ring->ring_obj = NULL;
        ring->use_doorbell = true;
        ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE0;

        ring = &adev->sdma[1].ring;
        ring->ring_obj = NULL;
        ring->use_doorbell = true;
        ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE1;

        ring = &adev->sdma[0].ring;
        sprintf(ring->name, "sdma0");
        r = amdgpu_ring_init(adev, ring, 256 * 1024,
                             SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
                             &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
                             AMDGPU_RING_TYPE_SDMA);
        if (r)
                return r;

        ring = &adev->sdma[1].ring;
        sprintf(ring->name, "sdma1");
        r = amdgpu_ring_init(adev, ring, 256 * 1024,
                             SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
                             &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
                             AMDGPU_RING_TYPE_SDMA);
        if (r)
                return r;

        return r;
}
static int sdma_v3_0_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_ring_fini(&adev->sdma[0].ring);
        amdgpu_ring_fini(&adev->sdma[1].ring);

        return 0;
}
static int sdma_v3_0_hw_init(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        sdma_v3_0_init_golden_registers(adev);

        r = sdma_v3_0_start(adev);
        if (r)
                return r;

        return r;
}
static int sdma_v3_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        sdma_v3_0_ctx_switch_enable(adev, false);
        sdma_v3_0_enable(adev, false);

        return 0;
}
static int sdma_v3_0_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return sdma_v3_0_hw_fini(adev);
}
static int sdma_v3_0_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return sdma_v3_0_hw_init(adev);
}
static bool sdma_v3_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 tmp = RREG32(mmSRBM_STATUS2);

        if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
                   SRBM_STATUS2__SDMA1_BUSY_MASK))
                return false;

        return true;
}
static int sdma_v3_0_wait_for_idle(void *handle)
{
        unsigned i;
        u32 tmp;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
                                SRBM_STATUS2__SDMA1_BUSY_MASK);

                if (!tmp)
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}
static void sdma_v3_0_print_status(void *handle)
{
        int i, j;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        dev_info(adev->dev, "VI SDMA registers\n");
        dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
                 RREG32(mmSRBM_STATUS2));
        for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
                dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
                         i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_F32_CNTL=0x%08X\n",
                         i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_CNTL=0x%08X\n",
                         i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
                         i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_GFX_IB_CNTL=0x%08X\n",
                         i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_GFX_RB_CNTL=0x%08X\n",
                         i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR=0x%08X\n",
                         i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_GFX_RB_WPTR=0x%08X\n",
                         i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
                         i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
                         i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE=0x%08X\n",
                         i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
                         i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_GFX_DOORBELL=0x%08X\n",
                         i, RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]));
                mutex_lock(&adev->srbm_mutex);
                for (j = 0; j < 16; j++) {
                        vi_srbm_select(adev, 0, 0, 0, j);
                        dev_info(adev->dev, "  VM %d:\n", j);
                        dev_info(adev->dev, "  SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n",
                                 i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
                        dev_info(adev->dev, "  SDMA%d_GFX_APE1_CNTL=0x%08X\n",
                                 i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
                }
                vi_srbm_select(adev, 0, 0, 0, 0);
                mutex_unlock(&adev->srbm_mutex);
        }
}
static int sdma_v3_0_soft_reset(void *handle)
{
        u32 srbm_soft_reset = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 tmp = RREG32(mmSRBM_STATUS2);

        if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
                /* sdma0 */
                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
                tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
                WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
        }
        if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
                /* sdma1 */
                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
                tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
                WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
        }

        if (srbm_soft_reset) {
                sdma_v3_0_print_status((void *)adev);

                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~srbm_soft_reset;
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                /* Wait a little for things to settle down */
                udelay(50);

                sdma_v3_0_print_status((void *)adev);
        }

        return 0;
}
static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        u32 sdma_cntl;

        switch (type) {
        case AMDGPU_SDMA_IRQ_TRAP0:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
                        sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
                        WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
                        sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
                        WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
                        break;
                default:
                        break;
                }
                break;
        case AMDGPU_SDMA_IRQ_TRAP1:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
                        sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
                        WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
                        sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
                        WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
                        break;
                default:
                        break;
                }
                break;
        default:
                break;
        }
        return 0;
}
static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        u8 instance_id, queue_id;

        instance_id = (entry->ring_id & 0x3) >> 0;
        queue_id = (entry->ring_id & 0xc) >> 2;
        DRM_DEBUG("IH: SDMA trap\n");
        switch (instance_id) {
        case 0:
                switch (queue_id) {
                case 0:
                        amdgpu_fence_process(&adev->sdma[0].ring);
                        break;
                case 1:
                        /* XXX compute */
                        break;
                case 2:
                        /* XXX compute */
                        break;
                }
                break;
        case 1:
                switch (queue_id) {
                case 0:
                        amdgpu_fence_process(&adev->sdma[1].ring);
                        break;
                case 1:
                        /* XXX compute */
                        break;
                case 2:
                        /* XXX compute */
                        break;
                }
                break;
        }
        return 0;
}
static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
                                              struct amdgpu_irq_src *source,
                                              struct amdgpu_iv_entry *entry)
{
        DRM_ERROR("Illegal instruction in SDMA command stream\n");
        schedule_work(&adev->reset_work);
        return 0;
}
static int sdma_v3_0_set_clockgating_state(void *handle,
                                           enum amd_clockgating_state state)
{
        return 0;
}

static int sdma_v3_0_set_powergating_state(void *handle,
                                           enum amd_powergating_state state)
{
        return 0;
}
const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
        .early_init = sdma_v3_0_early_init,
        .late_init = NULL,
        .sw_init = sdma_v3_0_sw_init,
        .sw_fini = sdma_v3_0_sw_fini,
        .hw_init = sdma_v3_0_hw_init,
        .hw_fini = sdma_v3_0_hw_fini,
        .suspend = sdma_v3_0_suspend,
        .resume = sdma_v3_0_resume,
        .is_idle = sdma_v3_0_is_idle,
        .wait_for_idle = sdma_v3_0_wait_for_idle,
        .soft_reset = sdma_v3_0_soft_reset,
        .print_status = sdma_v3_0_print_status,
        .set_clockgating_state = sdma_v3_0_set_clockgating_state,
        .set_powergating_state = sdma_v3_0_set_powergating_state,
};
/**
 * sdma_v3_0_ring_is_lockup - Check if the DMA engine is locked up
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (VI).
 * Returns true if the engine appears to be locked up, false if not.
 */
static bool sdma_v3_0_ring_is_lockup(struct amdgpu_ring *ring)
{
        if (sdma_v3_0_is_idle(ring->adev)) {
                amdgpu_ring_lockup_update(ring);
                return false;
        }
        return amdgpu_ring_test_lockup(ring);
}
static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
        .get_rptr = sdma_v3_0_ring_get_rptr,
        .get_wptr = sdma_v3_0_ring_get_wptr,
        .set_wptr = sdma_v3_0_ring_set_wptr,
        .parse_cs = NULL,
        .emit_ib = sdma_v3_0_ring_emit_ib,
        .emit_fence = sdma_v3_0_ring_emit_fence,
        .emit_semaphore = sdma_v3_0_ring_emit_semaphore,
        .emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
        .emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
        .test_ring = sdma_v3_0_ring_test_ring,
        .test_ib = sdma_v3_0_ring_test_ib,
        .is_lockup = sdma_v3_0_ring_is_lockup,
};
static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
        adev->sdma[0].ring.funcs = &sdma_v3_0_ring_funcs;
        adev->sdma[1].ring.funcs = &sdma_v3_0_ring_funcs;
}
static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
        .set = sdma_v3_0_set_trap_irq_state,
        .process = sdma_v3_0_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = {
        .process = sdma_v3_0_process_illegal_inst_irq,
};
static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
        adev->sdma_trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
        adev->sdma_illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
}
/**
 * sdma_v3_0_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (VI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ring *ring,
                                       uint64_t src_offset,
                                       uint64_t dst_offset,
                                       uint32_t byte_count)
{
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
                          SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR));
        amdgpu_ring_write(ring, byte_count);
        amdgpu_ring_write(ring, 0); /* src/dst endian swap */
        amdgpu_ring_write(ring, lower_32_bits(src_offset));
        amdgpu_ring_write(ring, upper_32_bits(src_offset));
        amdgpu_ring_write(ring, lower_32_bits(dst_offset));
        amdgpu_ring_write(ring, upper_32_bits(dst_offset));
}
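/*
 * This COPY_LINEAR packet is 7 dwords (header, byte count, endian swap,
 * src lo/hi, dst lo/hi), which is where copy_num_dw in
 * sdma_v3_0_buffer_funcs below comes from.
 */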
/**
 * sdma_v3_0_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (VI).
 */
static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ring *ring,
                                       uint32_t src_data,
                                       uint64_t dst_offset,
                                       uint32_t byte_count)
{
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL));
        amdgpu_ring_write(ring, lower_32_bits(dst_offset));
        amdgpu_ring_write(ring, upper_32_bits(dst_offset));
        amdgpu_ring_write(ring, src_data);
        amdgpu_ring_write(ring, byte_count);
}
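/*
 * The CONST_FILL packet is 5 dwords (header, dst lo/hi, fill value, byte
 * count), matching fill_num_dw in sdma_v3_0_buffer_funcs below.
 */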
static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
        .copy_max_bytes = 0x1fffff,
        .copy_num_dw = 7,
        .emit_copy_buffer = sdma_v3_0_emit_copy_buffer,

        .fill_max_bytes = 0x1fffff,
        .fill_num_dw = 5,
        .emit_fill_buffer = sdma_v3_0_emit_fill_buffer,
};
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
{
        if (adev->mman.buffer_funcs == NULL) {
                adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
                adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
        }
}
static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
        .copy_pte = sdma_v3_0_vm_copy_pte,
        .write_pte = sdma_v3_0_vm_write_pte,
        .set_pte_pde = sdma_v3_0_vm_set_pte_pde,
        .pad_ib = sdma_v3_0_vm_pad_ib,
};
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
        if (adev->vm_manager.vm_pte_funcs == NULL) {
                adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
                adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
                adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
        }
}