/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
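
/*
 * UVD 6.0 IP block support.  This file boots the UVD VCPU firmware, owns the
 * single UVD decode ring and wires the block into the common amdgpu IP, ring
 * and interrupt callback tables defined at the bottom of the file.
 */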

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}

static int uvd_v6_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        uvd_v6_0_set_ring_funcs(adev);
        uvd_v6_0_set_irq_funcs(adev);

        return 0;
}

static int uvd_v6_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* UVD TRAP */
        r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
        if (r)
                return r;

        r = amdgpu_uvd_sw_init(adev);
        if (r)
                return r;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        ring = &adev->uvd.ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf,
                             &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);

        return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;

        r = amdgpu_uvd_sw_fini(adev);
        if (r)
                return r;

        return r;
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t tmp;
        int r;

        r = uvd_v6_0_start(adev);
        if (r)
                goto done;

        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                goto done;
        }

        r = amdgpu_ring_alloc(ring, 10);
        if (r) {
                DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
                goto done;
        }
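
        /*
         * Use the ring itself to program the semaphore wait/signal timeouts
         * and to clear any stale timeout status before the block is declared
         * ready.
         */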
        tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        /* Clear timeout status bits */
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
        amdgpu_ring_write(ring, 3);

        amdgpu_ring_commit(ring);

done:
        if (!r)
                DRM_INFO("UVD initialized successfully.\n");

        return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.ring;

        uvd_v6_0_stop(adev);
        ring->ready = false;

        return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* Skip this for APU for now */
        if (!(adev->flags & AMD_IS_APU)) {
                r = amdgpu_uvd_suspend(adev);
                if (r)
                        return r;
        }

        r = uvd_v6_0_hw_fini(adev);
        if (r)
                return r;

        return r;
}

static int uvd_v6_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* Skip this for APU for now */
        if (!(adev->flags & AMD_IS_APU)) {
                r = amdgpu_uvd_resume(adev);
                if (r)
                        return r;
        }

        r = uvd_v6_0_hw_init(adev);
        if (r)
                return r;

        return r;
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
        uint64_t offset;
        uint32_t size;

        /* program memory controller bits 0-27 */
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                        lower_32_bits(adev->uvd.gpu_addr));
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                        upper_32_bits(adev->uvd.gpu_addr));
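
        /*
         * The firmware image, stack and heap sit back to back in the UVD BO;
         * the CACHE_OFFSETn registers appear to take 8-byte units, hence the
         * ">> 3" below.
         */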
        offset = AMDGPU_UVD_FIRMWARE_OFFSET;
        size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
        WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

        offset += size;
        size = AMDGPU_UVD_STACK_SIZE;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

        offset += size;
        size = AMDGPU_UVD_HEAP_SIZE;
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

        WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
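
/*
 * The two helpers below select which UVD/SUVD sub-blocks participate in
 * coarse clock gating.  The CZ (APU) variant also toggles the per-codec
 * H264/HEVC SUVD gates, which the Tonga variant leaves untouched.
 */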
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
                                             bool enable)
{
        u32 data, data1;

        data = RREG32(mmUVD_CGC_GATE);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);
        if (enable) {
                data |= UVD_CGC_GATE__SYS_MASK |
                        UVD_CGC_GATE__UDEC_MASK |
                        UVD_CGC_GATE__MPEG2_MASK |
                        UVD_CGC_GATE__RBC_MASK |
                        UVD_CGC_GATE__LMI_MC_MASK |
                        UVD_CGC_GATE__IDCT_MASK |
                        UVD_CGC_GATE__MPRD_MASK |
                        UVD_CGC_GATE__MPC_MASK |
                        UVD_CGC_GATE__LBSI_MASK |
                        UVD_CGC_GATE__LRBBM_MASK |
                        UVD_CGC_GATE__UDEC_RE_MASK |
                        UVD_CGC_GATE__UDEC_CM_MASK |
                        UVD_CGC_GATE__UDEC_IT_MASK |
                        UVD_CGC_GATE__UDEC_DB_MASK |
                        UVD_CGC_GATE__UDEC_MP_MASK |
                        UVD_CGC_GATE__WCB_MASK |
                        UVD_CGC_GATE__VCPU_MASK |
                        UVD_CGC_GATE__SCPU_MASK;
                data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
                        UVD_SUVD_CGC_GATE__SIT_MASK |
                        UVD_SUVD_CGC_GATE__SMP_MASK |
                        UVD_SUVD_CGC_GATE__SCM_MASK |
                        UVD_SUVD_CGC_GATE__SDB_MASK |
                        UVD_SUVD_CGC_GATE__SRE_H264_MASK |
                        UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
                        UVD_SUVD_CGC_GATE__SIT_H264_MASK |
                        UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
                        UVD_SUVD_CGC_GATE__SCM_H264_MASK |
                        UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
                        UVD_SUVD_CGC_GATE__SDB_H264_MASK |
                        UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
        } else {
                data &= ~(UVD_CGC_GATE__SYS_MASK |
                        UVD_CGC_GATE__UDEC_MASK |
                        UVD_CGC_GATE__MPEG2_MASK |
                        UVD_CGC_GATE__RBC_MASK |
                        UVD_CGC_GATE__LMI_MC_MASK |
                        UVD_CGC_GATE__LMI_UMC_MASK |
                        UVD_CGC_GATE__IDCT_MASK |
                        UVD_CGC_GATE__MPRD_MASK |
                        UVD_CGC_GATE__MPC_MASK |
                        UVD_CGC_GATE__LBSI_MASK |
                        UVD_CGC_GATE__LRBBM_MASK |
                        UVD_CGC_GATE__UDEC_RE_MASK |
                        UVD_CGC_GATE__UDEC_CM_MASK |
                        UVD_CGC_GATE__UDEC_IT_MASK |
                        UVD_CGC_GATE__UDEC_DB_MASK |
                        UVD_CGC_GATE__UDEC_MP_MASK |
                        UVD_CGC_GATE__WCB_MASK |
                        UVD_CGC_GATE__VCPU_MASK |
                        UVD_CGC_GATE__SCPU_MASK);
                data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
                        UVD_SUVD_CGC_GATE__SIT_MASK |
                        UVD_SUVD_CGC_GATE__SMP_MASK |
                        UVD_SUVD_CGC_GATE__SCM_MASK |
                        UVD_SUVD_CGC_GATE__SDB_MASK |
                        UVD_SUVD_CGC_GATE__SRE_H264_MASK |
                        UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
                        UVD_SUVD_CGC_GATE__SIT_H264_MASK |
                        UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
                        UVD_SUVD_CGC_GATE__SCM_H264_MASK |
                        UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
                        UVD_SUVD_CGC_GATE__SDB_H264_MASK |
                        UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
        }
        WREG32(mmUVD_CGC_GATE, data);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
}

static void tonga_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
                                                bool enable)
{
        u32 data, data1;

        data = RREG32(mmUVD_CGC_GATE);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);
        if (enable) {
                data |= UVD_CGC_GATE__SYS_MASK |
                        UVD_CGC_GATE__UDEC_MASK |
                        UVD_CGC_GATE__MPEG2_MASK |
                        UVD_CGC_GATE__RBC_MASK |
                        UVD_CGC_GATE__LMI_MC_MASK |
                        UVD_CGC_GATE__IDCT_MASK |
                        UVD_CGC_GATE__MPRD_MASK |
                        UVD_CGC_GATE__MPC_MASK |
                        UVD_CGC_GATE__LBSI_MASK |
                        UVD_CGC_GATE__LRBBM_MASK |
                        UVD_CGC_GATE__UDEC_RE_MASK |
                        UVD_CGC_GATE__UDEC_CM_MASK |
                        UVD_CGC_GATE__UDEC_IT_MASK |
                        UVD_CGC_GATE__UDEC_DB_MASK |
                        UVD_CGC_GATE__UDEC_MP_MASK |
                        UVD_CGC_GATE__WCB_MASK |
                        UVD_CGC_GATE__VCPU_MASK |
                        UVD_CGC_GATE__SCPU_MASK;
                data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
                        UVD_SUVD_CGC_GATE__SIT_MASK |
                        UVD_SUVD_CGC_GATE__SMP_MASK |
                        UVD_SUVD_CGC_GATE__SCM_MASK |
                        UVD_SUVD_CGC_GATE__SDB_MASK;
        } else {
                data &= ~(UVD_CGC_GATE__SYS_MASK |
                        UVD_CGC_GATE__UDEC_MASK |
                        UVD_CGC_GATE__MPEG2_MASK |
                        UVD_CGC_GATE__RBC_MASK |
                        UVD_CGC_GATE__LMI_MC_MASK |
                        UVD_CGC_GATE__LMI_UMC_MASK |
                        UVD_CGC_GATE__IDCT_MASK |
                        UVD_CGC_GATE__MPRD_MASK |
                        UVD_CGC_GATE__MPC_MASK |
                        UVD_CGC_GATE__LBSI_MASK |
                        UVD_CGC_GATE__LRBBM_MASK |
                        UVD_CGC_GATE__UDEC_RE_MASK |
                        UVD_CGC_GATE__UDEC_CM_MASK |
                        UVD_CGC_GATE__UDEC_IT_MASK |
                        UVD_CGC_GATE__UDEC_DB_MASK |
                        UVD_CGC_GATE__UDEC_MP_MASK |
                        UVD_CGC_GATE__WCB_MASK |
                        UVD_CGC_GATE__VCPU_MASK |
                        UVD_CGC_GATE__SCPU_MASK);
                data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
                        UVD_SUVD_CGC_GATE__SIT_MASK |
                        UVD_SUVD_CGC_GATE__SMP_MASK |
                        UVD_SUVD_CGC_GATE__SCM_MASK |
                        UVD_SUVD_CGC_GATE__SDB_MASK);
        }
        WREG32(mmUVD_CGC_GATE, data);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
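
/*
 * Put the UVD clock gating controller into dynamic mode.  The REGS gate is
 * always left un-gated so register access keeps working, the gate-delay and
 * clock-off-delay timers are programmed, and the per-block MODE bits plus the
 * CGC_CTRL2 ramp/divider fields are set up according to the requested
 * software-control mode.
 */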
static void uvd_v6_0_set_uvd_dynamic_clock_mode(struct amdgpu_device *adev,
                                                bool swmode)
{
        u32 data, data1 = 0, data2;

        /* Always un-gate UVD REGS bit */
        data = RREG32(mmUVD_CGC_GATE);
        data &= ~(UVD_CGC_GATE__REGS_MASK);
        WREG32(mmUVD_CGC_GATE, data);

        data = RREG32(mmUVD_CGC_CTRL);
        data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
                  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
        data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
                1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER) |
                4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY);

        data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
        if (swmode) {
                data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
                          UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
                          UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
                          UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
                          UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
                          UVD_CGC_CTRL__SYS_MODE_MASK |
                          UVD_CGC_CTRL__UDEC_MODE_MASK |
                          UVD_CGC_CTRL__MPEG2_MODE_MASK |
                          UVD_CGC_CTRL__REGS_MODE_MASK |
                          UVD_CGC_CTRL__RBC_MODE_MASK |
                          UVD_CGC_CTRL__LMI_MC_MODE_MASK |
                          UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
                          UVD_CGC_CTRL__IDCT_MODE_MASK |
                          UVD_CGC_CTRL__MPRD_MODE_MASK |
                          UVD_CGC_CTRL__MPC_MODE_MASK |
                          UVD_CGC_CTRL__LBSI_MODE_MASK |
                          UVD_CGC_CTRL__LRBBM_MODE_MASK |
                          UVD_CGC_CTRL__WCB_MODE_MASK |
                          UVD_CGC_CTRL__VCPU_MODE_MASK |
                          UVD_CGC_CTRL__JPEG_MODE_MASK |
                          UVD_CGC_CTRL__SCPU_MODE_MASK);
                data1 |= UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
                         UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK;
                data1 &= ~UVD_CGC_CTRL2__GATER_DIV_ID_MASK;
                data1 |= 7 << REG_FIELD_SHIFT(UVD_CGC_CTRL2, GATER_DIV_ID);
                data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
                           UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
                           UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
                           UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
                           UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
        } else {
                data |= UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
                        UVD_CGC_CTRL__SYS_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_MODE_MASK |
                        UVD_CGC_CTRL__MPEG2_MODE_MASK |
                        UVD_CGC_CTRL__REGS_MODE_MASK |
                        UVD_CGC_CTRL__RBC_MODE_MASK |
                        UVD_CGC_CTRL__LMI_MC_MODE_MASK |
                        UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
                        UVD_CGC_CTRL__IDCT_MODE_MASK |
                        UVD_CGC_CTRL__MPRD_MODE_MASK |
                        UVD_CGC_CTRL__MPC_MODE_MASK |
                        UVD_CGC_CTRL__LBSI_MODE_MASK |
                        UVD_CGC_CTRL__LRBBM_MODE_MASK |
                        UVD_CGC_CTRL__WCB_MODE_MASK |
                        UVD_CGC_CTRL__VCPU_MODE_MASK |
                        UVD_CGC_CTRL__SCPU_MODE_MASK;
                data2 |= UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
                         UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
                         UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
                         UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
                         UVD_SUVD_CGC_CTRL__SDB_MODE_MASK;
        }
        WREG32(mmUVD_CGC_CTRL, data);
        WREG32(mmUVD_SUVD_CGC_CTRL, data2);

        data = RREG32_UVD_CTX(ixUVD_CGC_CTRL2);
        data &= ~(REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_OCLK_RAMP_EN) |
                  REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_RCLK_RAMP_EN) |
                  REG_FIELD_MASK(UVD_CGC_CTRL2, GATER_DIV_ID));
        data1 &= (REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_OCLK_RAMP_EN) |
                  REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_RCLK_RAMP_EN) |
                  REG_FIELD_MASK(UVD_CGC_CTRL2, GATER_DIV_ID));
        data |= data1;
        WREG32_UVD_CTX(ixUVD_CGC_CTRL2, data);
}

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
        uint32_t mp_swap_cntl;
        int i, j, r;
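
        /*
         * Bring-up sequence: program the memory controller, pick a clock
         * gating mode, reset the block, set up the LMI and MPC muxes, boot
         * the VCPU and wait for it to report ready, then program the ring
         * buffer registers.
         */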
        WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

        /* disable byte swapping */
        lmi_swap_cntl = 0;
        mp_swap_cntl = 0;

        uvd_v6_0_mc_resume(adev);

        /* Set dynamic clock gating in S/W control mode */
        if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) {
                if (adev->flags & AMD_IS_APU)
                        cz_set_uvd_clock_gating_branches(adev, false);
                else
                        tonga_set_uvd_clock_gating_branches(adev, false);
                uvd_v6_0_set_uvd_dynamic_clock_mode(adev, true);
        } else {
                /* disable clock gating */
                uint32_t data = RREG32(mmUVD_CGC_CTRL);

                data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                WREG32(mmUVD_CGC_CTRL, data);
        }
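
        /*
         * With MGCG supported the gating branches are deliberately left off
         * during start-up; the set_clockgating_state callback turns them back
         * on later when clock gating is requested.
         */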
        /* disable interrupt */
        WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

        /* stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
                UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
        mdelay(5);

        /* take UVD block out of reset */
        WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        /* initialize UVD memory controller */
        WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
                             (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
        lmi_swap_cntl = 0xa;
        mp_swap_cntl = 0;
#endif
        WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
        WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

        WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
        WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
        WREG32(mmUVD_MPC_SET_ALU, 0);
        WREG32(mmUVD_MPC_SET_MUX, 0x88);

        /* take all subblocks out of reset, except VCPU */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* enable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 1 << 9);

        /* enable UMC */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

        /* boot up the VCPU */
        WREG32(mmUVD_SOFT_RESET, 0);
        mdelay(10);

        for (i = 0; i < 10; ++i) {
                uint32_t status;

                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
                WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
                         ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(mmUVD_SOFT_RESET, 0,
                         ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                r = -1;
        }

        if (r) {
                DRM_ERROR("UVD not responding, giving up!!!\n");
                return r;
        }

        /* enable master interrupt */
        WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

        /* clear the bit 4 of UVD_STATUS */
        WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));
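
        /*
         * Ring buffer control: RB_BUFSZ is the log2 of the ring size,
         * fetching stays disabled (RB_NO_FETCH) until the ring registers are
         * fully programmed, RB_NO_UPDATE presumably turns off the read
         * pointer write-back, and RB_RPTR_WR_EN lets the driver reset the
         * read pointer below.
         */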
        rb_bufsz = order_base_2(ring->ring_size);
        tmp = 0;
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, tmp);

        /* set the write pointer delay */
        WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

        /* set the wb address */
        WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

        /* program the RB_BASE for ring buffer */
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                        lower_32_bits(ring->gpu_addr));
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
                        upper_32_bits(ring->gpu_addr));

        /* Initialize the ring buffer's read and write pointers */
        WREG32(mmUVD_RBC_RB_RPTR, 0);

        ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
        WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

        WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

        return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put VCPU into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* disable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 0x0);

        /* Unstall UMC and register bus */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @fence: fence to emit
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, addr & 0xffffffff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0);
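
        /*
         * The packets above latch the sequence number in UVD_CONTEXT_ID and
         * issue command 0, which writes it to the fence address; command 2
         * below appears to be the trap that raises the completion interrupt.
         */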
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;
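
        /*
         * Seed UVD_CONTEXT_ID with a known value, then ask the ring to
         * overwrite it with 0xDEADBEEF and poll until the new value shows up
         * or usec_timeout expires.
         */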
        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
                          ring->idx, r);
                return r;
        }
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n",
                         ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        return r;
}

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib)
{
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
        amdgpu_ring_write(ring, ib->length_dw);
}
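
/*
 * An IB submission is just three register writes pushed through the ring:
 * the 64-bit IB address goes into the LMI_RBC_IB BAR pair and its length in
 * dwords into UVD_RBC_IB_SIZE, after which the firmware presumably fetches
 * and executes the commands from that buffer.
 */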

/**
 * uvd_v6_0_ring_test_ib - test ib execution
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully execute an IB
 */
static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
{
        struct fence *fence = NULL;
        int r;

        r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
        if (r) {
                DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
                goto error;
        }

        r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
        if (r) {
                DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
                goto error;
        }

        r = fence_wait(fence, false);
        if (r) {
                DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
                goto error;
        }
        DRM_INFO("ib test on ring %d succeeded\n", ring->idx);

error:
        fence_put(fence);
        return r;
}

static bool uvd_v6_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
                        return 0;
        }
        return -ETIMEDOUT;
}

static int uvd_v6_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        uvd_v6_0_stop(adev);

        WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
                 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        return uvd_v6_0_start(adev);
}

static void uvd_v6_0_print_status(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        dev_info(adev->dev, "UVD 6.0 registers\n");
        dev_info(adev->dev, " UVD_SEMA_ADDR_LOW=0x%08X\n",
                 RREG32(mmUVD_SEMA_ADDR_LOW));
        dev_info(adev->dev, " UVD_SEMA_ADDR_HIGH=0x%08X\n",
                 RREG32(mmUVD_SEMA_ADDR_HIGH));
        dev_info(adev->dev, " UVD_SEMA_CMD=0x%08X\n",
                 RREG32(mmUVD_SEMA_CMD));
        dev_info(adev->dev, " UVD_GPCOM_VCPU_CMD=0x%08X\n",
                 RREG32(mmUVD_GPCOM_VCPU_CMD));
        dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA0=0x%08X\n",
                 RREG32(mmUVD_GPCOM_VCPU_DATA0));
        dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA1=0x%08X\n",
                 RREG32(mmUVD_GPCOM_VCPU_DATA1));
        dev_info(adev->dev, " UVD_ENGINE_CNTL=0x%08X\n",
                 RREG32(mmUVD_ENGINE_CNTL));
        dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
                 RREG32(mmUVD_UDEC_ADDR_CONFIG));
        dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
                 RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
        dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
                 RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
        dev_info(adev->dev, " UVD_SEMA_CNTL=0x%08X\n",
                 RREG32(mmUVD_SEMA_CNTL));
        dev_info(adev->dev, " UVD_LMI_EXT40_ADDR=0x%08X\n",
                 RREG32(mmUVD_LMI_EXT40_ADDR));
        dev_info(adev->dev, " UVD_CTX_INDEX=0x%08X\n",
                 RREG32(mmUVD_CTX_INDEX));
        dev_info(adev->dev, " UVD_CTX_DATA=0x%08X\n",
                 RREG32(mmUVD_CTX_DATA));
        dev_info(adev->dev, " UVD_CGC_GATE=0x%08X\n",
                 RREG32(mmUVD_CGC_GATE));
        dev_info(adev->dev, " UVD_CGC_CTRL=0x%08X\n",
                 RREG32(mmUVD_CGC_CTRL));
        dev_info(adev->dev, " UVD_LMI_CTRL2=0x%08X\n",
                 RREG32(mmUVD_LMI_CTRL2));
        dev_info(adev->dev, " UVD_MASTINT_EN=0x%08X\n",
                 RREG32(mmUVD_MASTINT_EN));
        dev_info(adev->dev, " UVD_LMI_ADDR_EXT=0x%08X\n",
                 RREG32(mmUVD_LMI_ADDR_EXT));
        dev_info(adev->dev, " UVD_LMI_CTRL=0x%08X\n",
                 RREG32(mmUVD_LMI_CTRL));
        dev_info(adev->dev, " UVD_LMI_SWAP_CNTL=0x%08X\n",
                 RREG32(mmUVD_LMI_SWAP_CNTL));
        dev_info(adev->dev, " UVD_MP_SWAP_CNTL=0x%08X\n",
                 RREG32(mmUVD_MP_SWAP_CNTL));
        dev_info(adev->dev, " UVD_MPC_SET_MUXA0=0x%08X\n",
                 RREG32(mmUVD_MPC_SET_MUXA0));
        dev_info(adev->dev, " UVD_MPC_SET_MUXA1=0x%08X\n",
                 RREG32(mmUVD_MPC_SET_MUXA1));
        dev_info(adev->dev, " UVD_MPC_SET_MUXB0=0x%08X\n",
                 RREG32(mmUVD_MPC_SET_MUXB0));
        dev_info(adev->dev, " UVD_MPC_SET_MUXB1=0x%08X\n",
                 RREG32(mmUVD_MPC_SET_MUXB1));
        dev_info(adev->dev, " UVD_MPC_SET_MUX=0x%08X\n",
                 RREG32(mmUVD_MPC_SET_MUX));
        dev_info(adev->dev, " UVD_MPC_SET_ALU=0x%08X\n",
                 RREG32(mmUVD_MPC_SET_ALU));
        dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET0=0x%08X\n",
                 RREG32(mmUVD_VCPU_CACHE_OFFSET0));
        dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE0=0x%08X\n",
                 RREG32(mmUVD_VCPU_CACHE_SIZE0));
        dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET1=0x%08X\n",
                 RREG32(mmUVD_VCPU_CACHE_OFFSET1));
        dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE1=0x%08X\n",
                 RREG32(mmUVD_VCPU_CACHE_SIZE1));
        dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET2=0x%08X\n",
                 RREG32(mmUVD_VCPU_CACHE_OFFSET2));
        dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE2=0x%08X\n",
                 RREG32(mmUVD_VCPU_CACHE_SIZE2));
        dev_info(adev->dev, " UVD_VCPU_CNTL=0x%08X\n",
                 RREG32(mmUVD_VCPU_CNTL));
        dev_info(adev->dev, " UVD_SOFT_RESET=0x%08X\n",
                 RREG32(mmUVD_SOFT_RESET));
        dev_info(adev->dev, " UVD_RBC_IB_SIZE=0x%08X\n",
                 RREG32(mmUVD_RBC_IB_SIZE));
        dev_info(adev->dev, " UVD_RBC_RB_RPTR=0x%08X\n",
                 RREG32(mmUVD_RBC_RB_RPTR));
        dev_info(adev->dev, " UVD_RBC_RB_WPTR=0x%08X\n",
                 RREG32(mmUVD_RBC_RB_WPTR));
        dev_info(adev->dev, " UVD_RBC_RB_WPTR_CNTL=0x%08X\n",
                 RREG32(mmUVD_RBC_RB_WPTR_CNTL));
        dev_info(adev->dev, " UVD_RBC_RB_CNTL=0x%08X\n",
                 RREG32(mmUVD_RBC_RB_CNTL));
        dev_info(adev->dev, " UVD_STATUS=0x%08X\n",
                 RREG32(mmUVD_STATUS));
        dev_info(adev->dev, " UVD_SEMA_TIMEOUT_STATUS=0x%08X\n",
                 RREG32(mmUVD_SEMA_TIMEOUT_STATUS));
        dev_info(adev->dev, " UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
                 RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL));
        dev_info(adev->dev, " UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n",
                 RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL));
        dev_info(adev->dev, " UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
                 RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
        dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n",
                 RREG32(mmUVD_CONTEXT_ID));
        dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
                 RREG32(mmUVD_UDEC_ADDR_CONFIG));
        dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
                 RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
        dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
                 RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: UVD TRAP\n");
        amdgpu_fence_process(&adev->uvd.ring);
        return 0;
}

static int uvd_v6_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool enable = (state == AMD_CG_STATE_GATE) ? true : false;

        if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
                return 0;

        if (enable) {
                if (adev->flags & AMD_IS_APU)
                        cz_set_uvd_clock_gating_branches(adev, enable);
                else
                        tonga_set_uvd_clock_gating_branches(adev, enable);
                uvd_v6_0_set_uvd_dynamic_clock_mode(adev, true);
        } else {
                uint32_t data = RREG32(mmUVD_CGC_CTRL);

                data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                WREG32(mmUVD_CGC_CTRL, data);
        }

        return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the UVD block.
         * That's done in the dpm code via the SMC. This
         * just re-inits the block as necessary. The actual
         * gating still happens in the dpm code. We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
                return 0;

        if (state == AMD_PG_STATE_GATE) {
                uvd_v6_0_stop(adev);
                return 0;
        } else {
                return uvd_v6_0_start(adev);
        }
}

const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
        .early_init = uvd_v6_0_early_init,
        .late_init = NULL,
        .sw_init = uvd_v6_0_sw_init,
        .sw_fini = uvd_v6_0_sw_fini,
        .hw_init = uvd_v6_0_hw_init,
        .hw_fini = uvd_v6_0_hw_fini,
        .suspend = uvd_v6_0_suspend,
        .resume = uvd_v6_0_resume,
        .is_idle = uvd_v6_0_is_idle,
        .wait_for_idle = uvd_v6_0_wait_for_idle,
        .soft_reset = uvd_v6_0_soft_reset,
        .print_status = uvd_v6_0_print_status,
        .set_clockgating_state = uvd_v6_0_set_clockgating_state,
        .set_powergating_state = uvd_v6_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
        .get_rptr = uvd_v6_0_ring_get_rptr,
        .get_wptr = uvd_v6_0_ring_get_wptr,
        .set_wptr = uvd_v6_0_ring_set_wptr,
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_ib = uvd_v6_0_ring_emit_ib,
        .emit_fence = uvd_v6_0_ring_emit_fence,
        .test_ring = uvd_v6_0_ring_test_ring,
        .test_ib = uvd_v6_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
        adev->uvd.ring.funcs = &uvd_v6_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
        .set = uvd_v6_0_set_interrupt_state,
        .process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->uvd.irq.num_types = 1;
        adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}