/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
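
/*
 * These helpers are defined near the end of this file but are needed by
 * the init, reset and gating callbacks that appear before them.
 */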

/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}

static int uvd_v5_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_set_ring_funcs(adev);
	uvd_v5_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v5_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);

	return r;
}

static int uvd_v5_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	/* raise clocks while booting up the VCPU */
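	/* (the arguments are in 10 kHz units: 533 MHz VCLK, 400 MHz DCLK) */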
	amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);

	r = uvd_v5_0_start(adev);
	if (r)
		goto done;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}
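
	/*
	 * UVD commands are plain register writes: each PACKET0(reg, 0)
	 * header is followed by a single data dword that is written to
	 * the named register.
	 */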
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	/* lower clocks again */
	amdgpu_asic_set_uvd_clocks(adev, 0, 0);

	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v5_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	uvd_v5_0_stop(adev);
	ring->ready = false;

	return 0;
}

static int uvd_v5_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v5_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v5_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v5_0_hw_init(adev);
}

/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.gpu_addr));
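
	/*
	 * The VCPU's cache window is carved into three consecutive
	 * regions: the firmware image, the decoder heap, and the stack
	 * plus one context-save area per session handle.  Offsets are
	 * programmed in 8-byte units, hence the ">> 3" below.
	 */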
	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;
	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v5_0_mc_resume(adev);

	/* disable clock gating */
	WREG32(mmUVD_CGC_GATE, 0);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			     (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);
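
	/*
	 * Wait for the "running" bit in UVD_STATUS; each of the ten
	 * attempts polls for up to a second before kicking the VCPU
	 * through another soft reset and retrying.
	 */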
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));
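
	/*
	 * Bring the ring up with RB_NO_FETCH set so the RBC stays idle
	 * while the base address and pointers are programmed; the bit
	 * is cleared again once everything is in place.
	 */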
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
}

/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number to emit
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);
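
	/* the first GPCOM command (0) writes the fence; the second (2) raises the trap */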
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v5_0_ring_emit_hdp_flush - emit an HDP flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP flush.
 */
static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v5_0_ring_emit_hdp_invalidate - emit an HDP invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP invalidate.
 */
static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
	amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
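	/* poll until the written value reads back from UVD_CONTEXT_ID */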
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

static bool uvd_v5_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v5_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v5_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
			~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v5_0_start(adev);
}

static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.ring);
	return 0;
}

static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, data2, suvd_flags;

	data = RREG32(mmUVD_CGC_CTRL);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
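
	/* zero every per-block *_MODE field in UVD_CGC_CTRL and UVD_SUVD_CGC_CTRL */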
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_CGC_GATE, 0);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}

static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	static int curstate = -1;
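	/* note: curstate is function-static and therefore shared by all adapters */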
	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (curstate == state)
		return 0;

	curstate = state;
	if (enable) {
		/* disable HW gating and enable SW gating */
		uvd_v5_0_set_sw_clock_gating(adev);
	} else {
		/* wait for STATUS to clear */
		if (uvd_v5_0_wait_for_idle(handle))
			return -EBUSY;

		/* enable HW gates because UVD is idle */
		/* uvd_v5_0_set_hw_clock_gating(adev); */
	}

	return 0;
}

static int uvd_v5_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v5_0_stop(adev);
		return 0;
	} else {
		return uvd_v5_0_start(adev);
	}
}

const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
	.name = "uvd_v5_0",
	.early_init = uvd_v5_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v5_0_sw_init,
	.sw_fini = uvd_v5_0_sw_fini,
	.hw_init = uvd_v5_0_hw_init,
	.hw_fini = uvd_v5_0_hw_fini,
	.suspend = uvd_v5_0_suspend,
	.resume = uvd_v5_0_resume,
	.is_idle = uvd_v5_0_is_idle,
	.wait_for_idle = uvd_v5_0_wait_for_idle,
	.soft_reset = uvd_v5_0_soft_reset,
	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
	.set_powergating_state = uvd_v5_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
	.get_rptr = uvd_v5_0_ring_get_rptr,
	.get_wptr = uvd_v5_0_ring_get_wptr,
	.set_wptr = uvd_v5_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_ib = uvd_v5_0_ring_emit_ib,
	.emit_fence = uvd_v5_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate,
	.test_ring = uvd_v5_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
	.set = uvd_v5_0_set_interrupt_state,
	.process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
}