2 * Copyright 2014 Advanced Micro Devices, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
25 * Authors: Christian König <christian.koenig@amd.com>
28 #include <linux/firmware.h>
31 #include "amdgpu_vce.h"
33 #include "vce/vce_3_0_d.h"
34 #include "vce/vce_3_0_sh_mask.h"
35 #include "oss/oss_3_0_d.h"
36 #include "oss/oss_3_0_sh_mask.h"
37 #include "gca/gfx_8_0_d.h"
38 #include "smu/smu_7_1_2_d.h"
39 #include "smu/smu_7_1_2_sh_mask.h"
41 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
42 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
43 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
44 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
45 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
47 #define VCE_V3_0_FW_SIZE (384 * 1024)
48 #define VCE_V3_0_STACK_SIZE (64 * 1024)
49 #define VCE_V3_0_DATA_SIZE ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
/* Forward declarations: these helpers are used before they are defined. */
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
56 * vce_v3_0_ring_get_rptr - get read pointer
58 * @ring: amdgpu_ring pointer
60 * Returns the current hardware read pointer
62 static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring
*ring
)
64 struct amdgpu_device
*adev
= ring
->adev
;
66 if (ring
== &adev
->vce
.ring
[0])
67 return RREG32(mmVCE_RB_RPTR
);
69 return RREG32(mmVCE_RB_RPTR2
);
73 * vce_v3_0_ring_get_wptr - get write pointer
75 * @ring: amdgpu_ring pointer
77 * Returns the current hardware write pointer
79 static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring
*ring
)
81 struct amdgpu_device
*adev
= ring
->adev
;
83 if (ring
== &adev
->vce
.ring
[0])
84 return RREG32(mmVCE_RB_WPTR
);
86 return RREG32(mmVCE_RB_WPTR2
);
90 * vce_v3_0_ring_set_wptr - set write pointer
92 * @ring: amdgpu_ring pointer
94 * Commits the write pointer to the hardware
96 static void vce_v3_0_ring_set_wptr(struct amdgpu_ring
*ring
)
98 struct amdgpu_device
*adev
= ring
->adev
;
100 if (ring
== &adev
->vce
.ring
[0])
101 WREG32(mmVCE_RB_WPTR
, ring
->wptr
);
103 WREG32(mmVCE_RB_WPTR2
, ring
->wptr
);
106 static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device
*adev
, bool override
)
110 tmp
= data
= RREG32(mmVCE_RB_ARB_CTRL
);
112 data
|= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK
;
114 data
&= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK
;
117 WREG32(mmVCE_RB_ARB_CTRL
, data
);
120 static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device
*adev
,
124 /* Set Override to disable Clock Gating */
125 vce_v3_0_override_vce_clock_gating(adev
, true);
128 /* Force CLOCK ON for VCE_CLOCK_GATING_B,
129 * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
130 * VREG can be FORCE ON or set to Dynamic, but can't be OFF
132 tmp
= data
= RREG32(mmVCE_CLOCK_GATING_B
);
136 WREG32(mmVCE_CLOCK_GATING_B
, data
);
138 /* Force CLOCK ON for VCE_UENC_CLOCK_GATING,
139 * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
141 tmp
= data
= RREG32(mmVCE_UENC_CLOCK_GATING
);
145 WREG32(mmVCE_UENC_CLOCK_GATING
, data
);
147 /* set VCE_UENC_CLOCK_GATING_2 */
148 tmp
= data
= RREG32(mmVCE_UENC_CLOCK_GATING_2
);
152 WREG32(mmVCE_UENC_CLOCK_GATING_2
, data
);
154 /* Force CLOCK ON for VCE_UENC_REG_CLOCK_GATING */
155 tmp
= data
= RREG32(mmVCE_UENC_REG_CLOCK_GATING
);
158 WREG32(mmVCE_UENC_REG_CLOCK_GATING
, data
);
160 /* Force VCE_UENC_DMA_DCLK_CTRL Clock ON */
161 tmp
= data
= RREG32(mmVCE_UENC_DMA_DCLK_CTRL
);
162 data
|= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK
|
163 VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK
|
164 VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK
|
167 WREG32(mmVCE_UENC_DMA_DCLK_CTRL
, data
);
169 /* Force CLOCK OFF for VCE_CLOCK_GATING_B,
170 * {*, *_FORCE_OFF} = {*, 1}
171 * set VREG to Dynamic, as it can't be OFF
173 tmp
= data
= RREG32(mmVCE_CLOCK_GATING_B
);
177 WREG32(mmVCE_CLOCK_GATING_B
, data
);
178 /* Force CLOCK OFF for VCE_UENC_CLOCK_GATING,
179 * Force ClOCK OFF takes precedent over Force CLOCK ON setting.
180 * {*_FORCE_ON, *_FORCE_OFF} = {*, 1}
182 tmp
= data
= RREG32(mmVCE_UENC_CLOCK_GATING
);
185 WREG32(mmVCE_UENC_CLOCK_GATING
, data
);
186 /* Set VCE_UENC_CLOCK_GATING_2 */
187 tmp
= data
= RREG32(mmVCE_UENC_CLOCK_GATING_2
);
190 WREG32(mmVCE_UENC_CLOCK_GATING_2
, data
);
191 /* Set VCE_UENC_REG_CLOCK_GATING to dynamic */
192 tmp
= data
= RREG32(mmVCE_UENC_REG_CLOCK_GATING
);
195 WREG32(mmVCE_UENC_REG_CLOCK_GATING
, data
);
196 /* Set VCE_UENC_DMA_DCLK_CTRL CG always in dynamic mode */
197 tmp
= data
= RREG32(mmVCE_UENC_DMA_DCLK_CTRL
);
198 data
&= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK
|
199 VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK
|
200 VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK
|
203 WREG32(mmVCE_UENC_DMA_DCLK_CTRL
, data
);
205 vce_v3_0_override_vce_clock_gating(adev
, false);
209 * vce_v3_0_start - start VCE block
211 * @adev: amdgpu_device pointer
213 * Setup and start the VCE block
215 static int vce_v3_0_start(struct amdgpu_device
*adev
)
217 struct amdgpu_ring
*ring
;
220 mutex_lock(&adev
->grbm_idx_mutex
);
221 for (idx
= 0; idx
< 2; ++idx
) {
223 if (adev
->vce
.harvest_config
& (1 << idx
))
227 WREG32_P(mmGRBM_GFX_INDEX
, 0,
228 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK
);
230 WREG32_P(mmGRBM_GFX_INDEX
,
231 GRBM_GFX_INDEX__VCE_INSTANCE_MASK
,
232 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK
);
234 vce_v3_0_mc_resume(adev
, idx
);
237 WREG32_P(mmVCE_STATUS
, 1, ~1);
238 if (adev
->asic_type
>= CHIP_STONEY
)
239 WREG32_P(mmVCE_VCPU_CNTL
, 1, ~0x200001);
241 WREG32_P(mmVCE_VCPU_CNTL
, VCE_VCPU_CNTL__CLK_EN_MASK
,
242 ~VCE_VCPU_CNTL__CLK_EN_MASK
);
244 WREG32_P(mmVCE_SOFT_RESET
,
245 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK
,
246 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK
);
250 WREG32_P(mmVCE_SOFT_RESET
, 0,
251 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK
);
253 for (i
= 0; i
< 10; ++i
) {
255 for (j
= 0; j
< 100; ++j
) {
256 status
= RREG32(mmVCE_STATUS
);
265 DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
266 WREG32_P(mmVCE_SOFT_RESET
,
267 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK
,
268 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK
);
270 WREG32_P(mmVCE_SOFT_RESET
, 0,
271 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK
);
276 /* clear BUSY flag */
277 WREG32_P(mmVCE_STATUS
, 0, ~1);
279 /* Set Clock-Gating off */
280 if (adev
->cg_flags
& AMD_CG_SUPPORT_VCE_MGCG
)
281 vce_v3_0_set_vce_sw_clock_gating(adev
, false);
284 DRM_ERROR("VCE not responding, giving up!!!\n");
285 mutex_unlock(&adev
->grbm_idx_mutex
);
290 WREG32_P(mmGRBM_GFX_INDEX
, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK
);
291 mutex_unlock(&adev
->grbm_idx_mutex
);
293 ring
= &adev
->vce
.ring
[0];
294 WREG32(mmVCE_RB_RPTR
, ring
->wptr
);
295 WREG32(mmVCE_RB_WPTR
, ring
->wptr
);
296 WREG32(mmVCE_RB_BASE_LO
, ring
->gpu_addr
);
297 WREG32(mmVCE_RB_BASE_HI
, upper_32_bits(ring
->gpu_addr
));
298 WREG32(mmVCE_RB_SIZE
, ring
->ring_size
/ 4);
300 ring
= &adev
->vce
.ring
[1];
301 WREG32(mmVCE_RB_RPTR2
, ring
->wptr
);
302 WREG32(mmVCE_RB_WPTR2
, ring
->wptr
);
303 WREG32(mmVCE_RB_BASE_LO2
, ring
->gpu_addr
);
304 WREG32(mmVCE_RB_BASE_HI2
, upper_32_bits(ring
->gpu_addr
));
305 WREG32(mmVCE_RB_SIZE2
, ring
->ring_size
/ 4);
310 #define ixVCE_HARVEST_FUSE_MACRO__ADDRESS 0xC0014074
311 #define VCE_HARVEST_FUSE_MACRO__SHIFT 27
312 #define VCE_HARVEST_FUSE_MACRO__MASK 0x18000000
314 static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device
*adev
)
318 /* Fiji, Stoney are single pipe */
319 if ((adev
->asic_type
== CHIP_FIJI
) ||
320 (adev
->asic_type
== CHIP_STONEY
))
321 return AMDGPU_VCE_HARVEST_VCE1
;
323 /* Tonga and CZ are dual or single pipe */
324 if (adev
->flags
& AMD_IS_APU
)
325 tmp
= (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS
) &
326 VCE_HARVEST_FUSE_MACRO__MASK
) >>
327 VCE_HARVEST_FUSE_MACRO__SHIFT
;
329 tmp
= (RREG32_SMC(ixCC_HARVEST_FUSES
) &
330 CC_HARVEST_FUSES__VCE_DISABLE_MASK
) >>
331 CC_HARVEST_FUSES__VCE_DISABLE__SHIFT
;
335 return AMDGPU_VCE_HARVEST_VCE0
;
337 return AMDGPU_VCE_HARVEST_VCE1
;
339 return AMDGPU_VCE_HARVEST_VCE0
| AMDGPU_VCE_HARVEST_VCE1
;
345 static int vce_v3_0_early_init(void *handle
)
347 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
349 adev
->vce
.harvest_config
= vce_v3_0_get_harvest_config(adev
);
351 if ((adev
->vce
.harvest_config
&
352 (AMDGPU_VCE_HARVEST_VCE0
| AMDGPU_VCE_HARVEST_VCE1
)) ==
353 (AMDGPU_VCE_HARVEST_VCE0
| AMDGPU_VCE_HARVEST_VCE1
))
356 vce_v3_0_set_ring_funcs(adev
);
357 vce_v3_0_set_irq_funcs(adev
);
362 static int vce_v3_0_sw_init(void *handle
)
364 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
365 struct amdgpu_ring
*ring
;
369 r
= amdgpu_irq_add_id(adev
, 167, &adev
->vce
.irq
);
373 r
= amdgpu_vce_sw_init(adev
, VCE_V3_0_FW_SIZE
+
374 (VCE_V3_0_STACK_SIZE
+ VCE_V3_0_DATA_SIZE
) * 2);
378 r
= amdgpu_vce_resume(adev
);
382 ring
= &adev
->vce
.ring
[0];
383 sprintf(ring
->name
, "vce0");
384 r
= amdgpu_ring_init(adev
, ring
, 4096, VCE_CMD_NO_OP
, 0xf,
385 &adev
->vce
.irq
, 0, AMDGPU_RING_TYPE_VCE
);
389 ring
= &adev
->vce
.ring
[1];
390 sprintf(ring
->name
, "vce1");
391 r
= amdgpu_ring_init(adev
, ring
, 4096, VCE_CMD_NO_OP
, 0xf,
392 &adev
->vce
.irq
, 0, AMDGPU_RING_TYPE_VCE
);
/*
 * vce_v3_0_sw_fini - IP-block software teardown callback
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Suspends the VCE helper then tears down its software state. Error
 * checks between the steps were dropped from this chunk and are restored.
 */
static int vce_v3_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vce_sw_fini(adev);
	if (r)
		return r;

	return r;
}
415 static int vce_v3_0_hw_init(void *handle
)
418 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
420 r
= vce_v3_0_start(adev
);
424 adev
->vce
.ring
[0].ready
= false;
425 adev
->vce
.ring
[1].ready
= false;
427 for (i
= 0; i
< 2; i
++) {
428 r
= amdgpu_ring_test_ring(&adev
->vce
.ring
[i
]);
432 adev
->vce
.ring
[i
].ready
= true;
435 DRM_INFO("VCE initialized successfully.\n");
/*
 * vce_v3_0_hw_fini - IP-block hardware teardown callback
 *
 * @handle: amdgpu_device pointer (as void *), unused
 *
 * Intentionally a no-op: power/clock gating is handled elsewhere.
 * NOTE(review): the body was dropped from this chunk; restored as the
 * conventional `return 0;` stub — confirm against the original source.
 */
static int vce_v3_0_hw_fini(void *handle)
{
	return 0;
}
/*
 * vce_v3_0_suspend - IP-block suspend callback
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Stops the hardware then suspends the common VCE state. Error checks
 * dropped from this chunk are restored.
 */
static int vce_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return r;
}
/*
 * vce_v3_0_resume - IP-block resume callback
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Restores the common VCE state then re-initializes the hardware. Error
 * checks dropped from this chunk are restored.
 */
static int vce_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	r = vce_v3_0_hw_init(adev);
	if (r)
		return r;

	return r;
}
477 static void vce_v3_0_mc_resume(struct amdgpu_device
*adev
, int idx
)
479 uint32_t offset
, size
;
481 WREG32_P(mmVCE_CLOCK_GATING_A
, 0, ~(1 << 16));
482 WREG32_P(mmVCE_UENC_CLOCK_GATING
, 0x1FF000, ~0xFF9FF000);
483 WREG32_P(mmVCE_UENC_REG_CLOCK_GATING
, 0x3F, ~0x3F);
484 WREG32(mmVCE_CLOCK_GATING_B
, 0xf7);
486 WREG32(mmVCE_LMI_CTRL
, 0x00398000);
487 WREG32_P(mmVCE_LMI_CACHE_CTRL
, 0x0, ~0x1);
488 WREG32(mmVCE_LMI_SWAP_CNTL
, 0);
489 WREG32(mmVCE_LMI_SWAP_CNTL1
, 0);
490 WREG32(mmVCE_LMI_VM_CTRL
, 0);
491 if (adev
->asic_type
>= CHIP_STONEY
) {
492 WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0
, (adev
->vce
.gpu_addr
>> 8));
493 WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1
, (adev
->vce
.gpu_addr
>> 8));
494 WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2
, (adev
->vce
.gpu_addr
>> 8));
496 WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR
, (adev
->vce
.gpu_addr
>> 8));
497 offset
= AMDGPU_VCE_FIRMWARE_OFFSET
;
498 size
= VCE_V3_0_FW_SIZE
;
499 WREG32(mmVCE_VCPU_CACHE_OFFSET0
, offset
& 0x7fffffff);
500 WREG32(mmVCE_VCPU_CACHE_SIZE0
, size
);
504 size
= VCE_V3_0_STACK_SIZE
;
505 WREG32(mmVCE_VCPU_CACHE_OFFSET1
, offset
& 0x7fffffff);
506 WREG32(mmVCE_VCPU_CACHE_SIZE1
, size
);
508 size
= VCE_V3_0_DATA_SIZE
;
509 WREG32(mmVCE_VCPU_CACHE_OFFSET2
, offset
& 0x7fffffff);
510 WREG32(mmVCE_VCPU_CACHE_SIZE2
, size
);
512 offset
+= size
+ VCE_V3_0_STACK_SIZE
+ VCE_V3_0_DATA_SIZE
;
513 size
= VCE_V3_0_STACK_SIZE
;
514 WREG32(mmVCE_VCPU_CACHE_OFFSET1
, offset
& 0xfffffff);
515 WREG32(mmVCE_VCPU_CACHE_SIZE1
, size
);
517 size
= VCE_V3_0_DATA_SIZE
;
518 WREG32(mmVCE_VCPU_CACHE_OFFSET2
, offset
& 0xfffffff);
519 WREG32(mmVCE_VCPU_CACHE_SIZE2
, size
);
522 WREG32_P(mmVCE_LMI_CTRL2
, 0x0, ~0x100);
524 WREG32_P(mmVCE_SYS_INT_EN
, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK
,
525 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK
);
528 static bool vce_v3_0_is_idle(void *handle
)
530 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
533 mask
|= (adev
->vce
.harvest_config
& AMDGPU_VCE_HARVEST_VCE0
) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK
;
534 mask
|= (adev
->vce
.harvest_config
& AMDGPU_VCE_HARVEST_VCE1
) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK
;
536 return !(RREG32(mmSRBM_STATUS2
) & mask
);
539 static int vce_v3_0_wait_for_idle(void *handle
)
542 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
544 for (i
= 0; i
< adev
->usec_timeout
; i
++)
545 if (vce_v3_0_is_idle(handle
))
551 static int vce_v3_0_soft_reset(void *handle
)
553 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
556 mask
|= (adev
->vce
.harvest_config
& AMDGPU_VCE_HARVEST_VCE0
) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK
;
557 mask
|= (adev
->vce
.harvest_config
& AMDGPU_VCE_HARVEST_VCE1
) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK
;
559 WREG32_P(mmSRBM_SOFT_RESET
, mask
,
560 ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK
|
561 SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK
));
564 return vce_v3_0_start(adev
);
567 static void vce_v3_0_print_status(void *handle
)
569 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
571 dev_info(adev
->dev
, "VCE 3.0 registers\n");
572 dev_info(adev
->dev
, " VCE_STATUS=0x%08X\n",
573 RREG32(mmVCE_STATUS
));
574 dev_info(adev
->dev
, " VCE_VCPU_CNTL=0x%08X\n",
575 RREG32(mmVCE_VCPU_CNTL
));
576 dev_info(adev
->dev
, " VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
577 RREG32(mmVCE_VCPU_CACHE_OFFSET0
));
578 dev_info(adev
->dev
, " VCE_VCPU_CACHE_SIZE0=0x%08X\n",
579 RREG32(mmVCE_VCPU_CACHE_SIZE0
));
580 dev_info(adev
->dev
, " VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
581 RREG32(mmVCE_VCPU_CACHE_OFFSET1
));
582 dev_info(adev
->dev
, " VCE_VCPU_CACHE_SIZE1=0x%08X\n",
583 RREG32(mmVCE_VCPU_CACHE_SIZE1
));
584 dev_info(adev
->dev
, " VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
585 RREG32(mmVCE_VCPU_CACHE_OFFSET2
));
586 dev_info(adev
->dev
, " VCE_VCPU_CACHE_SIZE2=0x%08X\n",
587 RREG32(mmVCE_VCPU_CACHE_SIZE2
));
588 dev_info(adev
->dev
, " VCE_SOFT_RESET=0x%08X\n",
589 RREG32(mmVCE_SOFT_RESET
));
590 dev_info(adev
->dev
, " VCE_RB_BASE_LO2=0x%08X\n",
591 RREG32(mmVCE_RB_BASE_LO2
));
592 dev_info(adev
->dev
, " VCE_RB_BASE_HI2=0x%08X\n",
593 RREG32(mmVCE_RB_BASE_HI2
));
594 dev_info(adev
->dev
, " VCE_RB_SIZE2=0x%08X\n",
595 RREG32(mmVCE_RB_SIZE2
));
596 dev_info(adev
->dev
, " VCE_RB_RPTR2=0x%08X\n",
597 RREG32(mmVCE_RB_RPTR2
));
598 dev_info(adev
->dev
, " VCE_RB_WPTR2=0x%08X\n",
599 RREG32(mmVCE_RB_WPTR2
));
600 dev_info(adev
->dev
, " VCE_RB_BASE_LO=0x%08X\n",
601 RREG32(mmVCE_RB_BASE_LO
));
602 dev_info(adev
->dev
, " VCE_RB_BASE_HI=0x%08X\n",
603 RREG32(mmVCE_RB_BASE_HI
));
604 dev_info(adev
->dev
, " VCE_RB_SIZE=0x%08X\n",
605 RREG32(mmVCE_RB_SIZE
));
606 dev_info(adev
->dev
, " VCE_RB_RPTR=0x%08X\n",
607 RREG32(mmVCE_RB_RPTR
));
608 dev_info(adev
->dev
, " VCE_RB_WPTR=0x%08X\n",
609 RREG32(mmVCE_RB_WPTR
));
610 dev_info(adev
->dev
, " VCE_CLOCK_GATING_A=0x%08X\n",
611 RREG32(mmVCE_CLOCK_GATING_A
));
612 dev_info(adev
->dev
, " VCE_CLOCK_GATING_B=0x%08X\n",
613 RREG32(mmVCE_CLOCK_GATING_B
));
614 dev_info(adev
->dev
, " VCE_UENC_CLOCK_GATING=0x%08X\n",
615 RREG32(mmVCE_UENC_CLOCK_GATING
));
616 dev_info(adev
->dev
, " VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
617 RREG32(mmVCE_UENC_REG_CLOCK_GATING
));
618 dev_info(adev
->dev
, " VCE_SYS_INT_EN=0x%08X\n",
619 RREG32(mmVCE_SYS_INT_EN
));
620 dev_info(adev
->dev
, " VCE_LMI_CTRL2=0x%08X\n",
621 RREG32(mmVCE_LMI_CTRL2
));
622 dev_info(adev
->dev
, " VCE_LMI_CTRL=0x%08X\n",
623 RREG32(mmVCE_LMI_CTRL
));
624 dev_info(adev
->dev
, " VCE_LMI_VM_CTRL=0x%08X\n",
625 RREG32(mmVCE_LMI_VM_CTRL
));
626 dev_info(adev
->dev
, " VCE_LMI_SWAP_CNTL=0x%08X\n",
627 RREG32(mmVCE_LMI_SWAP_CNTL
));
628 dev_info(adev
->dev
, " VCE_LMI_SWAP_CNTL1=0x%08X\n",
629 RREG32(mmVCE_LMI_SWAP_CNTL1
));
630 dev_info(adev
->dev
, " VCE_LMI_CACHE_CTRL=0x%08X\n",
631 RREG32(mmVCE_LMI_CACHE_CTRL
));
634 static int vce_v3_0_set_interrupt_state(struct amdgpu_device
*adev
,
635 struct amdgpu_irq_src
*source
,
637 enum amdgpu_interrupt_state state
)
641 if (state
== AMDGPU_IRQ_STATE_ENABLE
)
642 val
|= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK
;
644 WREG32_P(mmVCE_SYS_INT_EN
, val
, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK
);
648 static int vce_v3_0_process_interrupt(struct amdgpu_device
*adev
,
649 struct amdgpu_irq_src
*source
,
650 struct amdgpu_iv_entry
*entry
)
652 DRM_DEBUG("IH: VCE\n");
654 WREG32_P(mmVCE_SYS_INT_STATUS
,
655 VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK
,
656 ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK
);
658 switch (entry
->src_data
) {
661 amdgpu_fence_process(&adev
->vce
.ring
[entry
->src_data
]);
664 DRM_ERROR("Unhandled interrupt: %d %d\n",
665 entry
->src_id
, entry
->src_data
);
672 static int vce_v3_0_set_clockgating_state(void *handle
,
673 enum amd_clockgating_state state
)
675 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
676 bool enable
= (state
== AMD_CG_STATE_GATE
) ? true : false;
679 if (!(adev
->cg_flags
& AMD_CG_SUPPORT_VCE_MGCG
))
682 mutex_lock(&adev
->grbm_idx_mutex
);
683 for (i
= 0; i
< 2; i
++) {
684 /* Program VCE Instance 0 or 1 if not harvested */
685 if (adev
->vce
.harvest_config
& (1 << i
))
689 WREG32_P(mmGRBM_GFX_INDEX
, 0,
690 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK
);
692 WREG32_P(mmGRBM_GFX_INDEX
,
693 GRBM_GFX_INDEX__VCE_INSTANCE_MASK
,
694 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK
);
697 /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
698 uint32_t data
= RREG32(mmVCE_CLOCK_GATING_A
);
699 data
&= ~(0xf | 0xff0);
700 data
|= ((0x0 << 0) | (0x04 << 4));
701 WREG32(mmVCE_CLOCK_GATING_A
, data
);
703 /* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
704 data
= RREG32(mmVCE_UENC_CLOCK_GATING
);
705 data
&= ~(0xf | 0xff0);
706 data
|= ((0x0 << 0) | (0x04 << 4));
707 WREG32(mmVCE_UENC_CLOCK_GATING
, data
);
710 vce_v3_0_set_vce_sw_clock_gating(adev
, enable
);
713 WREG32_P(mmGRBM_GFX_INDEX
, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK
);
714 mutex_unlock(&adev
->grbm_idx_mutex
);
719 static int vce_v3_0_set_powergating_state(void *handle
,
720 enum amd_powergating_state state
)
722 /* This doesn't actually powergate the VCE block.
723 * That's done in the dpm code via the SMC. This
724 * just re-inits the block as necessary. The actual
725 * gating still happens in the dpm code. We should
726 * revisit this when there is a cleaner line between
727 * the smc and the hw blocks
729 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
731 if (!(adev
->pg_flags
& AMD_PG_SUPPORT_VCE
))
734 if (state
== AMD_PG_STATE_GATE
)
735 /* XXX do we need a vce_v3_0_stop()? */
738 return vce_v3_0_start(adev
);
741 const struct amd_ip_funcs vce_v3_0_ip_funcs
= {
742 .early_init
= vce_v3_0_early_init
,
744 .sw_init
= vce_v3_0_sw_init
,
745 .sw_fini
= vce_v3_0_sw_fini
,
746 .hw_init
= vce_v3_0_hw_init
,
747 .hw_fini
= vce_v3_0_hw_fini
,
748 .suspend
= vce_v3_0_suspend
,
749 .resume
= vce_v3_0_resume
,
750 .is_idle
= vce_v3_0_is_idle
,
751 .wait_for_idle
= vce_v3_0_wait_for_idle
,
752 .soft_reset
= vce_v3_0_soft_reset
,
753 .print_status
= vce_v3_0_print_status
,
754 .set_clockgating_state
= vce_v3_0_set_clockgating_state
,
755 .set_powergating_state
= vce_v3_0_set_powergating_state
,
758 static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs
= {
759 .get_rptr
= vce_v3_0_ring_get_rptr
,
760 .get_wptr
= vce_v3_0_ring_get_wptr
,
761 .set_wptr
= vce_v3_0_ring_set_wptr
,
762 .parse_cs
= amdgpu_vce_ring_parse_cs
,
763 .emit_ib
= amdgpu_vce_ring_emit_ib
,
764 .emit_fence
= amdgpu_vce_ring_emit_fence
,
765 .test_ring
= amdgpu_vce_ring_test_ring
,
766 .test_ib
= amdgpu_vce_ring_test_ib
,
767 .insert_nop
= amdgpu_ring_insert_nop
,
768 .pad_ib
= amdgpu_ring_generic_pad_ib
,
771 static void vce_v3_0_set_ring_funcs(struct amdgpu_device
*adev
)
773 adev
->vce
.ring
[0].funcs
= &vce_v3_0_ring_funcs
;
774 adev
->vce
.ring
[1].funcs
= &vce_v3_0_ring_funcs
;
777 static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs
= {
778 .set
= vce_v3_0_set_interrupt_state
,
779 .process
= vce_v3_0_process_interrupt
,
782 static void vce_v3_0_set_irq_funcs(struct amdgpu_device
*adev
)
784 adev
->vce
.irq
.num_types
= 1;
785 adev
->vce
.irq
.funcs
= &vce_v3_0_irq_funcs
;