/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "vid.h"
#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
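
/* Local defines for the registers and fields used below that are not
 * provided by the register headers included above.
 */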
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02
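
/* Sizes of the three VCPU cache regions programmed in
 * vce_v3_0_mc_resume(): firmware image, per-instance stack
 * and per-instance data.
 */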
#define VCE_V3_0_FW_SIZE	(384 * 1024)
#define VCE_V3_0_STACK_SIZE	(64 * 1024)
#define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))

static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vce_v3_0_wait_for_idle(void *handle);

/**
 * vce_v3_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_RPTR);
	else
		return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v3_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_WPTR);
	else
		return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v3_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		WREG32(mmVCE_RB_WPTR, ring->wptr);
	else
		WREG32(mmVCE_RB_WPTR2, ring->wptr);
}

static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev,
					       bool override)
{
	u32 tmp, data;

	tmp = data = RREG32(mmVCE_RB_ARB_CTRL);
	if (override)
		data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
	else
		data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;

	if (tmp != data)
		WREG32(mmVCE_RB_ARB_CTRL, data);
}

static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
					     bool gated)
{
	u32 tmp, data;

	/* Set Override to disable Clock Gating */
	vce_v3_0_override_vce_clock_gating(adev, true);

	if (!gated) {
		/* Force CLOCK ON for VCE_CLOCK_GATING_B,
		 * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
		 * VREG can be FORCE ON or set to Dynamic, but can't be OFF
		 */
		tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
		data |= 0x1ff;
		data &= ~0xef0000;
		if (tmp != data)
			WREG32(mmVCE_CLOCK_GATING_B, data);

		/* Force CLOCK ON for VCE_UENC_CLOCK_GATING,
		 * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
		 */
		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0x3ff000;
		data &= ~0xffc00000;
		if (tmp != data)
			WREG32(mmVCE_UENC_CLOCK_GATING, data);

		/* set VCE_UENC_CLOCK_GATING_2 */
		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x2;
		data &= ~0x00010000;
		if (tmp != data)
			WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		/* Force CLOCK ON for VCE_UENC_REG_CLOCK_GATING */
		tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data |= 0x3f;
		if (tmp != data)
			WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		/* Force VCE_UENC_DMA_DCLK_CTRL Clock ON */
		tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			0x8;
		if (tmp != data)
			WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	} else {
		/* Force CLOCK OFF for VCE_CLOCK_GATING_B,
		 * {*, *_FORCE_OFF} = {*, 1}
		 * set VREG to Dynamic, as it can't be OFF
		 */
		tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
		data &= ~0x80010;
		data |= 0xe70008;
		if (tmp != data)
			WREG32(mmVCE_CLOCK_GATING_B, data);

		/* Force CLOCK OFF for VCE_UENC_CLOCK_GATING,
		 * Force CLOCK OFF takes precedence over Force CLOCK ON setting.
		 * {*_FORCE_ON, *_FORCE_OFF} = {*, 1}
		 */
		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0xffc00000;
		if (tmp != data)
			WREG32(mmVCE_UENC_CLOCK_GATING, data);

		/* Set VCE_UENC_CLOCK_GATING_2 */
		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x10000;
		if (tmp != data)
			WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		/* Set VCE_UENC_REG_CLOCK_GATING to dynamic */
		tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data &= ~0xffc00000;
		if (tmp != data)
			WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		/* Set VCE_UENC_DMA_DCLK_CTRL CG always in dynamic mode */
		tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			  0x8);
		if (tmp != data)
			WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	}

	vce_v3_0_override_vce_clock_gating(adev, false);
}
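
/**
 * vce_v3_0_firmware_loaded - wait for the VCE firmware to come up
 *
 * @adev: amdgpu_device pointer
 *
 * Polls VCE_STATUS until the VCPU reports the firmware as loaded,
 * soft-resetting the ECPU between retry rounds.
 */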
static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status = RREG32(mmVCE_STATUS);

			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
				return 0;
			mdelay(10);
		}

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_P(mmVCE_SOFT_RESET,
			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmVCE_SOFT_RESET, 0,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
	}

	return -ETIMEDOUT;
}

/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int idx, r;

	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, ring->wptr);
	WREG32(mmVCE_RB_WPTR, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, ring->wptr);
	WREG32(mmVCE_RB_WPTR2, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		if (idx == 0)
			WREG32_P(mmGRBM_GFX_INDEX, 0,
				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
		else
			WREG32_P(mmGRBM_GFX_INDEX,
				GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);

		vce_v3_0_mc_resume(adev, idx);

		WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK,
			 ~VCE_STATUS__JOB_BUSY_MASK);
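
		/* Stoney and newer use the hardcoded 0x200001 mask here:
		 * CLK_EN (bit 0) plus an additional control bit that
		 * has no named mask in this file.
		 */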
		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
		else
			WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
				~VCE_VCPU_CNTL__CLK_EN_MASK);

		WREG32_P(mmVCE_SOFT_RESET, 0,
			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

		mdelay(100);

		r = vce_v3_0_firmware_loaded(adev);

		/* clear BUSY flag */
		WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);

		/* Set Clock-Gating off */
		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
			vce_v3_0_set_vce_sw_clock_gating(adev, false);

		if (r) {
			DRM_ERROR("VCE not responding, giving up!!!\n");
			mutex_unlock(&adev->grbm_idx_mutex);
			return r;
		}
	}

	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
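
/**
 * vce_v3_0_stop - stop the VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Halt the VCPU and assert the ECPU soft reset on each
 * unharvested instance.
 */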
static int vce_v3_0_stop(struct amdgpu_device *adev)
{
	int idx;

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		if (idx == 0)
			WREG32_P(mmGRBM_GFX_INDEX, 0,
				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
		else
			WREG32_P(mmGRBM_GFX_INDEX,
				GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);

		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
		else
			WREG32_P(mmVCE_VCPU_CNTL, 0,
				~VCE_VCPU_CNTL__CLK_EN_MASK);

		/* hold on ECPU */
		WREG32_P(mmVCE_SOFT_RESET,
			 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

		/* clear BUSY flag */
		WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);

		/* Set Clock-Gating off */
		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
			vce_v3_0_set_vce_sw_clock_gating(adev, false);
	}

	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS	0xC0014074
#define VCE_HARVEST_FUSE_MACRO__SHIFT		27
#define VCE_HARVEST_FUSE_MACRO__MASK		0x18000000
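
/**
 * vce_v3_0_get_harvest_config - query harvested VCE instances
 *
 * @adev: amdgpu_device pointer
 *
 * Reads the harvest fuses (or applies per-ASIC policy) to determine
 * which of the two VCE instances, if any, are fused off.
 */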
static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Fiji, Stoney, Polaris10, Polaris11 are single pipe */
	if ((adev->asic_type == CHIP_FIJI) ||
	    (adev->asic_type == CHIP_STONEY) ||
	    (adev->asic_type == CHIP_POLARIS10) ||
	    (adev->asic_type == CHIP_POLARIS11))
		return AMDGPU_VCE_HARVEST_VCE1;

	/* Tonga and CZ are dual or single pipe */
	if (adev->flags & AMD_IS_APU)
		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
		       VCE_HARVEST_FUSE_MACRO__MASK) >>
			VCE_HARVEST_FUSE_MACRO__SHIFT;
	else
		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;

	switch (tmp) {
	case 1:
		return AMDGPU_VCE_HARVEST_VCE0;
	case 2:
		return AMDGPU_VCE_HARVEST_VCE1;
	case 3:
		return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
	default:
		return 0;
	}
}

static int vce_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);

	if ((adev->vce.harvest_config &
	     (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
		return -ENOENT;

	vce_v3_0_set_ring_funcs(adev);
	vce_v3_0_set_irq_funcs(adev);

	return 0;
}

static int vce_v3_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r;
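
	/* VCE trap interrupt (source id 167) */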
	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
	if (r)
		return r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	ring = &adev->vce.ring[0];
	sprintf(ring->name, "vce0");
	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	ring = &adev->vce.ring[1];
	sprintf(ring->name, "vce1");
	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	return r;
}

static int vce_v3_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vce_sw_fini(adev);
	if (r)
		return r;

	return r;
}

static int vce_v3_0_hw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_start(adev);
	if (r)
		return r;

	adev->vce.ring[0].ready = false;
	adev->vce.ring[1].ready = false;

	for (i = 0; i < 2; i++) {
		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
		if (r)
			return r;
		else
			adev->vce.ring[i].ready = true;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

static int vce_v3_0_hw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_wait_for_idle(handle);
	if (r)
		return r;

	return vce_v3_0_stop(adev);
}

static int vce_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return r;
}

static int vce_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	r = vce_v3_0_hw_init(adev);
	if (r)
		return r;

	return r;
}

static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);
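
	/* On Stoney and newer the VCPU cache base is written to three
	 * separate 40-bit BARs; older ASICs have a single BAR.
	 */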
	if (adev->asic_type >= CHIP_STONEY) {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
	} else
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));

	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	if (idx == 0) {
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);

	WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
		 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
}
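
/**
 * vce_v3_0_is_idle - check VCE block idle status
 *
 * @handle: amdgpu_device pointer
 *
 * Checks SRBM_STATUS2, ignoring the BUSY bits of harvested instances.
 */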
static bool vce_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;

	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;

	return !(RREG32(mmSRBM_STATUS2) & mask);
}

static int vce_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++)
		if (vce_v3_0_is_idle(handle))
			return 0;

	return -ETIMEDOUT;
}
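
/**
 * vce_v3_0_soft_reset - soft reset VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Pulses the SRBM soft reset of the unharvested VCE instances,
 * then restarts the block.
 */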
static int vce_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;

	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;

	WREG32_P(mmSRBM_SOFT_RESET, mask,
		 ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
		   SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK));
	mdelay(5);

	return vce_v3_0_start(adev);
}

static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");

	WREG32_P(mmVCE_SYS_INT_STATUS,
		VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
		~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);

	switch (entry->src_data) {
	case 0:
	case 1:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data);
		break;
	}

	return 0;
}
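
/**
 * vce_v3_set_bypass_mode - toggle ECLK DFS bypass
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable bypass
 *
 * Sets or clears BYPASSECLK in GCK_DFS_BYPASS_CNTL; used on
 * Polaris10 when the clock gating state changes.
 */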
static void vce_v3_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
	else
		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
	int i;

	if (adev->asic_type == CHIP_POLARIS10)
		vce_v3_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
		return 0;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < 2; i++) {
		/* Program VCE Instance 0 or 1 if not harvested */
		if (adev->vce.harvest_config & (1 << i))
			continue;

		if (i == 0)
			WREG32_P(mmGRBM_GFX_INDEX, 0,
					~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
		else
			WREG32_P(mmGRBM_GFX_INDEX,
					GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
					~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);

		if (enable) {
			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
			uint32_t data = RREG32(mmVCE_CLOCK_GATING_A);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_CLOCK_GATING_A, data);

			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
			data = RREG32(mmVCE_UENC_CLOCK_GATING);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_UENC_CLOCK_GATING, data);
		}

		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
	}

	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static int vce_v3_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
		return 0;

	if (state == AMD_PG_STATE_GATE)
		/* XXX do we need a vce_v3_0_stop()? */
		return 0;
	else
		return vce_v3_0_start(adev);
}

const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.name = "vce_v3_0",
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.soft_reset = vce_v3_0_soft_reset,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};

static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs;
	adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};

static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
}