drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "vid.h"
#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"

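/* GRBM_GFX_INDEX bits used below to steer register accesses to VCE
 * instance 0 or 1.
 */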
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10

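/* Sizes (in bytes) of the firmware image, per-instance stack and per-instance
 * data segments; vce_v3_0_mc_resume() programs these into the VCPU caches.
 */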
#define VCE_V3_0_FW_SIZE	(384 * 1024)
#define VCE_V3_0_STACK_SIZE	(64 * 1024)
#define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))

static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vce_v3_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_RPTR);
	else
		return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v3_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_WPTR);
	else
		return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v3_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		WREG32(mmVCE_RB_WPTR, ring->wptr);
	else
		WREG32(mmVCE_RB_WPTR2, ring->wptr);
}

/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int idx, i, j, r;

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {

		if (adev->vce.harvest_config & (1 << idx))
			continue;

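		/* Select VCE instance idx via GRBM_GFX_INDEX so the
		 * register programming below hits that instance.
		 */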
		if (idx == 0)
			WREG32_P(mmGRBM_GFX_INDEX, 0,
				 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
		else
			WREG32_P(mmGRBM_GFX_INDEX,
				 GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
				 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);

		vce_v3_0_mc_resume(adev, idx);

		/* set BUSY flag */
		WREG32_P(mmVCE_STATUS, 1, ~1);

		WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
			 ~VCE_VCPU_CNTL__CLK_EN_MASK);

		WREG32_P(mmVCE_SOFT_RESET,
			 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

		mdelay(100);

		WREG32_P(mmVCE_SOFT_RESET, 0,
			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

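		/* Poll bit 1 of VCE_STATUS (presumably the firmware's
		 * booted/ready flag); if it never comes up, pulse the ECPU
		 * soft reset and try again, up to 10 times.
		 */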
		for (i = 0; i < 10; ++i) {
			uint32_t status;
			for (j = 0; j < 100; ++j) {
				status = RREG32(mmVCE_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
			WREG32_P(mmVCE_SOFT_RESET,
				 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
				 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
			mdelay(10);
			WREG32_P(mmVCE_SOFT_RESET, 0,
				 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
			mdelay(10);
			r = -1;
		}

		/* clear BUSY flag */
		WREG32_P(mmVCE_STATUS, 0, ~1);

		if (r) {
			DRM_ERROR("VCE not responding, giving up!!!\n");
			mutex_unlock(&adev->grbm_idx_mutex);
			return r;
		}
	}

	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
	mutex_unlock(&adev->grbm_idx_mutex);

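	/* Program the ring buffers: ring 0 uses the VCE_RB_* registers,
	 * ring 1 the VCE_RB_*2 registers.
	 */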
	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, ring->wptr);
	WREG32(mmVCE_RB_WPTR, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, ring->wptr);
	WREG32(mmVCE_RB_WPTR2, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	return 0;
}

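/* On APUs the VCE harvest state is reported through a dedicated fuse macro
 * register; dGPUs expose it via CC_HARVEST_FUSES instead.
 */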
#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS	0xC0014074
#define VCE_HARVEST_FUSE_MACRO__SHIFT		27
#define VCE_HARVEST_FUSE_MACRO__MASK		0x18000000

static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
	u32 tmp;
	unsigned ret;

	if (adev->flags & AMDGPU_IS_APU)
		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
		       VCE_HARVEST_FUSE_MACRO__MASK) >>
			VCE_HARVEST_FUSE_MACRO__SHIFT;
	else
		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;

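	/* The two fuse bits map directly to the instances: bit 0 harvests
	 * VCE0, bit 1 harvests VCE1.
	 */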
	switch (tmp) {
	case 1:
		ret = AMDGPU_VCE_HARVEST_VCE0;
		break;
	case 2:
		ret = AMDGPU_VCE_HARVEST_VCE1;
		break;
	case 3:
		ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
		break;
	default:
		ret = 0;
	}

	return ret;
}

static int vce_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);

	if ((adev->vce.harvest_config &
	     (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
		return -ENOENT;

	vce_v3_0_set_ring_funcs(adev);
	vce_v3_0_set_irq_funcs(adev);

	return 0;
}

static int vce_v3_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r;

	/* VCE */
	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
	if (r)
		return r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	ring = &adev->vce.ring[0];
	sprintf(ring->name, "vce0");
	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	ring = &adev->vce.ring[1];
	sprintf(ring->name, "vce1");
	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	return r;
}

static int vce_v3_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vce_sw_fini(adev);
	if (r)
		return r;

	return r;
}

static int vce_v3_0_hw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_start(adev);
	if (r)
		return r;

	ring = &adev->vce.ring[0];
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	ring = &adev->vce.ring[1];
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

static int vce_v3_0_hw_fini(void *handle)
{
	return 0;
}

static int vce_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return r;
}

static int vce_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	r = vce_v3_0_hw_init(adev);
	if (r)
		return r;

	return r;
}

static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);

	WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

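	/* Both instances share the firmware image; each gets its own stack
	 * and data segment, with instance 1's regions placed right after
	 * instance 0's.
	 */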
	if (idx == 0) {
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);

	WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
		 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
}

static bool vce_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}

static int vce_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int vce_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK);
	mdelay(5);

	return vce_v3_0_start(adev);
}

static void vce_v3_0_print_status(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "VCE 3.0 registers\n");
	dev_info(adev->dev, "  VCE_STATUS=0x%08X\n",
		 RREG32(mmVCE_STATUS));
	dev_info(adev->dev, "  VCE_VCPU_CNTL=0x%08X\n",
		 RREG32(mmVCE_VCPU_CNTL));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET0));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE0=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE0));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET1));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE1=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE1));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET2));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE2=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE2));
	dev_info(adev->dev, "  VCE_SOFT_RESET=0x%08X\n",
		 RREG32(mmVCE_SOFT_RESET));
	dev_info(adev->dev, "  VCE_RB_BASE_LO2=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_LO2));
	dev_info(adev->dev, "  VCE_RB_BASE_HI2=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_HI2));
	dev_info(adev->dev, "  VCE_RB_SIZE2=0x%08X\n",
		 RREG32(mmVCE_RB_SIZE2));
	dev_info(adev->dev, "  VCE_RB_RPTR2=0x%08X\n",
		 RREG32(mmVCE_RB_RPTR2));
	dev_info(adev->dev, "  VCE_RB_WPTR2=0x%08X\n",
		 RREG32(mmVCE_RB_WPTR2));
	dev_info(adev->dev, "  VCE_RB_BASE_LO=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_LO));
	dev_info(adev->dev, "  VCE_RB_BASE_HI=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_HI));
	dev_info(adev->dev, "  VCE_RB_SIZE=0x%08X\n",
		 RREG32(mmVCE_RB_SIZE));
	dev_info(adev->dev, "  VCE_RB_RPTR=0x%08X\n",
		 RREG32(mmVCE_RB_RPTR));
	dev_info(adev->dev, "  VCE_RB_WPTR=0x%08X\n",
		 RREG32(mmVCE_RB_WPTR));
	dev_info(adev->dev, "  VCE_CLOCK_GATING_A=0x%08X\n",
		 RREG32(mmVCE_CLOCK_GATING_A));
	dev_info(adev->dev, "  VCE_CLOCK_GATING_B=0x%08X\n",
		 RREG32(mmVCE_CLOCK_GATING_B));
	dev_info(adev->dev, "  VCE_UENC_CLOCK_GATING=0x%08X\n",
		 RREG32(mmVCE_UENC_CLOCK_GATING));
	dev_info(adev->dev, "  VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
		 RREG32(mmVCE_UENC_REG_CLOCK_GATING));
	dev_info(adev->dev, "  VCE_SYS_INT_EN=0x%08X\n",
		 RREG32(mmVCE_SYS_INT_EN));
	dev_info(adev->dev, "  VCE_LMI_CTRL2=0x%08X\n",
		 RREG32(mmVCE_LMI_CTRL2));
	dev_info(adev->dev, "  VCE_LMI_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_CTRL));
	dev_info(adev->dev, "  VCE_LMI_VM_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_VM_CTRL));
	dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL=0x%08X\n",
		 RREG32(mmVCE_LMI_SWAP_CNTL));
	dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL1=0x%08X\n",
		 RREG32(mmVCE_LMI_SWAP_CNTL1));
	dev_info(adev->dev, "  VCE_LMI_CACHE_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_CACHE_CTRL));
}

static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");
	switch (entry->src_data) {
	case 0:
		amdgpu_fence_process(&adev->vce.ring[0]);
		break;
	case 1:
		amdgpu_fence_process(&adev->vce.ring[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data);
		break;
	}

	return 0;
}

static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int vce_v3_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		/* XXX do we need a vce_v3_0_stop()? */
		return 0;
	else
		return vce_v3_0_start(adev);
}

const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.soft_reset = vce_v3_0_soft_reset,
	.print_status = vce_v3_0_print_status,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.emit_semaphore = amdgpu_vce_ring_emit_semaphore,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.is_lockup = amdgpu_ring_test_lockup,
};

static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs;
	adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};

static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
}