/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI	"radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI	"radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII	"radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size of the firmware BO to allocate
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;

	default:
		return -EINVAL;
	}
	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}
	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %hhd.%hhd Binary ID: %hhd\n",
		version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));
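
	/*
	 * Illustrative example of the repacking above (not from the original
	 * sources): a firmware image with major 52, minor 4, binary id 0
	 * reports ucode_version = (52 << 20) | (4 << 8) = 0x03400400 and is
	 * stored as fw_version = (52 << 24) | (4 << 16) = 0x34040000; the
	 * "fw_version >> 24 >= 52" checks further down rely on this layout.
	 */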
	/* allocate firmware, stack and heap BO */

	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &adev->vce.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->vce.gpu_addr);
	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);
		return r;
	}
	ring = &adev->vce.ring[0];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
	r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
				  rq, amdgpu_sched_jobs);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}
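
	/*
	 * This dedicated entity is what the non-direct path in
	 * amdgpu_vce_get_destroy_msg() submits through, so handle teardown
	 * jobs issued at file close are scheduled on ring 0 like any other
	 * VCE job.
	 */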
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	if (adev->vce.vcpu_bo == NULL)
		return 0;

	amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);

	amdgpu_bo_unref(&adev->vce.vcpu_bo);

	amdgpu_ring_fini(&adev->vce.ring[0]);
	amdgpu_ring_fini(&adev->vce.ring[1]);

	release_firmware(adev->vce.fw);

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
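
	/*
	 * VRAM contents are not guaranteed to survive while the device is
	 * powered down, so the firmware image is copied back into the
	 * pinned VCPU BO on every resume.
	 */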
	memcpy(cpu_addr, (adev->vce.fw->data) + offset,
	       (adev->vce.fw->size) - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * power off VCE when it's not used any more
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
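
	/*
	 * Power VCE down only once both rings have no fences outstanding;
	 * otherwise re-arm the timer and check again after another
	 * VCE_IDLE_TIMEOUT_MS.
	 */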
	if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
	    (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work,
				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
	}
}

/**
 * amdgpu_vce_note_usage - power up VCE
 *
 * @adev: amdgpu_device pointer
 *
 * Make sure VCE is powered up when we want to use it
 */
static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
{
	bool streams_changed = false;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
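
	/*
	 * If cancel_delayed_work_sync() found no pending work the idle
	 * handler has already run (or was never queued) and VCE may be
	 * powered down, so set_clocks stays true and power is re-enabled
	 * below; re-queueing the work re-arms the idle timeout either way.
	 */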
	if (adev->pm.dpm_enabled) {
		/* XXX figure out if the streams changed */
		streams_changed = false;
	}

	if (set_clocks || streams_changed) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
		}
	}
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);
		if (!handle || adev->vce.filp[i] != filp)
			continue;

		amdgpu_vce_note_usage(adev);

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	dummy = ib->gpu_addr + 1024;
	/* stitch together a VCE create msg */
	ib->length_dw = 0;
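	/*
	 * Each packet below is a length in bytes followed by a command id
	 * and its payload, e.g. the 0x0000000c/0x00000001 pair is a
	 * 12-byte session command whose only payload is the handle.
	 */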
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = fence_get(f);
	fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: submit directly to the ring or go through the scheduler entity
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;
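
	/*
	 * Direct submission bypasses the GPU scheduler and is what the
	 * ring/IB tests use; otherwise the job goes through the VCE
	 * scheduler entity set up in amdgpu_vce_sw_init().
	 */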
	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		if (r)
			goto err;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err;
	}

	if (fence)
		*fence = fence_get(f);
	fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);
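
	/*
	 * For indexed buffers (bitstream/feedback) the command stream only
	 * carries the base address; stepping forward by index * size lets
	 * the mapping and size checks below validate the slot that is
	 * actually used, and the offset is subtracted again after patching.
	 */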
	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return -EINVAL;
	}

	if ((addr + (uint64_t)size) >
	    ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, bool *allocated)
{
	unsigned i;
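
	/*
	 * Handles live in a small fixed-size table; the lookup below is
	 * lock-free and a free slot is claimed with atomic_cmpxchg() so
	 * two parsers cannot grab the same index concurrently.
	 */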
	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated = true;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: index of the IB to parse
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	bool destroyed = false;
	bool created = false;
	bool allocated = false;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	int i, r = 0, idx = 0;

	amdgpu_vce_note_usage(p->adev);
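
	/*
	 * Walk the IB one command packet at a time: dword 0 of a packet is
	 * its length in bytes and dword 1 the command id, so idx advances
	 * by len / 4 dwords per iteration.
	 */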
	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		if (destroyed) {
			DRM_ERROR("No other command allowed after destroy!\n");
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: // session
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0)
				return session_idx;
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: // task info
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: // create
			created = true;
			if (!allocated) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: // config extension
		case 0x04000002: // pic control
		case 0x04000005: // rate control
		case 0x04000007: // motion estimation
		case 0x04000008: // rdo
		case 0x04000009: // vui
		case 0x05000002: // auxiliary buffer
			break;

		case 0x03000001: // encode
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: // destroy
			destroyed = true;
			break;

		case 0x05000001: // context buffer
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: // video bitstream buffer
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: // feedback buffer
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated && !created) {
		DRM_ERROR("New session without create command!\n");
		r = -EINVAL;
	}

out:
	if ((!r && destroyed) || (r && allocated)) {
		/*
		 * IB contains a destroy msg or we have allocated a
		 * handle and got an error, anyway free the handle
		 */
		for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
	}

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address to write the fence value to
 * @seq: fence sequence number
 * @flags: fence flags
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
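
	/*
	 * The sequence number is emitted as a single dword below, so 64-bit
	 * fence values are not supported here; hence the WARN_ON above.
	 */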
	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);
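
	/*
	 * VCE_CMD_END has no visible side effect of its own, so the test
	 * simply polls for the read pointer to move past the rptr snapshot
	 * taken above, which proves the engine is fetching commands.
	 */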
	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
{
	struct fence *fence = NULL;
	int r;

	/* skip vce ring1 ib test for now, since it's not reliable */
	if (ring == &ring->adev->vce.ring[1])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
		goto error;
	}

	r = fence_wait(fence, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
	}

error:
	fence_put(fence);
	return r;
}