/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI	"radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI	"radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII	"radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size of the BO holding the firmware, stack and heap
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

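	/* ucode_version packs major (bits 31:20), minor (19:8) and
	 * binary id (7:0) */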
	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
		version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	/* allocate firmware, stack and heap BO */

	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &adev->vce.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->vce.gpu_addr);
	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);
		return r;
	}

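	/* set up a scheduler entity on ring 0 for kernel queue submissions */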
	ring = &adev->vce.ring[0];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
	r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
				  rq, amdgpu_sched_jobs);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	if (adev->vce.vcpu_bo == NULL)
		return 0;

	amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);

	amdgpu_bo_unref(&adev->vce.vcpu_bo);

	amdgpu_ring_fini(&adev->vce.ring[0]);
	amdgpu_ring_fini(&adev->vce.ring[1]);

	release_firmware(adev->vce.fw);

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

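	/* re-upload the firmware image, skipping the common header, so it
	 * is in place again in the VCPU BO after resume */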
	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy(cpu_addr, (adev->vce.fw->data) + offset,
	       (adev->vce.fw->size) - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * power off VCE when it's not used any more
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);

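	/* power VCE down only when both rings have no fences outstanding;
	 * otherwise check again after another timeout period */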
	if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
	    (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work,
				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
	}
}

/**
 * amdgpu_vce_note_usage - power up VCE
 *
 * @adev: amdgpu_device pointer
 *
 * Make sure VCE is powered up when we want to use it
 */
static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
{
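	/* cancel_delayed_work_sync() tells us whether the idle work was
	 * still pending; if it wasn't, VCE may already be powered down
	 * and the clocks need to be raised again */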
	bool streams_changed = false;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));

	if (adev->pm.dpm_enabled) {
		/* XXX figure out if the streams changed */
		streams_changed = false;
	}

	if (set_clocks || streams_changed) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
		}
	}
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		amdgpu_vce_note_usage(adev);

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

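	/* dummy feedback buffer address, placed past the message inside
	 * this IB */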
	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

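	/* firmware with major version >= 52 expects a longer create command
	 * (fw_version stores the major version in its top byte) */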
	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = f;
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = fence_get(f);
	fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: submit directly to the ring or go through the scheduler
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

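	/* direct submission bypasses the scheduler (used from the IB test),
	 * otherwise the job goes through the VCE scheduler entity */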
	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = f;
		if (r)
			goto err;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err;
	}

	if (fence)
		*fence = fence_get(f);
	fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to patch
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: index into the buffer array (0xffffffff is treated as 0)
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return -EINVAL;
	}

	if ((addr + (uint64_t)size) >
	    ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

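	/* rewrite the VM address into the BO's GPU offset; the index offset
	 * was only added for the mapping lookup and the bounds check */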
	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * when we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, bool *allocated)
{
	unsigned i;

	*allocated = false;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to allocate a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated = true;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 *
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	bool destroyed = false;
	bool created = false;
	bool allocated = false;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	int i, r = 0, idx = 0;

	amdgpu_vce_note_usage(p->adev);

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		if (destroyed) {
			DRM_ERROR("No other command allowed after destroy!\n");
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0)
				return session_idx;
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: /* create */
			created = true;
			if (!allocated) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

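			/* compute the image size and remember it for the
			 * buffer size checks in the other commands */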
			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed = true;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated && !created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if ((!r && destroyed) || (r && allocated)) {
		/*
		 * IB contains a destroy msg or we have allocated a
		 * handle and got an error, either way free the handle
		 */
		for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
	}

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address to write the fence value to
 * @seq: sequence number of the fence
 * @flags: fence flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

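	/* the ring works if the read pointer moves past the END command
	 * within the timeout */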
	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
{
	struct fence *fence = NULL;
	int r;

	/* skip vce ring1 ib test for now, since it's not reliable */
	if (ring == &ring->adev->vce.ring[1])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
		goto error;
	}

	r = fence_wait(fence, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
	}
error:
	fence_put(fence);
	return r;
}