/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI	"radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI	"radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII	"radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size for the new BO
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

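	/*
	 * The ucode_version field packs a 12-bit major version (bits 31:20),
	 * a 12-bit minor version (bits 19:8) and an 8-bit binary ID (bits 7:0).
	 */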
	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
		version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	/* allocate firmware, stack and heap BO */

	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &adev->vce.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->vce.gpu_addr);
	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);
		return r;
	}

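	/*
	 * Kernel-internal VCE submissions (e.g. destroy messages for leaked
	 * handles) are queued through this scheduler entity on ring 0.
	 */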
	ring = &adev->vce.ring[0];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
	r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
				  rq, amdgpu_sched_jobs);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	if (adev->vce.vcpu_bo == NULL)
		return 0;

	amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);

	amdgpu_bo_unref(&adev->vce.vcpu_bo);

	amdgpu_ring_fini(&adev->vce.ring[0]);
	amdgpu_ring_fini(&adev->vce.ring[1]);

	release_firmware(adev->vce.fw);

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

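	/* copy the firmware image, minus its common header, into the BO */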
	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy(cpu_addr, (adev->vce.fw->data) + offset,
	       (adev->vce.fw->size) - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it is not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);

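	/* if both rings are idle, power VCE down; otherwise check again later */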
	if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
	    (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work,
				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
	}
}

/**
 * amdgpu_vce_note_usage - power up VCE
 *
 * @adev: amdgpu_device pointer
 *
 * Make sure VCE is powered up when we want to use it
 */
static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
{
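	/*
	 * If the idle work was not pending, the idle handler has already run
	 * (or was never armed), so the clocks need to be brought back up.
	 */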
	bool streams_changed = false;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);

	set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));

	if (adev->pm.dpm_enabled) {
		/* XXX figure out if the streams changed */
		streams_changed = false;
	}

	if (set_clocks || streams_changed) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
		}
	}
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		amdgpu_vce_note_usage(adev);

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

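	/* the dummy feedback buffer sits 1KiB into the IB, past the message */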
	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = lower_32_bits(dummy);
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = f;
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = fence_get(f);
	fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: submit the msg directly or through the scheduler entity
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = lower_32_bits(dummy);
	ib->ptr[ib->length_dw++] = 0x00000001;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

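	/*
	 * Direct submission bypasses the scheduler and is used by the ring
	 * tests; otherwise the job is handed to the VCE scheduler entity.
	 */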
	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = f;
		if (r)
			goto err;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err;
	}

	if (fence)
		*fence = fence_get(f);
	fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to patch
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return -EINVAL;
	}

	if ((addr + (uint64_t)size) >
	    ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

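	/* rewrite the GPU virtual address to the BO's actual offset, undoing
	 * the index offset added above */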
	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, bool *allocated)
{
	unsigned i;

	*allocated = false;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated = true;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 *
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	bool destroyed = false;
	bool created = false;
	bool allocated = false;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	int i, r = 0, idx = 0;

	amdgpu_vce_note_usage(p->adev);

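	/* walk the IB one packet at a time; each packet starts with a length
	 * dword followed by a command dword */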
	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		if (destroyed) {
			DRM_ERROR("No other command allowed after destroy!\n");
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: // session
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0)
				return session_idx;
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: // task info
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: // create
			created = true;
			if (!allocated) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: // config extension
		case 0x04000002: // pic control
		case 0x04000005: // rate control
		case 0x04000007: // motion estimation
		case 0x04000008: // rdo
		case 0x04000009: // vui
		case 0x05000002: // auxiliary buffer
			break;

		case 0x03000001: // encode
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: // destroy
			destroyed = true;
			break;

		case 0x05000001: // context buffer
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: // video bitstream buffer
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: // feedback buffer
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated && !created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if ((!r && destroyed) || (r && allocated)) {
		/*
		 * IB contains a destroy msg or we have allocated a
		 * handle and got an error, either way free the handle
		 */
		for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
	}

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: fence address
 * @seq: fence sequence number
 * @flags: fence flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

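	/* write a single END command and wait for the read pointer to move */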
	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
{
	struct fence *fence = NULL;
	int r;

	/* skip vce ring1 ib test for now, since it's not reliable */
	if (ring == &ring->adev->vce.ring[1])
		return 0;

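	/* submit a create message followed by a destroy message and wait for
	 * the destroy fence to signal */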
	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
		goto error;
	}

	r = fence_wait(fence, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
	}
error:
	fence_put(fence);
	return r;
}