/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"
#define FIRMWARE_KABINI		"radeon/kabini_uvd.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_uvd.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_uvd.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"

/**
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	unsigned reg, count;
	unsigned data0, data1;
	unsigned idx;
	unsigned ib_idx;

	/* does the IB have a msg command */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned *buf_sizes;
};

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);

static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

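/**
 * amdgpu_uvd_sw_init - load the UVD firmware and allocate the VCPU BO
 *
 * @adev: amdgpu device pointer
 *
 * Request and validate the UVD firmware for the current ASIC, then
 * create, pin and map the buffer object the UVD VCPU runs from.
 */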
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->uvd.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->uvd.fw);
		adev->uvd.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
		 version_major, version_minor, family_id);

	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
		  + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &adev->uvd.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->uvd.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		dev_err(adev->dev, "(%d) UVD bo pin failed\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) UVD map failed\n", r);
		return r;
	}

	amdgpu_bo_unreserve(adev->uvd.vcpu_bo);

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		atomic_set(&adev->uvd.handles[i], 0);
		adev->uvd.filp[i] = NULL;
	}

	/* from uvd v5.0 HW addressing capacity increased to 64 bits */
	if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	return 0;
}

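/**
 * amdgpu_uvd_sw_fini - tear down the UVD block
 *
 * @adev: amdgpu device pointer
 *
 * Unmap, unpin and free the VCPU buffer object, finish the UVD ring
 * and release the firmware.
 */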
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
	if (!r) {
		amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
		amdgpu_bo_unpin(adev->uvd.vcpu_bo);
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
	}

	amdgpu_bo_unref(&adev->uvd.vcpu_bo);

	amdgpu_ring_fini(&adev->uvd.ring);

	release_firmware(adev->uvd.fw);

	return 0;
}

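/**
 * amdgpu_uvd_suspend - close open sessions before suspend
 *
 * @adev: amdgpu device pointer
 *
 * Send a destroy message for every handle still open and wait for the
 * resulting fence, so the VCPU is in a clean state when the device is
 * suspended.
 */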
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	int i, r;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
		if (handle != 0) {
			struct fence *fence;

			amdgpu_uvd_note_usage(adev);

			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			fence_wait(fence, false);
			fence_put(fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}

	return 0;
}

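/**
 * amdgpu_uvd_resume - reload the UVD firmware image
 *
 * @adev: amdgpu device pointer
 *
 * Copy the firmware into the VCPU buffer object again and clear the
 * remainder of the BO (stack and heap area).
 */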
int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	const struct common_firmware_header *hdr;
	unsigned offset;

	if (adev->uvd.vcpu_bo == NULL)
		return -EINVAL;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
		(adev->uvd.fw->size) - offset);

	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	size -= le32_to_cpu(hdr->ucode_size_bytes);
	ptr = adev->uvd.cpu_addr;
	ptr += le32_to_cpu(hdr->ucode_size_bytes);

	memset(ptr, 0, size);

	return 0;
}

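/**
 * amdgpu_uvd_free_handles - destroy all sessions owned by a file
 *
 * @adev: amdgpu device pointer
 * @filp: DRM file the sessions belong to
 *
 * Send a destroy message for every handle the file still has open and
 * free the handle slots; used when a file descriptor is closed.
 */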
void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
		if (handle != 0 && adev->uvd.filp[i] == filp) {
			struct fence *fence;

			amdgpu_uvd_note_usage(adev);

			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			fence_wait(fence, false);
			fence_put(fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}
}

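/**
 * amdgpu_uvd_force_into_uvd_segment - restrict BO placement
 *
 * @rbo: buffer object to restrict
 *
 * Limit all placements to the first 256MB, the segment that UVD
 * hardware without 64-bit addressing can reach.
 */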
static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *rbo)
{
	int i;
	for (i = 0; i < rbo->placement.num_placement; ++i) {
		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}
}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd, lo, hi;
	uint64_t addr;
	int r = 0;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
			amdgpu_ttm_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	}

	return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @msg: pointer to message structure
 * @buf_sizes: returned buffer sizes
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];
	unsigned level = msg[57];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);
	unsigned fs_in_mb = width_in_mb * height_in_mb;

	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned min_ctx_size = 0;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */
	case 7: /* H264 Perf */
		switch(level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 16: /* H265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
			       * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;
	return 0;
}

/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	void *ptr;
	long r;
	int i;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg, calc image size (width * height) */
		amdgpu_bo_kunmap(bo);

		/* try to alloc a new handle */
		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				DRM_ERROR("Handle 0x%x already in use!\n", handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
				adev->uvd.filp[i] = ctx->parser->filp;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD handles!\n");
		return -EINVAL;

	case 1:
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				if (adev->uvd.filp[i] != ctx->parser->filp) {
					DRM_ERROR("UVD handle collision detected!\n");
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
		return -ENOENT;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;

	default:
		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
		return -EINVAL;
	}
	BUG();
	return -EINVAL;
}

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd, lo, hi;
	uint64_t start, end;
	uint64_t addr;
	int r;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL)
		return -EINVAL;

	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->it.last + 1 - mapping->it.start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
			    lower_32_bits(start));
	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1,
			    upper_32_bits(start));

	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned reg = ctx->reg + i;

		if (ctx->idx >= ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int r;

	for (ctx->idx = 0 ; ctx->idx < ib->length_dw; ) {
		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
		unsigned type = CP_PACKET_GET_TYPE(cmd);
		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", type);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 * @ib_idx: index of the indirect buffer to parse
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
	unsigned buf_sizes[] = {
		[0x00000000] = 2048,
		[0x00000001] = 0xFFFFFFFF,
		[0x00000002] = 0xFFFFFFFF,
		[0x00000003] = 2048,
		[0x00000004] = 0xFFFFFFFF,
	};
	struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
	int r;

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib_idx = ib_idx;

	/* first round, make sure the buffers are actually in the UVD segment */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
	if (r)
		return r;

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	amdgpu_uvd_note_usage(ctx.parser->adev);

	return 0;
}

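/**
 * amdgpu_uvd_send_msg - submit a UVD message buffer to the ring
 *
 * @ring: UVD ring to submit to
 * @bo: buffer object containing the message
 * @fence: optional return of the submission fence
 *
 * Reserve and validate the BO, build a small IB that hands the message
 * address to the VCPU and submit it as a job.
 */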
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct fence **fence)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t addr;
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
	if (r)
		return r;

	if (!bo->adev->uvd.address_64_bit) {
		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_uvd_force_into_uvd_segment(bo);
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto err;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; ++i)
		ib->ptr[i] = PACKET2(0);
	ib->length_dw = 16;

	r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_UNDEFINED, &f);
	if (r)
		goto err_free;

	ttm_eu_fence_buffer_objects(&ticket, &head, f);

	if (fence)
		*fence = fence_get(f);
	amdgpu_bo_unref(&bo);
	fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}

/* multiple fence commands without any stream commands in between can
   crash the vcpu so just try to emit a dummy create/destroy msg to
   avoid this */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together an UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, fence);
}

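/**
 * amdgpu_uvd_get_destroy_msg - construct and send a destroy message
 *
 * @ring: UVD ring to submit to
 * @handle: session handle to destroy
 * @fence: optional return of the submission fence
 *
 * Counterpart to amdgpu_uvd_get_create_msg() above.
 */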
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together an UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, fence);
}

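/**
 * amdgpu_uvd_idle_work_handler - power off UVD when idle
 *
 * Delayed work handler; if no fences are outstanding and no handles
 * are open, disable the UVD clocks, otherwise re-arm the timer.
 */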
static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.idle_work.work);
	unsigned i, fences, handles = 0;

	fences = amdgpu_fence_count_emitted(&adev->uvd.ring);

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
		if (atomic_read(&adev->uvd.handles[i]))
			++handles;

	if (fences == 0 && handles == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->uvd.idle_work,
				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
	}
}

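/**
 * amdgpu_uvd_note_usage - mark the UVD block as busy
 *
 * Cancel the pending idle work, re-arm it, and re-enable the UVD
 * clocks if the block had already been powered down in the meantime.
 */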
static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
{
	bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
	set_clocks &= schedule_delayed_work(&adev->uvd.idle_work,
					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));

	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
		}
	}
}