/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"
#define FIRMWARE_KABINI		"radeon/kabini_uvd.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_uvd.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_uvd.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"

/**
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	unsigned reg, count;
	unsigned data0, data1;
	unsigned idx;
	unsigned ib_idx;

	/* does the IB have a msg command */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned *buf_sizes;
};

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);

static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->uvd.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->uvd.fw);
		adev->uvd.fw = NULL;
		return r;
	}
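
	/*
	 * The shifts below imply the packed layout of ucode_version:
	 * bits 31:24 hold the major version, bits 15:8 the minor version
	 * and bits 7:0 the family ID.
	 */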
	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
		 version_major, version_minor, family_id);

	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
		  + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
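
	/*
	 * The single VCPU BO allocated below holds the firmware image
	 * (page aligned, plus 8 bytes of padding) followed by the stack
	 * and heap areas the UVD VCPU works in; amdgpu_uvd_resume()
	 * relies on exactly this layout when it reloads the ucode.
	 */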
	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &adev->uvd.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->uvd.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		dev_err(adev->dev, "(%d) UVD bo pin failed\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) UVD map failed\n", r);
		return r;
	}

	amdgpu_bo_unreserve(adev->uvd.vcpu_bo);

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		atomic_set(&adev->uvd.handles[i], 0);
		adev->uvd.filp[i] = NULL;
	}

	/* from uvd v5.0 HW addressing capacity increased to 64 bits */
	if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	return 0;
}

int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
	if (!r) {
		amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
		amdgpu_bo_unpin(adev->uvd.vcpu_bo);
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
	}

	amdgpu_bo_unref(&adev->uvd.vcpu_bo);

	amdgpu_ring_fini(&adev->uvd.ring);

	release_firmware(adev->uvd.fw);

	return 0;
}

int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	int i, r;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
		if (handle != 0) {
			struct fence *fence;

			amdgpu_uvd_note_usage(adev);

			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			fence_wait(fence, false);
			fence_put(fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}

	return 0;
}

int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	const struct common_firmware_header *hdr;
	unsigned offset;

	if (adev->uvd.vcpu_bo == NULL)
		return -EINVAL;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
	       (adev->uvd.fw->size) - offset);

	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	size -= le32_to_cpu(hdr->ucode_size_bytes);
	ptr = adev->uvd.cpu_addr;
	ptr += le32_to_cpu(hdr->ucode_size_bytes);

	memset(ptr, 0, size);

	return 0;
}

void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
		if (handle != 0 && adev->uvd.filp[i] == filp) {
			struct fence *fence;

			amdgpu_uvd_note_usage(adev);

			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			fence_wait(fence, false);
			fence_put(fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}
}
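
/*
 * Pre-v5.0 UVD hardware cannot address beyond the first 256MB window
 * (see the 64-bit addressing check at the end of amdgpu_uvd_sw_init),
 * so clamp every placement of the buffer to page frames below that
 * boundary.
 */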
static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *rbo)
{
	int i;
	for (i = 0; i < rbo->placement.num_placement; ++i) {
		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}
}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd, lo, hi;
	uint64_t addr;
	int r = 0;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
			amdgpu_ttm_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	}

	return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @msg: pointer to message structure
 * @buf_sizes: returned buffer sizes
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];
	unsigned level = msg[57];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);
	unsigned fs_in_mb = width_in_mb * height_in_mb;

	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned min_ctx_size = 0;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);
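
	/*
	 * Worked example, assuming the usual 4:2:0 layout: a 1920x1088
	 * frame needs 1920*1088 bytes of luma plus half that again for
	 * chroma, i.e. 3133440 bytes, which is already 1024 aligned.
	 */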

	switch (stream_type) {
	case 0: /* H264 */
	case 7: /* H264 Perf */
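		/*
		 * These per-level constants appear to be MaxDpbMbs from
		 * table A-1 of the H.264 spec: dividing by the frame size
		 * in macroblocks gives the maximum number of reference
		 * frames the DPB has to hold at that level.
		 */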
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 16: /* H265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
			       * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;
	return 0;
}

/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	void *ptr;
	long r;
	int i;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		DRM_ERROR("Invalid UVD handle!\n");
		amdgpu_bo_kunmap(bo);
		return -EINVAL;
	}

	if (msg_type == 1) {
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

	} else if (msg_type == 2) {
		/* it's a destroy msg, free the handle */
		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;
	} else {
		/* it's a create msg */
		amdgpu_bo_kunmap(bo);

		if (msg_type != 0) {
			DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
			return -EINVAL;
		}

		/* it's a create msg, no special handling needed */
	}

	/* create or decode, validate the handle */
	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		if (atomic_read(&adev->uvd.handles[i]) == handle)
			return 0;
	}
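
	/*
	 * Handle bookkeeping is lock free: claiming a slot below swaps a
	 * zero entry for the new handle with atomic_cmpxchg(), so two
	 * concurrent submissions can never end up sharing a slot.
	 */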
	/* handle not found, try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
			adev->uvd.filp[i] = ctx->parser->filp;
			return 0;
		}
	}

	DRM_ERROR("No more free UVD handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	struct amdgpu_ib *ib;
	uint32_t cmd, lo, hi;
	uint64_t start, end;
	uint64_t addr;
	int r;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL)
		return -EINVAL;
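
	/*
	 * Rebase the command's UVD virtual address onto the BO's actual
	 * GPU offset: the interval tree mapping gives the page range and
	 * the rebased address is then patched back into the IB below.
	 */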
	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->it.last + 1 - mapping->it.start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	ib = &ctx->parser->ibs[ctx->ib_idx];
	ib->ptr[ctx->data0] = start & 0xFFFFFFFF;
	ib->ptr[ctx->data1] = start >> 32;

	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx];
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned reg = ctx->reg + i;

		if (ctx->idx >= ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx];
	int r;

	for (ctx->idx = 0; ctx->idx < ib->length_dw; ) {
		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
		unsigned type = CP_PACKET_GET_TYPE(cmd);
		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d!\n", type);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 * @ib_idx: index of the indirect buffer to parse
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
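	/*
	 * Minimum buffer sizes, indexed by command type: message (0) and
	 * feedback (3) buffers are fixed at 2048 bytes, while the DPB (1),
	 * decoding target (2) and context (4) entries start at 0xFFFFFFFF
	 * so they only pass once a decode message has filled in the real
	 * minimum via amdgpu_uvd_cs_msg_decode().
	 */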
	unsigned buf_sizes[] = {
		[0x00000000] = 2048,
		[0x00000001] = 0xFFFFFFFF,
		[0x00000002] = 0xFFFFFFFF,
		[0x00000003] = 2048,
		[0x00000004] = 0xFFFFFFFF,
	};
	struct amdgpu_ib *ib = &parser->ibs[ib_idx];
	int r;

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib_idx = ib_idx;

	/* first round, make sure the buffers are actually in the UVD segment */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
	if (r)
		return r;

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	amdgpu_uvd_note_usage(ctx.parser->adev);

	return 0;
}

static int amdgpu_uvd_free_job(struct amdgpu_job *job)
{
	amdgpu_ib_free(job->adev, job->ibs);
	kfree(job->ibs);
	return 0;
}

static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct fence **fence)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct amdgpu_ib *ib = NULL;
	struct fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t addr;
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
	if (r)
		return r;

	if (!bo->adev->uvd.address_64_bit) {
		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_uvd_force_into_uvd_segment(bo);
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto err;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib) {
		r = -ENOMEM;
		goto err;
	}
	r = amdgpu_ib_get(ring, NULL, 64, ib);
	if (r)
		goto err1;

	addr = amdgpu_bo_gpu_offset(bo);
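
	/*
	 * Send the message with a minimal 16 dword IB: register writes for
	 * the lower and upper halves of the message BO address, the actual
	 * command, then type-2 NOP packets to pad the IB to its size.
	 */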
	ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; ++i)
		ib->ptr[i] = PACKET2(0);
	ib->length_dw = 16;

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_uvd_free_job,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		goto err2;

	ttm_eu_fence_buffer_objects(&ticket, &head, f);

	if (fence)
		*fence = fence_get(f);
	amdgpu_bo_unref(&bo);
	fence_put(f);

	if (amdgpu_enable_scheduler)
		return 0;

	amdgpu_ib_free(ring->adev, ib);
	kfree(ib);
	return 0;

err2:
	amdgpu_ib_free(ring->adev, ib);
err1:
	kfree(ib);
err:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}

/* multiple fence commands without any stream commands in between can
   crash the vcpu so just try to emit a dummy create/destroy msg to
   avoid this */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}
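
	/*
	 * Dword 1 is the message type (0 = create) and dword 2 the session
	 * handle, matching what amdgpu_uvd_cs_msg() parses; dword 0 appears
	 * to be the total message size and dwords 7/8 a 1920x1088
	 * (0x780 x 0x440) stream description.
	 */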
	/* stitch together a UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, fence);
}

int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together a UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, fence);
}

static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.idle_work.work);
	unsigned i, fences, handles = 0;

	fences = amdgpu_fence_count_emitted(&adev->uvd.ring);

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
		if (atomic_read(&adev->uvd.handles[i]))
			++handles;

	if (fences == 0 && handles == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->uvd.idle_work,
				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
	}
}
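
/*
 * Called on every submission: if the idle work was not pending, the
 * handler may already have powered the block down, so bring the clocks
 * back up before new work reaches the ring, then re-arm the idle timer.
 */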
static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
{
	bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
	set_clocks &= schedule_delayed_work(&adev->uvd.idle_work,
					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));

	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
		}
	}
}