/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
23 | ||
24 | /** | |
25 | * Command list validator for VC4. | |
26 | * | |
27 | * The VC4 has no IOMMU between it and system memory. So, a user with | |
28 | * access to execute command lists could escalate privilege by | |
29 | * overwriting system memory (drawing to it as a framebuffer) or | |
30 | * reading system memory it shouldn't (reading it as a texture, or | |
31 | * uniform data, or vertex data). | |
32 | * | |
33 | * This validates command lists to ensure that all accesses are within | |
34 | * the bounds of the GEM objects referenced. It explicitly whitelists | |
35 | * packets, and looks at the offsets in any address fields to make | |
36 | * sure they're constrained within the BOs they reference. | |
37 | * | |
38 | * Note that because of the validation that's happening anyway, this | |
39 | * is where GEM relocation processing happens. | |
40 | */ | |
41 | ||
42 | #include "uapi/drm/vc4_drm.h" | |
43 | #include "vc4_drv.h" | |
44 | #include "vc4_packet.h" | |
45 | ||
46 | #define VALIDATE_ARGS \ | |
47 | struct vc4_exec_info *exec, \ | |
48 | void *validated, \ | |
49 | void *untrusted | |
50 | ||
/** Return the width in pixels of a 64-byte microtile. */
static uint32_t
utile_width(int cpp)
{
	switch (cpp) {
	case 1:
	case 2:
		return 8;
	case 4:
		return 4;
	case 8:
		return 2;
	default:
		DRM_ERROR("unknown cpp: %d\n", cpp);
		return 1;
	}
}

/** Return the height in pixels of a 64-byte microtile. */
static uint32_t
utile_height(int cpp)
{
	switch (cpp) {
	case 1:
		return 8;
	case 2:
	case 4:
	case 8:
		return 4;
	default:
		DRM_ERROR("unknown cpp: %d\n", cpp);
		return 1;
	}
}

/**
 * The texture unit decides what tiling format a particular miplevel is
 * using based on this same size rule, so we lay out our miptrees
 * accordingly.
 */
static bool
size_is_lt(uint32_t width, uint32_t height, int cpp)
{
	return (width <= 4 * utile_width(cpp) ||
		height <= 4 * utile_height(cpp));
}
96 | ||
97 | struct drm_gem_cma_object * | |
98 | vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex) | |
99 | { | |
100 | struct drm_gem_cma_object *obj; | |
101 | struct vc4_bo *bo; | |
102 | ||
103 | if (hindex >= exec->bo_count) { | |
104 | DRM_ERROR("BO index %d greater than BO count %d\n", | |
105 | hindex, exec->bo_count); | |
106 | return NULL; | |
107 | } | |
108 | obj = exec->bo[hindex]; | |
109 | bo = to_vc4_bo(&obj->base); | |
110 | ||
111 | if (bo->validated_shader) { | |
112 | DRM_ERROR("Trying to use shader BO as something other than " | |
113 | "a shader\n"); | |
114 | return NULL; | |
115 | } | |
116 | ||
117 | return obj; | |
118 | } | |
119 | ||
120 | static struct drm_gem_cma_object * | |
121 | vc4_use_handle(struct vc4_exec_info *exec, uint32_t gem_handles_packet_index) | |
122 | { | |
123 | return vc4_use_bo(exec, exec->bo_index[gem_handles_packet_index]); | |
124 | } | |
125 | ||
126 | static bool | |
127 | validate_bin_pos(struct vc4_exec_info *exec, void *untrusted, uint32_t pos) | |
128 | { | |
129 | /* Note that the untrusted pointer passed to these functions is | |
130 | * incremented past the packet byte. | |
131 | */ | |
132 | return (untrusted - 1 == exec->bin_u + pos); | |
133 | } | |
134 | ||
static uint32_t
gl_shader_rec_size(uint32_t pointer_bits)
{
	uint32_t attribute_count = pointer_bits & 7;
	bool extended = pointer_bits & 8;

	if (attribute_count == 0)
		attribute_count = 8;

	if (extended)
		return 100 + attribute_count * 4;
	else
		return 36 + attribute_count * 8;
}

bool
vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
		   uint32_t offset, uint8_t tiling_format,
		   uint32_t width, uint32_t height, uint8_t cpp)
{
	uint32_t aligned_width, aligned_height, stride, size;
	uint32_t utile_w = utile_width(cpp);
	uint32_t utile_h = utile_height(cpp);

	/* The shaded vertex format stores signed 12.4 fixed point
	 * (-2048,2047) offsets from the viewport center, so we should
	 * never have a render target larger than 4096.  The texture
	 * unit can only sample from 2048x2048, so it's even more
	 * restricted.  This lets us avoid worrying about overflow in
	 * our math.
	 */
	if (width > 4096 || height > 4096) {
		DRM_ERROR("Surface dimensions (%d,%d) too large\n",
			  width, height);
		return false;
	}

	switch (tiling_format) {
	case VC4_TILING_FORMAT_LINEAR:
		aligned_width = round_up(width, utile_w);
		aligned_height = height;
		break;
	case VC4_TILING_FORMAT_T:
		aligned_width = round_up(width, utile_w * 8);
		aligned_height = round_up(height, utile_h * 8);
		break;
	case VC4_TILING_FORMAT_LT:
		aligned_width = round_up(width, utile_w);
		aligned_height = round_up(height, utile_h);
		break;
	default:
		DRM_ERROR("buffer tiling %d unsupported\n", tiling_format);
		return false;
	}

	stride = aligned_width * cpp;
	size = stride * aligned_height;

	if (size + offset < size ||
	    size + offset > fbo->base.size) {
		DRM_ERROR("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %zd)\n",
			  width, height,
			  aligned_width, aligned_height,
			  size, offset, fbo->base.size);
		return false;
	}

	return true;
}

static int
validate_flush(VALIDATE_ARGS)
{
	if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 1)) {
		DRM_ERROR("Bin CL must end with VC4_PACKET_FLUSH\n");
		return -EINVAL;
	}
	exec->found_flush = true;

	return 0;
}

static int
validate_start_tile_binning(VALIDATE_ARGS)
{
	if (exec->found_start_tile_binning_packet) {
		DRM_ERROR("Duplicate VC4_PACKET_START_TILE_BINNING\n");
		return -EINVAL;
	}
	exec->found_start_tile_binning_packet = true;

	if (!exec->found_tile_binning_mode_config_packet) {
		DRM_ERROR("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
		return -EINVAL;
	}

	return 0;
}

static int
validate_increment_semaphore(VALIDATE_ARGS)
{
	if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 2)) {
		DRM_ERROR("Bin CL must end with "
			  "VC4_PACKET_INCREMENT_SEMAPHORE\n");
		return -EINVAL;
	}
	exec->found_increment_semaphore_packet = true;

	return 0;
}

static int
validate_indexed_prim_list(VALIDATE_ARGS)
{
	struct drm_gem_cma_object *ib;
	uint32_t length = *(uint32_t *)(untrusted + 1);
	uint32_t offset = *(uint32_t *)(untrusted + 5);
	uint32_t max_index = *(uint32_t *)(untrusted + 9);
	uint32_t index_size = (*(uint8_t *)(untrusted + 0) >> 4) ? 2 : 1;
	struct vc4_shader_state *shader_state;

	/* Check overflow condition */
	if (exec->shader_state_count == 0) {
		DRM_ERROR("shader state must precede primitives\n");
		return -EINVAL;
	}
	shader_state = &exec->shader_state[exec->shader_state_count - 1];

	if (max_index > shader_state->max_index)
		shader_state->max_index = max_index;

	ib = vc4_use_handle(exec, 0);
	if (!ib)
		return -EINVAL;

	if (offset > ib->base.size ||
	    (ib->base.size - offset) / index_size < length) {
		DRM_ERROR("IB access overflow (%d + %d*%d > %zd)\n",
			  offset, length, index_size, ib->base.size);
		return -EINVAL;
	}

	*(uint32_t *)(validated + 5) = ib->paddr + offset;

	return 0;
}

static int
validate_gl_array_primitive(VALIDATE_ARGS)
{
	uint32_t length = *(uint32_t *)(untrusted + 1);
	uint32_t base_index = *(uint32_t *)(untrusted + 5);
	uint32_t max_index;
	struct vc4_shader_state *shader_state;

	/* Check overflow condition */
	if (exec->shader_state_count == 0) {
		DRM_ERROR("shader state must precede primitives\n");
		return -EINVAL;
	}
	shader_state = &exec->shader_state[exec->shader_state_count - 1];

	if (length + base_index < length) {
		DRM_ERROR("primitive vertex count overflow\n");
		return -EINVAL;
	}
	max_index = length + base_index - 1;

	if (max_index > shader_state->max_index)
		shader_state->max_index = max_index;

	return 0;
}

static int
validate_gl_shader_state(VALIDATE_ARGS)
{
	uint32_t i = exec->shader_state_count++;

	if (i >= exec->shader_state_size) {
		DRM_ERROR("More requests for shader states than declared\n");
		return -EINVAL;
	}

	exec->shader_state[i].addr = *(uint32_t *)untrusted;
	exec->shader_state[i].max_index = 0;

	if (exec->shader_state[i].addr & ~0xf) {
		DRM_ERROR("high bits set in GL shader rec reference\n");
		return -EINVAL;
	}

	*(uint32_t *)validated = (exec->shader_rec_p +
				  exec->shader_state[i].addr);

	exec->shader_rec_p +=
		roundup(gl_shader_rec_size(exec->shader_state[i].addr), 16);

	return 0;
}

static int
validate_tile_binning_config(VALIDATE_ARGS)
{
	struct drm_device *dev = exec->exec_bo->base.dev;
	struct vc4_bo *tile_bo;
	uint8_t flags;
	uint32_t tile_state_size, tile_alloc_size;
	uint32_t tile_count;

	if (exec->found_tile_binning_mode_config_packet) {
		DRM_ERROR("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
		return -EINVAL;
	}
	exec->found_tile_binning_mode_config_packet = true;

	exec->bin_tiles_x = *(uint8_t *)(untrusted + 12);
	exec->bin_tiles_y = *(uint8_t *)(untrusted + 13);
	tile_count = exec->bin_tiles_x * exec->bin_tiles_y;
	flags = *(uint8_t *)(untrusted + 14);

	if (exec->bin_tiles_x == 0 ||
	    exec->bin_tiles_y == 0) {
		DRM_ERROR("Tile binning config of %dx%d too small\n",
			  exec->bin_tiles_x, exec->bin_tiles_y);
		return -EINVAL;
	}

	if (flags & (VC4_BIN_CONFIG_DB_NON_MS |
		     VC4_BIN_CONFIG_TILE_BUFFER_64BIT)) {
		DRM_ERROR("unsupported binning config flags 0x%02x\n", flags);
		return -EINVAL;
	}

	/* The tile state data array is 48 bytes per tile, and we put it at
	 * the start of a BO containing both it and the tile alloc.
	 */
	tile_state_size = 48 * tile_count;

	/* Since the tile alloc array will follow us, align. */
	exec->tile_alloc_offset = roundup(tile_state_size, 4096);

	*(uint8_t *)(validated + 14) =
		((flags & ~(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK |
			    VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK)) |
		 VC4_BIN_CONFIG_AUTO_INIT_TSDA |
		 VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32,
			       VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE) |
		 VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128,
			       VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE));

	/* Initial block size. */
	tile_alloc_size = 32 * tile_count;

	/*
	 * The initial allocation gets rounded to the next 256 bytes before
	 * the hardware starts fulfilling further allocations.
	 */
	tile_alloc_size = roundup(tile_alloc_size, 256);

	/* Add space for the extra allocations.  This is what gets used first,
	 * before overflow memory.  It must have at least 4096 bytes, but we
	 * want to avoid overflow memory usage if possible.
	 */
	tile_alloc_size += 1024 * 1024;

	tile_bo = vc4_bo_create(dev, exec->tile_alloc_offset + tile_alloc_size,
				true);
	if (!tile_bo)
		return -ENOMEM;
	exec->tile_bo = &tile_bo->base;
	list_add_tail(&tile_bo->unref_head, &exec->unref_list);
407 | ||
408 | /* tile alloc address. */ | |
409 | *(uint32_t *)(validated + 0) = (exec->tile_bo->paddr + | |
410 | exec->tile_alloc_offset); | |
411 | /* tile alloc size. */ | |
412 | *(uint32_t *)(validated + 4) = tile_alloc_size; | |
413 | /* tile state address. */ | |
414 | *(uint32_t *)(validated + 8) = exec->tile_bo->paddr; | |
415 | ||
416 | return 0; | |
417 | } | |
418 | ||
419 | static int | |
420 | validate_gem_handles(VALIDATE_ARGS) | |
421 | { | |
422 | memcpy(exec->bo_index, untrusted, sizeof(exec->bo_index)); | |
423 | return 0; | |
424 | } | |
425 | ||
426 | #define VC4_DEFINE_PACKET(packet, func) \ | |
427 | [packet] = { packet ## _SIZE, #packet, func } | |
428 | ||
429 | static const struct cmd_info { | |
430 | uint16_t len; | |
431 | const char *name; | |
432 | int (*func)(struct vc4_exec_info *exec, void *validated, | |
433 | void *untrusted); | |
434 | } cmd_info[] = { | |
435 | VC4_DEFINE_PACKET(VC4_PACKET_HALT, NULL), | |
436 | VC4_DEFINE_PACKET(VC4_PACKET_NOP, NULL), | |
437 | VC4_DEFINE_PACKET(VC4_PACKET_FLUSH, validate_flush), | |
438 | VC4_DEFINE_PACKET(VC4_PACKET_FLUSH_ALL, NULL), | |
439 | VC4_DEFINE_PACKET(VC4_PACKET_START_TILE_BINNING, | |
440 | validate_start_tile_binning), | |
441 | VC4_DEFINE_PACKET(VC4_PACKET_INCREMENT_SEMAPHORE, | |
442 | validate_increment_semaphore), | |
443 | ||
444 | VC4_DEFINE_PACKET(VC4_PACKET_GL_INDEXED_PRIMITIVE, | |
445 | validate_indexed_prim_list), | |
446 | VC4_DEFINE_PACKET(VC4_PACKET_GL_ARRAY_PRIMITIVE, | |
447 | validate_gl_array_primitive), | |
448 | ||
449 | VC4_DEFINE_PACKET(VC4_PACKET_PRIMITIVE_LIST_FORMAT, NULL), | |
450 | ||
451 | VC4_DEFINE_PACKET(VC4_PACKET_GL_SHADER_STATE, validate_gl_shader_state), | |
452 | ||
453 | VC4_DEFINE_PACKET(VC4_PACKET_CONFIGURATION_BITS, NULL), | |
454 | VC4_DEFINE_PACKET(VC4_PACKET_FLAT_SHADE_FLAGS, NULL), | |
455 | VC4_DEFINE_PACKET(VC4_PACKET_POINT_SIZE, NULL), | |
456 | VC4_DEFINE_PACKET(VC4_PACKET_LINE_WIDTH, NULL), | |
457 | VC4_DEFINE_PACKET(VC4_PACKET_RHT_X_BOUNDARY, NULL), | |
458 | VC4_DEFINE_PACKET(VC4_PACKET_DEPTH_OFFSET, NULL), | |
459 | VC4_DEFINE_PACKET(VC4_PACKET_CLIP_WINDOW, NULL), | |
460 | VC4_DEFINE_PACKET(VC4_PACKET_VIEWPORT_OFFSET, NULL), | |
461 | VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_XY_SCALING, NULL), | |
462 | /* Note: The docs say this was also 105, but it was 106 in the | |
463 | * initial userland code drop. | |
464 | */ | |
465 | VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_Z_SCALING, NULL), | |
466 | ||
467 | VC4_DEFINE_PACKET(VC4_PACKET_TILE_BINNING_MODE_CONFIG, | |
468 | validate_tile_binning_config), | |
469 | ||
470 | VC4_DEFINE_PACKET(VC4_PACKET_GEM_HANDLES, validate_gem_handles), | |
471 | }; | |
472 | ||
473 | int | |
474 | vc4_validate_bin_cl(struct drm_device *dev, | |
475 | void *validated, | |
476 | void *unvalidated, | |
477 | struct vc4_exec_info *exec) | |
478 | { | |
479 | uint32_t len = exec->args->bin_cl_size; | |
480 | uint32_t dst_offset = 0; | |
481 | uint32_t src_offset = 0; | |
482 | ||
483 | while (src_offset < len) { | |
484 | void *dst_pkt = validated + dst_offset; | |
485 | void *src_pkt = unvalidated + src_offset; | |
486 | u8 cmd = *(uint8_t *)src_pkt; | |
487 | const struct cmd_info *info; | |
488 | ||
489 | if (cmd >= ARRAY_SIZE(cmd_info)) { | |
490 | DRM_ERROR("0x%08x: packet %d out of bounds\n", | |
491 | src_offset, cmd); | |
492 | return -EINVAL; | |
493 | } | |
494 | ||
495 | info = &cmd_info[cmd]; | |
496 | if (!info->name) { | |
497 | DRM_ERROR("0x%08x: packet %d invalid\n", | |
498 | src_offset, cmd); | |
499 | return -EINVAL; | |
500 | } | |
501 | ||
502 | if (src_offset + info->len > len) { | |
503 | DRM_ERROR("0x%08x: packet %d (%s) length 0x%08x " | |
504 | "exceeds bounds (0x%08x)\n", | |
505 | src_offset, cmd, info->name, info->len, | |
506 | src_offset + len); | |
507 | return -EINVAL; | |
508 | } | |
509 | ||
510 | if (cmd != VC4_PACKET_GEM_HANDLES) | |
511 | memcpy(dst_pkt, src_pkt, info->len); | |
512 | ||
513 | if (info->func && info->func(exec, | |
514 | dst_pkt + 1, | |
515 | src_pkt + 1)) { | |
516 | DRM_ERROR("0x%08x: packet %d (%s) failed to validate\n", | |
517 | src_offset, cmd, info->name); | |
518 | return -EINVAL; | |
519 | } | |
520 | ||
521 | src_offset += info->len; | |
522 | /* GEM handle loading doesn't produce HW packets. */ | |
523 | if (cmd != VC4_PACKET_GEM_HANDLES) | |
524 | dst_offset += info->len; | |
525 | ||
526 | /* When the CL hits halt, it'll stop reading anything else. */ | |
527 | if (cmd == VC4_PACKET_HALT) | |
528 | break; | |
529 | } | |
530 | ||
	exec->ct0ea = exec->ct0ca + dst_offset;

	if (!exec->found_start_tile_binning_packet) {
		DRM_ERROR("Bin CL missing VC4_PACKET_START_TILE_BINNING\n");
		return -EINVAL;
	}

	/* The bin CL must be ended with INCREMENT_SEMAPHORE and FLUSH.  The
	 * semaphore is used to trigger the render CL to start up, and the
	 * FLUSH is what caps the bin lists with
	 * VC4_PACKET_RETURN_FROM_SUB_LIST (so they jump back to the main
	 * render CL when they get called to) and actually triggers the queued
	 * semaphore increment.
	 */
	if (!exec->found_increment_semaphore_packet || !exec->found_flush) {
		DRM_ERROR("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE + "
			  "VC4_PACKET_FLUSH\n");
		return -EINVAL;
	}

	return 0;
}

static bool
reloc_tex(struct vc4_exec_info *exec,
	  void *uniform_data_u,
	  struct vc4_texture_sample_info *sample,
	  uint32_t texture_handle_index)
{
	struct drm_gem_cma_object *tex;
	uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]);
	uint32_t p1 = *(uint32_t *)(uniform_data_u + sample->p_offset[1]);
	uint32_t p2 = (sample->p_offset[2] != ~0 ?
		       *(uint32_t *)(uniform_data_u + sample->p_offset[2]) : 0);
	uint32_t p3 = (sample->p_offset[3] != ~0 ?
		       *(uint32_t *)(uniform_data_u + sample->p_offset[3]) : 0);
	uint32_t *validated_p0 = exec->uniforms_v + sample->p_offset[0];
	uint32_t offset = p0 & VC4_TEX_P0_OFFSET_MASK;
	uint32_t miplevels = VC4_GET_FIELD(p0, VC4_TEX_P0_MIPLVLS);
	uint32_t width = VC4_GET_FIELD(p1, VC4_TEX_P1_WIDTH);
	uint32_t height = VC4_GET_FIELD(p1, VC4_TEX_P1_HEIGHT);
	uint32_t cpp, tiling_format, utile_w, utile_h;
	uint32_t i;
	uint32_t cube_map_stride = 0;
	enum vc4_texture_data_type type;

	tex = vc4_use_bo(exec, texture_handle_index);
	if (!tex)
		return false;

	if (sample->is_direct) {
		uint32_t remaining_size = tex->base.size - p0;

		if (p0 > tex->base.size - 4) {
			DRM_ERROR("UBO offset greater than UBO size\n");
			goto fail;
		}
		if (p1 > remaining_size - 4) {
			DRM_ERROR("UBO clamp would allow reads "
				  "outside of UBO\n");
			goto fail;
		}
		*validated_p0 = tex->paddr + p0;
		return true;
	}

	if (width == 0)
		width = 2048;
	if (height == 0)
		height = 2048;

	if (p0 & VC4_TEX_P0_CMMODE_MASK) {
		if (VC4_GET_FIELD(p2, VC4_TEX_P2_PTYPE) ==
		    VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE)
			cube_map_stride = p2 & VC4_TEX_P2_CMST_MASK;
		if (VC4_GET_FIELD(p3, VC4_TEX_P2_PTYPE) ==
		    VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE) {
			if (cube_map_stride) {
				DRM_ERROR("Cube map stride set twice\n");
				goto fail;
			}

			cube_map_stride = p3 & VC4_TEX_P2_CMST_MASK;
		}
		if (!cube_map_stride) {
			DRM_ERROR("Cube map stride not set\n");
			goto fail;
		}
	}

	type = (VC4_GET_FIELD(p0, VC4_TEX_P0_TYPE) |
		(VC4_GET_FIELD(p1, VC4_TEX_P1_TYPE4) << 4));

	switch (type) {
	case VC4_TEXTURE_TYPE_RGBA8888:
	case VC4_TEXTURE_TYPE_RGBX8888:
	case VC4_TEXTURE_TYPE_RGBA32R:
		cpp = 4;
		break;
	case VC4_TEXTURE_TYPE_RGBA4444:
	case VC4_TEXTURE_TYPE_RGBA5551:
	case VC4_TEXTURE_TYPE_RGB565:
	case VC4_TEXTURE_TYPE_LUMALPHA:
	case VC4_TEXTURE_TYPE_S16F:
	case VC4_TEXTURE_TYPE_S16:
		cpp = 2;
		break;
	case VC4_TEXTURE_TYPE_LUMINANCE:
	case VC4_TEXTURE_TYPE_ALPHA:
	case VC4_TEXTURE_TYPE_S8:
		cpp = 1;
		break;
	case VC4_TEXTURE_TYPE_ETC1:
	case VC4_TEXTURE_TYPE_BW1:
	case VC4_TEXTURE_TYPE_A4:
	case VC4_TEXTURE_TYPE_A1:
	case VC4_TEXTURE_TYPE_RGBA64:
	case VC4_TEXTURE_TYPE_YUV422R:
	default:
		DRM_ERROR("Texture format %d unsupported\n", type);
		goto fail;
	}
	utile_w = utile_width(cpp);
	utile_h = utile_height(cpp);

	if (type == VC4_TEXTURE_TYPE_RGBA32R) {
		tiling_format = VC4_TILING_FORMAT_LINEAR;
	} else {
		if (size_is_lt(width, height, cpp))
			tiling_format = VC4_TILING_FORMAT_LT;
		else
			tiling_format = VC4_TILING_FORMAT_T;
	}

	if (!vc4_check_tex_size(exec, tex, offset + cube_map_stride * 5,
				tiling_format, width, height, cpp)) {
		goto fail;
	}

	/* The mipmap levels are stored before the base of the texture.  Make
	 * sure there is actually space in the BO.
	 */
	for (i = 1; i <= miplevels; i++) {
		uint32_t level_width = max(width >> i, 1u);
		uint32_t level_height = max(height >> i, 1u);
		uint32_t aligned_width, aligned_height;
		uint32_t level_size;

		/* Once the levels get small enough, they drop from T to LT. */
		if (tiling_format == VC4_TILING_FORMAT_T &&
		    size_is_lt(level_width, level_height, cpp)) {
			tiling_format = VC4_TILING_FORMAT_LT;
		}

		switch (tiling_format) {
		case VC4_TILING_FORMAT_T:
			aligned_width = round_up(level_width, utile_w * 8);
			aligned_height = round_up(level_height, utile_h * 8);
			break;
		case VC4_TILING_FORMAT_LT:
			aligned_width = round_up(level_width, utile_w);
			aligned_height = round_up(level_height, utile_h);
			break;
		default:
			aligned_width = round_up(level_width, utile_w);
			aligned_height = level_height;
			break;
		}

		level_size = aligned_width * cpp * aligned_height;

		if (offset < level_size) {
			DRM_ERROR("Level %d (%dx%d -> %dx%d) size %db "
				  "overflowed buffer bounds (offset %d)\n",
				  i, level_width, level_height,
				  aligned_width, aligned_height,
				  level_size, offset);
			goto fail;
		}

		offset -= level_size;
	}

	*validated_p0 = tex->paddr + p0;

	return true;
fail:
	DRM_INFO("Texture p0 at %d: 0x%08x\n", sample->p_offset[0], p0);
	DRM_INFO("Texture p1 at %d: 0x%08x\n", sample->p_offset[1], p1);
	DRM_INFO("Texture p2 at %d: 0x%08x\n", sample->p_offset[2], p2);
	DRM_INFO("Texture p3 at %d: 0x%08x\n", sample->p_offset[3], p3);
	return false;
}

static int
validate_gl_shader_rec(struct drm_device *dev,
		       struct vc4_exec_info *exec,
		       struct vc4_shader_state *state)
{
	uint32_t *src_handles;
	void *pkt_u, *pkt_v;
	static const uint32_t shader_reloc_offsets[] = {
		4, /* fs */
		16, /* vs */
		28, /* cs */
	};
	uint32_t shader_reloc_count = ARRAY_SIZE(shader_reloc_offsets);
	struct drm_gem_cma_object *bo[shader_reloc_count + 8];
	uint32_t nr_attributes, nr_relocs, packet_size;
	int i;

	nr_attributes = state->addr & 0x7;
	if (nr_attributes == 0)
		nr_attributes = 8;
	packet_size = gl_shader_rec_size(state->addr);

	nr_relocs = ARRAY_SIZE(shader_reloc_offsets) + nr_attributes;
	if (nr_relocs * 4 > exec->shader_rec_size) {
		DRM_ERROR("overflowed shader recs reading %d handles "
			  "from %d bytes left\n",
			  nr_relocs, exec->shader_rec_size);
		return -EINVAL;
	}
	src_handles = exec->shader_rec_u;
	exec->shader_rec_u += nr_relocs * 4;
	exec->shader_rec_size -= nr_relocs * 4;

	if (packet_size > exec->shader_rec_size) {
		DRM_ERROR("overflowed shader recs copying %db packet "
			  "from %d bytes left\n",
			  packet_size, exec->shader_rec_size);
		return -EINVAL;
	}
	pkt_u = exec->shader_rec_u;
	pkt_v = exec->shader_rec_v;
	memcpy(pkt_v, pkt_u, packet_size);
	exec->shader_rec_u += packet_size;
	/* Shader recs have to be aligned to 16 bytes (due to the attribute
	 * flags being in the low bytes), so round the next validated shader
	 * rec address up.  This should be safe, since we've got so many
	 * relocations in a shader rec packet.
	 */
	BUG_ON(roundup(packet_size, 16) - packet_size > nr_relocs * 4);
	exec->shader_rec_v += roundup(packet_size, 16);
	exec->shader_rec_size -= packet_size;

	if (!(*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD)) {
		DRM_ERROR("Multi-threaded fragment shaders not supported.\n");
		return -EINVAL;
	}

	for (i = 0; i < shader_reloc_count; i++) {
		if (src_handles[i] >= exec->bo_count) {
			DRM_ERROR("Shader handle %d too big\n", src_handles[i]);
			return -EINVAL;
		}

		bo[i] = exec->bo[src_handles[i]];
		if (!bo[i])
			return -EINVAL;
	}
	for (i = shader_reloc_count; i < nr_relocs; i++) {
		bo[i] = vc4_use_bo(exec, src_handles[i]);
		if (!bo[i])
			return -EINVAL;
	}

	for (i = 0; i < shader_reloc_count; i++) {
		struct vc4_validated_shader_info *validated_shader;
		uint32_t o = shader_reloc_offsets[i];
		uint32_t src_offset = *(uint32_t *)(pkt_u + o);
		uint32_t *texture_handles_u;
		void *uniform_data_u;
		uint32_t tex;

		*(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;

		if (src_offset != 0) {
			DRM_ERROR("Shaders must be at offset 0 of "
				  "the BO.\n");
			return -EINVAL;
		}

		validated_shader = to_vc4_bo(&bo[i]->base)->validated_shader;
		if (!validated_shader)
			return -EINVAL;

		if (validated_shader->uniforms_src_size >
		    exec->uniforms_size) {
			DRM_ERROR("Uniforms src buffer overflow\n");
			return -EINVAL;
		}

		texture_handles_u = exec->uniforms_u;
		uniform_data_u = (texture_handles_u +
				  validated_shader->num_texture_samples);

		memcpy(exec->uniforms_v, uniform_data_u,
		       validated_shader->uniforms_size);

		for (tex = 0;
		     tex < validated_shader->num_texture_samples;
		     tex++) {
			if (!reloc_tex(exec,
				       uniform_data_u,
				       &validated_shader->texture_samples[tex],
				       texture_handles_u[tex])) {
				return -EINVAL;
			}
		}

		*(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p;

		exec->uniforms_u += validated_shader->uniforms_src_size;
		exec->uniforms_v += validated_shader->uniforms_size;
		exec->uniforms_p += validated_shader->uniforms_size;
	}

	for (i = 0; i < nr_attributes; i++) {
		struct drm_gem_cma_object *vbo =
			bo[ARRAY_SIZE(shader_reloc_offsets) + i];
		uint32_t o = 36 + i * 8;
		uint32_t offset = *(uint32_t *)(pkt_u + o + 0);
		uint32_t attr_size = *(uint8_t *)(pkt_u + o + 4) + 1;
		uint32_t stride = *(uint8_t *)(pkt_u + o + 5);
		uint32_t max_index;

		if (state->addr & 0x8)
			stride |= (*(uint32_t *)(pkt_u + 100 + i * 4)) & ~0xff;

		if (vbo->base.size < offset ||
		    vbo->base.size - offset < attr_size) {
			DRM_ERROR("BO offset overflow (%d + %d > %zu)\n",
				  offset, attr_size, vbo->base.size);
			return -EINVAL;
		}

		if (stride != 0) {
			max_index = ((vbo->base.size - offset - attr_size) /
				     stride);
			if (state->max_index > max_index) {
				DRM_ERROR("primitives use index %d out of "
					  "supplied %d\n",
					  state->max_index, max_index);
				return -EINVAL;
			}
		}

		*(uint32_t *)(pkt_v + o) = vbo->paddr + offset;
	}

	return 0;
}

int
vc4_validate_shader_recs(struct drm_device *dev,
			 struct vc4_exec_info *exec)
{
	uint32_t i;
	int ret = 0;

	for (i = 0; i < exec->shader_state_count; i++) {
		ret = validate_gl_shader_rec(dev, exec, &exec->shader_state[i]);
		if (ret)
			return ret;
	}

	return ret;
}