/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

#define AMDGPU_CS_MAX_PRIORITY	32u
#define AMDGPU_CS_NUM_BUCKETS	(AMDGPU_CS_MAX_PRIORITY + 1)

/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct amdgpu_cs_buckets {
	struct list_head bucket[AMDGPU_CS_NUM_BUCKETS];
};

static void amdgpu_cs_buckets_init(struct amdgpu_cs_buckets *b)
{
	unsigned i;

	for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&b->bucket[i]);
}

static void amdgpu_cs_buckets_add(struct amdgpu_cs_buckets *b,
				  struct list_head *item, unsigned priority)
{
	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	list_add_tail(item, &b->bucket[min(priority, AMDGPU_CS_MAX_PRIORITY)]);
}

static void amdgpu_cs_buckets_get_list(struct amdgpu_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}

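/**
 * amdgpu_cs_get_ring - map a userspace IP/instance/ring triple to a HW ring
 * @adev: amdgpu device
 * @ip_type: IP block type (GFX, COMPUTE, DMA, UVD or VCE)
 * @ip_instance: IP instance, currently only instance 0 exists
 * @ring: ring index inside the IP block
 * @out_ring: resulting hardware ring
 *
 * Translates the addressing used in the CS ioctl into a struct amdgpu_ring
 * pointer. Returns 0 on success, -EINVAL if the instance or ring index is
 * out of range for the given IP block.
 */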
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
		       u32 ip_instance, u32 ring,
		       struct amdgpu_ring **out_ring)
{
	/* Right now all IPs have only one instance - multiple rings. */
	if (ip_instance != 0) {
		DRM_ERROR("invalid ip instance: %d\n", ip_instance);
		return -EINVAL;
	}

	switch (ip_type) {
	default:
		DRM_ERROR("unknown ip type: %d\n", ip_type);
		return -EINVAL;
	case AMDGPU_HW_IP_GFX:
		if (ring < adev->gfx.num_gfx_rings) {
			*out_ring = &adev->gfx.gfx_ring[ring];
		} else {
			DRM_ERROR("only %d gfx rings are supported now\n",
				  adev->gfx.num_gfx_rings);
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_COMPUTE:
		if (ring < adev->gfx.num_compute_rings) {
			*out_ring = &adev->gfx.compute_ring[ring];
		} else {
			DRM_ERROR("only %d compute rings are supported now\n",
				  adev->gfx.num_compute_rings);
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_DMA:
		if (ring < adev->sdma.num_instances) {
			*out_ring = &adev->sdma.instance[ring].ring;
		} else {
			DRM_ERROR("only %d SDMA rings are supported\n",
				  adev->sdma.num_instances);
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_UVD:
		*out_ring = &adev->uvd.ring;
		break;
	case AMDGPU_HW_IP_VCE:
		if (ring < 2) {
			*out_ring = &adev->vce.ring[ring];
		} else {
			DRM_ERROR("only two VCE rings are supported\n");
			return -EINVAL;
		}
		break;
	}
	return 0;
}

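/**
 * amdgpu_cs_parser_create - allocate and initialize a command parser
 * @adev: amdgpu device
 * @filp: file private of the submitting process
 * @ctx: context the submission belongs to, may be NULL
 * @ibs: preallocated IB array, may be NULL
 * @num_ibs: number of entries in @ibs
 *
 * Allocates the parser with kzalloc and points every IB at @ctx.
 * Returns NULL if the allocation fails.
 */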
struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
						 struct drm_file *filp,
						 struct amdgpu_ctx *ctx,
						 struct amdgpu_ib *ibs,
						 uint32_t num_ibs)
{
	struct amdgpu_cs_parser *parser;
	int i;

	parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
	if (!parser)
		return NULL;

	parser->adev = adev;
	parser->filp = filp;
	parser->ctx = ctx;
	parser->ibs = ibs;
	parser->num_ibs = num_ibs;
	for (i = 0; i < num_ibs; i++)
		ibs[i].ctx = ctx;

	return parser;
}

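/**
 * amdgpu_cs_parser_init - copy the CS chunks in from userspace
 * @p: parser created by amdgpu_cs_parser_create()
 * @data: the union drm_amdgpu_cs passed to the ioctl
 *
 * Looks up the submission context and BO list, then copies the chunk array
 * and every chunk body into kernel memory. IB chunks are only counted here;
 * the fence chunk is resolved to a buffer object right away. On failure all
 * partially copied chunks are freed again.
 */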
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
	union drm_amdgpu_cs *cs = data;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned size;
	int i;	/* must be signed: the unwind loop below counts down past zero */
	int ret;

	if (cs->in.num_chunks == 0)
		return 0;

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);

	/* get chunks */
	INIT_LIST_HEAD(&p->validated);
	chunk_array_user = (uint64_t __user *)(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto put_bo_list;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				  GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto put_bo_list;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user *)chunk_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = (void __user *)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			p->num_ibs++;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) {
				uint32_t handle;
				struct drm_gem_object *gobj;
				struct drm_amdgpu_cs_chunk_fence *fence_data;

				fence_data = (void *)p->chunks[i].kdata;
				handle = fence_data->handle;
				gobj = drm_gem_object_lookup(p->adev->ddev,
							     p->filp, handle);
				if (gobj == NULL) {
					ret = -EINVAL;
					goto free_partial_kdata;
				}

				p->uf.bo = gem_to_amdgpu_bo(gobj);
				p->uf.offset = fence_data->offset;
			} else {
				ret = -EINVAL;
				goto free_partial_kdata;
			}
			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!p->ibs) {
		ret = -ENOMEM;
		goto free_all_kdata;
	}

	kfree(chunk_array);
	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		drm_free_large(p->chunks[i].kdata);
	kfree(p->chunks);
put_bo_list:
	if (p->bo_list)
		amdgpu_bo_list_put(p->bo_list);
	amdgpu_ctx_put(p->ctx);
free_chunk:
	kfree(chunk_array);

	return ret;
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
{
	u64 real_vram_size = adev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&adev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *         __________________
	 * 1/4 of -|\               |
	 * VRAM    | \              |
	 *         |  \             |
	 *         |   \            |
	 *         |    \           |
	 *         |     \          |
	 *         |      \         |
	 *         |       \________|1 MB
	 *         |----------------|
	 *    VRAM 0 %             100 %
	 *         used            used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}
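
/* Worked example (hypothetical numbers, for illustration only): with 8 GB of
 * VRAM and 2 GB currently in use, half_vram = 4 GB, half_free_vram = 2 GB and
 * the returned threshold is 1 GB. Once usage reaches or exceeds 4 GB, the
 * threshold bottoms out at the 1 MB floor enforced by max().
 */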

int amdgpu_cs_list_validate(struct amdgpu_device *adev,
			    struct amdgpu_vm *vm,
			    struct list_head *validated)
{
	struct amdgpu_bo_list_entry *lobj;
	struct amdgpu_bo *bo;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev);
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->prefered_domains;
			u32 current_domain =
				amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((lobj->allowed_domains & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			amdgpu_ttm_placement_from_domain(bo, domain);
			initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
			bytes_moved += atomic64_read(&adev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS && domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				return r;
			}
		}
		lobj->bo_va = amdgpu_vm_bo_find(vm, bo);
	}
	return 0;
}

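/**
 * amdgpu_cs_parser_relocs - build and validate the list of used BOs
 * @p: parser with the copied-in chunks
 *
 * Sorts the BO list by priority into p->validated using the buckets above,
 * reserves all buffers (taking mmap_sem when userptr BOs are involved) and
 * validates their placement, duplicates included. On error the reservation
 * is backed off again.
 */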
static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_cs_buckets buckets;
	struct list_head duplicates;
	bool need_mmap_lock = false;
	int i, r;

	if (p->bo_list) {
		need_mmap_lock = p->bo_list->has_userptr;
		amdgpu_cs_buckets_init(&buckets);
		for (i = 0; i < p->bo_list->num_entries; i++)
			amdgpu_cs_buckets_add(&buckets, &p->bo_list->array[i].tv.head,
					      p->bo_list->array[i].priority);

		amdgpu_cs_buckets_get_list(&buckets, &p->validated);
	}

	p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm,
				      &p->validated);

	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);

	INIT_LIST_HEAD(&duplicates);
	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
	if (unlikely(r != 0))
		goto error_reserve;

	r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
	if (r)
		goto error_validate;

	r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates);

error_validate:
	if (r)
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

error_reserve:
	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);

	return r;
}

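/**
 * amdgpu_cs_sync_rings - wait for the fences of all validated buffers
 * @p: parser holding the validated BO list
 *
 * Adds the fences stored in each buffer's reservation object to the sync
 * object of the first IB, so the submission waits for prior users of the
 * buffers to finish.
 */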
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct reservation_object *resv = e->robj->tbo.resv;
		r = amdgpu_sync_resv(p->adev, &p->ibs[0].sync, resv, p->filp);

		if (r)
			return r;
	}
	return 0;
}

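/* list_sort() comparator: orders BOs by ascending size in pages. See the
 * comment in amdgpu_cs_parser_fini_early() for why this helps the LRU list.
 */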
static int cmp_size_smaller_first(void *priv, struct list_head *a,
				  struct list_head *b)
{
	struct amdgpu_bo_list_entry *la = list_entry(a, struct amdgpu_bo_list_entry, tv.head);
	struct amdgpu_bo_list_entry *lb = list_entry(b, struct amdgpu_bo_list_entry, tv.head);

	/* Sort A before B if A is smaller. */
	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}

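/**
 * amdgpu_cs_parser_fini_early - fence or unreserve the validated buffers
 * @parser: parser structure holding parsing context
 * @error: error number, zero on successful submission
 * @backoff: back off the reservation if an error occurred
 */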
static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
{
	if (!error) {
		/* Sort the buffer list from the smallest to largest buffer,
		 * which affects the order of buffers in the LRU list.
		 * This assures that the smallest buffers are added first
		 * to the LRU list, so they are likely to be later evicted
		 * first, instead of large buffers whose eviction is more
		 * expensive.
		 *
		 * This slightly lowers the number of bytes moved by TTM
		 * per frame under memory pressure.
		 */
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    &parser->ibs[parser->num_ibs-1].fence->base);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}
}

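/**
 * amdgpu_cs_parser_fini_late - drop the parser's references and free it
 * @parser: parser structure holding parsing context
 *
 * Releases the context and BO list, frees the chunk copies and, when the
 * scheduler is disabled, the IBs and the user fence BO as well.
 */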
static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
{
	unsigned i;
	if (parser->ctx)
		amdgpu_ctx_put(parser->ctx);
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	drm_free_large(parser->vm_bos);
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (!amdgpu_enable_scheduler) {
		if (parser->ibs)
			for (i = 0; i < parser->num_ibs; i++)
				amdgpu_ib_free(parser->adev, &parser->ibs[i]);
		kfree(parser->ibs);
		if (parser->uf.bo)
			drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
	}

	kfree(parser);
}

/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 * @backoff: indicator to back off the reservation
 *
 * If error is set, unvalidate the buffers; otherwise just free the memory
 * used by the parsing context.
 */
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
{
	amdgpu_cs_parser_fini_early(parser, error, backoff);
	amdgpu_cs_parser_fini_late(parser);
}

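/**
 * amdgpu_bo_vm_update_pte - update the page tables for all used BOs
 * @p: parser with the validated BO list
 * @vm: VM of the submitting process
 *
 * Updates the page directory and the mapping of every BO in the list, adding
 * the resulting page table fences to the first IB's sync object. With
 * amdgpu_vm_debug set, all BOs are invalidated to flush out userspace bugs.
 */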
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
				   struct amdgpu_vm *vm)
{
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int i, r;

	r = amdgpu_vm_update_page_directory(adev, vm);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->ibs[0].sync, vm->page_directory_fence);
	if (r)
		return r;

	r = amdgpu_vm_clear_freed(adev, vm);
	if (r)
		return r;

	if (p->bo_list) {
		for (i = 0; i < p->bo_list->num_entries; i++) {
			struct fence *f;

			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			bo_va = p->bo_list->array[i].bo_va;
			if (bo_va == NULL)
				continue;

			r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem);
			if (r)
				return r;

			f = bo_va->last_pt_update;
			r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
			if (r)
				return r;
		}
	}

	r = amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync);

	if (amdgpu_vm_debug && p->bo_list) {
		/* Invalidate all BOs to test for userspace bugs */
		for (i = 0; i < p->bo_list->num_entries; i++) {
			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo);
		}
	}

	return r;
}

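/**
 * amdgpu_cs_ib_vm_chunk - run the CS checker and update the VM
 * @adev: amdgpu device
 * @parser: parser with the filled IBs
 *
 * Runs the per-ring CS checker for rings that provide one (UVD/VCE VM
 * emulation), updates the page tables and, when the GPU scheduler is
 * disabled, submits the IBs directly.
 */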
static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
				 struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_ring *ring;
	int i, r;

	if (parser->num_ibs == 0)
		return 0;

	/* Only for UVD/VCE VM emulation */
	for (i = 0; i < parser->num_ibs; i++) {
		ring = parser->ibs[i].ring;
		if (ring->funcs->parse_cs) {
			r = amdgpu_ring_parse_cs(ring, parser, i);
			if (r)
				return r;
		}
	}

	mutex_lock(&vm->mutex);
	r = amdgpu_bo_vm_update_pte(parser, vm);
	if (r)
		goto out;

	amdgpu_cs_sync_rings(parser);
	if (!amdgpu_enable_scheduler)
		r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
				       parser->filp);

out:
	mutex_unlock(&vm->mutex);
	return r;
}

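/* On -EDEADLK the GPU is considered hung: try a reset and ask userspace to
 * resubmit with -EAGAIN if it succeeded.
 */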
static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r)
{
	if (r == -EDEADLK) {
		r = amdgpu_gpu_reset(adev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

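/**
 * amdgpu_cs_ib_fill - turn the IB chunks into struct amdgpu_ib
 * @adev: amdgpu device
 * @parser: parser with the copied-in chunks
 *
 * For rings with a CS checker the IB contents are copied into kernel memory,
 * otherwise the IB is executed from its VM address directly. Also wires up
 * the GDS/GWS/OA resources on the first IB and the user fence on the last
 * one; UVD and VCE firmware do not support user fences.
 */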
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int i, j;
	int r;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct amdgpu_ring *ring;

		chunk = &parser->chunks[i];
		ib = &parser->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
				       chunk_ib->ip_instance, chunk_ib->ring,
				       &ring);
		if (r)
			return r;

		if (ring->funcs->parse_cs) {
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			uint64_t offset;
			uint8_t *kptr;

			m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
						   &aobj);
			if (!aobj) {
				DRM_ERROR("IB va_start is invalid\n");
				return -EINVAL;
			}

			if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
			    (m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r)
				return r;

			offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
			kptr += chunk_ib->va_start - offset;

			r = amdgpu_ib_get(ring, NULL, chunk_ib->ib_bytes, ib);
			if (r) {
				DRM_ERROR("Failed to get ib !\n");
				return r;
			}

			memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
			amdgpu_bo_kunmap(aobj);
		} else {
			r = amdgpu_ib_get(ring, vm, 0, ib);
			if (r) {
				DRM_ERROR("Failed to get ib !\n");
				return r;
			}

			ib->gpu_addr = chunk_ib->va_start;
		}

		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;
		ib->ctx = parser->ctx;
		j++;
	}

	if (!parser->num_ibs)
		return 0;

	/* add GDS resources to first IB */
	if (parser->bo_list) {
		struct amdgpu_bo *gds = parser->bo_list->gds_obj;
		struct amdgpu_bo *gws = parser->bo_list->gws_obj;
		struct amdgpu_bo *oa = parser->bo_list->oa_obj;
		struct amdgpu_ib *ib = &parser->ibs[0];

		if (gds) {
			ib->gds_base = amdgpu_bo_gpu_offset(gds);
			ib->gds_size = amdgpu_bo_size(gds);
		}
		if (gws) {
			ib->gws_base = amdgpu_bo_gpu_offset(gws);
			ib->gws_size = amdgpu_bo_size(gws);
		}
		if (oa) {
			ib->oa_base = amdgpu_bo_gpu_offset(oa);
			ib->oa_size = amdgpu_bo_size(oa);
		}
	}
	/* wrap the last IB with user fence */
	if (parser->uf.bo) {
		struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1];

		/* UVD & VCE fw doesn't support user fences */
		if (ib->ring->type == AMDGPU_RING_TYPE_UVD ||
		    ib->ring->type == AMDGPU_RING_TYPE_VCE)
			return -EINVAL;

		ib->user = &parser->uf;
	}

	return 0;
}

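/**
 * amdgpu_cs_dependencies - add the fences of all dependency chunks
 * @adev: amdgpu device
 * @p: parser with the copied-in chunks
 *
 * Resolves every entry of the DEPENDENCIES chunks to a context fence and
 * adds it to the sync object of the first IB.
 */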
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_ib *ib;
	int i, j, r;

	if (!p->num_ibs)
		return 0;

	/* Add dependencies to first IB */
	ib = &p->ibs[0];
	for (i = 0; i < p->nchunks; ++i) {
		struct drm_amdgpu_cs_chunk_dep *deps;
		struct amdgpu_cs_chunk *chunk;
		unsigned num_deps;

		chunk = &p->chunks[i];

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES)
			continue;

		deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
		num_deps = chunk->length_dw * 4 /
			sizeof(struct drm_amdgpu_cs_chunk_dep);

		for (j = 0; j < num_deps; ++j) {
			struct amdgpu_ring *ring;
			struct amdgpu_ctx *ctx;
			struct fence *fence;

			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
					       deps[j].ip_instance,
					       deps[j].ring, &ring);
			if (r)
				return r;

			ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
			if (ctx == NULL)
				return -EINVAL;

			fence = amdgpu_ctx_get_fence(ctx, ring,
						     deps[j].handle);
			if (IS_ERR(fence)) {
				r = PTR_ERR(fence);
				amdgpu_ctx_put(ctx);
				return r;
			} else if (fence) {
				r = amdgpu_sync_fence(adev, &ib->sync, fence);
				fence_put(fence);
				amdgpu_ctx_put(ctx);
				if (r)
					return r;
			}
		}
	}

	return 0;
}

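/* Scheduler callback to free the job's IBs and user fence BO once the job
 * has run.
 */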
static int amdgpu_cs_free_job(struct amdgpu_job *job)
{
	int i;
	if (job->ibs)
		for (i = 0; i < job->num_ibs; i++)
			amdgpu_ib_free(job->adev, &job->ibs[i]);
	kfree(job->ibs);
	if (job->uf.bo)
		drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
	return 0;
}

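/**
 * amdgpu_cs_ioctl - parse and submit a command stream from userspace
 * @dev: drm device
 * @data: the union drm_amdgpu_cs from userspace
 * @filp: file private
 *
 * Ties the stages above together: parser init, buffer validation, IB fill,
 * dependency handling and finally submission, either directly or through
 * the GPU scheduler as an amdgpu_job.
 */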
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser *parser;
	bool reserved_buffers = false;
	int i, r;

	down_read(&adev->exclusive_lock);
	if (!adev->accel_working) {
		up_read(&adev->exclusive_lock);
		return -EBUSY;
	}

	parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
	if (!parser) {
		/* drop the lock taken above, it is not held on exit */
		up_read(&adev->exclusive_lock);
		return -ENOMEM;
	}
	r = amdgpu_cs_parser_init(parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		kfree(parser);
		up_read(&adev->exclusive_lock);
		r = amdgpu_cs_handle_lockup(adev, r);
		return r;
	}

	r = amdgpu_cs_parser_relocs(parser);
	if (r == -ENOMEM)
		DRM_ERROR("Not enough memory for command submission!\n");
	else if (r && r != -ERESTARTSYS)
		DRM_ERROR("Failed to process the buffer list %d!\n", r);
	else if (!r) {
		reserved_buffers = true;
		r = amdgpu_cs_ib_fill(adev, parser);
	}

	if (!r) {
		r = amdgpu_cs_dependencies(adev, parser);
		if (r)
			DRM_ERROR("Failed in the dependencies handling %d!\n", r);
	}

	if (r)
		goto out;

	for (i = 0; i < parser->num_ibs; i++)
		trace_amdgpu_cs(parser, i);

	r = amdgpu_cs_ib_vm_chunk(adev, parser);
	if (r)
		goto out;

	if (amdgpu_enable_scheduler && parser->num_ibs) {
		struct amdgpu_job *job;
		struct amdgpu_ring *ring = parser->ibs->ring;

		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
		if (!job) {
			/* take the common exit so the reservation is backed
			 * off and the exclusive lock is released */
			r = -ENOMEM;
			goto out;
		}
		job->base.sched = &ring->sched;
		job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
		job->adev = parser->adev;
		job->ibs = parser->ibs;
		job->num_ibs = parser->num_ibs;
		job->base.owner = parser->filp;
		mutex_init(&job->job_lock);
		if (job->ibs[job->num_ibs - 1].user) {
			memcpy(&job->uf, &parser->uf,
			       sizeof(struct amdgpu_user_fence));
			job->ibs[job->num_ibs - 1].user = &job->uf;
		}

		job->free_job = amdgpu_cs_free_job;
		mutex_lock(&job->job_lock);
		r = amd_sched_entity_push_job(&job->base);
		if (r) {
			mutex_unlock(&job->job_lock);
			amdgpu_cs_free_job(job);
			kfree(job);
			goto out;
		}
		cs->out.handle =
			amdgpu_ctx_add_fence(parser->ctx, ring,
					     &job->base.s_fence->base);
		parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle;

		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    &job->base.s_fence->base);

		mutex_unlock(&job->job_lock);
		amdgpu_cs_parser_fini_late(parser);
		up_read(&adev->exclusive_lock);
		return 0;
	}

	cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
out:
	amdgpu_cs_parser_fini(parser, r, reserved_buffers);
	up_read(&adev->exclusive_lock);
	r = amdgpu_cs_handle_lockup(adev, r);
	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	struct amdgpu_device *adev = dev->dev_private;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct amdgpu_ring *ring = NULL;
	struct amdgpu_ctx *ctx;
	struct fence *fence;
	long r;

	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
			       wait->in.ring, &ring);
	if (r)
		return r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = fence_wait_timeout(fence, true, timeout);
		fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns the allocation structure when found, NULL
 * otherwise.
 */
struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
		       uint64_t addr, struct amdgpu_bo **bo)
{
	struct amdgpu_bo_list_entry *reloc;
	struct amdgpu_bo_va_mapping *mapping;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(reloc, &parser->validated, tv.head) {
		if (!reloc->bo_va)
			continue;

		list_for_each_entry(mapping, &reloc->bo_va->valids, list) {
			if (mapping->it.start > addr ||
			    addr > mapping->it.last)
				continue;

			*bo = reloc->bo_va->bo;
			return mapping;
		}

		list_for_each_entry(mapping, &reloc->bo_va->invalids, list) {
			if (mapping->it.start > addr ||
			    addr > mapping->it.last)
				continue;

			*bo = reloc->bo_va->bo;
			return mapping;
		}
	}

	return NULL;
}